index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
999,800 | ef9648d46abe5293c5f28d35cdab59f2cc91ec69 | """
2) Crie um classe Agenda que pode armazenar 10 pessoas e seja capas de
realizar as seguintes operações:
. void armazenaPessoa(String nome, int idade, float altura);
. void removePessoa(String nome);
. int buscaPessoa(String nome); // informa em que posição da agenda está a pessoa
. void imprimeAgenda(); // imprime os dados de todas as pessoas da agenda
. void imprimePessoa(int index); // imprime os dados da pessoa que está na posição
"i" da agenda
"""
from secao16_orientacao_a_objetos.exercicios.questao1 import Pessoa
from verificacao import verificar_nome
class Agenda:
    """Address book that stores up to CAPACIDADE people (Pessoa objects).

    Supports storing, removing, searching and printing, per the exercise
    statement reproduced at the top of this file."""

    CAPACIDADE = 10  # the exercise requires room for exactly 10 people

    def __init__(self):
        """Initialise the empty internal list of Pessoa instances."""
        self.__pessoas = []

    def armazena_pessoa(self, nome, idade, altura):
        """Store a new person built from name, age and height.

        Bug fix: the exercise caps the agenda at 10 people, which the
        original never enforced; a full agenda now refuses the insert."""
        if len(self.__pessoas) >= self.CAPACIDADE:
            print("\nAgenda cheia")
            return
        self.__pessoas.append(Pessoa(nome, idade, altura))

    def remove_pessoa(self, nome):
        """Remove every person whose normalised name equals *nome*."""
        if not verificar_nome(nome):
            print(f"\nNome inválido")
            return
        alvo = nome.strip().title()
        restantes = [p for p in self.__pessoas if p.get_nome() != alvo]
        if len(restantes) == len(self.__pessoas):
            print("\nNenhuma pessoa encontrada com esse nome")
        else:
            self.__pessoas = restantes

    def busca_pessoa(self, nome):
        """Return a printable string with the position of the person named
        *nome*, or an error message when absent/invalid (the original
        returned strings rather than ints, and callers print the result,
        so that contract is preserved)."""
        if not verificar_nome(nome):
            return "\nNome inválido"
        alvo = nome.strip().title()
        for posicao, pessoa in enumerate(self.__pessoas):
            if pessoa.get_nome() == alvo:
                return f"\n{posicao}"
        return "\nNenhuma pessoa encontrada com esse nome"

    def imprimi_agenda(self):
        """Print the data of every person currently stored."""
        print()
        for pessoa in self.__pessoas:
            print(f"Nome: {pessoa.get_nome()}; Idade: {pessoa.get_idade()}; Altura: {pessoa.get_altura()}")

    def imprimi_pessoa(self, index):
        """Print the data of the person at position *index*.

        Accepts ints and int-like strings; bool and float are rejected
        explicitly because int() would silently accept bool and a float
        string would raise anyway."""
        try:
            if isinstance(index, bool) or isinstance(index, float):
                raise ValueError
            index = int(index)
            pessoa = self.__pessoas[index]
            print(f"\nNome: {pessoa.get_nome()}; Idade: {pessoa.get_idade()}; Altura: {pessoa.get_altura()}")
        except (ValueError, TypeError):
            print("\nÍndice deve ser um inteiro")
        except IndexError:
            print("\nÍndice informado não existe")
if __name__ == "__main__":
    # Demonstrate the Agenda operations with sample data.
    agenda = Agenda()
    dados = [
        ("Pedro Henrique Gomes Lima", 21, 1.70),
        ("Áquila Rodrigues Menezes", 23, 1.75),
        ("Lucas Ravel Benicio Pinto", 21, 1.72),
        ("Marcos Vitor Bezerra", 29, 1.85),
        ("Rian Marlon Sousa da Silva", 25, 1.76),
        ("Só o Básico", 80, 1.70),
        ("Vitor Emanuel Sampaio Cavalcante", 29, 1.80),
        ("Raffa Muela Mano", 49, 1.90),
    ]
    for nome, idade, altura in dados:
        agenda.armazena_pessoa(nome, idade, altura)
    agenda.imprimi_agenda()
    agenda.remove_pessoa("Só o básico")
    agenda.imprimi_agenda()
    agenda.remove_pessoa("raffa muela mano")
    agenda.imprimi_agenda()
    print(agenda.busca_pessoa("vitor emanuel sampaio cavalcante"))
    agenda.imprimi_pessoa(5)
|
999,801 | e1118acddb52844d9a6464bf136a349228abe485 | from abstract_objects import BonusObject
class ShrinkBonus(BonusObject):
    """Bonus that shrinks a paddle when activated.

    The explicit __init__ that only forwarded (x, y) to BonusObject was
    redundant and has been removed; the inherited constructor behaves
    identically."""

    def activate(self, game, paddle_flag=0):
        """Shrink the paddle selected by *paddle_flag*: 1 or 2 picks the
        corresponding paddle of a two-player game, anything else the
        single-player paddle."""
        if paddle_flag == 1:
            game.paddle1.shrink()
        elif paddle_flag == 2:
            game.paddle2.shrink()
        else:
            game.paddle.shrink()
class FastBallBonus(BonusObject):
    """Bonus that speeds up the ball when activated.

    The redundant pass-through __init__ has been removed; BonusObject's
    constructor is inherited unchanged."""

    def activate(self, game, paddle_flag=0):
        """Accelerate the game ball; *paddle_flag* is ignored."""
        game.ball.accelerate()
class DeathBonus(BonusObject):
    """Bonus that kills the player when activated.

    The redundant pass-through __init__ has been removed; BonusObject's
    constructor is inherited unchanged."""

    def activate(self, game, paddle_flag=0):
        """Kill the current player; *paddle_flag* is ignored."""
        game.kill_player()
|
999,802 | 85afae55a2c026c24e3140057379a0a272370bc1 | # stack abstract data type
#
# 5/9/2021
# @author Jack Hangen
#
# follows last in first out (LIFO)s
class stack:
    """Classic LIFO (last-in, first-out) stack backed by a Python list."""

    datatype = "stack"

    def __init__(self):
        self.stackWorking = []  # underlying storage; end of list is the top
        self.top = -1           # index of the current top element (-1 when empty)

    def push(self, item):
        """Add *item* to the top of the stack."""
        self.stackWorking.append(item)
        self.top = self.top + 1

    def pop(self):
        """Remove and return the top item (IndexError when empty)."""
        # list.pop() removes the last element, which is always the top.
        temp = self.stackWorking.pop()
        self.top = self.top - 1
        return temp

    def peek(self):
        """Return the top item without removing it."""
        return self.stackWorking[self.top]

    def isEmpty(self):
        """Return True when the stack holds no items.

        Bug fix: the original called ``size()`` without ``self``, which
        raised NameError at runtime."""
        return self.size() == 0

    def size(self):
        """Return the number of items on the stack."""
        return len(self.stackWorking)
# exercise the ADT with a handful of pushes and pops
s = stack()
for letter in 'abcde':
    s.push(letter)
print(s.size())
print(s.peek())
print(s.pop())
print(s.size()) |
999,803 | 01a40f126b9a84e550daee97d9e9b5511c531754 | from rest_framework import permissions
from rest_framework.views import APIView
from .models import Contact
from django.core.mail import send_mail
from rest_framework.response import Response
from .serializers import ContactSerializer
class ContactCreateView(APIView):
    """Public (unauthenticated) endpoint that emails a contact-form
    submission to the site owner and persists it as a Contact record."""
    permission_classes = (permissions.AllowAny,)
    serializer_class = ContactSerializer

    def post(self, request, format=None):
        """Send the message by email, save it, and return a JSON status.

        Bug fix: the original used a bare ``except:`` which also swallows
        SystemExit/KeyboardInterrupt; narrowed to Exception."""
        data = self.request.data
        subject = data.get("subject")
        message = "Name: {} \nEmail: {}\n\nMessage:\n{}".format(
            data.get("name"), data.get("email"), data.get("message"))
        try:
            send_mail(subject, message, "aminuolawaleji@gmail.com",
                      ["aminuolawaleji@gmail.com"], fail_silently=False)
            contact = Contact(name=data.get("name"), email=data.get(
                "email"), subject=data.get("subject"), message=data.get("message"))
            contact.save()
            contact_data = ContactSerializer(contact)
            return Response({"success": "Message Sent successfully", "data": contact_data.data})
        except Exception:
            # Covers SMTP failures and database errors alike; the client
            # only receives a generic failure message.
            return Response({"error": "Message failed to send"})
|
999,804 | 02c903d86fe147a3cdb531da347037532127fa53 | '''
BobKaehms_1_4_7: Change pixels in an image.
This example first changes the background,
then mirrors in the X-axis, then the Y-axis
This example uses matplotlib to manipulate the image at the pixel level.
The next iteration will use the PIL Image library
'''
from PIL import Image
import matplotlib.pyplot as plt
import os.path
import numpy as np # "as" lets us use standard abbreviations
'''Read the image data'''
# Get the directory of this python script
directory = os.path.dirname(os.path.abspath(__file__))
# Build an absolute filename from directory + filename
#filename = os.path.join(directory, 'chrysler-top-bw1.jpg')
#filename = os.path.join(directory, 'chrysler-top-rgb-sm.jpg')
filename = os.path.join(directory, 'grads.jpg')
# Read the image data into an array
img = plt.imread(filename)
# NOTE(review): imgdest appears unused in this script — confirm before removing.
imgdest = plt.imread(filename)
fig, ax = plt.subplots(1, 1)
# Image dimensions, used as module-level globals by the functions below.
height = len(img)
width = len(img[0])
print ('width= ',width)
print ('height= ',height)
def changeBG(im, r, g, b, tol):
    '''Replace every pixel whose summed channel values exceed *tol*
    (usually the bright background) with the colour (r, g, b).

    Bug fixes: the replacement was written as [r, b, g] (green and blue
    swapped); dimensions are now taken from the image itself instead of
    module-level globals, so the function works on any image.'''
    for row in range(len(im)):
        for col in range(len(im[0])):
            if sum(im[row][col]) > tol:
                im[row][col] = [r, g, b]
def mirrorImgX(im):
    '''Mirror the left half of the image onto the right half
    (reflection across the vertical centre line).

    Bug fixes: ``range(width/2)`` is a TypeError under Python 3 (float
    argument) — now integer division; dimensions are taken from the
    image itself instead of module-level globals.'''
    rows = len(im)
    cols = len(im[0])
    for row in range(rows):
        for col in range(cols // 2):
            r = im[row][col][0]
            g = im[row][col][1]
            b = im[row][col][2]
            im[row][cols - col - 1][0] = r
            im[row][cols - col - 1][1] = g
            im[row][cols - col - 1][2] = b
def mirrorImgY(im):
    '''Mirror the top half of the image onto the bottom half
    (reflection across the horizontal centre line).

    Bug fixes: ``range(height/2)`` is a TypeError under Python 3 (float
    argument) — now integer division; dimensions are taken from the
    image itself instead of module-level globals.'''
    rows = len(im)
    cols = len(im[0])
    for row in range(rows // 2):
        for col in range(cols):
            r = im[row][col][0]
            g = im[row][col][1]
            b = im[row][col][2]
            im[rows - row - 1][col][0] = r
            im[rows - row - 1][col][1] = g
            im[rows - row - 1][col][2] = b
def pixelateG(im, xr, yr):
    '''Pixelate: for each xr-by-yr tile, sample the colour at the tile
    centre and flood the tile with it.

    Bug fixes: all tile arithmetic now uses integer division (the float
    results were TypeErrors under Python 3); tile centres were computed
    with a -1 offset, so the first tile sampled via a negative index from
    the far edge of the image; fills now index consistently as
    im[row][col] instead of the ndarray-only im[x, y].'''
    xstep = xr // 2
    ystep = yr // 2
    for tile_x in range(len(im[0]) // xr):
        for tile_y in range(len(im) // yr):
            cx = tile_x * xr + xstep  # centre column of this tile
            cy = tile_y * yr + ystep  # centre row of this tile
            r = im[cy][cx][0]
            g = im[cy][cx][1]
            b = im[cy][cx][2]
            for xfill in range(cx - xstep, cx + xstep):
                for yfill in range(cy - ystep, cy + ystep):
                    im[yfill][xfill] = [r, g, b]
#changeBG(img,100,100,255,500)
# Recolour the bright background, then mirror the image in both axes.
changeBG(img,56,148,67,500)
#pixelateG(img,50,50)
mirrorImgY(img)
mirrorImgX(img)
# Show the image data in a subplot
ax.imshow(img, interpolation='none')
# Show the figure on the screen
#print(type(img))
fig.show() |
999,805 | a504456348c575fb7ba16bd5d962f255d6f3d962 | import requests
import re
'''
Scrape the Maoyan movie chart using regular expressions.
1. url: http://maoyan.com/board
2. Grab as much film information as possible.
Analysis:
1. Each film's content is a unit that starts with a <dd> tag.
2. Inside that unit lives all the information for one film.
Approach:
1. Use re to find all the <dd> blocks.
2. For each <dd> found, use re again to pull out the wanted fields.
Three steps overall:
1. Download the page.
2. Extract the content in <dd>-sized units.
3. Extract the details from each individual <dd>.
'''
# 1. Download the page content (a browser User-Agent avoids bot blocking)
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36",
}
url = "https://maoyan.com/board"
# req = request.Request(url, headers=headers)
rsp = requests.get(url, headers=headers)
html = rsp.text
# rsp = request.urlopen(req)
# html = rsp.read().decode()
# print(html)
# '.' matches any character except \n and \r
s = r'<dd>(.*?)</dd>'
# re.S makes '.' also match newlines, so the pattern can span lines
pattern = re.compile(s, re.S)
films = pattern.findall(html)
print(len(films))
# 3. Pull the title attribute out of each film's <div>
for film in films:
    s = r'<div.*?title="(.*?)"'
    pattern = re.compile(s, re.S)
    title = pattern.findall(film)[0]
    print(title)
|
999,806 | b47f52fad0bd88d9b45b47ba61e2e952046400d5 | print("* "*5)
# Five single stars, each indented four spaces, then a row of three stars.
for _ in range(5):
    print(" " * 4 + "*")
print("* " * 3)
"""
* * * * *
*
*
*
*
*
* * *
"""
|
999,807 | 301b6107ba32904de00396c33d88a7fb33419011 | MIN_LENGTH = 10
# Echo the password masked with '*' when it meets the minimum length.
password = input("Enter Password: ")
if len(password) < MIN_LENGTH:
    print("password not long enough")
else:
    print("*" * len(password))
|
999,808 | ddcd0c681c55cfb58adf43a72030a780d5e02bf7 |
# Train and score a Gaussian Naive Bayes classifier on TF-IDF text features.
# Bug fix: sklearn.cross_validation was deprecated in 0.18 and removed in
# 0.20; train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_selection import SelectPercentile, f_classif
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score

import text  # project-local module supplying the corpus and labels

# Hold out 20% of the data for testing, with a fixed seed for reproducibility.
features_train, features_test, labels_train, labels_test = train_test_split(
    text.preload(), text.get_label(), test_size=0.2, random_state=50)

# TF-IDF with sublinear scaling; drop English stopwords and terms present
# in fewer than 30% of documents.
vectorizer = TfidfVectorizer(sublinear_tf=True, min_df=0.3, stop_words='english')
features_train_transformed = vectorizer.fit_transform(features_train)
features_test_transformed = vectorizer.transform(features_test)

# Keep only the top 1% most informative features by ANOVA F-score.
selector = SelectPercentile(f_classif, percentile=1)
selector.fit(features_train_transformed, labels_train)
features_train_transformed = selector.transform(features_train_transformed).toarray()
features_test_transformed = selector.transform(features_test_transformed).toarray()

clf = GaussianNB()
clf.fit(features_train_transformed, labels_train)
prd = clf.predict(features_test_transformed)
accuracy = accuracy_score(labels_test, prd)
print("Accuracy is ", accuracy)
|
999,809 | 0c61efb7f476ab567d4670dc0eb017e64c5d5f8c | """
Question was "Given a pattern and a string input - find if the string follows the same pattern and return 0 or 1.
Examples:
1) Pattern : "abba", input: "redbluebluered" should return 1.
2) Pattern: "aaaa", input: "asdasdasdasd" should return 1.
3) Pattern: "aabb", input: "xyzabcxzyabc" should return 0.
"""
from collections import defaultdict
class PatternMatching(object):
    """Check whether a string follows a letter pattern, where each pattern
    letter stands for some fixed non-empty substring (e.g. 'redbluebluered'
    follows 'abba')."""

    def match(self, string, pattern, dictionary=None):
        """Return True if *string* follows *pattern*.

        *dictionary* maps pattern letters to already-bound substrings;
        callers normally omit it.

        Bug fixes: the mutable default argument ``{}`` is replaced with the
        None sentinel; Python 2's ``xrange`` is replaced with ``range``; and
        candidate bindings now include the whole remaining string — the
        original ``range(1, len(string))`` stopped one short, so a pattern
        letter could never bind the final tail (e.g. match('red', 'a')
        wrongly returned False).
        """
        if dictionary is None:
            dictionary = {}
        if len(string) == 0 and len(pattern) == 0:
            return True
        if len(string) == 0 or len(pattern) == 0:
            return False
        if pattern[0] in dictionary:
            # Letter already bound: the string must start with its substring.
            char_str = dictionary[pattern[0]]
            if string[:len(char_str)] == char_str:
                return self.match(string[len(char_str):], pattern[1:], dictionary)
        else:
            # Try binding the letter to every non-empty prefix, including
            # the full remaining string.
            for i in range(1, len(string) + 1):
                aux_dict = dictionary.copy()
                aux_dict[pattern[0]] = string[:i]
                if self.match(string[i:], pattern[1:], aux_dict):
                    return True
        return False
import unittest
class TestPatternMatching(unittest.TestCase):
    """Unit tests covering the three sample cases from the problem statement."""

    def test_match(self):
        matcher = PatternMatching()
        self.assertTrue(matcher.match('redbluebluered', 'abba'))
        matcher = PatternMatching()
        self.assertTrue(matcher.match('asdasdasdasd', 'aaaa'))
        matcher = PatternMatching()
        self.assertFalse(matcher.match('xyzabcxzyabc', 'aabb'))
if __name__ == '__main__':
    # Run the test suite when the file is executed directly.
    unittest.main()
|
999,810 | 951cc098e31ad03329157eb74b7c54d3079c7979 | # Question 8
# Level 2
#
# Question:
# Write a program that accepts a comma separated sequence of words as input and prints the words in a
# comma-separated sequence after sorting them alphabetically.
#
# Suppose the following input is supplied to the program:
# without,hello,bag,world
# Then, the output should be:
# bag,hello,without,world
#
# Hints:
# In case of input data being supplied to the question, it should be assumed to be a console input.
def print_sorted_words():
    """Read comma-separated words from the user and print them sorted,
    re-joined with commas."""
    sorted_words = sort_words(get_words())
    print(','.join(sorted_words))
def get_words():
    """Prompt for a comma-separated line and return it as a list of words."""
    return input('Enter the words to be sorted: ').split(',')
def sort_words(words):
    """Return the words sorted alphabetically.

    Improvement: the original sorted the caller's list in place and
    returned it; this version returns a new sorted list and leaves the
    argument untouched (callers that only use the return value, such as
    print_sorted_words, are unaffected).
    """
    return sorted(words)
if __name__ == '__main__':
print_sorted_words() |
999,811 | 6d4e4c16078b1c37b4cc9b5212cee79cf11e8bc4 | """
snapshot
========
This module implements the :class:`~pynbody.snapshot.SimSnap` class which manages and stores snapshot data.
It also implements the :class:`~pynbody.snapshot.SubSnap` class (and relatives) which
represent different views of an existing :class:`~pynbody.snapshot.SimSnap`.
"""
import copy
import gc
import hashlib
import logging
import re
import threading
import traceback
import warnings
import weakref
from functools import reduce
import numpy as np
from .. import array, config, dependencytracker, family, filt, simdict, units, util
from ..units import has_units
from .snapshot_util import ContainerWithPhysicalUnitsOption
logger = logging.getLogger('pynbody.snapshot')
class SimSnap(ContainerWithPhysicalUnitsOption):
"""The class for managing simulation snapshots.
For most purposes, SimSnaps should be initialized through
:func:`~pynbody.load` or :func:`~pynbody.new`.
For a basic tutorial explaining how to load a file as a SimSnap
see :doc:`tutorials/data_access`.
*Getting arrays or subsnaps*
Once a :class:`SimSnap` object ``f`` is instantiated, it can
be used in various ways. The most common operation is to
access something with the code ``f[x]``. Depending on the
type of ``x``, various behaviours result:
- If ``x`` is a string, the array named by ``x`` is returned. If
no such array exists, the framework attempts to load or
derive an array of that name (in that order). If this is
unsuccessful, a `KeyError` is raised.
- If ``x`` is a python `slice` (e.g. ``f[5:100:3]``) or an array of
integers (e.g. ``f[[1,5,100,200]]``) a subsnap containing only the
mentioned particles is returned.
See :doc:`tutorials/data_access` for more information.
- If ``x`` is a numpy array of booleans, it is interpreted as a mask and
a subsnap containing only those particles for which x[i] is True.
This means that f[condition] is a shortcut for f[np.where(condition)].
- If ``x`` is a :class:`pynbody.filt.Filter` object, a subsnap
containing only the particles which pass the filter condition
is returned.
See :doc:`tutorials/data_access` for more information.
- If ``x`` is a :class:`pynbody.family.Family` object, a subsnap
containing only the particles in that family is returned. In practice
for most code it is more convenient to write e.g. ``f.dm`` in place of
the equivalent syntax f[pynbody.family.dm].
*Getting metadata*
The property `filename` gives the filename of a snapshot.
There is also a `properties` dictionary which
contains further metadata about the snapshot. See :ref:`subsnaps`.
"""
# Registries populated elsewhere in the framework (keyed by class, so
# subclasses can contribute their own entries via __mro__ lookups).
_derived_quantity_registry = {}
_decorator_registry = {}
_loadable_keys_registry = {}
# Attribute names routed through the persistent-object store shared by a
# SimSnap and its equivalent SubSnaps (see __getattr__/__setattr__).
_persistent = ["kdtree", "_immediate_cache", "_kdtree_derived_smoothing"]
# The following will be objects common to a SimSnap and all its SubSnaps
_inherited = ["_immediate_cache_lock",
              "lazy_off", "lazy_derive_off", "lazy_load_off", "auto_propagate_off",
              "properties", "_derived_array_names", "_family_derived_array_names",
              "_dependency_tracker", "immediate_mode", "delay_promotion"]
# These 3D arrays get four views automatically created, one reflecting the
# full Nx3 data, the others reflecting Nx1 slices of it
#
# TO DO: This should probably be read in from a config file
_split_arrays = {'pos': ('x', 'y', 'z'),
                 'vel': ('vx', 'vy', 'vz')}
@classmethod
def _array_name_1D_to_ND(self, name):
    """Map a 1D array name to a corresponding 3D array name, or return None
    if no such mapping is possible.
    e.g. 'vy' -> 'vel'; 'acc_z' -> 'acc'; 'mass' -> None"""
    for parent, components in self._split_arrays.items():
        if name in components:
            return parent
    suffix_match = re.findall("^(.+)_[xyz]$", name)
    if len(suffix_match) == 1 and suffix_match[0] not in self._split_arrays:
        return suffix_match[0]
    return None
@classmethod
def _array_name_ND_to_1D(self, array_name):
    """Give the 1D slice names derived from a 3D array name.

    No check is made that *array_name* really is 3D; the slice names are
    produced on that assumption. This is the deliberate distinction from
    the reverse mapping implemented by _array_name_1D_to_ND."""
    try:
        return self._split_arrays[array_name]
    except KeyError:
        return [array_name + "_" + axis for axis in ('x', 'y', 'z')]
def _array_name_implies_ND_slice(self, array_name):
    """Returns True if, at best guess, the array name corresponds to a 1D slice
    of a ND array, on the basis of names alone.
    This routine first looks at special cases (pos -> x,y,z for example),
    then looks for generic names such as acc_x - however this would only be
    considered a "match" for a ND subslice if 'acc' is in loadable_keys().
    """
    # Special-cased component names ('x', 'vy', ...) always imply a slice.
    for v in self._split_arrays.values():
        if array_name in v:
            return True
    generic_match = re.findall("^(.+)_[xyz]$", array_name)
    loadable_keys = self.loadable_keys()
    keys = list(self.keys())
    # A generic '<base>_x' name only counts when '<base>' is actually
    # loadable or already in memory.
    if len(generic_match) == 1 and generic_match[0] not in self._split_arrays:
        return generic_match[0] in loadable_keys or generic_match[0] in keys
    return False
def __init__(self):
    """Initialize an empty, zero-length SimSnap.
    For most purposes SimSnaps should instead be initialized through
    :func:`~pynbody.load` or :func:`~pynbody.new`.
    """
    super().__init__()
    self._arrays = {}            # simulation-level arrays by name
    self._num_particles = 0
    self._family_slice = {}      # family -> slice into the particle range
    self._family_arrays = {}     # family-level arrays: name -> {family: array}
    self._derived_array_names = []
    self._family_derived_array_names = {}
    for i in family._registry:
        self._family_derived_array_names[i] = []
    self._dependency_tracker = dependencytracker.DependencyTracker()
    self._immediate_cache_lock = threading.RLock()
    self._persistent_objects = {}
    self._unifamily = None
    # If True, when new arrays are created they are in shared memory by
    # default
    self._shared_arrays = False
    self.lazy_off = util.ExecutionControl()
    # use 'with lazy_off :' blocks to disable all hidden/lazy behaviour
    self.lazy_derive_off = util.ExecutionControl()
    # use 'with lazy_derive_off : ' blocks to disable lazy-derivation
    self.lazy_load_off = util.ExecutionControl()
    # use 'with lazy_load_off : ' blocks to disable lazy-loading
    self.auto_propagate_off = util.ExecutionControl()
    # use 'with auto_propagate_off : ' blocks to disable auto-flagging changes
    # (i.e. prevent lazy-evaluated arrays from auto-re-evaluating when their
    # dependencies change)
    self.immediate_mode = util.ExecutionControl()
    # use 'with immediate_mode: ' to always return actual numpy arrays, rather
    # than IndexedSubArrays which point to sub-parts of numpy arrays
    self.immediate_mode.on_exit = lambda: self._clear_immediate_mode()
    self.delay_promotion = util.ExecutionControl()
    # use 'with delay_promotion: ' to prevent any family arrays being promoted
    # into simulation arrays (which can cause confusion because the array returned
    # from create_family_array might have properties you don't expect)
    self.delay_promotion.on_exit = lambda: self._delayed_array_promotions(
    )
    self.__delayed_promotions = []
    self.properties = simdict.SimDict({})
    self._file_units_system = []
############################################
# THE BASICS: SIMPLE INFORMATION
############################################
@property
def filename(self):
    """Path of the file this snapshot was loaded from."""
    return self._filename
def __len__(self):
    """Total number of particles in this snapshot/view."""
    return self._num_particles
def __repr__(self):
    """Concise representation including the filename when one is set."""
    if self._filename != "":
        return f"<SimSnap \"{self._filename}\" len={len(self)}>"
    return f"<SimSnap len={len(self)}>"
def families(self):
    """Return the particle families which have representatives in this SimSnap.
    The families are ordered by their appearance in the snapshot."""
    present = []
    first_index = {}
    for fam in family._registry:
        sl = self._get_family_slice(fam)
        if sl.start != sl.stop:
            present.append(fam)
            first_index[fam] = sl.start
    present.sort(key=first_index.__getitem__)
    return present
############################################
# THE BASICS: GETTING AND SETTING
############################################
def __getitem__(self, i):
    """Return either a specific array or a subview of this simulation. See
    the class documentation (:class:`SimSnap`) for more information."""
    if isinstance(i, str):
        # String -> named array, loading/deriving it lazily if necessary.
        return self._get_array_with_lazy_actions(i)
    elif isinstance(i, slice):
        return SubSnap(self, i)
    elif isinstance(i, family.Family):
        return FamilySubSnap(self, i)
    elif isinstance(i, np.ndarray) and np.issubdtype(np.bool_, i.dtype):
        # Boolean mask -> subsnap of the particles where the mask is True.
        return self._get_subsnap_from_mask_array(i)
    elif isinstance(i, (list, tuple, np.ndarray, filt.Filter)):
        return IndexedSubSnap(self, i)
    elif isinstance(i, int) or isinstance(i, np.int32) or isinstance(i, np.int64):
        # A single integer selects a one-particle subsnap.
        return IndexedSubSnap(self, (i,))
    raise TypeError
def __setitem__(self, name, item):
    """Set the contents of an array in this snapshot.

    *name* may also be a (name, index) pair, in which case only the given
    sub-index of the array is assigned."""
    if self.is_derived_array(name) and not self.auto_propagate_off:
        raise RuntimeError("Derived array is not writable")
    if isinstance(name, tuple) or isinstance(name, list):
        index = name[1]
        name = name[0]
    else:
        index = None
    self._assert_not_family_array(name)
    if isinstance(item, array.SimArray):
        ax = item
    else:
        ax = np.asanyarray(item).view(array.SimArray)
    if name not in list(self.keys()):
        # Array needs to be created. We do this through the
        # private _create_array method, so that if we are operating
        # within a particle-specific subview we automatically create
        # a particle-specific array
        try:
            ndim = len(ax[0])
        except TypeError:
            ndim = 1
        except IndexError:
            ndim = ax.shape[-1] if len(ax.shape) > 1 else 1
        # The dtype will be the same as an existing family array if
        # one exists, or the dtype of the source array we are copying
        dtype = self._get_preferred_dtype(name)
        if dtype is None:
            dtype = getattr(item, 'dtype', None)
        self._create_array(name, ndim, dtype=dtype)
    # Copy in contents if the contents isn't actually pointing to
    # the same data (which will be the case following operations like
    # += etc, since these call __setitem__).
    self._set_array(name, ax, index)
def __delitem__(self, name):
    """Delete the named array, whether held at simulation or family level."""
    if name in self._family_arrays:
        # mustn't have simulation-level array of this name
        assert name not in self._arrays
        del self._family_arrays[name]
        # Also forget any record that the array was family-derived.
        for v in self._family_derived_array_names.values():
            if name in v:
                del v[v.index(name)]
    else:
        del self._arrays[name]
        if name in self._derived_array_names:
            del self._derived_array_names[
                self._derived_array_names.index(name)]
def _get_subsnap_from_mask_array(self, mask_array):
    """Return the subsnap selected by a 1D boolean mask over the particles."""
    if len(mask_array.shape) > 1 or mask_array.shape[0] > len(self):
        raise ValueError("Incorrect shape for masking array")
    return self[np.where(mask_array)]
def _get_array_with_lazy_actions(self, name):
    """Fetch array *name*, lazily loading or deriving it when absent."""
    if name in list(self.keys()):
        self._dependency_tracker.touching(name)
        # Ensure that any underlying dependencies on 1D positions and velocities
        # are forwarded to 3D dependencies as well
        nd_name = self._array_name_1D_to_ND(name)
        if nd_name is not None:
            self._dependency_tracker.touching(nd_name)
        return self._get_array(name)
    # Not in memory: resolve any family-level array of the same name, then
    # attempt lazy-load and lazy-derive (each individually suppressible).
    with self._dependency_tracker.calculating(name):
        self.__resolve_obscuring_family_array(name)
        if not self.lazy_off:
            if not self.lazy_load_off:
                self.__load_if_required(name)
            if not self.lazy_derive_off:
                self.__derive_if_required(name)
    return self._get_array(name)
def __load_if_required(self, name):
    """Lazy-load array *name* if it is not already in memory.

    OSError from the loader is deliberately swallowed: a failed load
    simply leaves the array absent (derivation may still provide it)."""
    if name not in list(self.keys()):
        try:
            self.__load_array_and_perform_postprocessing(name)
        except OSError:
            pass
def __derive_if_required(self, name):
    """Lazy-derive array *name* if it is not already in memory."""
    if name not in list(self.keys()):
        self._derive_array(name)
def __resolve_obscuring_family_array(self, name):
    """Try to clear a family-level array that shadows simulation-level *name*.

    First drops it if it was derived (it can be re-derived later), then
    tries to load it for the remaining families; if it still only exists
    for some families, raise a KeyError explaining the situation."""
    if name in self.family_keys():
        self.__remove_family_array_if_derived(name)
    if name in self.family_keys():
        self.__load_remaining_families_if_loadable(name)
    if name in self.family_keys():
        in_fam, out_fam = self.__get_included_and_excluded_families_for_array(name)
        raise KeyError("""{!r} is a family-level array for {}. To use it over the whole simulation you need either to delete it first, or create it separately for {}.""".format(
            name, in_fam, out_fam))
def __get_included_and_excluded_families_for_array(self, name):
    """Split this snap's families into those that have array *name* and
    those that do not, returned as (included, excluded)."""
    in_fam = []
    out_fam = []
    for fam in self.families():
        target = in_fam if name in self[fam] else out_fam
        target.append(fam)
    return in_fam, out_fam
def __remove_family_array_if_derived(self, name):
    """Delete *name* from the ancestor snapshot if it is a derived array
    (it can always be re-derived on demand)."""
    if self.is_derived_array(name):
        del self.ancestor[name]
def __load_remaining_families_if_loadable(self, name):
    """Attempt to load *name* for the families that don't yet have it.

    A loader OSError aborts quietly, leaving whatever was loaded so far."""
    in_fam, out_fam = self.__get_included_and_excluded_families_for_array(name)
    try:
        for fam in out_fam:
            self.__load_array_and_perform_postprocessing(name, fam=fam)
    except OSError:
        pass
def _get_persist(self, hash, name):
    """Return the persistent object stored under (*hash*, *name*), or None.

    Bug fix: the original used a bare ``except:``, which would also
    swallow KeyboardInterrupt/SystemExit; only a missing key is expected
    here."""
    try:
        return self._persistent_objects[hash][name]
    except KeyError:
        return None
def _set_persist(self, hash, name, obj=None):
    """Store *obj* in the persistent store under (*hash*, *name*),
    creating the per-hash slot on first use."""
    self._persistent_objects.setdefault(hash, {})[name] = obj
def _clear_immediate_mode(self):
    """Drop any cached immediate-mode arrays from the persistent store."""
    for per_view in self._persistent_objects.values():
        per_view.pop('_immediate_cache', None)
def __getattr__(self, name):
    """This function overrides the behaviour of f.X where f is a SimSnap object.
    It serves two purposes; first, it provides the family-handling behaviour
    which makes f.dm equivalent to f[pynbody.family.dm]. Second, it implements
    persistent objects -- properties which are shared between two equivalent SubSnaps."""
    if name in SimSnap._persistent:
        obj = self.ancestor._get_persist(self._inclusion_hash, name)
        # Falsy stored values fall through to the family/attribute lookup below.
        if obj:
            return obj
    try:
        return self[family.get_family(name)]
    except ValueError:
        # Not a family name either -> genuine missing attribute.
        pass
    raise AttributeError("{!r} object has no attribute {!r}".format(
        type(self).__name__, name))
def __setattr__(self, name, val):
    """This function overrides the behaviour of setting f.X where f is a SimSnap object.
    It serves two purposes; first it prevents overwriting of family names (so you can't
    write to, for instance, f.dm). Second, it implements persistent objects -- properties
    which are shared between two equivalent SubSnaps."""
    if name in family.family_names():
        raise AttributeError("Cannot assign family name " + name)
    if name in SimSnap._persistent:
        # Persistent attributes are stored on the ancestor, keyed by view.
        self.ancestor._set_persist(self._inclusion_hash, name, val)
    else:
        return object.__setattr__(self, name, val)
def __delattr__(self, name):
    """This function allows persistent objects (as shared between two equivalent SubSnaps)
    to be permanently deleted."""
    if name in SimSnap._persistent:
        obj = self.ancestor._get_persist(self._inclusion_hash, name)
        if obj:
            # Clear the shared copy (set to None rather than removing the slot).
            self.ancestor._set_persist(self._inclusion_hash, name, None)
        # Also remove any instance-level shadow, tolerating its absence.
        try:
            object.__delattr__(self, name)
        except AttributeError:
            pass
        return
    object.__delattr__(self, name)
############################################
# DICTIONARY EMULATION FUNCTIONS
############################################
def keys(self):
    """Return the names of the arrays directly accessible in memory."""
    return [*self._arrays]
def has_key(self, name):
    """Return True if the array *name* is accessible (in memory)."""
    return name in self.keys()
def values(self):
    """Return a list of the actual arrays in memory."""
    return [self[name] for name in self.keys()]
def items(self):
    """Return (name, array) tuples for every array in memory."""
    return [(name, self[name]) for name in self.keys()]
def get(self, key, alternative=None):
    """Standard python get method, returns self[key] if
    key in self else alternative"""
    try:
        return self[key]
    except KeyError:
        return alternative
def iterkeys(self):
    """Yield the in-memory array names (from a snapshot of keys, so the
    underlying dict may change during iteration)."""
    yield from list(self.keys())
__iter__ = iterkeys  # iterating a SimSnap iterates its array names
def itervalues(self):
    """Yield each in-memory array in key order."""
    for k in self:
        yield self[k]
def iteritems(self):
    """Yield (name, array) pairs for each in-memory array."""
    for k in self:
        yield (k, self[k])
############################################
# DICTIONARY-LIKE FUNCTIONS
# (not in the normal interface for dictionaries,
# but serving similar purposes)
############################################
def has_family_key(self, name):
    """Returns True if the array name is accessible (in memory) for at least one family"""
    return name in self.family_keys()
def loadable_keys(self, fam=None):
    """Returns a list of arrays which can be lazy-loaded from
    an auxiliary file.

    The base implementation has nothing loadable; presumably file-format
    subclasses override this — confirm against the loader classes."""
    return []
def derivable_keys(self):
    """Return the names of arrays that can be lazy-evaluated, collected
    from the derived-quantity registry across this class's MRO."""
    names = []
    for klass in type(self).__mro__:
        registry = self._derived_quantity_registry.get(klass)
        if registry is not None:
            names.extend(registry.keys())
    return names
def all_keys(self):
    """Returns a list of all arrays that can be either lazy-evaluated
    or lazy loaded from an auxiliary file."""
    return self.derivable_keys() + self.loadable_keys()
def family_keys(self, fam=None):
    """Return the names of arrays that exist only at family level.

    When *fam* is given, restrict to keys defined for that family
    (equivalent to ``self.fam.keys()``)."""
    if fam is None:
        return list(self._family_arrays)
    return [name for name, fams in self._family_arrays.items() if fam in fams]
############################################
# ANCESTRY FUNCTIONS
############################################
def is_ancestor(self, other):
    """Return True if *other* is self or a subview derived from self."""
    if other is self:
        return True
    if hasattr(other, 'base'):
        # Walk up other's view chain until we hit self or the root.
        return self.is_ancestor(other.base)
    return False
def is_descendant(self, other):
    """Returns true if self is a subview of other"""
    return other.is_ancestor(self)
@property
def ancestor(self):
    """The root SimSnap this view derives from (self when not a view)."""
    return self.base.ancestor if hasattr(self, 'base') else self
def get_index_list(self, relative_to, of_particles=None):
    """Get a list specifying the index of the particles in this view relative
    to the ancestor *relative_to*, such that relative_to[get_index_list(relative_to)]==self."""
    # Implementation for base snapshot: only valid relative to itself,
    # in which case the identity mapping is returned.
    if self is not relative_to:
        raise RuntimeError("Not a descendant of the specified simulation")
    if of_particles is None:
        of_particles = np.arange(len(self))
    return of_particles
############################################
# SET-LIKE OPERATIONS FOR SUBSNAPS
############################################
def intersect(self, other, op=np.intersect1d):
    """Returns the set intersection of this simulation view with another view
    of the same simulation.

    *op* is the numpy set operation applied to the two index lists; union()
    and setdiff() reuse this method with different operations."""
    anc = self.ancestor
    if not anc.is_ancestor(other):
        raise RuntimeError("Parentage is not suitable")
    a = self.get_index_list(anc)
    b = other.get_index_list(anc)
    return anc[op(a, b)]
def union(self, other):
    """Returns the set union of this simulation view with another view
    of the same simulation"""
    return self.intersect(other, op=np.union1d)
def setdiff(self, other):
    """Returns the set difference of this simulation view with another view
    of the same simulation"""
    return self.intersect(other, op=np.setdiff1d)
############################################
# UNIT MANIPULATION
############################################
def conversion_context(self):
    """Return a dictionary containing a (scalefactor) and h
    (Hubble constant in canonical units) for this snapshot, ready for
    passing into unit conversion functions. Keys absent from
    ``self.properties`` are simply omitted."""
    props = self.properties
    return {key: props[key] for key in ('a', 'h') if key in props}
def _override_units_system(self):
    """Look for and process a text file with a custom units system for this snapshot.

    The text file should be named <filename>.units and contain unit
    specifications, one-per-line, e.g.

        pos: kpc a
        vel: km s^-1
        mass: Msol

    Lines starting with '#' are ignored. If the file is absent this is a
    silent no-op; a non-comment line without a ':' raises OSError. This
    override functionality needs to be explicitly called by a subclass
    after it has initialised its best guess at the units.
    """
    units_filename = self.filename + ".units"
    try:
        # Use a context manager so the handle is closed even if parsing
        # below raises (the previous implementation leaked the file handle).
        with open(units_filename) as f:
            lines = f.readlines()
    except OSError:
        # no .units override file -- keep the current best-guess units
        return

    # translate the file's short names into set_units_system keyword names
    name_mapping = {'pos': 'distance', 'vel': 'velocity'}
    units_dict = {}
    for line in lines:
        if (not line.startswith("#")):
            if ":" not in line:
                raise OSError("Unknown format for units file %r" % units_filename)
            else:
                t, u = list(map(str.strip, line.split(":")))
                t = name_mapping.get(t, t)
                units_dict[t] = u
    self.set_units_system(**units_dict)
def set_units_system(self, velocity=None, distance=None, mass=None, temperature=None):
    """Set the unit system for the snapshot by specifying any or
    all of `velocity`, `distance`, `mass` and `temperature`
    units. The units can be given as strings or as pynbody `Unit`
    objects.

    If any of the units are not specified and a previous
    `file_units_system` does not exist, the defaults are used.
    All known arrays are converted in place to the new unit system.
    """
    import configparser

    from .. import config_parser

    # if the units system doesn't exist (if this is a new snapshot), create
    # one
    if len(self._file_units_system) < 3:
        warnings.warn("Previous unit system incomplete -- using defaults")
        self._file_units_system = [
            units.Unit(x) for x in ('G', '1 kpc', '1e10 Msol')]
    else:
        # we want to change the base units -- so convert to original
        # units first and then set all arrays to new unit system
        self.original_units()

    # if any are missing, work them out from what we already have:
    if velocity is None:
        velocity = self.infer_original_units('km s^-1')

    if distance is None:
        distance = self.infer_original_units('kpc')

    if mass is None:
        mass = self.infer_original_units('Msol')

    if temperature is None:
        temperature = self.infer_original_units('K')

    new_units = []
    for x in [velocity, distance, mass, temperature]:
        if x is not None:
            new_units.append(units.Unit(x))

    self._file_units_system = new_units

    # set new units for all known arrays
    for arr_name in list(self.keys()):
        arr = self[arr_name]
        # if the array has units, then use the current units, else
        # check if a default dimension for this array exists in
        # the configuration
        if arr.units != units.NoUnit():
            ref_unit = arr.units
        else:
            try:
                ref_unit = config_parser.get(
                    'default-array-dimensions', arr_name)
            except configparser.NoOptionError:
                # give up -- no applicable dimension found
                continue

        arr.set_units_like(ref_unit)
def original_units(self):
    """Convert all arrays' units back to those of the original file."""
    # Work out the on-disk unit for each base dimension, then hand the
    # whole conversion off to physical_units (non-persistently, so future
    # loads are unaffected).
    file_units = {dim: self.infer_original_units(spec)
                  for dim, spec in (('distance', 'km'),
                                    ('velocity', 'km s^-1'),
                                    ('mass', 'Msol'))}
    self.physical_units(persistent=False, **file_units)
def infer_original_units(self, dimensions):
    """Given a unit (or string) *dimensions*, return a unit with the same
    physical dimensions expressed in the unit schema of the current file."""
    target = units.Unit(dimensions)
    # project the requested dimensions onto the file's base units plus the
    # cosmological scalefactor 'a' and Hubble parameter 'h'
    powers = target.dimensional_project(self._file_units_system + ["a", "h"])
    factors = [base ** p for base, p in zip(self._file_units_system, powers)]
    return reduce(lambda u, v: u * v, factors)
def _default_units_for(self, array_name):
    """Attempt to construct and return the units for the named array on
    disk, using the configured default dimensions for known array names and
    translating them into the original file's unit system
    (via infer_original_units). Returns None if no default is known."""
    key = self._array_name_1D_to_ND(array_name) or array_name
    default = units._default_units.get(key, None)
    return None if default is None else self.infer_original_units(default)
def halos(self, *args, **kwargs):
    """Tries to instantiate a halo catalogue object for the given
    snapshot, using the first available method (as defined in the
    configuration files).

    All positional and keyword arguments are forwarded to the halo class.
    Raises RuntimeError when no configured class can load or run a
    catalogue for this snapshot."""
    from .. import halo

    # First preference: classes that can load a pre-existing catalogue.
    for c in config['halo-class-priority']:
        try:
            if c._can_load(self, *args, **kwargs):
                return c(self, *args, **kwargs)
        except TypeError:
            # class signature incompatible with the given args -- try next
            pass

    # Second preference: classes that can generate a catalogue on the fly.
    for c in config['halo-class-priority']:
        try:
            if c._can_run(self, *args, **kwargs):
                return c(self, *args, **kwargs)
        except TypeError:
            pass

    raise RuntimeError("No halo catalogue found for %r" % str(self))
def bridge(self, other):
    """Tries to construct a bridge function between this SimSnap
    and another one.

    This function calls :func:`pynbody.bridge.bridge_factory`. For
    more information see :ref:`bridge-tutorial`, or the reference
    documentation for :py:mod:`pynbody.bridge`.
    """
    from .. import bridge
    return bridge.bridge_factory(self, other)
def load_copy(self):
    """Tries to load a copy of this snapshot, using partial loading to select
    only a subset of particles corresponding to a given SubSnap.

    Raises NotImplementedError when the ancestor was itself partial-loaded,
    since the index list would then not refer to the full file."""
    if getattr(self.ancestor, 'partial_load', False):
        raise NotImplementedError("Cannot load a copy of data that was itself partial-loaded")
    # 'load' is the module-level loader; 'take' restricts it to this view's particles.
    return load(self.ancestor.filename, take=self.get_index_list(self.ancestor))
############################################
# HELPER FUNCTIONS FOR LAZY LOADING
############################################
def _load_array(self, array_name, fam=None):
    """This function is called by the framework to load an array
    from disk and should be overloaded by child classes.

    If *fam* is not None, the array should be loaded only for the
    specified family.

    The base implementation always raises OSError, signalling that no
    lazy-loading back-end is available.
    """
    raise OSError("No lazy-loading implemented")
def __load_array_and_perform_postprocessing(self, array_name, fam=None):
    """Calls _load_array for the appropriate subclass, but also attempts to convert
    units of anything that gets loaded and automatically loads the whole ND array
    if this is a subview of an ND array"""
    array_name = self._array_name_1D_to_ND(array_name) or array_name

    # keep a record of every array in existence before load (in case it
    # triggers loading more than we expected, e.g. coupled pos/vel fields
    # etc)
    anc = self.ancestor

    pre_keys = set(anc.keys())

    # the following function builds a dictionary mapping families to a set of the
    # named arrays defined for them.
    fk = lambda: {fami: {k for k in list(anc._family_arrays.keys()) if fami in anc._family_arrays[k]}
                  for fami in family._registry}
    pre_fam_keys = fk()

    with self.delay_promotion:
        # delayed promotion is required here, otherwise units get messed up when
        # a simulation array gets promoted mid-way through our loading process.
        #
        # see the gadget unit test, test_unit_persistence
        if fam is not None:
            self._load_array(array_name, fam)
        else:
            try:
                self._load_array(array_name, fam)
            except OSError:
                # whole-snapshot load unsupported; fall back to loading
                # family-by-family
                for fam_x in self.families():
                    self._load_array(array_name, fam_x)

        # Find out what was loaded
        new_keys = set(anc.keys()) - pre_keys
        new_fam_keys = fk()
        for fami in new_fam_keys:
            new_fam_keys[fami] = new_fam_keys[fami] - pre_fam_keys[fami]

        # If the loader hasn't given units already, try to determine the defaults
        # Then, attempt to convert what was loaded into friendly units
        for v in new_keys:
            if not units.has_units(anc[v]):
                anc[v].units = anc._default_units_for(v)
            anc._autoconvert_array_unit(anc[v])
        for f, vals in new_fam_keys.items():
            for v in vals:
                if not units.has_units(anc[f][v]):
                    anc[f][v].units = anc._default_units_for(v)
                anc._autoconvert_array_unit(anc[f][v])
############################################
# VECTOR TRANSFORMATIONS OF THE SNAPSHOT
############################################
def transform(self, matrix):
    """Apply the given 3x3 transformation matrix to the snapshot, returning a
    transformation object (see :mod:`pynbody.transformation`) that can be
    reverted or used as a context manager."""
    from .. import transformation
    return transformation.transform(self, matrix)
def _transform(self, matrix):
    """Transforms the snapshot according to the 3x3 matrix given.

    Every rank-2 array with three columns (i.e. a per-particle 3-vector
    quantity) is rotated in place; all other arrays are left untouched."""
    for name in list(self.keys()):
        data = self[name]
        if len(data.shape) == 2 and data.shape[1] == 3:
            self[name] = np.dot(matrix, data.transpose()).transpose()
def rotate_x(self, angle):
    """Rotates the snapshot about the current x-axis by 'angle' degrees."""
    rad = angle * np.pi / 180
    c, s = np.cos(rad), np.sin(rad)
    return self.transform(np.matrix([[1, 0, 0],
                                     [0, c, -s],
                                     [0, s, c]]))
def rotate_y(self, angle):
    """Rotates the snapshot about the current y-axis by 'angle' degrees."""
    rad = angle * np.pi / 180
    c, s = np.cos(rad), np.sin(rad)
    return self.transform(np.matrix([[c, 0, s],
                                     [0, 1, 0],
                                     [-s, 0, c]]))
def rotate_z(self, angle):
    """Rotates the snapshot about the current z-axis by 'angle' degrees."""
    rad = angle * np.pi / 180
    c, s = np.cos(rad), np.sin(rad)
    return self.transform(np.matrix([[c, -s, 0],
                                     [s, c, 0],
                                     [0, 0, 1]]))
def wrap(self, boxsize=None, convention='center'):
    """Wraps the positions of the particles in the box to lie between
    [-boxsize/2, boxsize/2] ('center' convention) or [0, boxsize]
    ('upper' convention).

    If no boxsize is specified, self.properties["boxsize"] is used.
    Raises ValueError for an unknown *convention*."""

    if boxsize is None:
        boxsize = self.properties["boxsize"]

    if isinstance(boxsize, units.UnitBase):
        # convert the boxsize into the same units as the position array
        boxsize = float(boxsize.ratio(self[
            "pos"].units, **self.conversion_context()))

    if convention == 'center':
        for coord in "x", "y", "z":
            self[coord][np.where(self[coord] < -boxsize / 2)] += boxsize
            self[coord][np.where(self[coord] > boxsize / 2)] -= boxsize
    elif convention == 'upper':
        for coord in "x", "y", "z":
            self[coord][np.where(self[coord] < 0)] += boxsize
            self[coord][np.where(self[coord] > boxsize)] -= boxsize
    else:
        raise ValueError("Unknown wrapping convention")
############################################
# WRITING FUNCTIONS
############################################
def write(self, fmt=None, filename=None, **kwargs):
    """Write the snapshot to disk.

    *fmt*: a snapshot class providing a ``_write`` routine; if None, the
    snapshot's own ``_write`` is used (must exist).
    *filename*: target path; if None, the snapshot's own filename is reused,
    which is refused when the stored name contains '<' (a synthetic name).
    Remaining keyword arguments are passed through to the writer."""
    if filename is None and "<" in self.filename:
        raise RuntimeError(
            'Cannot infer a filename; please provide one (use obj.write(filename="filename"))')

    if fmt is None:
        if not hasattr(self, "_write"):
            raise RuntimeError(
                'Cannot infer a file format; please provide one (e.g. use obj.write(filename="filename", fmt=pynbody.tipsy.TipsySnap)')

        # _write is a class-level helper, so the snapshot is passed explicitly
        self._write(self, filename, **kwargs)
    else:
        fmt._write(self, filename, **kwargs)
def write_array(self, array_name, fam=None, overwrite=False, **kwargs):
    """
    Write out the array with the specified name.

    Some of the functionality is available via the
    :func:`pynbody.array.SimArray.write` method, which calls the
    present function with appropriate arguments.

    **Input**

    *array_name* - the name of the array to write

    **Optional Keywords**

    *fam* (None) - Write out only one family; or provide a list to
    write out a set of families.

    *overwrite* (False) - must be set True to overwrite data already
    loadable from disk; otherwise OSError is raised.
    """

    # Determine whether this is a write or an update
    if fam is None:
        fam = self.families()

    # It's an update if we're not fully replacing the file on
    # disk, i.e. there exists a family f in self.families() but
    # not in fam for which array_name is loadable
    is_update = any([array_name in self[
        f].loadable_keys() and f not in fam for f in self.families()])

    if not hasattr(self, "_write_array"):
        raise OSError(
            "The underlying file format class does not support writing individual arrays back to disk")

    if is_update and not hasattr(self, "_update_array"):
        raise OSError(
            "The underlying file format class does not support updating arrays on disk")

    # It's an overwrite if we're writing over something loadable
    is_overwriting = any([array_name in self[
        f].loadable_keys() for f in fam])

    if is_overwriting and not overwrite:
        # User didn't specifically say overwriting is OK
        raise OSError(
            "This operation would overwrite existing data on disk. Call again setting overwrite=True if you want to enable this behaviour.")

    if is_update:
        self._update_array(array_name, fam=fam, **kwargs)
    else:
        # _write_array is a class-level helper, hence self passed explicitly
        self._write_array(self, array_name, fam=fam, **kwargs)
############################################
# LOW-LEVEL ARRAY MANIPULATION
############################################
def _get_preferred_dtype(self, array_name):
    """Return the 'preferred' numpy datatype for a named array.

    This is mainly useful when creating family arrays for new families, to
    be sure the datatype chosen matches existing data. Returns None when
    the array does not exist at any level."""
    if hasattr(self, 'base'):
        # delegate to the underlying snapshot for sub-views
        return self.base._get_preferred_dtype(array_name)
    if array_name in list(self.keys()):
        return self[array_name].dtype
    if array_name in self.family_keys():
        # use the dtype of the first family that defines this array
        per_family = self._family_arrays[array_name]
        return per_family[next(iter(per_family))].dtype
    return None
def _create_array(self, array_name, ndim=1, dtype=None, zeros=True, derived=False, shared=None):
    """Create a single snapshot-level array of dimension len(self) x ndim, with
    a given numpy dtype.

    *kwargs*:

      - *ndim*: the number of dimensions for each particle
      - *dtype*: a numpy datatype for the new array
      - *zeros*: if True, zeros the array (which takes a bit of time); otherwise
        the array is uninitialized
      - *derived*: if True, this new array will be flagged as a derived array
        which makes it read-only
      - *shared*: if True, the array will be built on top of a shared-memory array
        to make it possible to access from another process
    """
    # Does this actually correspond to a slice into a 3D array?
    NDname = self._array_name_1D_to_ND(array_name)
    if NDname:
        # creating a single component implies creating the full 3D array
        self._create_array(
            NDname, ndim=3, dtype=dtype, zeros=zeros, derived=derived)
        return

    if ndim == 1:
        dims = self._num_particles
    else:
        dims = (self._num_particles, ndim)

    if shared is None:
        shared = self._shared_arrays

    new_array = array._array_factory(dims, dtype, zeros, shared)
    new_array._sim = weakref.ref(self)
    new_array._name = array_name
    new_array.family = None

    # new_array.set_default_units(quiet=True)
    self._arrays[array_name] = new_array

    if derived:
        if array_name not in self._derived_array_names:
            self._derived_array_names.append(array_name)

    if ndim == 3:
        # expose the 1D component views (e.g. 'x','y','z' for 'pos')
        array_name_1D = self._array_name_ND_to_1D(array_name)

        for i, a in enumerate(array_name_1D):
            self._arrays[a] = new_array[:, i]
            self._arrays[a]._name = a
def _create_family_array(self, array_name, family, ndim=1, dtype=None, derived=False, shared=None):
    """Create a single array of dimension len(self.<family.name>) x ndim,
    with a given numpy dtype, belonging to the specified family. For arguments
    other than *family*, see the documentation for :func:`~pynbody.snapshot.SimSnap._create_array`.

    Warning: Do not assume that the family array will be available after
    calling this funciton, because it might be a 'completion' of existing
    family arrays, at which point the routine will actually be creating
    a simulation-level array, e.g.

    sim._create_family_array('bla', dm)
    sim._create_family_array('bla', star)
    'bla' in sim.family_keys() # -> True
    'bla' in sim.keys() # -> False
    sim._create_family_array('bla', gas)
    'bla' in sim.keys() # -> True
    'bla' in sim.family_keys() # -> False

    sim[gas]['bla'] *is* guaranteed to exist, however, it just might
    be a view on a simulation-length array.
    """

    NDname = self._array_name_1D_to_ND(array_name)
    if NDname:
        # creating a single component implies creating the full 3D array
        self._create_family_array(
            NDname, family, ndim=3, dtype=dtype, derived=derived)
        return

    self_families = self.families()

    if len(self_families) == 1 and family in self_families:
        # If the file has only one family, just go ahead and create
        # a normal array
        self._create_array(
            array_name, ndim=ndim, dtype=dtype, derived=derived)
        return

    if ndim == 1:
        dims = self[family]._num_particles
    else:
        dims = (self[family]._num_particles, ndim)

    # Determine what families already have an array of this name
    fams = []
    dtx = None
    try:
        fams = list(self._family_arrays[array_name].keys())
        dtx = self._family_arrays[array_name][fams[0]].dtype
    except KeyError:
        pass

    fams.append(family)

    if dtype is not None and dtx is not None and dtype != dtx:

        # We insist on the data types being the same for, e.g. sim.gas['my_prop'] and sim.star['my_prop']
        # This makes promotion to simulation-level arrays possible.
        raise ValueError("Requested data type {!r} is not consistent with existing data type {!r} for family array {!r}".format(
            str(dtype), str(dtx), array_name))

    if all([x in fams for x in self_families]):
        # If, once we created this array, *all* families would have
        # this array, just create a simulation-level array
        if self._promote_family_array(array_name, ndim=ndim, derived=derived, shared=shared) is not None:
            return None

    # if we get here, either the array cannot be promoted to simulation level, or that would
    # not be appropriate, so actually go ahead and create the family array

    if shared is None:
        shared = self._shared_arrays
    new_ar = array._array_factory(dims, dtype, False, shared)
    new_ar._sim = weakref.ref(self)
    new_ar._name = array_name
    new_ar.family = family

    def sfa(n, v):
        # store the new array (or a component view) in the family-array map
        try:
            self._family_arrays[n][family] = v
        except KeyError:
            self._family_arrays[n] = dict({family: v})

    sfa(array_name, new_ar)
    if derived:
        if array_name not in self._family_derived_array_names[family]:
            self._family_derived_array_names[family].append(array_name)

    if ndim == 3:
        # expose the 1D component views (e.g. 'x','y','z' for 'pos')
        array_name_1D = self._array_name_ND_to_1D(array_name)
        for i, a in enumerate(array_name_1D):
            sfa(a, new_ar[:, i])
            self._family_arrays[a][family]._name = a
def _del_family_array(self, array_name, family):
    """Delete the array with the specified name for the specified family.

    When no family is left holding the array, the whole entry is removed;
    any derived-array bookkeeping for the family is updated too."""
    per_family = self._family_arrays[array_name]
    del per_family[family]
    if not per_family:
        del self._family_arrays[array_name]

    derive_track = self._family_derived_array_names[family]
    if array_name in derive_track:
        derive_track.remove(array_name)
def _get_from_immediate_cache(self, name, fn):
    """Retrieves the named numpy array from the immediate cache associated
    with this snapshot. If the array does not exist in the immediate
    cache, function fn is called with no arguments and must generate
    it.

    Access is serialised via self._immediate_cache_lock so concurrent
    callers do not generate the same entry twice."""
    with self._immediate_cache_lock:
        if not hasattr(self, '_immediate_cache'):
            # the cache is a one-element list holding a dict, created lazily
            self._immediate_cache = [{}]
        cache = self._immediate_cache[0]
        hx = hash(name)
        if hx not in cache:
            cache[hx] = fn()

    return cache[hx]
def _get_array(self, name, index=None, always_writable=False):
    """Get the array of the specified *name*, optionally
    for only the particles specified by *index*.

    If *always_writable* is True, the returned array is
    writable. Otherwise, it is still normally writable, but
    not if the array is flagged as derived by the framework."""
    x = self._arrays[name]
    if x.derived and not always_writable:
        # hand out a read-only view so derived data cannot be clobbered
        x = x.view()
        x.flags['WRITEABLE'] = False

    if index is not None:
        if type(index) is slice:
            ret = x[index]
        else:
            # fancy index: wrap in a lazy IndexedSimArray rather than copying
            ret = array.IndexedSimArray(x, index)

        ret.family = None
        return ret
    else:
        return x
def _get_family_array(self, name, fam, index=None, always_writable=False):
    """Get the family-level array with specified *name* for the family *fam*,
    optionally for only the particles specified by *index* (relative to the
    family slice).

    If *always_writable* is True, the returned array is writable. Otherwise
    it is still normally writable, but not if the array is flagged as derived
    by the framework.
    """

    try:
        x = self._family_arrays[name][fam]
    except KeyError:
        raise KeyError("No array " + name + " for family " + fam.name)

    if x.derived and not always_writable:
        # hand out a read-only view so derived data cannot be clobbered
        x = x.view()
        x.flags['WRITEABLE'] = False

    if index is not None:
        if type(index) is slice:
            x = x[index]
        else:
            if _subarray_immediate_mode or self.immediate_mode:
                # copy the indexed data now rather than building a lazy view
                x = self._get_from_immediate_cache(name,
                                                   lambda: x[index])
            else:
                x = array.IndexedSimArray(x, index)
    return x
def _set_array(self, name, value, index=None):
    """Update the contents of the snapshot-level array to that
    specified by *value*. If *index* is not None, update only that
    subarray specified. The copy is skipped when source and target are
    already the same underlying data (see util.set_array_if_not_same)."""
    util.set_array_if_not_same(self._arrays[name], value, index)
def _set_family_array(self, name, family, value, index=None):
    """Update the contents of the family-level array to that
    specified by *value*. If *index* is not None, update only that
    subarray specified."""
    util.set_array_if_not_same(self._family_arrays[name][family],
                               value, index)
def _create_arrays(self, array_list, ndim=1, dtype=None, zeros=True):
    """Create a set of snapshot-level arrays named in *array_list*, each of
    dimension len(self) x ndim, with a given numpy dtype.

    See :func:`_create_array` for the meaning of *ndim*, *dtype* and *zeros*.
    """
    # NOTE: the loop variable was previously named 'array', shadowing the
    # pynbody 'array' module used elsewhere in this file; renamed for safety.
    for array_name in array_list:
        self._create_array(array_name, ndim, dtype, zeros)
def _get_family_slice(self, fam):
    """Turn a specified Family object into a concrete slice which describes
    which particles in this SimSnap belong to that family. An unknown
    family maps to the empty slice(0, 0)."""
    return self._family_slice.get(fam, slice(0, 0))
def _family_index(self):
    """Return an array giving the family number of each particle in this
    snapshot, something like 0,0,0,0,1,1,2,2,2, ... where 0 means
    self.families()[0] etc. The result is computed once and cached."""
    if hasattr(self, "_family_index_cached"):
        return self._family_index_cached

    result = np.empty((len(self),), dtype='int8')
    for number, fam in enumerate(self.ancestor.families()):
        result[self._get_family_slice(fam)] = number

    self._family_index_cached = result
    return result
def _assert_not_family_array(self, name):
    """Raises a KeyError if the specified array name is connected to
    a family-specific array.

    (The historical docstring said ValueError, but the code has always
    raised KeyError; callers catch KeyError.)"""
    if name in self.family_keys():
        raise KeyError("Array " + name + " is a family-level property")
def _delayed_array_promotions(self):
    """Called automatically to catch up with pending array promotions
    that were queued while a delay_promotion context was active."""
    for x in self.__delayed_promotions:
        self._promote_family_array(*x)

    # all queued promotions have been performed; clear the queue
    self.__delayed_promotions = []
def _promote_family_array(self, name, ndim=1, dtype=None, derived=False, shared=None):
    """Create a simulation-level array (if it does not exist) with
    the specified name. Copy in any data from family-level arrays
    of the same name.

    Returns the promoted simulation-level array, or None when the
    promotion has been queued because a delay_promotion context is
    active."""

    # a 1D component is promoted via its parent ND array
    if ndim == 1 and self._array_name_1D_to_ND(name):
        return self._promote_family_array(self._array_name_1D_to_ND(name), 3, dtype)

    if self.delay_promotion:
        # if array isn't already scheduled for promotion, do so now
        if not any([x[0] == name for x in self.__delayed_promotions]):
            self.__delayed_promotions.append(
                [name, ndim, dtype, derived, shared])
        return None

    if dtype is None:
        # infer dtype from the first existing family array; warn if the
        # families disagree among themselves
        try:
            x = list(self._family_arrays[name].keys())[0]
            dtype = self._family_arrays[name][x].dtype
            for x in list(self._family_arrays[name].values()):
                if x.dtype != dtype:
                    warnings.warn("Data types of family arrays do not match; assuming " + str(
                        dtype), RuntimeWarning)

        except IndexError:
            pass

    # decide whether the promoted array counts as derived
    dmap = [name in self._family_derived_array_names[
        i] for i in self._family_arrays[name]]
    some_derived = any(dmap)
    all_derived = all(dmap)

    if derived:
        some_derived = True
    if not derived:
        all_derived = False

    if name not in self._arrays:
        self._create_array(
            name, ndim=ndim, dtype=dtype, derived=all_derived, shared=shared)
    try:
        for fam in self._family_arrays[name]:
            if has_units(self._family_arrays[name][fam]) and not has_units(self._arrays[name]):
                self._arrays[name].units = self._family_arrays[
                    name][fam].units
                # inherits the units from the first dimensional family array found.
                # Note that future copies, once the units are set, invoke the correct conversion
                # and raise a UnitsException if such a conversion is
                # impossible.

            try:
                self._arrays[name][self._get_family_slice(
                    fam)] = self._family_arrays[name][fam]
            except units.UnitsException:
                # There is a problem getting everything into the same units. The trouble is
                # that having got here if we let the exception propagate, we're going to
                # end up with the SimSnap in an inconsistent state. So force the copy
                # ignoring the units and raise a warning
                warnings.warn(
                    "When conjoining family arrays to create a snapshot level array, the units could not be unified. You will now have a snapshot-level array %r with inconsistent unit information" % name)
                self._arrays[name].base[self._get_family_slice(
                    fam)] = self._family_arrays[name][fam].base

        # family copies are complete; drop the family-level versions
        del self._family_arrays[name]
        if ndim == 3:
            for v in self._array_name_ND_to_1D(name):
                del self._family_arrays[v]

        gc.collect()

    except KeyError:
        pass

    if some_derived:
        if all_derived:
            self._derived_array_names.append(name)
        else:
            warnings.warn(
                "Conjoining derived and non-derived arrays. Assuming result is non-derived, so no further updates will be made.", RuntimeWarning)
            for v in self._family_derived_array_names.values():
                if name in v:
                    del v[v.index(name)]

    return self._arrays[name]
############################################
# DERIVED ARRAY SYSTEM
############################################
@classmethod
def derived_quantity(cl, fn):
    """Class decorator: register *fn* as a derivable quantity for *cl*.

    The quantity is marked non-stable, i.e. it is re-derived whenever its
    dependencies change."""
    registry = SimSnap._derived_quantity_registry.setdefault(cl, {})
    registry[fn.__name__] = fn
    fn.__stable__ = False
    return fn
@classmethod
def stable_derived_quantity(cl, fn):
    """Class decorator: register *fn* as a stable derivable quantity for *cl*.

    A stable quantity is derived once and not recalculated when its
    dependencies change."""
    registry = SimSnap._derived_quantity_registry.setdefault(cl, {})
    registry[fn.__name__] = fn
    fn.__stable__ = True
    return fn
def _find_deriving_function(self, name):
    """Search the derived-quantity registry along this class's MRO for a
    function deriving *name*; return it, or None if not found."""
    for klass in type(self).__mro__:
        functions = self._derived_quantity_registry.get(klass)
        if functions is not None and name in functions:
            return functions[name]
    return None
def _derive_array(self, name, fam=None):
    """Calculate and store, for this SnapShot, the derivable array 'name'.
    If *fam* is not None, derive only for the specified family.

    This searches the registry of @X.derived_quantity functions
    for all X in the inheritance path of the current class.
    """
    global config

    calculated = False
    fn = self._find_deriving_function(name)
    if fn:
        logger.info("Deriving array %s" % name)
        # suspend dependency propagation while we write the derived data
        with self.auto_propagate_off:
            if fam is None:
                result = fn(self)
                ndim = result.shape[-1] if len(
                    result.shape) > 1 else 1
                # stable quantities are stored as ordinary (writable) arrays
                self._create_array(
                    name, ndim, dtype=result.dtype, derived=not fn.__stable__)
                write_array = self._get_array(
                    name, always_writable=True)
            else:
                result = fn(self[fam])
                ndim = result.shape[-1] if len(
                    result.shape) > 1 else 1

                # check if a family array already exists with a different dtype
                # if so, cast the result to the existing dtype
                # numpy version < 1.7 does not support doing this in-place

                if self._get_preferred_dtype(name) != result.dtype \
                   and self._get_preferred_dtype(name) is not None:
                    if int(np.version.version.split('.')[1]) > 6:
                        result = result.astype(self._get_preferred_dtype(name), copy=False)
                    else:
                        result = result.astype(self._get_preferred_dtype(name))

                self[fam]._create_array(
                    name, ndim, dtype=result.dtype, derived=not fn.__stable__)
                write_array = self[fam]._get_array(
                    name, always_writable=True)

            self.ancestor._autoconvert_array_unit(result)

            write_array[:] = result
            if units.has_units(result):
                write_array.units = result.units
def _dirty(self, name):
    """Declare a given array as changed, so deleting any derived
    quantities which depend on it (recursively)."""
    name = self._array_name_1D_to_ND(name) or name

    if name == 'pos':
        # a position change invalidates any cached KD-trees
        for v in self.ancestor._persistent_objects.values():
            if 'kdtree' in v:
                del v['kdtree']

    if not self.auto_propagate_off:
        for d_ar in self._dependency_tracker.get_dependents(name):
            if d_ar in self or self.has_family_key(d_ar):
                if self.is_derived_array(d_ar):
                    del self[d_ar]
                    # dependents of the deleted array are dirty too
                    self._dirty(d_ar)
def is_derived_array(self, name, fam=None):
    """Returns True if the array or family array of given name is
    auto-derived (and therefore read-only)."""
    fam = fam or self._unifamily
    if fam:
        return (name in self._family_derived_array_names[fam]) or name in self._derived_array_names
    if name in list(self.keys()):
        return name in self._derived_array_names
    if name in self.family_keys():
        # a family array counts as derived only if every family agrees
        return all(name in self._family_derived_array_names[f]
                   for f in self._family_arrays[name])
    return False
def unlink_array(self, name):
    """If the named array is auto-derived, this destroys the link so that
    the array becomes editable but no longer auto-updates."""
    if not self.is_derived_array(name):
        raise RuntimeError("Not a derived array")

    if name in self.family_keys():
        # drop the derived flag for every family holding this array
        for fam in self._family_arrays[name]:
            track = self._family_derived_array_names[fam]
            if name in track:
                track.remove(name)
    else:
        self._derived_array_names.remove(name)
############################################
# CONVENIENCE FUNCTIONS
############################################
def mean_by_mass(self, name):
    """Calculate the mass-weighted mean of the specified array.

    Equivalent to sum(m_i * x_i) / sum(m_i), computed as the ratio of the
    two plain means; units of the named array are preserved."""
    masses = np.asanyarray(self["mass"])
    weighted = (self[name].transpose() * masses).transpose()
    return array.SimArray(weighted.mean(axis=0) / masses.mean(), self[name].units)
############################################
# SNAPSHOT DECORATION
############################################
@classmethod
def decorator(cl, fn):
    """Class decorator: register *fn* to be run on every newly-constructed
    snapshot of class *cl* (see _decorate)."""
    SimSnap._decorator_registry.setdefault(cl, []).append(fn)
    return fn
def _decorate(self):
    """Run every decorator callback registered for this snapshot's class
    (walking the full MRO, base classes included)."""
    for klass in type(self).__mro__:
        for fn in self._decorator_registry.get(klass, []):
            fn(self)
############################################
# HASHING AND EQUALITY TESTING
############################################
@property
def _inclusion_hash(self):
    """Lazily-computed MD5 digest of this view's particle index list
    relative to its ancestor; used by __hash__/__eq__ to compare views
    cheaply. Computed once and cached on the instance."""
    try:
        rval = self.__inclusion_hash
    except AttributeError:
        try:
            index_list = self.get_index_list(self.ancestor)
            # renamed from 'hash' to avoid shadowing the builtin
            digest = hashlib.md5(index_list.data)
            self.__inclusion_hash = digest.digest()
        except Exception:
            # Previously a bare 'except:', which also swallowed
            # KeyboardInterrupt/SystemExit; logging.warn is deprecated.
            logging.warning(
                "Encountered a problem while calculating your inclusion hash. %s" % traceback.format_exc())
        rval = self.__inclusion_hash
    return rval
def __hash__(self):
    """Hash combining the identity of the ancestor snapshot with this view's
    inclusion hash, so two views of the same particles hash equally."""
    return hash((object.__hash__(self.ancestor), self._inclusion_hash))
def __eq__(self, other):
    """Equality test for Snapshots. Returns true if both sides of the
    == operator point to the same data."""
    return self is other or hash(self) == hash(other)
############################################
# COPYING
############################################
def __deepcopy__(self, memo=None):
create_args = {}
for fam in family._registry:
sl = self._get_family_slice(fam)
if sl.start != sl.stop:
create_args[fam.name] = len(self[fam])
new_snap = new(**create_args)
# ordering fix
for k in copy.copy(list(new_snap._family_slice.keys())):
new_snap._family_slice[k] = copy.copy(self._get_family_slice(k))
for k in list(self.keys()):
new_snap[k] = self[k]
for k in self.family_keys():
for fam in family._registry:
if len(self[fam]) > 0:
self_fam = self[fam]
if k in list(self_fam.keys()) and not self_fam.is_derived_array(k):
new_snap[fam][k] = self_fam[k]
new_snap.properties = copy.deepcopy(self.properties, memo)
new_snap._file_units_system = copy.deepcopy(self._file_units_system, memo)
return new_snap
# Module-level debug switch: set this to True to always get copies of data
# when indexing is necessary, instead of lazy IndexedSimArray views. This is
# mainly a bug testing / efficiency checking mode -- shouldn't be necessary
# in normal use.
_subarray_immediate_mode = False
class SubSnap(SimSnap):
"""Represent a sub-view of a SimSnap, initialized by specifying a
slice. Arrays accessed through __getitem__ are automatically
sub-viewed using the given slice."""
def __init__(self, base, _slice):
    """Initialise a sub-view of *base* selected by the slice *_slice*.

    Only plain slice objects are accepted here; the slice is normalised to
    be start-relative and clamped to the length of *base*."""
    self.base = base
    self._file_units_system = base._file_units_system
    self._unifamily = base._unifamily
    self._inherit()

    if isinstance(_slice, slice):
        # Various slice logic later (in particular taking
        # subsnaps-of-subsnaps) requires having positive
        # (i.e. start-relative) slices, so if we have been passed a
        # negative (end-relative) index, fix that now.
        if _slice.start is None:
            _slice = slice(0, _slice.stop, _slice.step)
        if _slice.start < 0:
            _slice = slice(len(
                base) + _slice.start, _slice.stop, _slice.step)
        if _slice.stop is None or _slice.stop > len(base):
            _slice = slice(_slice.start, len(base), _slice.step)
        if _slice.stop < 0:
            _slice = slice(_slice.start, len(
                base) + _slice.stop, _slice.step)

        self._slice = _slice

        # human-readable tag appended to the filename for this view
        descriptor = "[" + str(_slice.start) + ":" + str(_slice.stop)
        if _slice.step is not None:
            descriptor += ":" + str(_slice.step)
        descriptor += "]"

    else:
        raise TypeError("Unknown SubSnap slice type")

    self._num_particles = util.indexing_length(_slice)

    self._descriptor = descriptor
def _inherit(self):
    """Copy each attribute named in self._inherited from the base snapshot
    onto this sub-view."""
    for x in self._inherited:
        setattr(self, x, getattr(self.base, x))
def _get_array(self, name, index=None, always_writable=False):
    """Fetch the named snapshot-level array from the base, restricted to
    this view's slice (composed with *index* if given)."""
    if _subarray_immediate_mode or self.immediate_mode:
        # copy the sliced data now and cache it, rather than returning a
        # lazy view onto the base array
        return self._get_from_immediate_cache(name,
                                              lambda: self.base._get_array(
                                                  name, None, always_writable)[self._slice])

    else:
        ret = self.base._get_array(name, util.concatenate_indexing(
            self._slice, index), always_writable)
        ret.family = self._unifamily
        return ret
def _set_array(self, name, value, index=None):
    """Write *value* into the base snapshot's array, through this view's
    slice (composed with *index* if given)."""
    self.base._set_array(
        name, value, util.concatenate_indexing(self._slice, index))
def _get_family_array(self, name, fam, index=None, always_writable=False):
base_family_slice = self.base._get_family_slice(fam)
sl = util.relative_slice(base_family_slice,
util.intersect_slices(self._slice, base_family_slice, len(self.base)))
sl = util.concatenate_indexing(sl, index)
if _subarray_immediate_mode or self.immediate_mode:
return self._get_from_immediate_cache((name, fam),
lambda: self.base._get_family_array(
name, fam, None, always_writable)[sl])
else:
return self.base._get_family_array(name, fam, sl, always_writable)
def _set_family_array(self, name, family, value, index=None):
fslice = self._get_family_slice(family)
self.base._set_family_array(
name, family, value, util.concatenate_indexing(fslice, index))
def _promote_family_array(self, *args, **kwargs):
self.base._promote_family_array(*args, **kwargs)
def __delitem__(self, name):
# is this the right behaviour?
raise RuntimeError("Arrays can only be deleted from the base snapshot")
def _del_family_array(self, name, family):
# is this the right behaviour?
raise RuntimeError("Arrays can only be deleted from the base snapshot")
@property
def _filename(self):
return self.base._filename + ":" + self._descriptor
def keys(self):
return list(self.base.keys())
def loadable_keys(self, fam=None):
if self._unifamily:
return self.base.loadable_keys(self._unifamily)
else:
return self.base.loadable_keys(fam)
def derivable_keys(self):
return self.base.derivable_keys()
def infer_original_units(self, *args):
"""Return the units on disk for a quantity with the specified dimensions"""
return self.base.infer_original_units(*args)
def _get_family_slice(self, fam):
sl = util.relative_slice(self._slice,
util.intersect_slices(self._slice, self.base._get_family_slice(fam), len(self.base)))
return sl
def _load_array(self, array_name, fam=None, **kwargs):
self.base._load_array(array_name, fam)
def write_array(self, array_name, fam=None, **kwargs):
fam = fam or self._unifamily
if not fam or self._get_family_slice(fam) != slice(0, len(self)):
raise OSError(
"Array writing is available for entire simulation arrays or family-level arrays, but not for arbitrary subarrays")
self.base.write_array(array_name, fam=fam, **kwargs)
def _derive_array(self, array_name, fam=None):
self.base._derive_array(array_name, fam)
def family_keys(self, fam=None):
return self.base.family_keys(fam)
def _create_array(self, *args, **kwargs):
self.base._create_array(*args, **kwargs)
def _create_family_array(self, *args, **kwargs):
self.base._create_family_array(*args, **kwargs)
def physical_units(self, *args, **kwargs):
self.base.physical_units(*args, **kwargs)
def is_derived_array(self, v, fam=None):
return self.base.is_derived_array(v)
def unlink_array(self, name):
self.base.unlink_array(name)
def get_index_list(self, relative_to, of_particles=None):
if of_particles is None:
of_particles = np.arange(len(self))
if relative_to is self:
return of_particles
return self.base.get_index_list(relative_to, util.concatenate_indexing(self._slice, of_particles))
class IndexedSubSnap(SubSnap):
    """Represents a subset of the simulation particles according
    to an index array.

    Parameters
    ----------
    base : SimSnap object
        The base snapshot
    index_array : integer array or None
        The indices of the elements that define the sub snapshot. Set to None to use iord-based instead.
    iord_array : integer array or None
        The iord of the elements that define the sub snapshot. Set to None to use index-based instead.
        This may be computationally expensive. See note below.

    Notes
    -----
    `index_array` and `iord_array` arguments are mutually exclusive.
    In the case of `iord_array`, a sorting operation is required that may take
    a significant time and require O(N) memory.
    """
    def __init__(self, base, index_array=None, iord_array=None):
        self._descriptor = "indexed"
        self.base = base
        self._inherit()
        self._unifamily = base._unifamily
        self._file_units_system = base._file_units_system
        if index_array is None and iord_array is None:
            raise ValueError(
                "Cannot define a subsnap without an index_array or iord_array.")
        if index_array is not None and iord_array is not None:
            # Bug fix: this branch fires when BOTH arguments are given, but the
            # old message wrongly read "without both".
            raise ValueError(
                "Cannot define a subsnap with both an index_array and an iord_array.")
        if iord_array is not None:
            index_array = self._iord_to_index(iord_array)
        # Normalise index_array: it may be a Filter, a np.where-style tuple,
        # or anything array-like.
        if isinstance(index_array, filt.Filter):
            self._descriptor = index_array._descriptor
            index_array = index_array.where(base)[0]
        elif isinstance(index_array, tuple):
            if isinstance(index_array[0], np.ndarray):
                index_array = index_array[0]
            else:
                index_array = np.array(index_array)
        else:
            index_array = np.asarray(index_array)
        findex = base._family_index()[index_array]
        # Check the family index array is monotonically increasing
        # If not, the family slices cannot be implemented
        if not all(np.diff(findex) >= 0):
            raise ValueError(
                "Families must retain the same ordering in the SubSnap")
        self._slice = index_array
        self._family_slice = {}
        self._family_indices = {}
        self._num_particles = len(index_array)
        # Find the locations of the family slices
        for i, fam in enumerate(self.ancestor.families()):
            ids = np.where(findex == i)[0]
            if len(ids) > 0:
                new_slice = slice(ids.min(), ids.max() + 1)
                self._family_slice[fam] = new_slice
                # Indices within the family, relative to the family's start.
                self._family_indices[fam] = np.asarray(index_array[
                    new_slice]) - base._get_family_slice(fam).start
    def _iord_to_index(self, iord):
        """Translate an array of particle iords into base-snapshot indices."""
        # Maps iord to indices. Note that this requires to perform an argsort (O(N log N) operations)
        # and a binary search (O(M log N) operations) with M = len(iord) and N = len(self.base).
        if not util.is_sorted(iord) == 1:
            raise Exception('Expected iord to be sorted in increasing order.')
        # Find index of particles using a search sort
        iord_base = self.base['iord']
        iord_base_argsort = self.base['iord_argsort']
        index_array = util.binary_search(iord, iord_base, sorter=iord_base_argsort)
        # Check that the iord match
        if np.any(index_array == len(iord_base)):
            raise Exception('Some of the requested ids cannot be found in the dataset.')
        return index_array
    def _get_family_slice(self, fam):
        # A bit messy: jump out the SubSnap inheritance chain
        # and call SimSnap method directly...
        return SimSnap._get_family_slice(self, fam)
    def _get_family_array(self, name, fam, index=None, always_writable=False):
        # Families not present in this view fall back to an empty slice.
        sl = self._family_indices.get(fam, slice(0, 0))
        sl = util.concatenate_indexing(sl, index)
        return self.base._get_family_array(name, fam, sl, always_writable)
    def _set_family_array(self, name, family, value, index=None):
        self.base._set_family_array(name, family, value,
                                    util.concatenate_indexing(self._family_indices[family], index))
    def _create_array(self, *args, **kwargs):
        self.base._create_array(*args, **kwargs)
class FamilySubSnap(SubSnap):
    """Represents a one-family portion of a parent snap object"""
    def __init__(self, base, fam):
        self.base = base
        self._inherit()
        self._slice = base._get_family_slice(fam)
        self._unifamily = fam
        self._descriptor = ":" + fam.name
        # Use the slice attributes to find sub array length
        self._num_particles = self._slice.stop - self._slice.start
        self._file_units_system = base._file_units_system
    def __delitem__(self, name):
        """Delete a family-level array; global arrays cannot be removed here."""
        if name in list(self.base.keys()):
            raise ValueError(
                "Cannot delete global simulation property from sub-view")
        elif name in self.base.family_keys(self._unifamily):
            self.base._del_family_array(name, self._unifamily)
        # NOTE(review): a name in neither category is silently ignored --
        # confirm whether a KeyError was intended here.
    def keys(self):
        # Both global arrays and this family's own arrays are visible.
        global_keys = list(self.base.keys())
        family_keys = self.base.family_keys(self._unifamily)
        return list(set(global_keys).union(family_keys))
    def family_keys(self, fam=None):
        # We now define there to be no family-specific subproperties,
        # because all properties can be accessed through standard
        # __setitem__, __getitem__ methods
        return []
    def _get_family_slice(self, fam):
        # This whole view is one family; every other family is empty.
        if fam is self._unifamily:
            return slice(0, len(self))
        else:
            return slice(0, 0)
    def _get_array(self, name, index=None, always_writable=False):
        # Prefer the global array; fall back to the family-level array.
        try:
            return SubSnap._get_array(self, name, index, always_writable)
        except KeyError:
            return self.base._get_family_array(name, self._unifamily, index, always_writable)
    def _create_array(self, array_name, ndim=1, dtype=None, zeros=True, derived=False, shared=None):
        # Array creation now maps into family-array creation in the parent
        self.base._create_family_array(
            array_name, self._unifamily, ndim, dtype, derived, shared)
    def _set_array(self, name, value, index=None):
        # Global arrays are written through the parent slice; otherwise the
        # write targets this family's array.
        if name in list(self.base.keys()):
            self.base._set_array(
                name, value, util.concatenate_indexing(self._slice, index))
        else:
            self.base._set_family_array(name, self._unifamily, value, index)
    def _create_family_array(self, array_name, family, ndim, dtype, derived, shared):
        self.base._create_family_array(
            array_name, family, ndim, dtype, derived, shared)
    def _promote_family_array(self, *args, **kwargs):
        # Nothing to promote within a single-family view.
        pass
    def _load_array(self, array_name, fam=None, **kwargs):
        if fam is self._unifamily or fam is None:
            self.base._load_array(array_name, self._unifamily)
    def _derive_array(self, array_name, fam=None):
        if fam is self._unifamily or fam is None:
            self.base._derive_array(array_name, self._unifamily)
def load(filename, *args, **kwargs):
    """Loads a file using the appropriate class, returning a SimSnap
    instance."""
    # Try each backend in the configured priority order; the first backend
    # whose _can_load accepts the file wins.
    for c in config['snap-class-priority']:
        if c._can_load(filename):
            logger.info("Loading using backend %s" % str(c))
            return c(filename, *args, **kwargs)
    raise OSError(
        "File %r: format not understood or does not exist" % filename)
def new(n_particles=0, order=None, **families):
    """Create a blank SimSnap, with the specified number of particles.

    Position, velocity and mass arrays are created and filled
    with zeros.

    By default all particles are taken to be dark matter.
    To specify otherwise, pass in keyword arguments specifying
    the number of particles for each family, e.g.

    f = new(dm=50, star=25, gas=25)

    The order in which the different families appear in the snapshot
    is unspecified unless you add an 'order' argument:

    f = new(dm=50, star=25, gas=25, order='star,gas,dm')

    guarantees the stars, then gas, then dark matter particles appear
    in sequence.
    """
    if len(families) == 0:
        families = {'dm': n_particles}
    t_fam = []
    tot_particles = 0
    if order is None:
        # No explicit order: iterate the keyword dict as-is.
        for k, v in list(families.items()):
            assert isinstance(v, int)
            t_fam.append((family.get_family(k), v))
            tot_particles += v
    else:
        # Explicit comma-separated order string, e.g. 'star,gas,dm'.
        for k in order.split(","):
            v = families[k]
            assert isinstance(v, int)
            t_fam.append((family.get_family(k), v))
            tot_particles += v
    x = SimSnap()
    x._num_particles = tot_particles
    x._filename = "<created>"
    x._create_arrays(["pos", "vel"], 3)
    x._create_arrays(["mass"], 1)
    rt = 0
    # Assign each family a contiguous slice of the particle range.
    for k, v in t_fam:
        x._family_slice[k] = slice(rt, rt + v)
        rt += v
    x._decorate()
    return x
def _get_snap_classes():
    """Import the I/O backends lazily and return the known snapshot classes."""
    from . import ascii, gadget, gadgethdf, grafic, nchilada, ramses, tipsy
    _snap_classes = [gadgethdf.GadgetHDFSnap, gadgethdf.SubFindHDFSnap, gadgethdf.EagleLikeHDFSnap,
                     nchilada.NchiladaSnap, gadget.GadgetSnap,
                     tipsy.TipsySnap, ramses.RamsesSnap, grafic.GrafICSnap,
                     ascii.AsciiSnap]
    return _snap_classes
|
999,812 | b3e2bb0ecdefa0b9e80ba426cf8ee8c0a04ae479 | from IntrinsicAnalysis.clustering.AC_model import ACModel
def analyse_paragraphs(paragraphs):
    """Run the AC clustering model over *paragraphs*.

    Returns a list of ``(paragraph, is_suspicious)`` pairs, one per input
    paragraph, in the original order.
    """
    ac = ACModel(None, None)
    results = ac.analyse_paragraphs(paragraphs)
    # Set membership is O(1) per paragraph; the original scanned a list.
    suspicious_indices = set(results['suspicious_parts'])
    return [(paragraph, index in suspicious_indices)
            for index, paragraph in enumerate(paragraphs)]
|
999,813 | e653d1d6c38530e88698f957ab472df206a67e18 | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .models import news
from .models import workers
from .models import CartItem
from .models import Cars
# Expose the app's models in the Django admin with default options.
admin.site.register(news)
admin.site.register(workers)
admin.site.register(CartItem)
admin.site.register(Cars)
999,814 | 8b4452a4c917d5b0080016b26a31152453af0353 | #Uppgift 7
"""
Vad är siffersumman av 2¹⁰⁰⁰?
"""
#Svar: 1366
num = 2 ** 1000
# Digit sum: add up the decimal digits of 2**1000 (Project Euler problem 16).
siffersumma = sum(int(digit) for digit in str(num))
print(siffersumma)
|
999,815 | 4083774fd41faf063d96540e986b5df8f437591e | import os
import shutil
BASE_URL = "https://ru.wikipedia.org"
NESTED_LINK_REGEXP = "^/wiki/"
INDEX_PATH = "output/index.json"
TEXT_DOCUMENTS_PATH = "output/text_documents"
LEMMATIZED_TEXTS_PATH = "output/lemmatized_texts"
INVERTED_INDEX_PATH = "output/inverted_index.json"
TF_IDF_PATH = "output/td-idf-calculation.json"
def prepare_output_directory(path):
    """Remove *path* left over from a previous run and recreate it empty.

    Works whether or not the directory already exists; prints the outcome
    of each step.
    """
    try:
        shutil.rmtree(path)
    except OSError:
        # Nothing to remove (first run) or removal failed; either way we
        # still attempt to create a fresh directory below.
        # (Bug fix: the old message claimed the directory *was* deleted,
        # although this branch only runs when rmtree failed.)
        print("Directory %s could not be deleted" % path)
    try:
        os.mkdir(path)
    except OSError:
        print("Creation of the directory %s failed" % path)
    else:
        print("Successfully created the directory '%s' " % path)
def save_text_in_file(text_file_path, text):
    """Write *text* to *text_file_path*, overwriting any existing content."""
    # Context manager guarantees the handle is closed even if write() fails
    # (the original leaked the handle on error).
    with open(text_file_path, "w") as text_file:
        text_file.write(text)
999,816 | 481fcbdb7cd7905992475cb7e9f4371dce5be643 | from django.contrib import admin
from .models import MusicInstrument
# Register your models here.
class MusicInstrumentAdmin(admin.ModelAdmin):
    """Admin options for MusicInstrument: a single unnamed fieldset."""
    model = MusicInstrument
    # NOTE(review): assumes the model has a field literally named
    # 'MusicInstrument' -- verify against the model definition.
    fieldsets= [
        (None,{'fields':['MusicInstrument']})
    ]
# Register the model together with its custom admin class.
admin.site.register(MusicInstrument,MusicInstrumentAdmin)
|
999,817 | 2314a48e6ce9acac943071b0b365380c6c164856 | from django.contrib import admin
from django.urls import path
from blog.views import TopView, WorkView, BlogView, WorkDetailView, BlogDetailView, AboutView, CategoryListView, CategoryDetailView
# URL routes for the blog app; the route names are what templates and
# reverse() refer to.
urlpatterns = [
    path('', TopView.as_view(), name='top'),
    path('bloglist/', BlogView.as_view(), name='bloglist'),
    path('worklist/', WorkView.as_view(), name='worklist'),
    path('work/<int:pk>/', WorkDetailView.as_view(), name='workdetail'),
    path('blog/<int:pk>/', BlogDetailView.as_view(), name='blogdetail'),
    path('about/', AboutView.as_view(), name='about'),
    path('categories/', CategoryListView.as_view(), name='categorylist'),
    path('category/<str:slug>/', CategoryDetailView.as_view(), name='categorydetail'),
]
|
999,818 | f1987ee193dfb3de661869a51d2ebeac0af4c926 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import re
import time
import logging
# semi-standard modules
try:
import pexpect
except ImportError:
sys.stderr.write("Module pexpect is not available. It can be downloaded from http://pexpect.sourceforge.net/\n")
raise
class telnet(object):
    """pexpect-driven telnet client for a console/terminal-server port.

    NOTE(review): ``self.port_username``, ``self.port_password`` and
    ``self.server_password`` are referenced below but never assigned in this
    class -- callers must set them before login()/clear_line().
    """

    def __init__(self, ip, port, timeout=60):
        self.host = ip
        self.port = port
        # expect() patterns; the index into this list drives the login
        # state machine below.
        self.opts = ["Escape character is",
                     "Connection refused",
                     "Username:",
                     "Password:",
                     "Selection:",
                     "loader>",
                     "root@sup1:~#",
                     pexpect.EOF,
                     pexpect.TIMEOUT]
        self.timeout = timeout

    def login(self, port=""):
        """Connect to self.host at *port* and walk the login prompts.

        Returns True when the "loader>" prompt is reached, False otherwise.
        NOTE(review): the root prompt (index 6) falls through to False, as
        in the original -- confirm that is intended.
        """
        self.tn = pexpect.spawn('telnet %s %s' % (self.host, port))
        while True:
            index = self.tn.expect(self.opts, self.timeout)
            if index == 0:
                # "Escape character is" banner: wake the line with a newline.
                self.tn.sendline("")
            elif index == 1:
                # Connection refused: free the line on the server, retry.
                # (Bug fix: the original had a stray ':' after this call,
                # which was a syntax error.)
                self.clear_line()
                self.login(port)
            elif index == 2:
                self.tn.sendline(self.port_username)
            elif index == 3:
                self.tn.sendline(self.port_password)
            elif index == 4:
                # Menu "Selection:" prompt -- choose exit.
                self.tn.sendline("x")
            elif index == 5:
                return True
            else:
                return False

    def login2port(self):
        """Log in to the configured port; True on success."""
        # Bug fix: login() takes only the port argument -- the original
        # passed (host, port), which raised TypeError.
        if self.login(self.port):
            return True
        else:
            return False

    def clear_line(self):
        """Clear our line on the terminal server so the port can be reused."""
        # Bug fix: connect to the terminal server itself (default port)
        # instead of passing the host as the port argument.
        if self.login():
            self.send('enable', 'Password', 6)
            self.send(self.server_password, '[0-9a-zA-z\-]*[>#]', 6)
            # Convention: telnet port 20xx maps to terminal-server line xx.
            line = 'clear line %s' % (self.port - 2000)
            self.send(line, '\[confirm\]', 6)
            self.send('', '[0-9a-zA-z\-]*[>#]', 6)
            self.send(line, '\[confirm\]', 6)
            self.send('', '[0-9a-zA-z\-]*[>#]', 6)
            self.tn.close()
            return True
        else:
            print("Failed to clear line.")
            return False

    def send(self, command, pattern, timeout):
        """Send *command* and wait for *pattern*; returns True on timeout."""
        self.tn.sendline(command)
        index = self.tn.expect([pexpect.TIMEOUT, pattern], timeout)
        if index == 0:
            # Bug fix: Python 2 print statement -> print() function.
            print("Failed send command: %s" % command)
            return True
        return False

    def login_out(self):
        """Close the underlying session."""
        self.tn.close()
        return

    def interact(self):
        """Hand control of the session over to the user."""
        self.tn.interact()
        return
class ssh(object):
    """Minimal pexpect wrapper around an interactive ssh session."""

    def __init__(self, ip, username, password, timeout=60):
        # NOTE(review): the password is stored but login() never answers a
        # password prompt -- key-based auth is presumably assumed; confirm.
        self.host = ip
        self.user = username
        self.pw = password
        self.opts = ['[0-9a-zA-z\-]*[>#]',
                     pexpect.EOF,
                     pexpect.TIMEOUT]
        self.timeout = timeout

    def login(self):
        """Spawn the ssh session; True when a shell prompt is reached."""
        self.tn = pexpect.spawn('ssh %s@%s' % (self.user, self.host))
        # The original wrapped this in `while True`, but every branch
        # returned on the first iteration; a single expect is equivalent.
        return self.tn.expect(self.opts, self.timeout) == 0

    def send(self, command, pattern, timeout=60):
        """Send *command* and wait for *pattern*; returns True on timeout."""
        self.tn.sendline(command)
        index = self.tn.expect([pexpect.TIMEOUT, pattern], timeout)
        if index == 0:
            # Bug fix: Python 2 print statement -> print() function.
            print("Failed send command: %s" % command)
            return True
        return False

    def login_out(self):
        """Close the underlying session."""
        self.tn.close()
        return

    def interact(self):
        """Hand control of the session over to the user."""
        self.tn.interact()
        return
class setup(object):
    """Bundle the power, console and ssh connections for one test platform."""

    def __init__(self, platform):
        # NOTE(review): *platform* is currently unused; the addresses below
        # are hard-coded for a single setup.
        # Bug fix: IP addresses must be strings -- the bare 10.74.126.5
        # literals were syntax errors.
        self.power_ip = '10.74.126.5'
        self.power_port = 2008
        self.console_ip = '10.74.126.5'
        self.console_port = 2008
        self.ssh_ip = '10.124.11.108'
        self.ssh_user = 'diag'
        self.ssh_pw = ''

    def power_cycle(self):
        """Clear the console line, then reconnect to the power-control port."""
        self.power_cycle_terminal = telnet(self.power_ip, self.power_port)
        self.power_cycle_terminal.clear_line()
        self.power_cycle_terminal.login2port()

    def console(self):
        """Connect to the console port."""
        self.tty = telnet(self.console_ip, self.console_port)
        self.tty.login2port()

    def ssh(self):
        """Open the ssh session.

        Bug fix: the session is stored as ``self.ssh_session`` instead of
        ``self.ssh``, which used to shadow this method after the first call.
        """
        self.ssh_session = ssh(self.ssh_ip, self.ssh_user, self.ssh_pw)
        self.ssh_session.login()

    def send_by_ssh(self, command):
        """Run *command* over the ssh session."""
        self.ssh_session.send(command, '[0-9a-zA-z\-]*[>#]')

    def send_by_console(self, command):
        # NOTE(review): despite the name this also sends over ssh, exactly
        # as the original did -- confirm whether self.tty was intended.
        self.ssh_session.send(command, '[0-9a-zA-z\-]*[>#]')

    def interact(self):
        """Drop into interactive mode on the ssh session."""
        self.ssh_session.interact()
def saveLog():
    """Open a fresh timestamped log file for writing and return the handle.

    Bug fix: the Python 2 ``file`` builtin was replaced with ``open``.
    The caller is responsible for closing the returned handle.
    """
    log_file = time.strftime("/auto/crdc_dcbu_diag_users/falu/log/%a_%d_%b_%Y_%H:%M:%S.txt", time.localtime())
    return open(log_file, 'w')
# Script entry point: bring up the ssh session and hand over to the user.
if __name__=="__main__":
    p = setup('Yushan' )
    p.ssh()
    # NOTE(review): send() requires a command argument, so this call as
    # written raises TypeError -- confirm what command was intended here.
    p.send_by_ssh()
    p.interact()
|
999,819 | b218b53e0a559cf414fd71809ef7756ab7b61636 | import os
import torch
from speech_command_classifier.trainer import Trainer
from speech_command_classifier.data import (SpeechCommandDataset,
collate_fn,
ALL_LABELS)
from speech_command_classifier.model import Model
# Paths to the metadata CSVs describing the speech-command dataset splits.
TRAIN_META = '/home/workspace/metadata/metadata_train.csv'
VAL_META = '/home/workspace/metadata/metadata_val.csv'
# Datasets built from the metadata; an empty config uses the defaults.
train_dataset = SpeechCommandDataset(TRAIN_META,
                                     subset='train',
                                     config={})
val_dataset = SpeechCommandDataset(VAL_META,
                                   subset='validation',
                                   config={})
# Tiny batch size keeps these smoke tests fast on CPU.
train_dataloader = torch.utils.data.DataLoader(
    train_dataset,
    batch_size=4,
    shuffle=True,
    collate_fn=collate_fn)
val_dataloader = torch.utils.data.DataLoader(
    val_dataset,
    batch_size=4,
    shuffle=True,
    collate_fn=collate_fn)
model = Model(n_input=1, n_output=len(ALL_LABELS))
# Ensure the output directories the Trainer writes into exist.
os.makedirs('/temp/checkpoints', exist_ok=True)
os.makedirs('/temp/models', exist_ok=True)
def test_trainer_build():
    """Trainer can be constructed with the default (empty) config."""
    trainer = Trainer({},
                      '/temp',
                      train_dataloader,
                      val_dataloader,
                      model,
                      'cpu')
    assert(trainer)
# Checkpoint produced by a previous training run; used to verify resume logic.
CHECKPOINT = '/home/workspace/pretrained/checkpoints/best-accuracy-89432.pth'
def test_trainer_load():
    """Restoring CHECKPOINT recovers the recorded step/epoch counters."""
    trainer = Trainer({},
                      '/temp',
                      train_dataloader,
                      val_dataloader,
                      model,
                      'cpu')
    trainer.load_checkpoints(CHECKPOINT)
    # Counters must match the values encoded in the checkpoint filename.
    assert(trainer.steps == 89432)
    assert(trainer.epochs == 55)
    # NOTE(review): os.removedirs fails on non-empty directories and this
    # cleanup only runs when the asserts pass -- consider a fixture.
    os.removedirs('/temp/checkpoints')
    os.removedirs('/temp/models')
|
999,820 | 93147e70a60d3ff156698cca1013a074d22c323d | from sproc import sproc
import numpy as np
import time,sys
# Stress test: round-trip numpy arrays through the sproc C++ vector bindings
# for ~20 seconds and check that nothing is lost in conversion.
t0 = time.time()
verbose=False
ctr=0
if 'verbose' in sys.argv:
    verbose=True
# This loop test 1D and 2D array conversions.
while 1:
    # Test 1D float array
    np_array = np.random.random(size=(1000000)).astype(np.float32)
    # generate C++ vector
    cpp_vec = sproc.pyutil.as_float32_vector(np_array)
    # generate np array back from C++ vector
    ap_array = sproc.pyutil.as_ndarray(cpp_vec)
    # if verbose, print out the size and mean
    if verbose:
        print(len(np_array),len(cpp_vec),len(ap_array))
        print(np.mean(np_array),np.mean(cpp_vec),np.mean(ap_array))
    # assert: since this is only conversion, neither size nor mean value should change
    assert len(np_array) == len(cpp_vec) and len(np_array) == len(ap_array)
    assert np.mean(np_array) == np.mean(ap_array)
    # Test 1D bool array
    np_brray = (np_array < 0.5)
    # generate C++ vector
    cpp_bec = sproc.pyutil.as_bool_vector(np_brray)
    # generate np array back from C++ vector
    ap_brray = sproc.pyutil.as_ndarray(cpp_bec)
    if verbose:
        print(np_brray.sum(),ap_brray.sum())
    # assert: make sure same elements are true/false
    assert (np_brray == ap_brray).sum() == len(np_brray)
    # Test 2D float array
    np_array_2d = np.random.random(size=(1000,1000)).astype(np.float32)
    # generate C++ vector
    cpp_vec_2d = sproc.pyutil.as_float32_vector_2d(np_array_2d)
    # generate np array back from C++ vector
    ap_array_2d = sproc.pyutil.as_ndarray(cpp_vec_2d)
    # if verbose, print out the size and mean
    if verbose:
        print(len(np_array_2d),len(cpp_vec),len(ap_array_2d))
        print(np.mean(np_array_2d),np.mean(cpp_vec_2d),np.mean(ap_array_2d))
    # assert: since this is only conversion, neither size nor mean value should change
    assert len(np_array_2d) == len(cpp_vec_2d) and len(np_array_2d) == len(ap_array_2d)
    assert np_array_2d.mean() == ap_array_2d.mean()
    # Test 2D bool array
    np_brray_2d = (np_array_2d < 0.5)
    # generate C++ vector
    cpp_bec_2d = sproc.pyutil.as_bool_vector_2d(np_brray_2d)
    # generate np array back from C++ vector
    ap_brray_2d = sproc.pyutil.as_ndarray(cpp_bec_2d)
    if verbose:
        print(np_brray_2d.sum(),ap_brray_2d.sum())
    # assert: make sure same elements are true/false
    assert (np_brray_2d == ap_brray_2d).sum() == np.prod(np_brray_2d.shape)
    t1 = time.time()
    ctr+=1
    # Stop after roughly 20 seconds of wall-clock time.
    if t1 - t0 > 20: break
print('Tested',ctr,'times during',t1-t0,'seconds!')
999,821 | 3e4e79c2689f92b4a7f1eccb1cf2cdebf62b0d98 | # -*- coding: utf-8 -*-
"""
Created on Thu Jun 13 17:04:32 2019
@author: corra
"""
import pandas as pd
import pyreadstat
import pymongo
from pymongo import MongoClient
# Connect to the local MongoDB instance.
client = MongoClient('localhost', 27017)
#--------- Import db from sav ---------------------------------------
# Read each World Values Survey wave, applying the SPSS value labels.
df_WV2, meta_WV2 = pyreadstat.read_sav("WV2.sav", apply_value_formats=True)
df_WV3, meta_WV3 = pyreadstat.read_sav("WV3.sav", apply_value_formats=True)
df_WV4, meta_WV4 = pyreadstat.read_sav("WV4.sav", apply_value_formats=True)
df_WV5, meta_WV5 = pyreadstat.read_sav("WV5.sav", apply_value_formats=True)
df_WV6, meta_WV6 = pyreadstat.read_sav("WV6.sav", apply_value_formats=True)
#--------- call local host and create new data-----------------------
db = client.new_data
spss = db.spss
#--------- labels -----------------------------------------------------
# Replace the cryptic SPSS column codes with their human-readable labels.
df_WV2.columns = meta_WV2.column_labels
df_WV3.columns = meta_WV3.column_labels
df_WV4.columns = meta_WV4.column_labels
df_WV5.columns = meta_WV5.column_labels
df_WV6.columns = meta_WV6.column_labels
#--------------orient to records---------------------------------------
# Insert one document per survey respondent, wave by wave.
df_WV2_records_2 = df_WV2.to_dict(orient = 'records')
spss = db.spss.insert_many(df_WV2_records_2)
df_WV3_records_3 = df_WV3.to_dict(orient = 'records')
spss = db.spss.insert_many(df_WV3_records_3)
df_WV4_records_4 = df_WV4.to_dict(orient = 'records')
spss = db.spss.insert_many(df_WV4_records_4)
df_WV5_records_5 = df_WV5.to_dict(orient = 'records')
spss = db.spss.insert_many(df_WV5_records_5)
df_WV6_records_6 = df_WV6.to_dict(orient = 'records')
spss = db.spss.insert_many(df_WV6_records_6)
#---------- aggregate ------------------------------------------------
# Group documents by country, then join each country key back onto the
# collection itself.
# NOTE(review): localField 'Country/field' does not match the grouped key
# 'Country/region' above -- confirm which field name is correct.
spss = db.spss.aggregate([
    {
        '$group': {
            '_id': '$Country/region'
        }
    }, {
        '$unwind': {
            'path': '$Country/region'
        }
    }, {
        '$lookup': {
            'from': 'spss',
            'localField': 'Country/field',
            'foreignField': 'Country',
            'as': 'CountryFeatures'
        }
    }
])
999,822 | 8f1a164cceb19ead679fd70d92dae4b639025f72 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 19 13:31:21 2018
@author: rgugg
"""
from numpy import ndarray
import numpy as np
import artacs.tools as tools
import logging
logger = logging.Logger(__name__)
# %%
class StepwiseRemover():
    """Iteratively estimate and subtract a periodic artifact from a signal.

    The signal is cut into artifact periods at several phase offsets
    ("seeds"); the largest PCA component across periods serves as the
    artifact template, which is subtracted until its residual amplitude
    falls below *epsilon*.
    """
    def __init__(self, fs=1000, freq=None, period_steps=2,
                 epsilon=0.01, max_iterations=10, verbose=True):
        # fs: sampling rate in Hz; freq: artifact frequency in Hz.
        self.verbose = verbose
        self.true_fs = fs
        self.freq = freq
        if freq is not None:
            self.true_period = fs/freq
            # Resampling is needed whenever one artifact period is not an
            # integer number of samples.
            self.resample_flag = (self.true_period != int(fs/freq))
        else:
            self.true_period = None
        self.epsilon = epsilon
        self.max_iterations = max_iterations
        self.period_steps = period_steps
    def calc_seeds(self, period):
        'derive seedpoints for starting the cutting into periods'
        # Evenly spaced offsets in [0, period); the endpoint is excluded.
        seeds = np.unique(np.linspace(0, period, self.period_steps+1,
                                      dtype='int32'))
        seeds = seeds[seeds!=period]
        return seeds
    def inbound_resample(self, indata):
        'resample so that (artifact_period* artifact_frequency) is an integer'
        if self.resample_flag:
            period = int(np.ceil(self.true_period))
            fs = int(np.ceil(period * self.freq))
            data = tools.resample_by_fs(indata,
                                        up=fs,
                                        down=self.true_fs,
                                        axis=0)
            # Remember the original length so outbound_resample can undo this.
            self.sample_count = indata.shape[0]
        else:
            data = indata
            fs = self.true_fs
            period = int(self.true_period)
        return data, period, fs
    def outbound_resample(self, outdata, fs):
        'reverse an earlier resampling, if it was necessary'
        if self.resample_flag:
            outdata = tools.resample_by_count(outdata,
                                              self.sample_count,
                                              axis=0)
        return outdata
    def prepare_data(self, indata):
        'resample and derive seedpoints'
        # NaN samples are dropped before processing.
        valid_data = indata[np.invert(np.isnan(indata))]
        data, period, fs = self.inbound_resample(valid_data)
        seeds = self.calc_seeds(period)
        return data, period, fs, seeds
    def __call__(self, indata:ndarray) -> ndarray:
        # Convenience: a remover instance is directly callable.
        return self.process(indata)
    def process(self, indata:ndarray):
        'process all channels of a dataset'
        if self.true_period is None:
            print('Invalid period length, skipping artifact removal')
            return indata
        # Accept 1-D (single channel) or 2-D (channels x samples) input.
        if len(indata.shape) == 1:
            num_channels, num_samples = 1, indata.shape[0]
            indata = np.atleast_2d(indata)
        elif len(indata.shape) == 2:
            num_channels, num_samples = indata.shape
        else:
            raise ValueError('Unspecified dimensionality of the dataset')
        outdata = np.empty((indata.shape))
        outdata.fill(np.nan)
        if self.verbose:
            print('[',end='')
        for chan_idx, chan_data in enumerate(indata):
            outdata[chan_idx,:] = self.process_channel(chan_data)
            if self.verbose:
                # One dot per finished channel as a progress indicator.
                print('.',end='')
        if self.verbose:
            print(']',end='\n')
        return np.squeeze(outdata)
    def process_channel(self, indata:ndarray) -> ndarray:
        'process a single channels of data'
        if self.true_period is None:
            print('Invalid period length, skipping artifact removal')
            return indata
        data, period, fs, seeds = self.prepare_data(indata)
        # One output column per seed, plus one for the time-reversed pass.
        outdata = np.empty((data.shape[0], seeds.shape[0]+1))
        outdata.fill(np.nan)
        for seed_idx, seed in enumerate(seeds):
            idx, fdata = self._process(data, period, fs, seed)
            outdata[idx, seed_idx] = fdata
        missing_part = idx[-1]+period != outdata.shape[0]
        if missing_part: #perform filtering in time-reversed data
            idx, fdata = self._process(data[::-1], period, fs, seed=0)
            outdata[outdata.shape[0]-idx[-1]-1:, -1] = fdata[::-1]
        # Average the seed-wise estimates, ignoring the NaN padding.
        outdata = np.nanmean(outdata, axis=1)
        outdata = self.outbound_resample(outdata, fs)
        return outdata
    def _process(self, data:ndarray, period:int, fs:int, seed:int):
        'subtract the PCA artifact template until its amplitude converges'
        # NOTE(review): max_iterations is incremented but never checked, so a
        # non-converging template loops forever -- confirm intended.
        converged = False
        iteration = 0
        fdata = data.copy()
        while not converged:
            period_data, put_where = tools.signal2periods(fdata,
                                                          period, offset=seed)
            component, score, l = tools.pca_largest(period_data)
            template = tools.comp2trace(component, score, kind='cubic')
            amplitude = tools.estimate_artifact_peakiness(template,
                                                          fs, self.freq)
            iteration += 1
            if amplitude < self.epsilon:
                converged = True
            else:
                fdata[put_where] -= template
        return put_where, fdata[put_where]
|
999,823 | cac173dbf9d43b7c89577e8bf43395d51ca96e6d | /home/vikassharma/anaconda3/lib/python3.6/sre_constants.py |
999,824 | 81a33e4651aa9f0b37208621e9d05520a1c3c042 | import beeline
import os
from django.apps import AppConfig
class HelloConfig(AppConfig):
    """App config that initialises the Honeycomb beeline on startup."""
    name = 'hello'
    def ready(self):
        # Called once by Django when the app registry is fully populated.
        beeline.init(
            # Get this via https://ui.honeycomb.io/account after signing up for Honeycomb
            writekey=os.environ.get("HONEYCOMB_API_KEY"),
            api_host=os.environ.get('HONEYCOMB_API_ENDPOINT', 'https://api.honeycomb.io:443'),
            # The name of your app is a good choice to start with
            # dataset='my-django-app', # only needed for classic
            service_name=os.environ.get('SERVICE_NAME', 'my-django-app'),
            debug=True, # enable to see telemetry in console
        )
999,825 | df37dab0f38d4294224fbef310e0d7d423983a29 | import gpt_2_simple as gpt2
from torch.nn.functional import softmax
from transformers import BertForNextSentencePrediction,BertTokenizer
from qnautils import *
from fetch_google import *
import nltk
nltk.download('vader_lexicon')
from nltk.sentiment.vader import SentimentIntensityAnalyzer
sid = SentimentIntensityAnalyzer()
import re
def generate_candidates(input, sess):
    """Sample candidate continuations for *input* from the fine-tuned GPT-2.

    Each sample is truncated at the first '.', stripped of the end-of-text
    marker and of any leading non-word characters.
    """
    # NOTE: the parameter name shadows the builtin `input`; kept for
    # interface compatibility with existing callers.
    print("GPT2 generating for :", input)
    samples = gpt2.generate(sess,
                            length=100,
                            run_name='run1_topical_token',
                            return_as_list=True,
                            temperature=0.7,
                            prefix=input,
                            nsamples=15,
                            truncate='.',
                            batch_size=5,
                            top_k=5,
                            include_prefix=False)
    marker = '<|endoftext|>'
    return [re.sub(r"^\W+", "", sample.replace(marker, '').lstrip())
            for sample in samples]
def top_result(seq_A, seq_B, model, tokenizer):
    """Pick the candidate in *seq_B* most likely to follow *seq_A*.

    Uses BERT next-sentence-prediction logits; returns the best candidate
    only if its "is next sentence" probability is at least 0.97, else -1.
    """
    best_response = seq_B[0]
    best_prob = -1
    for candidate in seq_B:
        encoded = tokenizer.encode_plus(seq_A, text_pair=candidate, return_tensors='pt')
        logits = model(**encoded)[0]
        # Column 0 is the "sentence B follows sentence A" probability.
        probs = softmax(logits, dim=1)
        if probs[0][0] > best_prob:
            best_prob = probs[0][0]
            best_response = candidate
    if best_prob >= 0.97:
        return best_response
    return -1
def master_GPT2(inp,model,tokenizer,sess):
    """Generate a reply for *inp* and score its sentiment.

    Candidates are sampled from GPT-2, filtered to those containing no
    named entities, ranked by BERT next-sentence prediction, and the
    winner is scored with VADER.  Returns (response, compound_polarity).
    """
    generation_cleaned = generate_candidates(inp,sess)
    candidates = []
    for gen in generation_cleaned:
        # Keep only generations without named entities (ner() from qnautils).
        if len(ner(gen)) == 0:
            candidates.append(gen)
    print(candidates)
    res = top_result(inp,candidates,model,tokenizer)
    # NOTE(review): top_result can return -1, in which case
    # polarity_scores() would raise -- confirm upstream guarantees.
    # VADER compound score is in [-1, 1].
    polarity = sid.polarity_scores(res)['compound']
    return res,polarity
def regeneration(inp, words, model, tokenizer, sess):
    """Regenerate a reply until its sentiment is consistent with *words*.

    *words* maps keywords to the polarity ('positive'/'negative'/'neutral')
    they must carry; a response whose keywords all agree is returned.
    """
    while True:
        resp, pol = master_GPT2(inp, model, tokenizer, sess)
        txt, keywords = keys(resp)
        print(resp, pol)
        if pol > 0.0:
            polarity = 'positive'
        elif pol < 0.0:
            polarity = 'negative'
        else:
            polarity = 'neutral'
        # Accept the response only if no known keyword carries the wrong
        # polarity (the original tracked this with a flag variable).
        if not any(word in words and words[word] != polarity
                   for word in keywords):
            return resp
|
999,826 | 04e1064f7fbec8894e253bb3aa885da3f4900402 | from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
from sklearn.preprocessing import MinMaxScaler
import tensorflow as tf
import matplotlib.pylab as plt
from tensorflow.contrib import rnn
from sklearn.model_selection import train_test_split
from pca import PCA
import json
import matplotlib.mlab as mlab
import collections
#将用ppca补全的数据进行预测。
#偏差数据进行预测。
#加上实际的主成分。
def split_dataset(dataset, time_step):
    """Slice a (days, ndim) series into rolling windows of length *time_step*.

    Returns (X, Y): X has shape (days - time_step, time_step, ndim) and Y
    holds the single next-step sample for each window, with shape
    (days - time_step, 1, ndim).
    """
    days, ndim = dataset.shape
    windows = [dataset[start:start + time_step]
               for start in range(days - time_step)]
    targets = [dataset[start + time_step:start + time_step + 1]
               for start in range(days - time_step)]
    return np.array(windows), np.array(targets)
def use_pca(data):
    """Decompose *data* with a 3-component PCA.

    Returns (main_x, rest_x): the reconstruction from the first three
    principal components and the residual, as exposed by the local PCA class.
    """
    pca_obj = PCA(data,3)
    return pca_obj.main_x,pca_obj.rest_x
def get_metrics(y, pred_y):
    """Return (MRE, MAE, RMSE) between ground truth *y* and predictions.

    Zero entries of *y* are replaced by the mean of *y* before the metrics
    are computed, so the relative error is well defined.  Bug fix: the
    original mutated the caller's array in place; this version works on a
    copy and leaves the input untouched.
    """
    y = np.asarray(y, dtype=float).copy()
    pred_y = np.asarray(pred_y, dtype=float)
    y_mean = np.mean(y)
    y[y == 0.0] = y_mean          # avoid division by zero in the MRE
    mre = np.mean(np.abs(y - pred_y) / np.abs(y))
    mae = np.mean(np.abs(y - pred_y))
    rmse = np.sqrt(np.mean(np.square(y - pred_y)))
    return mre, mae, rmse
def flatten(x):
    """Concatenate the elements of the nested iterables in *x* into one list."""
    return [item for inner in x for item in inner]
def print_res_index(realY, predY, func):
    """Compute (mre, mae, rmse) with *func* on the arrays and print them."""
    mre, mae, rmse = func(np.array(realY), np.array(predY))
    for label, value in (('mre:', mre), ('mae:', mae), ('rmse:', rmse)):
        print(label, value)
# f= open("../data/inputationdata/ppca_imputation0050000.txt",'rb')
# Load the PPCA-imputed speed series (a JSON list) and reshape it to
# 53 days x 288 five-minute intervals.
with open(r"../data/imputationdata/ppca_imputation005.txt", encoding="utf-8") as f:
    d=json.load(f)
speed_data=np.array(d)
m = speed_data.reshape(53,-1) # 53*288
data = m
# Split into the 3-component principal part and the residual part.
pca_obj = PCA(data,3)
data_main,data_rest=pca_obj.main_x,pca_obj.rest_x
def drew_hist(lengths):
    """Plot a histogram of *lengths* with a fitted normal density overlay."""
    data = lengths
    mu = np.mean(data)
    sigma = np.std(data)
    # NOTE(review): hist(..., normed=1) and mlab.normpdf were removed in
    # modern matplotlib (use density=True and scipy.stats.norm.pdf); this
    # code targets an older matplotlib release.
    n,bins,patches = plt.hist(data,60,normed=1,histtype='bar',facecolor='darkblue',alpha=0.75)
    plt.title('The distribution of the residuals')
    plt.xlabel('Probability density')
    y = mlab.normpdf(bins,mu,sigma)
    plt.plot(bins,y,'r')
    plt.show()
# Plot the first 2000 samples of the flattened speed series.
one_dim_data = flatten(m)
print(one_dim_data)
plt.plot(one_dim_data[:2000])
plt.ylim((-10,100))
plt.show()
plt.close()
# for i in range(31):
#     drew_hist(data_rest[i])
|
999,827 | 6a17417533bb06a4780f99bd370bcb0c65c72df5 | #!/usr/bin/python
# Cut tree at given %id using an alignment
import os, sys, time, glob
from matchmaker.shmm_shmm_lib import *
def kerf_already_completed(seed_id):
    """
    Return True when kerf output for `seed_id` is already complete.

    Must be run from the kerf results directory.  "Complete" means the
    kerf.summary file exists and the subfamily count implied by its last
    line matches the number of per-subfamily tree (.tre) and MSA (.fa)
    files on disk.

    BUGFIX: the summary file was opened but never closed; it is now read
    inside a `with` block.  The bare `except` around int() was narrowed to
    ValueError.
    """
    kerf_summary_filename = "kerf.summary"
    # Without a summary file kerf has certainly not finished.
    if not os.path.exists(kerf_summary_filename):
        return False
    # Find the last line of the summary file.
    with open(kerf_summary_filename, "r") as kerf_summary_file:
        last_line = ""
        for line in kerf_summary_file:
            last_line = line
    tokens = last_line.split()
    if len(tokens) == 0:
        return False
    # A line of dashes means kerf ran but created no subfamilies.
    if tokens[0].startswith("----"):
        summary_count = 0
    else:
        try:
            # Subfamily ids are 0-based, so the count is last id + 1.
            summary_count = int(tokens[0]) + 1
        except ValueError:
            return False
    # Compare against the number of per-subfamily files actually written.
    tree_count = len(glob.glob("%s.kerf*.tre" % seed_id))
    msa_count = len(glob.glob("%s.kerf*.fa" % seed_id))
    return summary_count == tree_count == msa_count
def main():
    # Usage: <script> <percent_id> <seed_id>
    # Cuts the NJ tree for seed_id at the given percent identity with kerf,
    # then renames kerf's generic subfam* output files to seed-specific names.
    # NOTE(review): Python 2 only -- uses print statements throughout.
    if len(sys.argv) < 3:
        print "Usage: %s <percent_id> <seed_id>" % sys.argv[0]
        sys.exit(0)
    percent_id = int(sys.argv[1])
    seed_id = sys.argv[2]
    # All output goes under <cur_dir>/<seed_id>/kerf<percent_id>/.
    root = "%s/%s" % (cur_dir(), seed_id)
    out_dir = os.path.join(root, "kerf%d" % percent_id)
    make_dir_exist(out_dir)
    os.chdir(out_dir)
    if kerf_already_completed(seed_id):
        print "kerf%d previously completed for %s" % (percent_id, seed_id)
    else:
        # Run kerf on the tree (.nj) and alignment (.a2m) one directory up.
        cmd = '%s "-1" %d ../%s.nj ../%s.a2m' \
            % (kerf_cmd(), percent_id, seed_id, seed_id)
        print "Running:", cmd
        os.system(cmd)
        # Rename subfam<N>.<ext> -> <seed>.kerf<pct>.sf<N>.<ext> via mmv.
        cmd = 'mmv -d "subfam*.*" "%s.kerf%d.sf#1.#2"' % (seed_id, percent_id)
        print "Running:", cmd
        os.system(cmd)
if __name__ == "__main__":
main()
|
999,828 | 348ef8df0e27fe0aed5f558d7650f3c75a14b71d | from libcity.config import ConfigParser
from libcity.data import get_dataset
from libcity.utils import get_executor, get_model, get_logger
from libcity.data.utils import generate_dataloader
# from geopy import distance
from math import radians, cos, sin, asin, sqrt
import numpy as np
import pickle
from collections import defaultdict
from tqdm import tqdm
# import json
f = open('./raw_data/foursquare_cut_one_day.pkl', 'rb')
data = pickle.load(f)
# 要把它的数据放到 Batch 里面
"""
data_neural: {
uid: {
sessions: {
session_id: [
[loc, tim]
]
}
}
}
vid_lookup 来进行距离的计算,所以还是在这里完成 encode 操作吧
"""
data_neural = data['data_neural']
user_set = data['data_neural'].keys()
vid_lookup = data['vid_lookup']
tim_max = 47
pad_item = {
'current_loc': 9296,
'current_tim': tim_max+1
}
def geodistance(lat1, lng1, lat2, lng2):
    """Great-circle (haversine) distance between two lat/lon points,
    in kilometres, rounded to 3 decimal places."""
    lng1, lat1, lng2, lat2 = map(radians, [float(lng1), float(lat1), float(lng2), float(lat2)])
    half_dlat = (lat2 - lat1) / 2
    half_dlon = (lng2 - lng1) / 2
    # Haversine formula; 6371 km is the mean Earth radius.
    h = sin(half_dlat) ** 2 + cos(lat1) * cos(lat2) * sin(half_dlon) ** 2
    meters = 2 * asin(sqrt(h)) * 6371 * 1000
    return round(meters / 1000, 3)
def _create_dilated_rnn_input(current_loc):
    """For each position in a POI sequence, record the index of the
    geographically closest *earlier* POI.

    `current_loc` is a list of POI ids; coordinates come from the
    module-level `vid_lookup` (id -> [lat, lon]).  Returns a list the same
    length as `current_loc` whose entry i is an index into the original
    (un-reversed) sequence; entry 0 stays 0.  The input list is reversed in
    place and reversed back before returning, so it is unchanged on exit.
    """
    current_loc.reverse()
    sequence_length = len(current_loc)
    session_dilated_rnn_input_index = [0] * sequence_length
    for i in range(sequence_length - 1):
        # In the reversed list, everything after position i came *before*
        # the current POI in the original visit order.
        current_poi = current_loc[i]
        poi_before = current_loc[i + 1:]
        # vid_lookup stores [lat, lon]: index 1 is longitude, 0 latitude.
        lon_cur, lat_cur = vid_lookup[current_poi][1], vid_lookup[current_poi][0]
        distance_row_explicit = []
        for target in poi_before:
            lon, lat = vid_lookup[target][1], vid_lookup[target][0]
            distance_row_explicit.append(geodistance(lat_cur, lon_cur, lat, lon))
        index_closet = np.argmin(distance_row_explicit).item()
        # Map the position found in the reversed list back to an index in
        # the original orientation.
        session_dilated_rnn_input_index[sequence_length - i - 1] = sequence_length - 2 - index_closet - i
    current_loc.reverse()
    return session_dilated_rnn_input_index
def _gen_distance_matrix(current_loc, history_loc_central):
    """Distance (km) from the latest POI in `current_loc` to each historical
    session centroid.

    `history_loc_central` is a list of (mean_lat, mean_lon) tuples built by
    the encoding loop below.  Distances are floored at 1 km -- presumably so
    a later weighting/division cannot explode; TODO confirm in the model.
    """
    history_avg_distance = []  # one entry per historical session
    now_loc = current_loc[-1]
    # vid_lookup stores [lat, lon]: index 1 is longitude, 0 latitude.
    lon_cur, lat_cur = vid_lookup[now_loc][1], vid_lookup[now_loc][0]
    for central in history_loc_central:
        # central is (lat, lon)
        dis = geodistance(central[0], central[1], lat_cur, lon_cur)
        if dis < 1:
            dis = 1
        history_avg_distance.append(dis)
    return history_avg_distance
encoded_data = {}
feature_dict = {'history_loc': 'array of int', 'history_tim': 'array of int',
'current_loc': 'int', 'current_tim': 'int', 'dilated_rnn_input_index': 'no_pad_int',
'history_avg_distance': 'no_pad_float',
'target': 'int', 'uid': 'int'}
time_checkin_set = defaultdict(set)
for uid in tqdm(user_set, desc="encoding data"):
history_loc = []
history_tim = []
history_loc_central = []
encoded_trajectories = []
sessions = data_neural[uid]['sessions']
for session_id in sessions.keys():
current_session = sessions[session_id]
current_loc = []
current_tim = []
for p in current_session:
current_loc.append(p[0])
current_tim.append(p[1])
if p[1] > tim_max:
print('tim overleaf error')
break
if p[1] not in time_checkin_set:
time_checkin_set[p[1]] = set()
time_checkin_set[p[1]].add(p[0])
if session_id == 0:
history_loc.append(current_loc)
history_tim.append(current_tim)
lon = []
lat = []
for poi in current_loc:
lon_cur = vid_lookup[poi][1]
lat_cur = vid_lookup[poi][0]
lon.append(lon_cur)
lat.append(lat_cur)
history_loc_central.append((np.mean(lat), np.mean(lon)))
continue
trace = []
target = current_loc[-1]
dilated_rnn_input_index = _create_dilated_rnn_input(current_loc[:-1])
history_avg_distance = _gen_distance_matrix(current_loc[:-1], history_loc_central)
trace.append(history_loc.copy())
trace.append(history_tim.copy())
trace.append(current_loc[:-1])
trace.append(current_tim[:-1])
trace.append(dilated_rnn_input_index)
trace.append(history_avg_distance)
trace.append(target)
trace.append(uid)
encoded_trajectories.append(trace)
history_loc.append(current_loc)
history_tim.append(current_tim)
# calculate current_loc
lon = []
lat = []
for poi in current_loc:
lon_cur, lat_cur = vid_lookup[poi][1], vid_lookup[poi][0]
lon.append(lon_cur)
lat.append(lat_cur)
history_loc_central.append((np.mean(lat), np.mean(lon)))
encoded_data[str(uid)] = encoded_trajectories
sim_matrix = np.zeros((tim_max+1, tim_max+1))
for i in range(tim_max+1):
sim_matrix[i][i] = 1
for j in range(i+1, tim_max+1):
set_i = time_checkin_set[i]
set_j = time_checkin_set[j]
if len(set_i | set_j) != 0:
jaccard_ij = len(set_i & set_j) / len(set_i | set_j)
sim_matrix[i][j] = jaccard_ij
sim_matrix[j][i] = jaccard_ij
# with open('./lstpm_test_data.json', 'w') as f:
# json.dump({
# 'encoded_data': encoded_data,
# 'sim_matrix': sim_matrix
# }, f)
config = ConfigParser('traj_loc_pred', 'LSTPM', 'foursquare_tky', other_args={"history_type": 'cut_off', "gpu_id": 0,
"metrics": ["Recall", "NDCG"], "topk": 5})
logger = get_logger(config)
dataset = get_dataset(config)
dataset.data = {
'encoded_data': encoded_data
}
dataset.pad_item = pad_item
train_data, eval_data, test_data = dataset.divide_data()
train_data, eval_data, test_data = generate_dataloader(train_data, eval_data, test_data,
feature_dict, config['batch_size'],
config['num_workers'], pad_item,
{})
data_feature = {
'loc_size': 9297,
'tim_size': tim_max + 2,
'uid_size': 934,
'loc_pad': 9296,
'tim_pad': tim_max + 1,
'tim_sim_matrix': sim_matrix.tolist()
}
model = get_model(config, data_feature)
# batch = train_data.__iter__().__next__()
# batch.to_tensor(config['device'])
executor = get_executor(config, model)
executor.train(train_data, eval_data)
executor.evaluate(test_data)
|
999,829 | 9842bd54bc0bbcb3c5b1a2c4a8f805d35556ab5e | #!/usr/bin/env python3
x = float(input('Enter the value of x:'))
n = term = 1
result = 1.0
# Approximate e**x with the Maclaurin series: 1 + x + x**2/2! + ...
while n <= 100:
    term *= x / n  # term now holds x**n / n!  ("*=" multiplies in place)
    result += term
    n += 1
    # Stop once the contribution is negligible.  BUGFIX: compare the
    # magnitude -- for negative x the terms alternate in sign and the
    # original `term < 0.0001` test bailed out on the first negative term.
    if abs(term) < 0.0001:
        break
print('No of Times={} and Sum={}'.format(n, result))
|
999,830 | b5b469c162f653faeca55d5d5c1d03526362e914 | from typing import Any, TypeVar
from pathlib import Path
import numpy as np
import numpy.typing as npt
_SCT = TypeVar("_SCT", bound=np.generic, covariant=True)
class SubClass(np.ndarray[Any, np.dtype[_SCT]]): ...
i8: np.int64
A: npt.NDArray[np.float64]
B: SubClass[np.float64]
C: list[int]
def func(i: int, j: int, **kwargs: Any) -> SubClass[np.float64]: ...
reveal_type(np.empty_like(A)) # E: ndarray[Any, dtype[{float64}]]
reveal_type(np.empty_like(B)) # E: SubClass[{float64}]
reveal_type(np.empty_like([1, 1.0])) # E: ndarray[Any, dtype[Any]]
reveal_type(np.empty_like(A, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]]
reveal_type(np.empty_like(A, dtype='c16')) # E: ndarray[Any, dtype[Any]]
reveal_type(np.array(A)) # E: ndarray[Any, dtype[{float64}]]
reveal_type(np.array(B)) # E: ndarray[Any, dtype[{float64}]]
reveal_type(np.array(B, subok=True)) # E: SubClass[{float64}]
reveal_type(np.array([1, 1.0])) # E: ndarray[Any, dtype[Any]]
reveal_type(np.array(A, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]]
reveal_type(np.array(A, dtype='c16')) # E: ndarray[Any, dtype[Any]]
reveal_type(np.array(A, like=A)) # E: ndarray[Any, dtype[{float64}]]
reveal_type(np.zeros([1, 5, 6])) # E: ndarray[Any, dtype[{float64}]]
reveal_type(np.zeros([1, 5, 6], dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]]
reveal_type(np.zeros([1, 5, 6], dtype='c16')) # E: ndarray[Any, dtype[Any]]
reveal_type(np.empty([1, 5, 6])) # E: ndarray[Any, dtype[{float64}]]
reveal_type(np.empty([1, 5, 6], dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]]
reveal_type(np.empty([1, 5, 6], dtype='c16')) # E: ndarray[Any, dtype[Any]]
reveal_type(np.concatenate(A)) # E: ndarray[Any, dtype[{float64}]]
reveal_type(np.concatenate([A, A])) # E: Any
reveal_type(np.concatenate([[1], A])) # E: ndarray[Any, dtype[Any]]
reveal_type(np.concatenate([[1], [1]])) # E: ndarray[Any, dtype[Any]]
reveal_type(np.concatenate((A, A))) # E: ndarray[Any, dtype[{float64}]]
reveal_type(np.concatenate(([1], [1]))) # E: ndarray[Any, dtype[Any]]
reveal_type(np.concatenate([1, 1.0])) # E: ndarray[Any, dtype[Any]]
reveal_type(np.concatenate(A, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]]
reveal_type(np.concatenate(A, dtype='c16')) # E: ndarray[Any, dtype[Any]]
reveal_type(np.concatenate([1, 1.0], out=A)) # E: ndarray[Any, dtype[{float64}]]
reveal_type(np.asarray(A)) # E: ndarray[Any, dtype[{float64}]]
reveal_type(np.asarray(B)) # E: ndarray[Any, dtype[{float64}]]
reveal_type(np.asarray([1, 1.0])) # E: ndarray[Any, dtype[Any]]
reveal_type(np.asarray(A, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]]
reveal_type(np.asarray(A, dtype='c16')) # E: ndarray[Any, dtype[Any]]
reveal_type(np.asanyarray(A)) # E: ndarray[Any, dtype[{float64}]]
reveal_type(np.asanyarray(B)) # E: SubClass[{float64}]
reveal_type(np.asanyarray([1, 1.0])) # E: ndarray[Any, dtype[Any]]
reveal_type(np.asanyarray(A, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]]
reveal_type(np.asanyarray(A, dtype='c16')) # E: ndarray[Any, dtype[Any]]
reveal_type(np.ascontiguousarray(A)) # E: ndarray[Any, dtype[{float64}]]
reveal_type(np.ascontiguousarray(B)) # E: ndarray[Any, dtype[{float64}]]
reveal_type(np.ascontiguousarray([1, 1.0])) # E: ndarray[Any, dtype[Any]]
reveal_type(np.ascontiguousarray(A, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]]
reveal_type(np.ascontiguousarray(A, dtype='c16')) # E: ndarray[Any, dtype[Any]]
reveal_type(np.asfortranarray(A)) # E: ndarray[Any, dtype[{float64}]]
reveal_type(np.asfortranarray(B)) # E: ndarray[Any, dtype[{float64}]]
reveal_type(np.asfortranarray([1, 1.0])) # E: ndarray[Any, dtype[Any]]
reveal_type(np.asfortranarray(A, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]]
reveal_type(np.asfortranarray(A, dtype='c16')) # E: ndarray[Any, dtype[Any]]
reveal_type(np.fromstring("1 1 1", sep=" ")) # E: ndarray[Any, dtype[{float64}]]
reveal_type(np.fromstring(b"1 1 1", sep=" ")) # E: ndarray[Any, dtype[{float64}]]
reveal_type(np.fromstring("1 1 1", dtype=np.int64, sep=" ")) # E: ndarray[Any, dtype[{int64}]]
reveal_type(np.fromstring(b"1 1 1", dtype=np.int64, sep=" ")) # E: ndarray[Any, dtype[{int64}]]
reveal_type(np.fromstring("1 1 1", dtype="c16", sep=" ")) # E: ndarray[Any, dtype[Any]]
reveal_type(np.fromstring(b"1 1 1", dtype="c16", sep=" ")) # E: ndarray[Any, dtype[Any]]
reveal_type(np.fromfile("test.txt", sep=" ")) # E: ndarray[Any, dtype[{float64}]]
reveal_type(np.fromfile("test.txt", dtype=np.int64, sep=" ")) # E: ndarray[Any, dtype[{int64}]]
reveal_type(np.fromfile("test.txt", dtype="c16", sep=" ")) # E: ndarray[Any, dtype[Any]]
with open("test.txt") as f:
reveal_type(np.fromfile(f, sep=" ")) # E: ndarray[Any, dtype[{float64}]]
reveal_type(np.fromfile(b"test.txt", sep=" ")) # E: ndarray[Any, dtype[{float64}]]
reveal_type(np.fromfile(Path("test.txt"), sep=" ")) # E: ndarray[Any, dtype[{float64}]]
reveal_type(np.fromiter("12345", np.float64)) # E: ndarray[Any, dtype[{float64}]]
reveal_type(np.fromiter("12345", float)) # E: ndarray[Any, dtype[Any]]
reveal_type(np.frombuffer(A)) # E: ndarray[Any, dtype[{float64}]]
reveal_type(np.frombuffer(A, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]]
reveal_type(np.frombuffer(A, dtype="c16")) # E: ndarray[Any, dtype[Any]]
reveal_type(np.arange(False, True)) # E: ndarray[Any, dtype[signedinteger[Any]]]
reveal_type(np.arange(10)) # E: ndarray[Any, dtype[signedinteger[Any]]]
reveal_type(np.arange(0, 10, step=2)) # E: ndarray[Any, dtype[signedinteger[Any]]]
reveal_type(np.arange(10.0)) # E: ndarray[Any, dtype[floating[Any]]]
reveal_type(np.arange(start=0, stop=10.0)) # E: ndarray[Any, dtype[floating[Any]]]
reveal_type(np.arange(np.timedelta64(0))) # E: ndarray[Any, dtype[timedelta64]]
reveal_type(np.arange(0, np.timedelta64(10))) # E: ndarray[Any, dtype[timedelta64]]
reveal_type(np.arange(np.datetime64("0"), np.datetime64("10"))) # E: ndarray[Any, dtype[datetime64]]
reveal_type(np.arange(10, dtype=np.float64)) # E: ndarray[Any, dtype[{float64}]]
reveal_type(np.arange(0, 10, step=2, dtype=np.int16)) # E: ndarray[Any, dtype[{int16}]]
reveal_type(np.arange(10, dtype=int)) # E: ndarray[Any, dtype[Any]]
reveal_type(np.arange(0, 10, dtype="f8")) # E: ndarray[Any, dtype[Any]]
reveal_type(np.require(A)) # E: ndarray[Any, dtype[{float64}]]
reveal_type(np.require(B)) # E: SubClass[{float64}]
reveal_type(np.require(B, requirements=None)) # E: SubClass[{float64}]
reveal_type(np.require(B, dtype=int)) # E: ndarray[Any, Any]
reveal_type(np.require(B, requirements="E")) # E: ndarray[Any, Any]
reveal_type(np.require(B, requirements=["ENSUREARRAY"])) # E: ndarray[Any, Any]
reveal_type(np.require(B, requirements={"F", "E"})) # E: ndarray[Any, Any]
reveal_type(np.require(B, requirements=["C", "OWNDATA"])) # E: SubClass[{float64}]
reveal_type(np.require(B, requirements="W")) # E: SubClass[{float64}]
reveal_type(np.require(B, requirements="A")) # E: SubClass[{float64}]
reveal_type(np.require(C)) # E: ndarray[Any, Any]
reveal_type(np.linspace(0, 10)) # E: ndarray[Any, dtype[floating[Any]]]
reveal_type(np.linspace(0, 10j)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
reveal_type(np.linspace(0, 10, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]]
reveal_type(np.linspace(0, 10, dtype=int)) # E: ndarray[Any, dtype[Any]]
reveal_type(np.linspace(0, 10, retstep=True)) # E: Tuple[ndarray[Any, dtype[floating[Any]]], floating[Any]]
reveal_type(np.linspace(0j, 10, retstep=True)) # E: Tuple[ndarray[Any, dtype[complexfloating[Any, Any]]], complexfloating[Any, Any]]
reveal_type(np.linspace(0, 10, retstep=True, dtype=np.int64)) # E: Tuple[ndarray[Any, dtype[{int64}]], {int64}]
reveal_type(np.linspace(0j, 10, retstep=True, dtype=int)) # E: Tuple[ndarray[Any, dtype[Any]], Any]
reveal_type(np.logspace(0, 10)) # E: ndarray[Any, dtype[floating[Any]]]
reveal_type(np.logspace(0, 10j)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
reveal_type(np.logspace(0, 10, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]]
reveal_type(np.logspace(0, 10, dtype=int)) # E: ndarray[Any, dtype[Any]]
reveal_type(np.geomspace(0, 10)) # E: ndarray[Any, dtype[floating[Any]]]
reveal_type(np.geomspace(0, 10j)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
reveal_type(np.geomspace(0, 10, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]]
reveal_type(np.geomspace(0, 10, dtype=int)) # E: ndarray[Any, dtype[Any]]
reveal_type(np.zeros_like(A)) # E: ndarray[Any, dtype[{float64}]]
reveal_type(np.zeros_like(C)) # E: ndarray[Any, dtype[Any]]
reveal_type(np.zeros_like(A, dtype=float)) # E: ndarray[Any, dtype[Any]]
reveal_type(np.zeros_like(B)) # E: SubClass[{float64}]
reveal_type(np.zeros_like(B, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]]
reveal_type(np.ones_like(A)) # E: ndarray[Any, dtype[{float64}]]
reveal_type(np.ones_like(C)) # E: ndarray[Any, dtype[Any]]
reveal_type(np.ones_like(A, dtype=float)) # E: ndarray[Any, dtype[Any]]
reveal_type(np.ones_like(B)) # E: SubClass[{float64}]
reveal_type(np.ones_like(B, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]]
reveal_type(np.full_like(A, i8)) # E: ndarray[Any, dtype[{float64}]]
reveal_type(np.full_like(C, i8)) # E: ndarray[Any, dtype[Any]]
reveal_type(np.full_like(A, i8, dtype=int)) # E: ndarray[Any, dtype[Any]]
reveal_type(np.full_like(B, i8)) # E: SubClass[{float64}]
reveal_type(np.full_like(B, i8, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]]
reveal_type(np.ones(1)) # E: ndarray[Any, dtype[{float64}]]
reveal_type(np.ones([1, 1, 1])) # E: ndarray[Any, dtype[{float64}]]
reveal_type(np.ones(5, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]]
reveal_type(np.ones(5, dtype=int)) # E: ndarray[Any, dtype[Any]]
reveal_type(np.full(1, i8)) # E: ndarray[Any, dtype[Any]]
reveal_type(np.full([1, 1, 1], i8)) # E: ndarray[Any, dtype[Any]]
reveal_type(np.full(1, i8, dtype=np.float64)) # E: ndarray[Any, dtype[{float64}]]
reveal_type(np.full(1, i8, dtype=float)) # E: ndarray[Any, dtype[Any]]
reveal_type(np.indices([1, 2, 3])) # E: ndarray[Any, dtype[{int_}]]
reveal_type(np.indices([1, 2, 3], sparse=True)) # E: tuple[ndarray[Any, dtype[{int_}]], ...]
reveal_type(np.fromfunction(func, (3, 5))) # E: SubClass[{float64}]
reveal_type(np.identity(10)) # E: ndarray[Any, dtype[{float64}]]
reveal_type(np.identity(10, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]]
reveal_type(np.identity(10, dtype=int)) # E: ndarray[Any, dtype[Any]]
reveal_type(np.atleast_1d(A)) # E: ndarray[Any, dtype[{float64}]]
reveal_type(np.atleast_1d(C)) # E: ndarray[Any, dtype[Any]]
reveal_type(np.atleast_1d(A, A)) # E: list[ndarray[Any, dtype[Any]]]
reveal_type(np.atleast_1d(A, C)) # E: list[ndarray[Any, dtype[Any]]]
reveal_type(np.atleast_1d(C, C)) # E: list[ndarray[Any, dtype[Any]]]
reveal_type(np.atleast_2d(A)) # E: ndarray[Any, dtype[{float64}]]
reveal_type(np.atleast_3d(A)) # E: ndarray[Any, dtype[{float64}]]
reveal_type(np.vstack([A, A])) # E: ndarray[Any, Any]
reveal_type(np.vstack([A, A], dtype=np.float64)) # E: ndarray[Any, dtype[{float64}]]
reveal_type(np.vstack([A, C])) # E: ndarray[Any, dtype[Any]]
reveal_type(np.vstack([C, C])) # E: ndarray[Any, dtype[Any]]
reveal_type(np.hstack([A, A])) # E: ndarray[Any, Any]
reveal_type(np.hstack([A, A], dtype=np.float64)) # E: ndarray[Any, dtype[{float64}]]
reveal_type(np.stack([A, A])) # E: Any
reveal_type(np.stack([A, A], dtype=np.float64)) # E: ndarray[Any, dtype[{float64}]]
reveal_type(np.stack([A, C])) # E: ndarray[Any, dtype[Any]]
reveal_type(np.stack([C, C])) # E: ndarray[Any, dtype[Any]]
reveal_type(np.stack([A, A], axis=0)) # E: Any
reveal_type(np.stack([A, A], out=B)) # E: SubClass[{float64}]
reveal_type(np.block([[A, A], [A, A]])) # E: ndarray[Any, dtype[Any]]
reveal_type(np.block(C)) # E: ndarray[Any, dtype[Any]]
|
999,831 | e231cae9783d4e62abaf0001f2a50371ae19bc0b | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on 19.01.2021
@author: Feliks Kiszkurno
"""
import joblib
import os
import settings
import slopestabilityML
import slopestabilitytools
def run_classification(test_training, test_prediction, test_results, clf, clf_name, *, hyperparameters=False, batch_name=''):
    """Train (or reload from cache) classifier `clf` and run prediction.

    Parameters
    ----------
    test_training, test_prediction, test_results : project data structures
        Training set, prediction set(s) and ground-truth results.
    clf : estimator handed to slopestabilityML.classification_train.
    clf_name : str -- also the on-disk cache key (<clf_name>_result.sav).
    hyperparameters, batch_name : keyword-only; not used in this function.

    Returns
    -------
    (results, result_class_summary): `results` holds the 'training' and
    'prediction' dictionaries; `result_class_summary` maps batch name to
    its classification result (empty in the non-batch path).
    """
    clf_result_file_ext = clf_name + '_result.sav'
    clf_result_file = os.path.join(settings.settings['clf_folder'], clf_result_file_ext)

    if clf_name not in settings.settings['clf_trained']:
        # Fresh run: train, package the outputs, and cache them to disk.
        result_class_training, depth_estim_training, depth_true_training, depth_estim_accuracy_training, \
            depth_estim_labels_training, accuracy_score_training, accuracy_labels_training, num_feat = \
            slopestabilityML.classification_train(test_training, test_results, clf, clf_name)

        result_training = {'result_class': result_class_training,
                           'accuracy_score': accuracy_score_training,
                           'accuracy_labels': accuracy_labels_training,
                           'depth_estim': depth_estim_training,
                           'depth_true': depth_true_training,
                           'depth_estim_accuracy': depth_estim_accuracy_training,
                           'depth_estim_labels': depth_estim_labels_training,
                           'num_feat': num_feat}

        settings.settings['clf_trained'].append(clf_name)
        joblib.dump(result_training, clf_result_file)
    else:
        # Reuse the cached training result.
        result_training = joblib.load(clf_result_file)
        # BUGFIX: num_feat was only bound in the training branch, so
        # reloading a cached classifier crashed with NameError below.
        num_feat = result_training['num_feat']

    result_prediction = {}
    result_class_summary = {}

    if settings.settings['use_batches'] is True:
        for batch in test_prediction:
            slopestabilitytools.folder_structure.create_folder_structure(batch_names=[batch])
            result_class, accuracy_labels, accuracy_result, depth_estim, depth_true, \
                depth_estim_accuracy, depth_estim_labels = \
                slopestabilityML.classification_predict(test_prediction[batch], test_results, clf_name, num_feat,
                                                        batch_name=batch)
            result_prediction[batch] = {'result_class': result_class,
                                        'accuracy_score': accuracy_result,
                                        'accuracy_labels': accuracy_labels,
                                        'depth_estim': depth_estim,
                                        'depth_true': depth_true,
                                        'depth_estim_accuracy': depth_estim_accuracy,
                                        'depth_estim_labels': depth_estim_labels,
                                        }
            result_class_summary[batch] = result_class
    else:
        # BUGFIX: this branch called classification_train with two arguments
        # and unpacked seven values from its eight-value return; it mirrors
        # the batch branch above, so it should predict.
        # NOTE(review): confirm classification_predict's signature without
        # batch_name matches this call.
        result_class, accuracy_labels, accuracy_result, depth_estim, depth_true, \
            depth_estim_accuracy, depth_estim_labels = \
            slopestabilityML.classification_predict(test_prediction, test_results, clf_name, num_feat)
        result_prediction['no_batch'] = {'result_class': result_class,
                                         'accuracy_labels': accuracy_labels,
                                         'accuracy_score': accuracy_result,
                                         'depth_estim': depth_estim,
                                         'depth_true': depth_true,
                                         'depth_estim_accuracy': depth_estim_accuracy,
                                         'depth_estim_labels': depth_estim_labels,
                                         }

    results = {'training': result_training,
               'prediction': result_prediction}

    return results, result_class_summary
|
999,832 | 34b70abed22a57f1ef15e37f31387ae9f2e8eb81 | """Enables a more complex manipulation with Runs."""
from sacredboard.app.data import DataStorage
class RunFacade:
    """Enables a more complex manipulation with Runs.

    Thin facade over a DataStorage backend that coordinates multi-DAO
    operations on a run.
    """

    def __init__(self, datastorage: DataStorage):
        """Store the backend DataStorage used for all run operations."""
        self.datastorage = datastorage

    def delete_run(self, run_id):
        """
        Delete run of the given run_id.

        Removes the run's metrics first, then the run record itself.

        :raise NotImplementedError If not supported by the backend.
        :raise DataSourceError General data source error.
        :raise NotFoundError The run was not found. (Some backends may succeed even if the run does not exist.
        """
        ds = self.datastorage
        ds.get_metrics_dao().delete(run_id)
        # TODO: also delete artifacts and resources once their DAOs exist.
        # ds.get_artifact_dao().delete(run_id)
        # ds.get_resource_dao().delete(run_id)
        ds.get_run_dao().delete(run_id)
|
999,833 | 54108f050a42741422285493e0b565160291a0e0 | import os
import pandas as pd
import numpy as np
import torch
from torch.utils.data.dataset import Dataset
class Dataset(object):
    """Loads hour-ahead train/test tables from Excel and windows the inputs
    into `timesteps`-long sequences for sequence models.

    NOTE(review): this class shadows the torch Dataset imported above, and
    energyDataset below therefore inherits *this* class -- presumably
    unintended; confirm before changing.
    """

    def __init__(self, path, timesteps):
        # path: data root containing the hour_ahead/ spreadsheets;
        # timesteps: window length for each input sequence.
        self.data_path = path
        self.timesteps = timesteps
        # Raw input/target tables for the train and test splits.
        self.train_data_raw = pd.read_excel(os.path.join(path, "hour_ahead/train_in.xlsx")).values
        self.train_target_raw = pd.read_excel(os.path.join(path, "hour_ahead/train_out.xlsx")).values
        self.test_data_raw = pd.read_excel(os.path.join(path, "hour_ahead/test_in.xlsx")).values
        self.test_target_raw = pd.read_excel(os.path.join(path, "hour_ahead/test_out.xlsx")).values
        self.train_data, self.train_target = self.process_train()
        self.test_data, self.test_target = self.process_test()
        # Normalisation bounds read from the max_min sheet ("p" columns --
        # presumably power values; confirm against the spreadsheet schema).
        min_max = pd.read_excel(os.path.join(self.data_path, "hour_ahead/max_min.xls"))
        self._min = float(min_max["pmin"][0])
        self._max = float(round(min_max["pmax"][0], 2))

    def process_train(self):
        """Window the raw training rows into overlapping `timesteps`-long
        sequences; target k is the row aligned with the end of window k."""
        train_data, temp_row = list(), list()
        for i in range(len(self.train_data_raw) - (self.timesteps - 1)):
            for j in range(self.timesteps):
                temp_row.append(list(self.train_data_raw[i + j]))
            train_data.append(temp_row)
            temp_row = list()
        # Targets start at index timesteps-1 so they line up with windows.
        train_target = self.train_target_raw[self.timesteps-1:]
        return train_data, train_target

    def process_test(self):
        """Same windowing as process_train, applied to the test split."""
        test_data, temp_row = list(), list()
        for i in range(len(self.test_data_raw) - (self.timesteps - 1)):
            for j in range(self.timesteps):
                temp_row.append(list(self.test_data_raw[i + j]))
            test_data.append(temp_row)
            temp_row = list()
        test_target = self.test_target_raw[self.timesteps-1:]
        return test_data, test_target
class energyDataset(Dataset):
    """Indexable dataset pairing input windows with their targets as
    float32 torch tensors."""

    def __init__(self, data, target):
        self.data = data
        self.target = target

    def __getitem__(self, index):
        sample = torch.from_numpy(self.data[index]).float()
        label = torch.from_numpy(self.target[index]).float()
        return sample, label

    def __len__(self):
        return len(self.data)
999,834 | 75262e0f3b11457043f0420de489125aa5cfb9a9 |
import argparse
import signal
import sys
from src.bfs import BreadthFirstSearch
from src.digraph import Digraph
class SAP(object):
    """Shortest ancestral path (SAP) queries over a digraph."""

    def __init__(self, graph):
        self.graph = graph

    def _ancestor(self, v, w, bfs_v, bfs_w):
        """Common ancestor minimising dist(v, x) + dist(w, x), or -1."""
        # Candidates: every vertex reachable from both v and w.
        # TODO: iterate over v's ancestors only instead of all vertices.
        common = [x for x in range(self.graph.V)
                  if bfs_v.has_path(x) and bfs_w.has_path(x)]
        if not common:
            return -1
        return min(common, key=lambda x: bfs_v.dist_to[x] + bfs_w.dist_to[x])

    def length(self, v, w):
        """Length of the shortest ancestral path between v and w, or -1."""
        bfs_v = BreadthFirstSearch(self.graph, v)
        bfs_w = BreadthFirstSearch(self.graph, w)
        best = self._ancestor(v, w, bfs_v, bfs_w)
        if best == -1:
            return -1
        return bfs_v.dist_to[best] + bfs_w.dist_to[best]

    def ancestor(self, v, w):
        """A common ancestor lying on a shortest ancestral path, or -1."""
        return self._ancestor(v, w,
                              BreadthFirstSearch(self.graph, v),
                              BreadthFirstSearch(self.graph, w))

    def hyperlength(self, vs, ws):
        pass

    def hyperancestor(self, vs, ws):
        pass
if __name__ == '__main__':
    # Exit quietly on Ctrl-C instead of dumping a traceback.
    signal.signal(signal.SIGINT, lambda s, frame: sys.exit(0))
    usage = 'python -m programming.wordnet.sap -f data/digraph1.txt'
    parser = argparse.ArgumentParser(description='SAP', usage=usage)
    parser.add_argument('-f', '--fname')
    args = vars(parser.parse_args())

    graph = Digraph.from_file(args['fname'])
    sap = SAP(graph)

    def process_input(text):
        """Parse a line as a single vertex (int) or a space-separated list."""
        try:
            return int(text)
        except ValueError:
            return [int(i) for i in text.split(" ")]

    # Interactive query loop: read two vertices (or vertex sets) per round.
    while True:
        # BUGFIX: raw_input is Python 2 only; the rest of the file uses
        # Python 3 style (print as a function), so use input().
        v = process_input(input("Insert a vertex or vertices\n"))
        w = process_input(input("Insert a vertex or vertices\n"))
        length = sap.length(v, w)
        ancestor = sap.ancestor(v, w)
        print("length = %d, ancestor = %d" % (length, ancestor))
|
999,835 | d87346980dfdc9be519036c951d13f74dd9586a3 |
import numpy as np
import pandas as pd
import os
import cv2
from sklearn.model_selection import train_test_split
# from keras.applications.vgg19 import VGG19
from keras.applications.resnet50 import ResNet50
from keras.optimizers import *
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential, Model
from keras.layers import Input, Activation, Dropout, Flatten, Dense, GlobalAveragePooling2D
from keras.preprocessing.image import ImageDataGenerator
from keras import optimizers
from keras.callbacks import EarlyStopping, ModelCheckpoint
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
#数値に変換
def trans_from_cup_to_int(name_value):
    """Map a cup-size label to its integer class index (A=0 ... 7).

    Raises ValueError when the label is not a known class.
    """
    labels = ("A", "B", "C", "D", "E", "F", "G", "H以上")
    return labels.index(name_value)
#データの読み込み
def load_data():
    """Read labelled images from ./data/<label>/ directories.

    Each subdirectory name is a cup-size label; every image inside it gets
    that label's integer class.  Returns (x_data, y_label_data) as numpy
    arrays of images and labels.
    """
    print("start loading...")
    path = "data"
    x_data = []
    y_label_data = []
    for name in (d for d in os.listdir(path) if d != '.DS_Store'):
        label_value = trans_from_cup_to_int(name)
        pic_folder_path = path + "/" + name
        pictures = [p for p in os.listdir(pic_folder_path) if p != '.DS_Store']
        for pic_name in pictures:
            img = cv2.imread(pic_folder_path + "/" + pic_name)
            x_data.append(img)
            y_label_data.append(label_value)
    print("loading has finished!")
    return np.array(x_data), np.array(y_label_data)
def resize_picture(images):
    """Resize every image to 224x224 (the ResNet50 input size)."""
    return np.array([cv2.resize(img, dsize=(224, 224)) for img in images])
def build_model():
    """Build a ResNet50 backbone (ImageNet weights, no top) with a
    single-unit regression head, compiled with MAE loss and SGD.

    Returns the compiled Keras model.
    """
    input_tensor = Input(shape=(224, 224, 3))
    resnet = ResNet50(include_top=False, weights='imagenet', input_tensor=input_tensor)
    top_model = Sequential()
    top_model.add(Flatten(input_shape=resnet.output_shape[1:]))
    top_model.add(Dense(1))
    # BUGFIX: Model(input=..., output=...) is the removed Keras 1 kwarg
    # spelling; Keras 2 requires inputs= / outputs=.
    model = Model(inputs=resnet.input, outputs=top_model(resnet.output))
    model.compile(loss='mean_absolute_error',
                  optimizer=optimizers.SGD(lr=1e-3, momentum=0.9))
    return model
def fit_model(model, X_train, Y_train, X_val, Y_val):
    """Fit `model` on the training split with on-the-fly augmentation and
    early stopping on validation loss; returns the trained model."""
    batch_size=16
    epochs = 20
    # Augmentation: everything except blurring, for now.
    datagen = ImageDataGenerator(
        rotation_range=30,
        width_shift_range=0.2,
        height_shift_range=0.2,
        zoom_range=0.3,
        horizontal_flip=True
    )
    # The section below follows https://lp-tech.net/articles/Y56uo
    early_stopping = EarlyStopping(monitor='val_loss', patience=1 , verbose=1)
    model.fit_generator(
        datagen.flow(X_train, Y_train, batch_size=batch_size),
        epochs = epochs,
        validation_data=(X_val,Y_val),
        callbacks=[early_stopping]
    )
    return model
def make_model():
    """Train one model per stratified fold on the module-level X, Y and
    save each to use_models/model<k>.h5.

    Cleanup: removed the unused `scores` list and the manual counter `i`
    that merely duplicated the enumerate index `k_fold`.
    """
    skf = StratifiedKFold(5, random_state=0, shuffle=True)
    for k_fold, (tr_inds, val_inds) in enumerate(skf.split(X, Y)):
        X_train, Y_train = X[tr_inds], Y[tr_inds]
        X_val, Y_val = X[val_inds], Y[val_inds]
        model = build_model()
        model = fit_model(model, X_train, Y_train, X_val, Y_val)
        model.save('use_models/model' + str(k_fold) + '.h5')
        # Free the model before building the next fold's network.
        del model
def make_dir():
    """Ensure the use_models output directory exists (idempotent)."""
    if os.path.exists("use_models"):
        return
    os.mkdir("use_models")
make_dir()
X, Y = load_data()
X = resize_picture(X)
make_model()
|
999,836 | 81d51a290d075e1608ce303e0d0c3f8a6f5e6178 | from shock.handlers import InterSCity
from shock.core import Shock
import importlib
import sys
sck = Shock(InterSCity)
|
999,837 | c6506cd0b3de7f1f649dec17ad5543ddb7f9ad23 | # -*- Mode: python; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 40 -*-
# vim: set filetype=python:
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
EXPORTS.mozilla.dom.cache += [
'Action.h',
'ActorChild.h',
'ActorUtils.h',
'AutoUtils.h',
'Cache.h',
'CacheChild.h',
'CacheOpChild.h',
'CacheOpParent.h',
'CacheParent.h',
'CachePushStreamChild.h',
'CachePushStreamParent.h',
'CacheStorage.h',
'CacheStorageChild.h',
'CacheStorageParent.h',
'CacheStreamControlChild.h',
'CacheStreamControlParent.h',
'Connection.h',
'Context.h',
'DBAction.h',
'DBSchema.h',
'Feature.h',
'FileUtils.h',
'IPCUtils.h',
'Manager.h',
'ManagerId.h',
'PrincipalVerifier.h',
'QuotaClient.h',
'ReadStream.h',
'SavedTypes.h',
'StreamControl.h',
'StreamList.h',
'Types.h',
'TypeUtils.h',
]
UNIFIED_SOURCES += [
'Action.cpp',
'ActorChild.cpp',
'AutoUtils.cpp',
'Cache.cpp',
'CacheChild.cpp',
'CacheOpChild.cpp',
'CacheOpParent.cpp',
'CacheParent.cpp',
'CachePushStreamChild.cpp',
'CachePushStreamParent.cpp',
'CacheStorage.cpp',
'CacheStorageChild.cpp',
'CacheStorageParent.cpp',
'CacheStreamControlChild.cpp',
'CacheStreamControlParent.cpp',
'Connection.cpp',
'Context.cpp',
'DBAction.cpp',
'DBSchema.cpp',
'Feature.cpp',
'FileUtils.cpp',
'Manager.cpp',
'ManagerId.cpp',
'PrincipalVerifier.cpp',
'QuotaClient.cpp',
'ReadStream.cpp',
'StreamControl.cpp',
'StreamList.cpp',
'TypeUtils.cpp',
]
IPDL_SOURCES += [
'CacheTypes.ipdlh',
'PCache.ipdl',
'PCacheOp.ipdl',
'PCachePushStream.ipdl',
'PCacheStorage.ipdl',
'PCacheStreamControl.ipdl',
]
include('/ipc/chromium/chromium-config.mozbuild')
LOCAL_INCLUDES += [
'../workers',
]
FAIL_ON_WARNINGS = True
FINAL_LIBRARY = 'xul'
MOCHITEST_MANIFESTS += [
'test/mochitest/mochitest.ini',
]
|
999,838 | 0179a7c5871d7ae61ee5d657a5a79ac3172e526e | default_app_config = 'messages_app.apps.MessagesConfig'
|
999,839 | c84a97446790a232b3e65ac281c5ead396eecbaf | import sys
import re
try:
import polyinterface
except ImportError:
import pgc_interface as polyinterface
from copy import deepcopy
LOGGER = polyinterface.LOGGER
# Ecobee hvacMode string -> ISY index value.
modeMap = {
    'off': 0,
    'heat': 1,
    'cool': 2,
    'auto': 3
}
# Ecobee comfort setting (climateRef) -> ISY index value.
climateMap = {
    'away': 0,
    'home': 1,
    'sleep': 2,
    'smart1': 3,
    'smart2': 4,
    'smart3': 5,
    'smart4': 6,
    'smart5': 7,
    'smart6': 8,
    'smart7': 9,
    'unknown': 10
}
# Ecobee equipmentStatus entry -> ISY heat/cool state index.
equipmentStatusMap = {
    'off': 0,
    'heatPump': 1,
    'compCool1': 2,
    'heatPump2': 3,
    'heatPump3': 4,
    'compCool2': 5,
    'auxHeat1': 6,
    'auxHeat2': 7,
    'auxHeat3': 8
}
# Compass wind direction string -> ISY index value.
windMap = {
    '0': 0,
    'N': 1,
    'NNE': 2,
    'NE': 3,
    'ENE': 4,
    'E': 5,
    'ESE': 6,
    'SE': 7,
    'SSE': 8,
    'S': 9,
    'SSW': 10,
    'SW': 11,
    'WSW': 12,
    'W': 13,
    'WNW': 14,
    'NW': 15,
    'NNW': 16
}
# Hold type -> ISY index value.
transitionMap = {
    'running': 0,
    'nextTransition': 1,
    'indefinite': 2
}
# Node profile id -> ISY driver definitions.
# Suffix F/C selects Fahrenheit (uom 17) vs Celsius (uom 4) temperature units.
driversMap = {
    'EcobeeF': [
        { 'driver': 'ST', 'value': 0, 'uom': '17' },
        { 'driver': 'CLISPH', 'value': 0, 'uom': '17' },
        { 'driver': 'CLISPC', 'value': 0, 'uom': '17' },
        { 'driver': 'CLIMD', 'value': 0, 'uom': '67' },
        { 'driver': 'CLIHUM', 'value': 0, 'uom': '22' },
        { 'driver': 'CLIHCS', 'value': 0, 'uom': '25' },
        { 'driver': 'CLIFRS', 'value': 0, 'uom': '80' },
        { 'driver': 'GV1', 'value': 0, 'uom': '22' },
        { 'driver': 'CLISMD', 'value': 0, 'uom': '25' },
        { 'driver': 'GV4', 'value': 0, 'uom': '25' },
        { 'driver': 'GV3', 'value': 0, 'uom': '25' },
        { 'driver': 'GV5', 'value': 0, 'uom': '22' },
        { 'driver': 'GV6', 'value': 0, 'uom': '25' },
        { 'driver': 'GV7', 'value': 0, 'uom': '25' }
    ],
    'EcobeeC': [
        { 'driver': 'ST', 'value': 0, 'uom': '4' },
        { 'driver': 'CLISPH', 'value': 0, 'uom': '4' },
        { 'driver': 'CLISPC', 'value': 0, 'uom': '4' },
        { 'driver': 'CLIMD', 'value': 0, 'uom': '67' },
        { 'driver': 'CLIHUM', 'value': 0, 'uom': '22' },
        { 'driver': 'CLIHCS', 'value': 0, 'uom': '25' },
        { 'driver': 'CLIFRS', 'value': 0, 'uom': '80' },
        { 'driver': 'GV1', 'value': 0, 'uom': '22' },
        { 'driver': 'CLISMD', 'value': 0, 'uom': '25' },
        { 'driver': 'GV4', 'value': 0, 'uom': '25' },
        { 'driver': 'GV3', 'value': 0, 'uom': '25' },
        { 'driver': 'GV5', 'value': 0, 'uom': '22' },
        { 'driver': 'GV6', 'value': 0, 'uom': '25' },
        { 'driver': 'GV7', 'value': 0, 'uom': '25' }
    ],
    'EcobeeSensorF': [
        { 'driver': 'ST', 'value': 0, 'uom': '17' },
        { 'driver': 'GV1', 'value': 0, 'uom': '25' }
    ],
    'EcobeeSensorC': [
        { 'driver': 'ST', 'value': 0, 'uom': '4' },
        { 'driver': 'GV1', 'value': 0, 'uom': '25' }
    ],
    'EcobeeWeatherF': [
        { 'driver': 'ST', 'value': 0, 'uom': '17' },
        { 'driver': 'GV1', 'value': 0, 'uom': '22' },
        { 'driver': 'GV2', 'value': 0, 'uom': '22' },
        { 'driver': 'GV3', 'value': 0, 'uom': '17' },
        { 'driver': 'GV4', 'value': 0, 'uom': '17' },
        { 'driver': 'GV5', 'value': 0, 'uom': '48' },
        { 'driver': 'GV6', 'value': 0, 'uom': '25' },
        { 'driver': 'GV7', 'value': 0, 'uom': '25' },
        { 'driver': 'GV8', 'value': 0, 'uom': '25' },
        { 'driver': 'GV9', 'value': 0, 'uom': '25' }
    ],
    'EcobeeWeatherC': [
        { 'driver': 'ST', 'value': 0, 'uom': '4' },
        { 'driver': 'GV1', 'value': 0, 'uom': '22' },
        { 'driver': 'GV2', 'value': 0, 'uom': '22' },
        { 'driver': 'GV3', 'value': 0, 'uom': '4' },
        { 'driver': 'GV4', 'value': 0, 'uom': '4' },
        { 'driver': 'GV5', 'value': 0, 'uom': '48' },
        { 'driver': 'GV6', 'value': 0, 'uom': '25' },
        { 'driver': 'GV7', 'value': 0, 'uom': '25' },
        { 'driver': 'GV8', 'value': 0, 'uom': '25' },
        { 'driver': 'GV9', 'value': 0, 'uom': '25' }
    ],
}
def toC(tempF):
    """Convert Fahrenheit to Celsius, rounded to the nearest 0.5 degree."""
    half_degrees = round(((tempF - 32) / 1.8) * 2)
    return half_degrees / 2
def toF(tempC):
    """Convert Celsius to Fahrenheit, rounded to the nearest whole degree."""
    whole = round(tempC * 1.8) + 32
    return int(whole)
"""
Address scheme:
Devices: n<profile>_t<thermostatId> e.g. n003_t511892759243
Sensors: n<profile>_s<sensor code> e.g. n003_sr6dr
Current Weather: n<profile>_w<thermostatId> e.g. n003_w511892759243
Forecast Weather: n<profile>_f<thermostatId> e.g. n003_f511892759243
"""
class Thermostat(polyinterface.Node):
    """ISY node representing a single Ecobee thermostat."""

    def __init__(self, controller, primary, address, name, revData, fullData, useCelsius):
        super().__init__(controller, primary, address, name)
        self.controller = controller
        self.name = name
        self.tstat = fullData['thermostatList'][0]
        self.program = self.tstat['program']
        self.settings = self.tstat['settings']
        self.useCelsius = useCelsius
        self.type = 'thermostat'
        self.id = 'EcobeeC' if self.useCelsius else 'EcobeeF'
        self.drivers = self._convertDrivers(driversMap[self.id]) if self.controller._cloud else deepcopy(driversMap[self.id])
        self.revData = revData
        self.fullData = fullData

    def start(self):
        self.update(self.revData, self.fullData)

    def update(self, revData, fullData):
        """Refresh all drivers (and child sensor/weather nodes) from a fresh API payload."""
        self.revData = revData
        self.fullData = fullData
        self.tstat = fullData['thermostatList'][0]
        self.program = self.tstat['program']
        events = self.tstat['events']
        equipmentStatus = self.tstat['equipmentStatus'].split(',')
        self.settings = self.tstat['settings']
        runtime = self.tstat['runtime']
        # First recognized running piece of equipment determines the heat/cool state.
        clihcs = 0
        for status in equipmentStatus:
            if status in equipmentStatusMap:
                clihcs = equipmentStatusMap[status]
                break
        # Schedule mode: 0 = running program, 1 = hold until next period, 2 = indefinite hold.
        clismd = 0
        if len(events) > 0 and events[0]['type'] == 'hold' and events[0]['running']:
            clismd = 1 if self.settings['holdAction'] == 'nextPeriod' else 2
        # The API reports temperatures in tenths of a degree Fahrenheit.
        tempCurrent = runtime['actualTemperature'] / 10 if runtime['actualTemperature'] != 0 else 0
        tempHeat = runtime['desiredHeat'] / 10
        tempCool = runtime['desiredCool'] / 10
        if (self.useCelsius):
            tempCurrent = toC(tempCurrent)
            tempHeat = toC(tempHeat)
            tempCool = toC(tempCool)
        updates = {
            'ST': tempCurrent,
            'CLISPH': tempHeat,
            'CLISPC': tempCool,
            'CLIMD': modeMap[self.settings['hvacMode']],
            'CLIHUM': runtime['actualHumidity'],
            'CLIHCS': clihcs,
            'CLIFRS': 1 if 'fan' in equipmentStatus else 0,
            'GV1': runtime['desiredHumidity'],
            'CLISMD': clismd,
            'GV4': self.settings['fanMinOnTime'],
            'GV3': climateMap[self.program['currentClimateRef']],
            'GV5': runtime['desiredDehumidity'],
            'GV6': 1 if self.settings['autoAway'] else 0,
            'GV7': 1 if self.settings['followMeComfort'] else 0
        }
        for key, value in updates.items():
            self.setDriver(key, value)
        # Push fresh data down to the child sensor and weather/forecast nodes.
        for address, node in self.controller.nodes.items():
            if node.primary == self.address and node.type == 'sensor':
                for sensor in self.tstat['remoteSensors']:
                    if 'id' in sensor:
                        # FIX: '\:' is an invalid escape sequence; ':' needs no escaping.
                        sensorId = re.sub(r':', '', sensor['id']).lower()[:12]
                        if node.address == sensorId:
                            node.update(sensor)
            if node.primary == self.address and (node.type == 'weather' or node.type == 'forecast'):
                weather = self.tstat['weather']
                if weather:
                    node.update(weather)

    def query(self, command=None):
        self.reportDrivers()

    def cmdSetPoint(self, cmd):
        """Change the heat or cool set point of the currently-running climate."""
        if cmd['cmd'] == 'CLISPH':
            cmdtype = 'heatTemp'
            driver = 'CLISPH'
        else:
            cmdtype = 'coolTemp'
            driver = 'CLISPC'
        LOGGER.info('Setting {} {} Set Point to {}{}'.format(self.name, cmdtype, cmd['value'], 'C' if self.useCelsius else 'F'))
        currentProgram = deepcopy(self.program)
        for climate in currentProgram['climates']:
            if climate['climateRef'] == currentProgram['currentClimateRef']:
                # Ecobee expects tenths of a degree Fahrenheit.
                if self.useCelsius:
                    climate[cmdtype] = toF(float(cmd['value'])) * 10
                else:
                    climate[cmdtype] = int(cmd['value']) * 10
        if self.controller.ecobeePost(self.address, {'thermostat': {'program': currentProgram}}):
            self.setDriver(driver, cmd['value'])

    def cmdSetMode(self, cmd):
        """Set the thermostat hvac mode (off/heat/cool/auto)."""
        if self.getDriver(cmd['cmd']) != cmd['value']:
            LOGGER.info('Setting Thermostat {} to mode: {}'.format(self.name, [*modeMap][int(cmd['value'])]))
            if self.controller.ecobeePost(self.address, {'thermostat': {'settings': {'hvacMode': [*modeMap][int(cmd['value'])]}}}):
                self.setDriver(cmd['cmd'], cmd['value'])

    def cmdSetScheduleMode(self, cmd):
        """Resume the program (value 0) or set a temperature hold (1 = next transition, 2 = indefinite)."""
        if self.getDriver(cmd['cmd']) != cmd['value']:
            func = {}
            if cmd['value'] == '0':
                func['type'] = 'resumeProgram'
                func['params'] = {
                    'resumeAll': False
                }
            else:
                func['type'] = 'setHold'
                heatHoldTemp = int(self.getDriver('CLISPH'))
                # BUGFIX: the cool hold temperature must come from CLISPC
                # (it was read from CLISPH, duplicating the heat set point).
                coolHoldTemp = int(self.getDriver('CLISPC'))
                if self.useCelsius:
                    # BUGFIX: the converted value was assigned to a misspelled
                    # variable (headHoldTemp), so Celsius values were sent raw.
                    heatHoldTemp = toF(heatHoldTemp)
                    coolHoldTemp = toF(coolHoldTemp)
                func['params'] = {
                    'holdType': 'nextTransition' if cmd['value'] == "1" else 'indefinite',
                    'heatHoldTemp': heatHoldTemp * 10,
                    'coolHoldTemp': coolHoldTemp * 10
                }
            if self.controller.ecobeePost(self.address, {'functions': [func]}):
                self.setDriver('CLISMD', cmd['value'])

    def cmdSetClimate(self, cmd):
        """Hold indefinitely on the selected comfort setting (climate)."""
        if self.getDriver(cmd['cmd']) != cmd['value']:
            command = {
                'functions': [{
                    'type': 'setHold',
                    'params': {
                        'holdType': 'indefinite',
                        'holdClimateRef': [*climateMap][int(cmd['value'])]
                    }
                }]
            }
            if self.controller.ecobeePost(self.address, command):
                self.setDriver(cmd['cmd'], cmd['value'])

    def cmdSetFanOnTime(self, cmd):
        """Set the minimum fan-on time (minutes per hour)."""
        if self.getDriver(cmd['cmd']) != cmd['value']:
            command = {
                'thermostat': {
                    'settings': {
                        'fanMinOnTime': cmd['value']
                    }
                }
            }
            if self.controller.ecobeePost(self.address, command):
                self.setDriver(cmd['cmd'], cmd['value'])

    def cmdSmartHome(self, cmd):
        """Enable/disable the auto-away (smart home/away) feature."""
        if self.getDriver(cmd['cmd']) != cmd['value']:
            command = {
                'thermostat': {
                    'settings': {
                        'autoAway': True if cmd['value'] == '1' else False
                    }
                }
            }
            if self.controller.ecobeePost(self.address, command):
                self.setDriver(cmd['cmd'], cmd['value'])

    def cmdFollowMe(self, cmd):
        """Enable/disable the follow-me comfort feature."""
        if self.getDriver(cmd['cmd']) != cmd['value']:
            command = {
                'thermostat': {
                    'settings': {
                        'followMeComfort': True if cmd['value'] == '1' else False
                    }
                }
            }
            if self.controller.ecobeePost(self.address, command):
                self.setDriver(cmd['cmd'], cmd['value'])

    def setPoint(self, cmd):
        """Nudge the active set point up (BRT) or down (DIM) by cmd['value'] degrees (default 1)."""
        LOGGER.debug(cmd)
        currentProgram = deepcopy(self.program)
        for climate in currentProgram['climates']:
            if climate['climateRef'] == currentProgram['currentClimateRef']:
                cmdtype = 'coolTemp'
                driver = 'CLISPC'
                value = 1
                if self.settings['hvacMode'] == 'heat' or self.settings['hvacMode'] == 'auto':
                    cmdtype = 'heatTemp'
                    driver = 'CLISPH'
                currentValue = float(self.getDriver(driver))
                if 'value' in cmd:
                    value = float(cmd['value'])
                if cmd['cmd'] == 'BRT':
                    newTemp = currentValue + value
                else:
                    newTemp = currentValue - value
                if self.useCelsius:
                    climate[cmdtype] = toF(float(newTemp)) * 10
                else:
                    climate[cmdtype] = int(newTemp) * 10
                LOGGER.debug('{} {} {} {} {}'.format(cmdtype, driver, self.getDriver(driver), newTemp, climate[cmdtype]))
        if self.controller.ecobeePost(self.address, {'thermostat': {'program': currentProgram}}):
            self.setDriver(driver, newTemp)

    def getDriver(self, driver):
        """Return the locally-cached value for a driver (None if unknown)."""
        if driver in self.drivers:
            return self.drivers[driver]['value']

    commands = { 'QUERY': query,
        'CLISPH': cmdSetPoint,
        'CLISPC': cmdSetPoint,
        'CLIMD': cmdSetMode,
        'CLISMD': cmdSetScheduleMode,
        'GV3': cmdSetClimate,
        'GV4': cmdSetFanOnTime,
        'GV6': cmdSmartHome,
        'GV7': cmdFollowMe,
        'BRT': setPoint,
        'DIM': setPoint
    }
class Sensor(polyinterface.Node):
    """Remote temperature/occupancy sensor attached to an Ecobee thermostat."""

    def __init__(self, controller, primary, address, name, useCelsius):
        super().__init__(controller, primary, address, name)
        self.type = 'sensor'
        self.useCelsius = useCelsius
        self.id = 'EcobeeSensorC' if self.useCelsius else 'EcobeeSensorF'
        self.drivers = self._convertDrivers(driversMap[self.id]) if self.controller._cloud else deepcopy(driversMap[self.id])

    def start(self):
        pass

    def update(self, sensor):
        """Map one remoteSensors entry (temperature + occupancy) onto drivers."""
        raw = sensor['capability'][0]['value']
        try:
            reading = int(raw)
        except ValueError:
            # Non-numeric reading (e.g. "unknown") is reported as 0.
            temp = 0
        else:
            temp = reading / 10 if reading != 0 else 0
        if self.useCelsius:
            temp = toC(temp)
        for driver, value in (
            ('ST', temp),
            ('GV1', 1 if sensor['capability'][1]['value'] == "true" else 0),
        ):
            self.setDriver(driver, value)

    def query(self, command=None):
        self.reportDrivers()

    commands = {'QUERY': query, 'STATUS': query}
class Weather(polyinterface.Node):
    """Weather (current conditions) or forecast child node of a thermostat."""

    def __init__(self, controller, primary, address, name, useCelsius, forecast):
        super().__init__(controller, primary, address, name)
        self.type = 'forecast' if forecast else 'weather'
        # Index into weather['forecasts']: 0 = current conditions, 1 = forecast.
        self.forecastNum = 1 if forecast else 0
        self.useCelsius = useCelsius
        self.id = 'EcobeeWeatherC' if self.useCelsius else 'EcobeeWeatherF'
        self.drivers = self._convertDrivers(driversMap[self.id]) if self.controller._cloud else deepcopy(driversMap[self.id])

    def start(self):
        pass

    def update(self, weather):
        """Map one entry of the Ecobee weather payload onto this node's drivers."""
        currentWeather = weather['forecasts'][self.forecastNum]
        windSpeed = 0
        # NOTE(review): forecasts[5] appears to serve as a fallback entry when the
        # current entry reports no wind — confirm against the Ecobee API payload.
        if self.type == 'weather' and currentWeather['windSpeed'] == 0 and weather['forecasts'][5]['windSpeed'] > 0:
            windSpeed = weather['forecasts'][5]['windSpeed']
        else:
            windSpeed = currentWeather['windSpeed']
        # Temperatures arrive in tenths of a degree (Fahrenheit before toC()).
        tempCurrent = currentWeather['temperature'] / 10 if currentWeather['temperature'] != 0 else 0
        tempHeat = currentWeather['tempHigh'] / 10 if currentWeather['tempHigh'] != 0 else 0
        tempCool = currentWeather['tempLow'] / 10 if currentWeather['tempLow'] != 0 else 0
        if self.useCelsius:
            tempCurrent = toC(tempCurrent)
            tempHeat = toC(tempHeat)
            tempCool = toC(tempCool)
        updates = {
            'ST': tempCurrent,
            'GV1': currentWeather['relativeHumidity'],
            'GV2': currentWeather['pop'],
            'GV3': tempHeat,
            'GV4': tempCool,
            'GV5': windSpeed,
            'GV6': windMap[currentWeather['windDirection']],
            # -5002 is presumably Ecobee's "no data" sentinel for sky cover — TODO confirm.
            'GV7': weather['forecasts'][5]['sky'] if currentWeather['sky'] == -5002 else currentWeather['sky'],
            'GV8': currentWeather['weatherSymbol'],
            'GV9': currentWeather['weatherSymbol']
        }
        for key, value in updates.items():
            self.setDriver(key, value)

    def query(self, command=None):
        self.reportDrivers()

    commands = {'QUERY': query, 'STATUS': query}
999,840 | dbd1a272ec5d073dd7685945e5d8ed798287d899 | from project.card.card import Card
class CardRepository:
    """In-memory store of Card objects, unique by name."""

    def __init__(self):
        self.count = 0
        self.cards = []

    def add(self, card: Card):
        """Add a card; raise ValueError if one with the same name is already stored."""
        # Direct membership test instead of the old try/[0]/except-IndexError dance.
        if any(c.name == card.name for c in self.cards):
            raise ValueError(f"Card {card.name} already exists!")
        self.count += 1
        self.cards.append(card)

    def remove(self, card):
        """Remove the card with the given name.

        Raises ValueError for an empty name and IndexError (via find) if absent.
        """
        if card == "":
            raise ValueError("Card cannot be an empty string!")
        self.cards.remove(self.find(card))
        self.count -= 1

    def find(self, card):
        """Return the stored card with the given name (IndexError if absent)."""
        return [c for c in self.cards if c.name == card][0]
999,841 | e4de16f7ac887921969c02c0f312d6eae47b87b6 | import serial
import time
def main():
    """Poll the Arduino forever, issuing the four status commands once a second."""
    ser = serial.Serial('/dev/ttyACM0', 115200)
    delay = 1
    probes = (
        ('2;', 'hello'),
        ('3, Foobar;', 'echo'),
        ('4;', 'get temp'),
        ('5;', 'get status'),
    )
    while True:
        for command, _purpose in probes:
            ask_arduino(ser, command, delay)
        time.sleep(1)
def ask_arduino(ser, command, delay):
    """Send one command string to the Arduino, wait `delay` ms, and print the reply.

    :param ser: an open serial.Serial connection
    :param command: command string, e.g. '4;'
    :param delay: time to wait for the reply, in milliseconds
    """
    delay = delay / 1000.
    # BUGFIX: pyserial on Python 3 requires bytes; encode() is a no-op-safe
    # round trip on Python 2 str as well.
    ser.write(command.encode())
    time.sleep(delay)
    reply = ser.readline().strip()
    if isinstance(reply, bytes):
        reply = reply.decode(errors='replace')
    # BUGFIX: print was a Python-2 statement; the call form works on both 2 and 3.
    print("asked {} >> {}".format(command, reply))
# Run the polling loop only when executed as a script.
if __name__ == "__main__":
    main()
|
999,842 | 87f32dd63699d6a347d6ff86322abc92c941c438 | # Generated by Django 3.0.8 on 2020-12-18 10:34
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds a free-text remarks field to
    # process_2_2 and re-labels two choice fields on process_2.

    dependencies = [
        ('processes', '0089_auto_20201218_1330'),
    ]

    operations = [
        migrations.AddField(
            model_name='process_2_2',
            name='q19',
            field=models.TextField(blank=True, verbose_name='Remarks'),
        ),
        migrations.AlterField(
            model_name='process_2',
            name='q16',
            field=models.IntegerField(choices=[(0, 'Yes'), (1, 'No'), (2, 'Not Relavant')], verbose_name='16.Three way valve is OK and fit to Hydraulic circut.'),
        ),
        migrations.AlterField(
            model_name='process_2',
            name='q20',
            field=models.IntegerField(choices=[(0, 'Yes'), (1, 'No'), (2, 'Not Relavant')], verbose_name='20.Cooling type and size are OK for assembly.'),
        ),
    ]
|
999,843 | 0ea084b4e495b2a90916d77633d69748e6cb709b | from unittest import TestCase
from day_one.part_two import get_location
class TestTrackPath(TestCase):
    """Day 1 part two: following the directions must end at (4, 0)."""

    def test_track_path(self):
        start_orientation = 0, 1
        start_position = 0, 0
        moves = [('R', 8), ('R', 4), ('R', 4), ('R', 8)]
        final = get_location(start_orientation, start_position, moves)
        self.assertEqual((4, 0), final)
999,844 | 544b294a3da3f61dab2e56283ab4558a3adefa43 | import unittest
from rational import Rational
class SamRationalMethods(unittest.TestCase):
    """Sam's tests for Rational multiplication and float conversion."""

    def test_zero(self):
        # Sam: zero times zero must stay 0/1.
        zero = Rational(0, 1)
        product = zero * zero
        self.assertEqual(product.n, 0)
        self.assertEqual(product.d, 1)

    def test_neg(self):
        # Sam: check the stored sign after multiplying two negatives.
        minus_one = Rational(-1, 1)
        product = minus_one * minus_one
        self.assertEqual(product.n, -1)
        self.assertEqual(product.d, 1)

    def test_float(self):
        # Sam: float(Rational()) must be a genuine float, not an int or a str.
        value = float(Rational())
        self.assertFalse(isinstance(value, int))
        self.assertFalse(isinstance(value, str))
999,845 | 14e4ee648d96bde40839dc3e4acc7d0b669e88d9 | # -*- coding: utf-8 -*-
"""Random graph permutation functions."""
import random
from typing import Optional
from pybel import BELGraph
from pybel.struct.pipeline import transformation
__all__ = [
'random_by_nodes',
'random_by_edges',
'shuffle_node_data',
'shuffle_relations',
]
@transformation
def random_by_nodes(graph: BELGraph, percentage: Optional[float] = None) -> BELGraph:
    """Get a random graph by inducing over a percentage of the original nodes.

    :param graph: A BEL graph
    :param percentage: The percentage of nodes to keep (defaults to 0.9)
    """
    if percentage is None:
        percentage = 0.9
    assert 0 < percentage <= 1
    # FIX: random.sample requires a sequence; graph.nodes() is a view on
    # networkx >= 2, so materialize it first.
    nodes = list(graph.nodes())
    n = int(len(nodes) * percentage)
    subnodes = random.sample(nodes, n)
    return graph.subgraph(subnodes)
@transformation
def random_by_edges(graph: BELGraph, percentage: Optional[float] = None) -> BELGraph:
    """Get a random graph by keeping a certain percentage of original edges.

    :param graph: A BEL graph
    :param percentage: What percentage of edges to take (defaults to 0.9)
    """
    if percentage is None:
        percentage = 0.9
    assert 0 < percentage <= 1
    number_edges = int(graph.number_of_edges() * percentage)
    rv = BELGraph()
    # FIX: random.sample requires a sequence; edges() is a view on networkx >= 2.
    rv.add_edges_from(random.sample(list(graph.edges(keys=True, data=True)), number_edges))
    return rv
@transformation
def shuffle_node_data(graph: BELGraph, key: str, percentage: Optional[float] = None) -> BELGraph:
    """Shuffle the graphs' nodes' data.

    Useful for permutation testing. For example, shuffling differential gene expression values.

    :param graph: A BEL graph
    :param key: The node data dictionary key
    :param percentage: What percentage of possible swaps to make (defaults to 0.3)
    """
    if percentage is None:
        percentage = 0.3
    assert 0 < percentage <= 1
    n = graph.number_of_nodes()
    swaps = int(percentage * n * (n - 1) / 2)
    result: BELGraph = graph.copy()
    for _ in range(swaps):
        # FIX: Graph.node was removed in networkx 2.4; sample from the nodes view.
        s, t = random.sample(list(result.nodes), 2)
        result.nodes[s][key], result.nodes[t][key] = result.nodes[t][key], result.nodes[s][key]
    return result
@transformation
def shuffle_relations(graph: BELGraph, percentage: Optional[float] = None) -> BELGraph:
    """Shuffle the relations.

    Useful for permutation testing.

    :param graph: A BEL graph
    :param percentage: What percentage of possible swaps to make (defaults to 0.3)
    """
    if percentage is None:
        percentage = 0.3
    # FIX: annotation corrected from Optional[str] — a float fraction is expected.
    assert 0 < percentage <= 1
    n = graph.number_of_edges()
    swaps = int(percentage * n * (n - 1) / 2)
    rv = graph.copy()
    # FIX: materialize the edge view so random.sample can index it.
    edges = list(rv.edges(keys=True))
    for _ in range(swaps):
        (s1, t1, k1), (s2, t2, k2) = random.sample(edges, 2)
        rv[s1][t1][k1], rv[s2][t2][k2] = rv[s2][t2][k2], rv[s1][t1][k1]
    return rv
|
999,846 | f6322c218b142049ad06402ae6adda82a510fcb4 | '''
Given two arrays A and B of equal size, the advantage of A with respect to B is the number of indices i for which A[i] > B[i].
Return any permutation of A that maximizes its advantage with respect to B.
Example 1:
Input: A = [2,7,11,15], B = [1,10,4,11]
Output: [2,11,7,15]
Example 2:
Input: A = [12,24,8,32], B = [13,25,32,11]
Output: [24,32,8,12]
Note:
1 <= A.length = B.length <= 10000
0 <= A[i] <= 10^9
0 <= B[i] <= 10^9
'''
class Solution:
    def advantageCount(self, A: List[int], B: List[int]) -> List[int]:
        """Greedily pair each B value with the smallest A value that beats it;
        values of A that beat nothing are handed out afterwards as filler."""
        a_sorted = sorted(A)
        b_sorted = sorted(B)
        beats = {b: [] for b in B}
        leftovers = []
        idx = 0
        for value in a_sorted:
            if value > b_sorted[idx]:
                beats[b_sorted[idx]].append(value)
                idx += 1
            else:
                leftovers.append(value)
        result = []
        for b in B:
            result.append(beats[b].pop() if beats[b] else leftovers.pop())
        return result
|
999,847 | 4de230941425caedb4ce81c793b654f8099aeada | import random
import unittest
from utils.client import ServerTest
class TestBasicFunctionality(ServerTest):
    """Protocol smoke tests: CON handshake, LSD listing, and CRE/DEL lifecycle."""

    def setUp(self):
        self.connect_to_server()

    def test_con(self):
        # CON should give us an ID
        self.send("CON\n")
        self.assertTrue(self.recv().startswith("OK ID "))

    def test_double_con(self):
        # CON is only valid as the very first operation
        self.do_handshake()
        self.send("CON\n")
        self.assertEqual("ERROR 71 EPROTO\n", self.recv())

    def test_invalid_con(self):
        # Anything other than CON is rejected before the handshake
        self.send("HEY\n")
        self.assertEqual("ERROR 71 EPROTO\n", self.recv())

    def test_lsd_empty(self):
        # LSD on an empty filesystem should work
        self.do_handshake()
        self.send("LSD\n")
        self.assertEqual("OK \n", self.recv())

    def test_cre_lsd_del(self):
        self.do_handshake()
        # Files can only be created once
        self.send("CRE testing1\n")
        self.assertEqual("OK\n", self.recv())
        self.send("CRE testing1\n")
        self.assertEqual("ERROR 17 EEXIST\n", self.recv())
        # LSD should list the files
        self.send("LSD\n")
        self.assertEqual("OK testing1\n", self.recv())
        self.send("CRE testing2\n")
        self.assertEqual("OK\n", self.recv())
        self.send("LSD\n")
        self.assertIn(self.recv(), ("OK testing1 testing2\n", "OK testing2 testing1\n"))
        # Files can only be deleted once
        self.send("DEL testing1\n")
        self.assertEqual("OK\n", self.recv())
        self.send("DEL testing1\n")
        self.assertEqual("ERROR 2 ENOENT\n", self.recv())
        # Deleted files should not show up on LSD
        self.send("LSD\n")
        self.assertEqual("OK testing2\n", self.recv())
        self.send("DEL testing2\n")
        self.assertEqual("OK\n", self.recv())
        self.send("LSD\n")
        self.assertEqual("OK \n", self.recv())

    def tearDown(self):
        self.disconnect()
|
class Cat():
    """
    This docstring will discuss how to interact with our Cat class.

    Parameters:
    name: str
    laziness_level: int
        This holds how lazy the cat is on a scale of 1 to 10.
    location: str
        This holds where the cat is currently located at.
    """

    def __init__(self, name, laziness_level, location):
        self.name = name
        # BUGFIX: the constructor previously ignored its arguments and
        # hard-coded laziness_level = 5 and location = "home".
        self.laziness_level = laziness_level
        self.location = location

    def sense_earthquake(self, earthquake):
        """Checks if the cat senses an earthquake, and if so changes the cat's
        location to 'gone dark'.

        Args:
            earthquake: boolean
                Holds a True or False as to whether there was an earthquake.
        """
        if earthquake:
            self.location = "gone dark"
            return self.name + " has gone dark!"
        return self.location
class Car():
    """
    Docstring >> This class defines a car.

    Args:
        model: string
        color: string
        tank_size: integer
    """

    def __init__(self, model, color, tank_size):
        self.model = model
        # BUGFIX: the constructor previously ignored its arguments and
        # hard-coded color = "red" and tank_size = 20.
        self.color = color
        self.tank_size = tank_size
        self.gallons_of_gas = self.tank_size  # We're assuming its tank is full.

    def drive(self, miles_driven):
        """
        Docstring >> This method will be used to steer the car to drive.

        Args:
            miles_driven: integer
        """
        self.miles_driven = miles_driven
        # The car burns one gallon every ten miles.
        self.gallons_of_gas -= miles_driven / 10
class Plane():
    """
    Docstring >> This class defines the general framework for a plane.

    Args:
        destination: string
        departure_city: string
        trip_distance: integer
    """

    def __init__(self, destination, departure_city, trip_distance):
        self.destination = destination
        self.departure_city = departure_city
        self.trip_distance = trip_distance

    def fly(self):
        """Swap destination and departure city and print the (old) pair."""
        leg = (self.destination, self.departure_city)
        self.departure_city, self.destination = leg
        print(leg)
|
999,849 | 0dde7525014f2d1ae8f99779b835061768b422ad | import numpy as np
def mse(target, y):
    """
    Mean Square Error

    :param target: number of samples * 1, numpy array
    :param y: number of samples * 1, numpy array
    :return: scalar mean squared error over all samples
    """
    # BUGFIX: the previous version took .mean(axis=1)[0], which returned only
    # the FIRST sample's squared error instead of averaging over all samples.
    return ((target - y) ** 2).mean()
999,850 | 68340b302f7c18fba483bc77a0ffaac4c73654a8 | from time import sleep
cidade = str(input('Informe o nome de uma cidade: ')).strip()
print('Vamos verificar se a cidade digitada começa com o nome Santo...')
sleep(5)
# BUGFIX: the old condition `cidade[0] in 'Santo' and cidade.upper() or
# cidade.lower()` was always truthy for any non-empty input; the intent is a
# case-insensitive "starts with Santo" check.
if cidade.lower().startswith('santo'):
    print('Sim')
else:
    print('Não')
999,851 | ab9ddff054e6a0fcaefb63dbb6ff4c989730b06a | import numpy as np
import matplotlib.pyplot as plt
from scipy.special import gamma
from genetica import *
N = 20
names = ["r_histograma.pdf", "p_histograma.pdf"]
data = ["r_poblacion.dat", "p_poblacion.dat"]

# Run N independent simulations, appending each final (r, p) value to the
# corresponding comma-separated .dat file.
for i in range(N):
    expresion = Expresion()
    expresion.resuelve()
    r = expresion.rt[-1]
    p = expresion.pt[-1]
    results = [r, p]
    for (result, item) in zip(results, data):
        with open(item, 'a') as f:
            style = ", %d"
            if i == 0:
                style = "%d"
            f.write(style % result)

# Histogram each series; overlay the Poisson pmf on the r histogram.
for (item, name) in zip(data, names):
    result = np.genfromtxt(item, delimiter=",")
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # FIX: `normed` was removed from matplotlib's hist(); `density` is the
    # drop-in replacement with the same meaning.
    ax.hist(result, density=True)
    if name == "r_histograma.pdf":
        result = np.sort(result)
        coeff = expresion.kr / expresion.gamma_r
        factorial = gamma(result + 1)
        p = coeff**result * np.exp(-coeff) / factorial
        ax.plot(result, p, "-o")
    ax.set_xlabel("Final amount")
    ax.set_ylabel("Relative frequency")
    fig.savefig(name)
    plt.close()
999,852 | 7965f5ebccb2b787222d605a434b532547559740 | from .vertex import Vertex
from .player import Player
class Settlement:
    """A level-1 settlement placed by a player on a board vertex."""

    def __init__(self, vertex, player):
        assert isinstance(vertex, Vertex)
        self.vertex = vertex
        assert isinstance(player, Player)
        self.player = player
        # Every settlement starts at level 1 (upgrades happen elsewhere).
        self.level = 1
999,853 | c576c814afa89f278f4c4e9dd2fe0f4499cf55ff | import json
import zmq
import os
import random
import pickle
import time
import threading
import requests
from collections import deque, defaultdict
import logging
logging.basicConfig(level=logging.WARNING)
from kazoo.client import KazooClient
from zmq.eventloop import ioloop, zmqstream
class Messager:
    """ZeroMQ PAIR-socket mesh messenger; peers discover each other's addresses
    through ZooKeeper, and links follow the topology loaded from topo/k3.json."""

    def __init__(self):
        self.loop = ioloop.IOLoop.current()
        # load topography from file
        self._loadTopology()
        self.context = zmq.Context()
        self.zk = KazooClient()
        self.zk.start(timeout=1000)
        # send own address to zookeeper
        # you should delete the path of addr/1 each running time, use the command of ./zkCli.sh
        if not self.zk.exists("/addr/%s" % self.getOwnName()):
            self.zk.create("/addr/%s" % self.getOwnName(), bytes(self.getOwnAddr(), "UTF-8"))
        # get IP addresses from zookeeper
        all_names = {k for k in self.topo.keys() if k.isnumeric() and k != self.getOwnName()}
        self.addresses = {}
        bak_all_names = []
        for name in all_names:
            ex_b = self.zk.exists("/addr/%s" % name)
            if not ex_b:
                continue
            cv = threading.Condition()
            cv.acquire()

            def wakeup_watch(stat):
                cv.acquire()
                cv.notify()
                cv.release()

            ex = self.zk.exists(("/addr/%s" % name), wakeup_watch)
            if not ex:
                #cv.wait()
                continue
            (addr, _) = self.zk.get("/addr/%s" % name)
            self.addresses[name] = addr.decode("UTF-8")
            bak_all_names.append(name)
        print('All nodes checked in to Zookeeper.')
        # create PAIR connections for each network link
        self.neighbors = {}
        self._allNodes = {}
        for name in bak_all_names:
            # lower device establishes connection to avoid duplicate
            print(name)
            socket = self.context.socket(zmq.PAIR)
            if int(name) < int(self.getOwnName()):
                socket.connect(self.getAddr(name))
            else:
                socket.bind('tcp://*:%d' % self._findPortFor(name))
            self._allNodes[name] = socket
            if name in self.topo[self.getOwnName()]:
                self.neighbors[name] = socket
        self.resetSyncInbox()
        self.sync_cv = threading.Condition()
        self.streams = {}

    def _loadTopology(self):
        """Load the topology, preferring the hosted copy when running on a device."""
        if 'ON_DEVICE' in os.environ:
            try:
                r = requests.get('http://162.243.59.63:58982/topo.json')
                self.topo = json.loads(r.text)
                return
            # FIX: narrowed from a bare except, which also swallowed
            # SystemExit/KeyboardInterrupt.
            except Exception:
                import traceback
                traceback.print_exc()
        with open('k3.json') as data_file:
            self.topo = json.load(data_file)

    def reloadTopology(self):
        """Re-read the topology and recompute the neighbor set (sockets are kept)."""
        self._loadTopology()
        self.neighbors = {k: v for (k, v) in self._allNodes.items() if k in self.topo[self.getOwnName()]}

    def _findPortFor(self, name):
        """Deterministic port for the link between self and `name`, same on both ends."""
        a = min(int(self.getOwnName()), int(name))
        b = max(int(self.getOwnName()), int(name))
        # Cantor pairing function. FIX: use floor division so the port is an int
        # (the product of two consecutive integers is always even, so nothing
        # is lost); true division returned a float.
        return 9000 + (a + b) * (a + b + 1) // 2 + a

    @staticmethod
    def getOwnName():
        if not 'DEVICE_ID' in os.environ:
            raise RuntimeError('var DEVICE_ID not defined')
        return os.environ['DEVICE_ID']

    @staticmethod
    def getOwnAddr():
        if 'ON_DEVICE' not in os.environ:
            # Discover our outbound IP by opening a UDP socket toward a public DNS.
            import socket
            return 'tcp://%s' % [(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close()) for
                s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]
        else:
            return 'tcp://localhost'

    def getNeighbors(self):
        """
        Iterate over names with getNeighbors().keys()
        :return: the dict of names to sockets
        """
        return self.neighbors

    def getRandomNeighbor(self):
        """
        :return: the name of a randomly selected neighbor.
        """
        return random.choice(list(self.getNeighbors().keys()))

    def getOwnLocation(self):
        return self.getLocation(self.getOwnName())

    def getLocation(self, id):
        return tuple(self.topo['geo'][str(id)])

    def getTarget(self):
        if 'to' in self.topo:
            return self.getLocation(self.topo['to'])
        return tuple(self.topo['geo']['target'])

    def startIsMe(self):
        return str(self.topo['from']) == self.getOwnName()

    def getAddr(self, name):
        """
        :raises RuntimeError if network topology forbids this link
        :param name: the name to query
        :return: the address of the specified node
        """
        if name in self.addresses:
            addr = '%s:%d' % (self.addresses[name], self._findPortFor(name))
            return addr
        else:
            raise RuntimeError('No link between me and %s in topology!' % name)

    def getSocket(self, name):
        """
        :raises RuntimeError if network topology forbids this link
        :param name: the name to query
        :return: the socket between self and other node
        """
        if name in self.neighbors:
            return self.neighbors[name]
        else:
            raise RuntimeError('No link between me and %s in topology!' % name)

    def sendMessage(self, name, message):
        """
        Sends a message to a node.
        :param name: node to send to
        :param message: arbitrary python object to be sent
        """
        # Stagger sends by device id to reduce collisions at startup.
        time.sleep(int(self.getOwnName()) * 0.05)
        self.getSocket(name).send_pyobj(message)

    def waitForMessageFromAllNeighbors(self, sync):
        """
        Blocks until a message has been received from every neighbor
        :param sync: value of sync field (iteration number, say)
        """
        self.flush()  # if we don't flush we might somehow block before actually sending messages!
        self.sync_cv.acquire()
        while True:
            nameset = {message['from'] for message in self.sync[sync]}
            if len(nameset) >= len(self.neighbors):
                break
            self.sync_cv.wait()
        self.sync_cv.release()

    def registerCallbackSync(self):
        """
        Registers a callback for synchronous algorithms, which will
        queue up a message based on its "sync" field.
        Put the iteration number in the field, for example.
        """
        def callbacksync(message, name):
            self.flush()
            message['from'] = name
            self.sync_cv.acquire()
            self.sync[message['sync']].append(message)
            self.sync_cv.notifyAll()
            self.sync_cv.release()
        for name in self.neighbors:
            # FIX: compare string values with !=, not identity (`is not`),
            # which only worked by accident of string interning.
            if name != self.getOwnName():
                self.registerCallbackIndividual(callbacksync, name)

    def resetSyncInbox(self):
        self.sync = defaultdict(deque)

    def registerCallbackIndividual(self, callbackFunction, name):
        """
        Register an async callback on a specific neighbor. Use registerCallback() to register on all neighbors.
        :param callbackFunction: function taking two parameters, message and name.
        :param name: neighbor we're registering on
        """
        socket = self.getSocket(name)
        stream = zmqstream.ZMQStream(socket, self.loop)

        def decorator(data):
            message = pickle.loads(b''.join(data))
            callbackFunction(message, name)

        stream.on_recv(decorator, copy=True)
        self.streams[name] = stream

    def registerCallback(self, callbackFunction):
        """
        Register an async callback on every neighbor.
        :param callbackFunction: function taking two parameters, message (arbitrary python object) and
        name (name of node who sent this message)
        """
        for name in self.neighbors:
            print(name)
            # FIX: value comparison instead of identity comparison (see above).
            if name != self.getOwnName():
                self.registerCallbackIndividual(callbackFunction, name)

    def start(self):
        """
        Starts the event loop in a background thread. Call this once, after having set the callback.
        """
        threading.Thread(target=self.loop.start).start()
        time.sleep(1)

    def stop(self):
        self.loop.stop()

    def flush(self):
        for stream in self.streams.values():
            stream.flush()
999,854 | 501e61dbe23128cb91315cb6044927788e534af6 | import os, time, datetime
import numpy as np
from sklearn.neural_network import MLPClassifier as MLP
from sklearn.model_selection import train_test_split as tts
from sklearn.metrics import confusion_matrix as CM
from sklearn.metrics import accuracy_score as AS
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multiclass import OneVsOneClassifier
import tensorflow as tf
import tensorflow_probability as tfp
from scipy.stats import mode
import src.bnn.hmc as hmc
import src.bnn.bnn as bnn
def get_data():
    """Load mat.csv and split it into train/test.

    Every 5th row starting at index 2 is test, the rest train. The last
    column is the label. Returns (X_train, X_test, Y_train, Y_test).
    """
    mat = np.loadtxt('mat.csv', delimiter=',', dtype='float32')
    test_idx = np.arange(2, len(mat), 5)
    train_idx = np.setdiff1d(np.arange(len(mat)), test_idx)
    train, test = mat[train_idx], mat[test_idx]
    return train[:, :-1], test[:, :-1], train[:, -1], test[:, -1]
def accuracy(Y_true, Y_pred):
    """Mean and std of overall accuracy, derived from the averaged confusion matrices over all prediction samples."""
    cm_mean, cm_std = confusion(Y_true, Y_pred)
    total = np.sum(cm_mean)
    return np.trace(cm_mean) / total, np.trace(cm_std) / total
def confusion(Y_true, Y_pred):
    """Element-wise mean and std of the confusion matrix of each prediction sample in Y_pred (one sample per posterior draw)."""
    cms = np.array([CM(Y_true, Y) for Y in Y_pred])
    return np.mean(cms, axis=0), np.std(cms, axis=0)
def output(test_num, cm_mean, cm_std, acc_mean, acc_std):
    """Write confusion-matrix and accuracy statistics (one per line) to results<test_num>.txt."""
    lines = [np.array2string(cm_mean),
             np.array2string(cm_std),
             str(acc_mean),
             str(acc_std)]
    with open("results" + str(test_num) + ".txt", "w") as fh:
        fh.write("\n".join(lines) + "\n")
# One-hot NN
def OHNN(architecture, X_train, X_test, Y_train, Y_test):
    """Train a plain multi-layer perceptron and write its results to results1.txt."""
    model = MLP(hidden_layer_sizes=architecture)
    model.fit(X_train, Y_train)
    predicted = model.predict(X_test)
    cm = CM(Y_test, predicted)
    # A single point estimate has no spread, hence zero std.
    output(1, cm, np.zeros_like(cm), AS(Y_test, predicted), 0)
# One vs Rest NN
def OVRNN(architecture, X_train, X_test, Y_train, Y_test):
    """Train a one-vs-rest MLP ensemble and write its results to results2.txt."""
    model = OneVsRestClassifier(MLP(hidden_layer_sizes=architecture))
    model.fit(X_train, Y_train)
    predicted = model.predict(X_test)
    cm = CM(Y_test, predicted)
    output(2, cm, np.zeros_like(cm), AS(Y_test, predicted), 0)
# One vs One NN
def OVONN(architecture, X_train, X_test, Y_train, Y_test):
    """Train a one-vs-one MLP ensemble and write its results to results3.txt."""
    model = OneVsOneClassifier(MLP(hidden_layer_sizes=architecture))
    model.fit(X_train, Y_train)
    predicted = model.predict(X_test)
    cm = CM(Y_test, predicted)
    output(3, cm, np.zeros_like(cm), AS(Y_test, predicted), 0)
# One-hot BNN
def OHBNN(architecture, X_train, X_test, Y_train, Y_test):
    """Train a one-hot Bayesian NN via HMC, write results4.txt, and print wall-clock time."""
    start = time.time()
    prior = tfp.distributions.Normal(0, 1.0)
    # Full layer spec: input width + hidden layers + one output unit per class.
    architecture = [len(X_train[0])] + list(architecture) + [int(np.max(Y_train)) + 1]
    init = bnn.get_random_initial_state(prior, prior, architecture, overdisp=1.0)
    Y_pred, trace, k, s = hmc.hmc_predict(prior, prior, init, X_train, Y_train, X_test)
    # NOTE(review): log_prob is computed but never used below.
    log_prob = trace[0].inner_results.accepted_results.target_log_prob.numpy()
    # Collapse class probabilities to a hard label per posterior draw.
    Y_pred = tf.math.argmax(Y_pred, axis=2)
    cm_mean, cm_std = confusion(Y_test, Y_pred)
    acc_mean, acc_std = accuracy(Y_test, Y_pred)
    end = time.time()
    print(str(datetime.timedelta(seconds=int(end-start))))
    output(4, cm_mean, cm_std, acc_mean, acc_std)
# One vs Rest BNN
def OVRBNN(architecture, X_train, X_test, Y_train, Y_test):
    """Train one binary Bayesian NN per class (class i vs rest) via HMC and write results5.txt."""
    prior = tfp.distributions.Normal(0, 1.0)
    # Binary output head for every one-vs-rest model.
    architecture = [len(X_train[0])] + list(architecture) + [2]
    Y_pred = 0
    for i in range(int(np.max(Y_train)) + 1):
        # BUG FIX: binarize with a mask computed up front. The old code set
        # labels == i to 1 and then zeroed labels != i, which also zeroed
        # the freshly written 1s for every class except i == 1.
        Y_train_temp = np.where(Y_train == i, 1, 0).astype(Y_train.dtype)
        init = bnn.get_random_initial_state(prior, prior, architecture, overdisp=1.0)
        Y_pred_temp, trace, k, s = hmc.hmc_predict(prior, prior, init, X_train, Y_train_temp, X_test)
        Y_pred_temp = np.array(tf.math.argmax(Y_pred_temp, axis=2))
        # Map positive predictions back to class label i.
        Y_pred_temp[Y_pred_temp == 1] = i
        if (i == 0):
            Y_pred = Y_pred_temp
        else:
            # Later classes overwrite only where they predicted positive.
            Y_pred[np.nonzero(Y_pred_temp)] = Y_pred_temp[np.nonzero(Y_pred_temp)]
    cm_mean, cm_std = confusion(Y_test, Y_pred)
    acc_mean, acc_std = accuracy(Y_test, Y_pred)
    output(5, cm_mean, cm_std, acc_mean, acc_std)
# One vs One BNN
def OVOBNN(architecture, X_train, X_test, Y_train, Y_test):
    """Train one Bayesian NN per class pair via HMC, majority-vote the pairwise predictions, and write results6.txt."""
    prior = tfp.distributions.Normal(0, 1.0)
    architecture = [len(X_train[0])] + list(architecture) + [2]
    Y_preds = []
    n_classes = int(np.max(Y_train)) + 1
    for i in range(n_classes):
        for j in range(i+1, n_classes):
            # Restrict training data to the two classes of this pair.
            traini = np.where(Y_train == i)
            trainj = np.where(Y_train == j)
            X_train_temp = np.concatenate((X_train[traini], X_train[trainj]))
            Y_train_temp = np.concatenate((Y_train[traini], Y_train[trainj]))
            # Relabel {i, j} -> {0, 1} for the binary head (safe order:
            # values here are only i and j, and j > i >= 0).
            Y_train_temp[Y_train_temp == i] = 0
            Y_train_temp[Y_train_temp == j] = 1
            init = bnn.get_random_initial_state(prior, prior, architecture, overdisp=1.0)
            Y_pred_temp, trace, k, s = hmc.hmc_predict(prior, prior, init, X_train_temp, Y_train_temp, X_test)
            Y_pred_temp = np.array(tf.math.argmax(Y_pred_temp, axis=2))
            # BUG FIX: map {0, 1} back to {i, j} in one masked step. The old
            # sequential writes (0->i then 1->j) corrupted the result when
            # i == 1: freshly written 1s were remapped to j.
            Y_pred_temp = np.where(Y_pred_temp == 0, i, j)
            Y_preds.append(Y_pred_temp)
    # Majority vote across all pairwise predictors.
    Y_pred = mode(np.array(Y_preds), axis=0)[0][0]
    cm_mean, cm_std = confusion(Y_test, Y_pred)
    acc_mean, acc_std = accuracy(Y_test, Y_pred)
    output(6, cm_mean, cm_std, acc_mean, acc_std)
# Experiment driver: run every classifier variant on the same fixed split.
architecture = (300, 100)  # hidden layer sizes shared by all models
X_train, X_test, Y_train, Y_test = get_data()
OHNN(architecture, X_train, X_test, Y_train, Y_test)
OVRNN(architecture, X_train, X_test, Y_train, Y_test)
OVONN(architecture, X_train, X_test, Y_train, Y_test)
OHBNN(architecture, X_train, X_test, Y_train, Y_test)
OVRBNN(architecture, X_train, X_test, Y_train, Y_test)
OVOBNN(architecture, X_train, X_test, Y_train, Y_test)
|
999,855 | 93fb63a7e68e3e7451e65ed19c3b5ba4add23003 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated Django migration: creates the AlmacenesCampos table.
    # Field names suggest per-company (empresa) toggles for which warehouse
    # item fields are used/required plus their type and length — verify
    # against the application's admin screens before hand-editing.

    dependencies = [
        ('users', '0008_personajuridica_logo'),
        ('config', '0006_auto_20160307_1514'),
    ]

    operations = [
        migrations.CreateModel(
            name='AlmacenesCampos',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('codigo_usar', models.BooleanField()),
                ('codigo_requerido', models.BooleanField()),
                ('codigo_itemtipo', models.CharField(max_length=100)),
                ('codigo_itemcaractr', models.IntegerField()),
                ('codigo_fabr_usar', models.BooleanField()),
                ('codigo_fabr_reque', models.BooleanField()),
                ('codigo_fabricatipo', models.CharField(max_length=100)),
                ('codigo_fabricacaractr', models.IntegerField()),
                ('descrip_usar', models.BooleanField()),
                ('descrip_requerido', models.BooleanField()),
                ('descrip_tipo', models.CharField(max_length=100)),
                ('descrip_caractr', models.IntegerField()),
                ('caract_espec_usar', models.BooleanField()),
                ('caract_espec_requerid', models.BooleanField()),
                ('caract_espectipo', models.CharField(max_length=100)),
                ('caract_especaractr', models.IntegerField()),
                ('unidad_medid_usar', models.BooleanField()),
                ('unidad_medid_requerido', models.BooleanField()),
                ('unidad_medidatipo', models.CharField(max_length=100)),
                ('unidad_medidacaractr', models.IntegerField()),
                ('imagen_usar', models.BooleanField()),
                ('imagen_requer', models.BooleanField()),
                ('empresa', models.ForeignKey(blank=True, to='users.Personajuridica', null=True)),
            ],
        ),
    ]
|
999,856 | 6a76e20b4e296874b627719cfedc03311937bfba | class Solution:
def countTexts(self, p: str) -> int:
MOD = int(1e9+7)
if not p:
return 0
n = len(p)
f = [0] * (n+50)
f[0],f[1],f[2] = 1,1,2
for i in range(3,n+1):
f[i] = (f[i-1]+f[i-2]+f[i-3]) % MOD
g = [0] * (n+50)
g[0],g[1],g[2],g[3] = 1,1,2,4
for i in range(4,n+1):
g[i] = (g[i-1]+g[i-2]+g[i-3]+g[i-4]) % MOD
ans = 1
l,r = 0,0
while l < n and r < n:
while r < n and p[l] == p[r]:
r += 1
if p[l] == '7' or p[l] == '9':
ans = (ans * g[r-l]) % MOD
else:
ans = (ans * f[r-l]) % MOD
l = r
return ans % MOD |
999,857 | 608b7cef728a512beb8ca7b18e30555ea70f3b25 | #! /usr/bin/env python
# coding=utf-8
#================================================================
# Copyright (C) 2018 * Ltd. All rights reserved.
#
# Editor : VIM
# File name : train.py
# Author : YunYang1994
# Created date: 2018-11-30 15:47:45
# Description :
#
#================================================================
import tensorflow as tf
from core import utils, yolov3
# Hyper-parameters for the training run.
INPUT_SIZE = 416
BATCH_SIZE = 16
EPOCHS = 5000000
LR = 0.0001
SHUFFLE_SIZE = 10000
sess = tf.Session()
# Class names and anchor boxes shipped with the repo.
classes = utils.read_coco_names('./data/coco.names')
num_classes = len(classes)
# file_pattern = "../COCO/tfrecords/coco*.tfrecords"
file_pattern = "./data/train_data/quick_train_data/tfrecords/quick_train_data*.tfrecords"
anchors = utils.get_anchors('./data/yolo_anchors.txt')
is_training = tf.placeholder(dtype=tf.bool, name="phase_train")
# Input pipeline: TFRecords -> parsed examples -> shuffled, batched, prefetched.
dataset = tf.data.TFRecordDataset(filenames = tf.gfile.Glob(file_pattern))
dataset = dataset.map(utils.parser(anchors, num_classes).parser_example, num_parallel_calls = 10)
dataset = dataset.repeat().shuffle(SHUFFLE_SIZE).batch(BATCH_SIZE).prefetch(BATCH_SIZE)
iterator = dataset.make_one_shot_iterator()
example = iterator.get_next()
images, *y_true = example
# Build the YOLOv3 graph: forward pass, loss, and decoded predictions.
model = yolov3.yolov3(num_classes)
with tf.variable_scope('yolov3'):
    y_pred = model.forward(images, is_training=is_training)
    loss = model.compute_loss(y_pred, y_true)
    y_pred = model.predict(y_pred)
optimizer = tf.train.MomentumOptimizer(LR, momentum=0.9)
train_op = optimizer.minimize(loss[0])
saver = tf.train.Saver(max_to_keep=2)
# Plain variables used to surface evaluation metrics into TensorBoard.
rec_tensor = tf.Variable(0.)
prec_tensor = tf.Variable(0.)
mAP_tensor = tf.Variable(0.)
tf.summary.scalar("yolov3/recall", rec_tensor)
tf.summary.scalar("yolov3/precision", prec_tensor)
tf.summary.scalar("yolov3/mAP", mAP_tensor)
tf.summary.scalar("yolov3/total_loss", loss[0])
tf.summary.scalar("loss/coord_loss", loss[1])
tf.summary.scalar("loss/sizes_loss", loss[2])
tf.summary.scalar("loss/confs_loss", loss[3])
tf.summary.scalar("loss/class_loss", loss[4])
write_op = tf.summary.merge_all()
writer_train = tf.summary.FileWriter("./data/log/train", graph=sess.graph)
sess.run(tf.global_variables_initializer())
for epoch in range(EPOCHS):
    run_items = sess.run([train_op, y_pred, y_true] + loss, feed_dict={is_training:True})
    rec, prec, mAP = utils.evaluate(run_items[1], run_items[2], num_classes)
    # NOTE(review): tf.assign creates NEW graph ops on every iteration, so the
    # graph grows and each step gets slower — these assigns should be built
    # once (with placeholders) outside the loop.
    _, _, _, summary = sess.run([tf.assign(rec_tensor, rec),
                                tf.assign(prec_tensor, prec),
                                tf.assign(mAP_tensor, mAP), write_op], feed_dict={is_training:True})
    writer_train.add_summary(summary, global_step=epoch)
    writer_train.flush() # Flushes the event file to disk
    if epoch%1000 == 0: saver.save(sess, save_path="./checkpoint/yolov3.ckpt", global_step=epoch)
    print("=> EPOCH:%10d\ttotal_loss:%7.4f\tloss_coord:%7.4f\tloss_sizes:%7.4f\tloss_confs:%7.4f\tloss_class:%7.4f"
          "\trec:%.2f\tprec:%.2f\tmAP:%.2f"
          %(epoch, run_items[3], run_items[4], run_items[5], run_items[6], run_items[7], rec, prec, mAP))
|
999,858 | ac0d587c2c49e198d5eb7e0df0f488ac05ba7b7b | def testes:
pass
|
999,859 | 15d2b90fb843442863b438d5cdc1418763592d27 | # -*- coding: utf-8 -*-
"""
Created on Wed Feb 19 21:33:53 2020
FUNCTION APPLICATIONS
@author: David
"""
def introduction(firstName, lastName):
    """Print a one-line greeting introducing the given first and last name."""
    print(f"Hello, my name is {firstName} {lastName}")
# Demo: arguments bind positionally, so the order they are passed matters.
introduction("Luke", "Skywalker")
introduction("Jesse", "Quick")
introduction("Clark", "Kent")
print("***************************")
# Same names with first/last swapped — the greeting comes out reversed.
introduction("Skywalker", "Luke")
introduction("Quick", "Jesse")
introduction("Kent", "Clark")
999,860 | 8534fbada7c190705272615f7e0b390bae651815 | __author__ = 'yinjun'
class Solution:
    """
    @param A : a list of integers
    @param target : an integer to be searched
    @return : a list of length 2, [index1, index2]
    """
    def searchRange(self, A, target):
        # Two binary searches over the sorted list A: one biased left for
        # the first occurrence of target, one biased right for the last.
        # Returns [-1, -1] when target is absent or A is empty.
        length = len(A)
        start = 0
        end = length - 1
        if end < 0:
            return [-1, -1]
        while start + 1 < end:
            # BUG FIX: use floor division so the midpoint stays an int on
            # Python 3 (plain / produced a float index there; // is
            # identical for ints on Python 2).
            mid = start + (end - start) // 2
            if A[mid] < target:
                start = mid
            else:
                end = mid
        if A[start] != target and A[end] != target:
            return [-1, -1]
        if A[start] == target and A[end] == target:
            pos1 = min(start, end)
        elif A[end] == target:
            pos1 = end
        elif A[start] == target:
            pos1 = start
        # Second pass: find the last occurrence.
        start = 0
        end = length - 1
        while start + 1 < end:
            mid = start + (end - start) // 2
            if A[mid] > target:
                end = mid
            else:
                start = mid
        if A[start] == target and A[end] == target:
            pos2 = max(start, end)
        elif A[end] == target:
            pos2 = end
        elif A[start] == target:
            pos2 = start
        return [pos1, pos2]
# Manual smoke test (Python 2 print statement; expected output: [3, 4]).
s = Solution()
print s.searchRange([5, 7, 7, 8, 8, 10], 8)
999,861 | b1dc2f454a8c2f1a1902741efde068a7bb108ff5 | import pandas as pd
import numpy as np
class MSDDataset:
    """Loader for the YearPredictionMSD dataset (first 12 feature columns).

    The label is the first CSV column (release year); with `binarize` it
    becomes 1 for years before 2001 and 0 otherwise. The split is
    positional: the first `train_size` rows are train, the rest test.
    """

    def __init__(
        self,
        path='data/YearPredictionMSD.txt',
        normalize=True,
        binarize=True,
        train_size=463715,
        shuffle=True
    ) -> None:
        self.path = path
        self.normalize = normalize
        self.binarize = binarize
        self.train_size = train_size
        self.shuffle = shuffle

    def load(self):
        """Read the CSV at `self.path` and populate the train/test arrays."""
        frame = pd.read_csv(self.path, header=None, usecols=range(13))
        labels = frame[0].to_numpy()
        if self.binarize:
            labels = (labels < 2001).astype(int)
        features = frame.drop(0, axis=1).to_numpy()
        split = self.train_size
        self.X_train, self.y_train = features[:split], labels[:split]
        self.X_test, self.y_test = features[split:], labels[split:]
        if self.normalize:
            # Each split is standardized independently.
            self.X_train = self._normalize(self.X_train)
            self.X_test = self._normalize(self.X_test)
        # Index order used by callers to draw (optionally shuffled) batches.
        self.train_idxs = list(range(self.X_train.shape[0]))
        if self.shuffle:
            np.random.shuffle(self.train_idxs)
        self.dim = features.shape[1]

    def get_train_data(self, idxs):
        """Return the training rows and labels at the given index positions."""
        return np.take(self.X_train, idxs, axis=0), np.take(self.y_train, idxs)

    def get_test_data(self):
        """Return the full held-out split as (X, y)."""
        return self.X_test, self.y_test

    def _normalize(self, x):
        """Standardize columns to zero mean and unit variance."""
        return (x - np.mean(x, axis=0)) / np.std(x, axis=0)
999,862 | e713a6de196f9b576d7874fda15918a397fae431 | import pika
import uuid
import json
from rabbit import Rabbit
class RpcClient(object):
    """Blocking RPC client over RabbitMQ: publishes a request and waits for
    the reply on an exclusive callback queue, correlating request and
    response by a per-call UUID."""
    def __init__(self, routing_key, host="localhost"):
        # routing_key - 'rpc_queue'
        self.routing_key = routing_key
        self.rabbit = Rabbit(host)
        # Exclusive, broker-named (random) queue that receives our replies.
        self.callback_queue_name = self.rabbit.declare_queue(exclusive=True)
        self.rabbit.register_consumer(queue_name=self.callback_queue_name, callback=self.on_response)
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.rabbit.close()
    def on_response(self, ch, method, props, body):
        """
        Invoked for every message arriving on the callback queue.
        :param ch: channel the message arrived on
        :param method: delivery information (which queue it was sent to)
        :param props: message properties (carries the correlation id)
        :param body: raw payload bytes
        :return:
        """
        # Only accept the reply whose correlation id matches our request.
        if self.corr_id == props.correlation_id:
            # Stash the body; call() is polling for this.
            self.response = body
    def call(self, msg):
        """
        Send a dict as an RPC request and block until the reply arrives.
        :param msg: request payload
        :type msg: dict
        :return: dict
        """
        self.response = None
        # Fresh correlation id for this request.
        self.corr_id = str(uuid.uuid4())
        # Publish to the server's queue; headers tell it where/how to reply.
        self.rabbit.channel.basic_publish(exchange='',
                                  routing_key=self.routing_key,
                                  properties=pika.BasicProperties(
                                      # the consumer sends its result back to this queue
                                      reply_to=self.callback_queue_name,
                                      # UUID forwarded to the consumer for correlation
                                      correlation_id=self.corr_id,
                                  ),
                                  # payload must be a string, not a number
                                  body=json.dumps(msg))
        # Poll (non-blocking) until on_response stores the reply.
        while self.response is None:
            self.rabbit.conn.process_data_events()
        return json.loads(str(self.response, encoding="utf-8"))
|
999,863 | 819d9d79aa5f7a72ed7f000bcb5685e5e59e178d | import os
import random
import requests
from bs4 import BeautifulSoup
from flask import Flask,jsonify,request
app= Flask(__name__)
@app.route('/')
def hello():
    # Index route: identifies this service as the chatbot page.
    return '여기는 챗봇 페이지 입니다.'
@app.route('/keyboard')
def keyboard():
    # KakaoTalk bot handshake: advertises the available quick-reply buttons.
    keyboard ={
        "type" : "buttons",
        "buttons" : ["메뉴", "로또", "고양이","영화"]}
    return jsonify(keyboard)
@app.route('/message', methods=['POST']) # Kakao delivers user messages here via POST
def message():
    """Main bot handler: dispatch on the pressed button and reply with text
    (plus an image for the cat and movie commands)."""
    user_msg=request.json['content'] # text of the pressed button / typed message
    img_bool=False
    if user_msg =="메뉴":
        # Lunch suggestion: pick one restaurant at random.
        menu = ["시골길","20층","김가네","소풍김밥"]
        return_msg = random.choice(menu)
    elif user_msg =="로또":
        # Lotto: sample 6 distinct numbers from 1..45.
        lotto=list(range(1,46))
        pick=random.sample(lotto,6)
        return_msg= str(sorted(pick))
    elif user_msg =="고양이":
        img_bool=True
        # Random cat picture from TheCatAPI.
        url ="https://api.thecatapi.com/v1/images/search?mime_types=jpg"
        req=requests.get(url).json()
        return_img=req[0]['url']
        return_msg="예비 집사님"
    elif user_msg == "영화":
        img_bool=True
        # Scrape Naver's currently-playing movie list.
        url = "https://movie.naver.com/movie/running/current.nhn"
        req = requests.get(url).text
        doc=BeautifulSoup(req , 'html.parser') # parse the HTML so it can be queried
        title_tag=doc.select('dt.tit > a')
        star_tag=doc.select('#content > div.article > div > div.lst_wrap > ul > li > dl > dd.star > dl.info_star > dd > div > a > span.num')
        resever_tag=doc.select('#content > div.article > div > div.lst_wrap > ul > li > dl > dd.star > dl.info_exp > dd > div > span.num')
        img_tag =doc.select('div.thumb > a > img')
        #('div.star_t1>a>span.num')
        #('div.star_t1.b_star>span.num') classes separated by spaces are chained with '.'
        # star_list=[]
        # resever_list=[]
        # title_list=[]
        movie_dict={}
        # img_list=[]
        # for i in img_tag:
        #     img_list.append(i['src'])
        # print(img_list)
        # Collect the top 10 movies: title, rating, reservation rate, poster.
        for i in range(0,10):
            movie_dict[i]={
                "title" : title_tag[i].text,
                "star" :star_tag[i].text,
                "resever":resever_tag[i].text,
                "image" : img_tag[i].get('src')
                # "image" : img_tag[i]['src']
            }
        pick_movie = movie_dict[random.randrange(0,10)]
        # NOTE(review): the next assignment is dead — it is immediately
        # overwritten by the %-formatted message below.
        return_msg="제목 :{}, 별점 {}, 예매율"
        return_msg="%s/평점:%s/예매율:%s" % (pick_movie['title'],pick_movie['star'],pick_movie['resever'])
        return_img=pick_movie['image']
    else :
        return_msg = "메뉴만 사용가능!"
    # Build the Kakao reply payload; include a photo block only when an image was set.
    if img_bool==False:
        return_json={
            "message" : {
                "text": return_msg
            },
            "keyboard" : {
                "type" : "buttons",
                "buttons" : ["메뉴", "로또", "고양이","영화"]
            }
        }
    else:
        return_json={
            "message" : {
                "text": return_msg,
                "photo" : {
                    "url":return_img ,
                    "height": 630,
                    "width": 720
                }
            },
            "keyboard" : {
                "type" : "buttons",
                "buttons" : ["메뉴", "로또", "고양이","영화"]
            }
        }
    return jsonify(return_json)
# Bind to all interfaces; host/port overridable via IP/PORT env vars (Cloud9-style hosting).
app.run(host=os.getenv('IP','0.0.0.0'),port=int(os.getenv('PORT',8080)))
999,864 | 23cc08c8f5fbc8ae6e9bd25b3b6ad31ed485500e | from django.conf.urls import include, url, patterns
from vivsite import settings
from vivs.views.Project import *
# URL table for the portfolio/project pages, mounted under /index/ below.
project_url = [
    url(r'^$', DashboardView.as_view(), name ='dashboard'),
    url(r'^about/$', AboutView.as_view(), name='about'),
    url(r'^works/$', WorksView.as_view(), name='works'),
    url(r'^contact/$', ContactView.as_view(), name='contact'),
    # NOTE(review): ProjectDetailView is passed without .as_view() — if it is
    # a class-based view like its siblings, this route will fail; confirm.
    url(r'^project/$', ProjectDetailView, name='project_detail'),
    url(r'^task/(?P<pk>\d+)/$', TaskToProjectFilterView.as_view(), name='task_filter_view'),
    url(r'^mentions-legales/$', LegalView.as_view(), name='mentions_legales')
]
urlpatterns = patterns('',
    url(r'^index/', include(project_url, namespace="project")),
)
|
999,865 | e69bbc017f2f031aee93ce116d17a09f55be7934 | import math as m
def queq(a, b=0, c=0):
    """Solve a*x^2 + b*x + c = 0.

    Returns a pair of roots, a single root, or the string 'No roots',
    mirroring the original special-casing for b == 0 and c == 0.
    """
    if b == 0:
        try:
            # Pure quadratic a*x^2 + c = 0.
            # NOTE(review): both returned roots are the positive branch;
            # mathematically they are +/-sqrt(-c/a) — confirm intent.
            return m.sqrt(-c/a), m.sqrt(-c/a)
        except (ZeroDivisionError, ValueError):
            return 'No roots'
    elif c == 0:
        try:
            # a*x^2 + b*x = 0 -> x = 0 or x = -b/a; only the nonzero root is returned.
            return -b/a
        except (ZeroDivisionError, ValueError):
            return 'No roots'
    else:
        D = m.pow(b, 2)-4*a*c
        if D < 0:
            return 'No roots'
        elif D == 0:
            # BUG FIX: this branch previously re-tested D < 0 and was
            # unreachable, so the double-root case fell through to the
            # general formula and returned a pair instead of one root.
            return float('{:.2f}'.format(-b/(2*a)))
        else:
            X1 = float('{:.2f}'.format((-b-m.sqrt(D))/(2*a)))
            X2 = float('{:.2f}'.format((-b+m.sqrt(D))/(2*a)))
            return X1, X2
def piftri(a=0, b=0, c=0):
    """Pythagorean helper.

    With one side given as zero, returns the missing side (c being the
    hypotenuse). With all three sides given, returns whether they form a
    right triangle.
    """
    if a == 0:
        return m.sqrt(m.pow(c, 2) - m.pow(b, 2))
    if b == 0:
        return m.sqrt(m.pow(c, 2) - m.pow(a, 2))
    if c == 0:
        return m.sqrt(m.pow(a, 2) + m.pow(b, 2))
    return m.pow(c, 2) == m.pow(a, 2) + m.pow(b, 2)
def arifmp(d, n, astart=0, afinish=0):
    """n-th term of an arithmetic progression with common difference d,
    computed from the first term (astart) or — as in the original — from
    the negated last term when astart is zero."""
    offset = d * (n - 1)
    if astart == 0:
        return offset - afinish
    if afinish == 0:
        return astart + offset
    return 'Non-correct data'
def arifmpsum(n, astart, afinish):
    """Sum of the first n terms of an arithmetic progression, given its first and last terms."""
    return (astart + afinish) / 2 * n
def geomp(q, n, bstart=0, bfinish=0):
    """Recover the opposite end of a geometric progression with ratio q and
    n terms; exactly one of bstart/bfinish should be supplied."""
    ratio_pow = m.pow(q, n - 1)
    if bstart == 0:
        return bfinish / ratio_pow
    if bfinish == 0:
        return bstart / ratio_pow
    return 'Non-correct data'
def geompsumasc(q, n, bstart=0):
    """Sum of the first n terms of a geometric progression (requires q != 1)."""
    return (bstart * (1 - m.pow(q, n))) / (1 - q)
def geompsumdesc(q, bstart=0):
    """Sum of an infinite decreasing geometric series; implicitly None when |q| >= 1 (matching the original fall-through)."""
    if abs(q) < 1:
        return bstart / (1 - q)
|
999,866 | 83aded3f2a6dabbbdcc07b21de1d3c8c592f0d32 | class Instructor:
def __init__(self):
self.__instructor_name = None
self.__technology_skill = None
self.__experience = None
self.__avg_feedback = None
def set_instructor_name(self, instructor_name):
self.__instructor_name = instructor_name
def get_instructor_name(self):
return self.__instructor_name
def set_technology_skill(self, technology_skill):
self.__technology_skill = technology_skill
def get_technology_skill(self):
return self.__technology_skill
def set_experience(self, experience):
self.__experience = experience
def get_experience(self):
return self.__experience
def set_avg_feedback(self, avg_feedback):
self.__avg_feedback = avg_feedback
def get_avg_feedback(self):
return self.__avg_feedback
def check_eligibility(self):
if self.__experience > 3 and self.__avg_feedback >= 4.5:
return True
elif self.__experience <= 3 and self.__avg_feedback >= 4:
return True
else:
return False
def allocate_course(self, technology):
if((technology == self.__technology_skill) or (technology == "c++")):
return True
else:
return False
|
999,867 | fb945f613cf4eed20b36f895247c2e1e80f0a9dc | from collections import deque
from sys import maxsize
class Node:
    """A grid cell in a BFS search tree.

    Constructed either explicitly from (x, y, distance) or derived from a
    parent node plus a move direction ('up'/'down'/'left'/'right'); derived
    nodes get distance = parent.distance + 1.
    """
    def __init__(self, x=0, y=0, distance=0, parent=None, move=None):
        if parent:
            self.distance = parent.distance + 1
            # BUG FIX: compare move strings with ==, not `is`. Identity
            # comparison of str literals only works via CPython interning
            # (and warns on Python 3.8+); a dynamically built "up" would
            # silently fall through to the else branch.
            if move == 'up':
                self.x = parent.x
                self.y = parent.y - 1
            elif move == 'down':
                self.x = parent.x
                self.y = parent.y + 1
            elif move == 'left':
                self.x = parent.x - 1
                self.y = parent.y
            elif move == 'right':
                self.x = parent.x + 1
                self.y = parent.y
            else:
                self.x, self.y = x, y
        else:
            self.x, self.y, self.distance = x, y, distance
        self.parent = parent
self.parent = parent
class BFS:
    """Breadth-first path search over a grid of truthy (walkable) cells."""
    @staticmethod
    def get_path(node: Node):
        """Walk parent links from `node` back to the root and return the path root-first."""
        path = []
        n = node
        while n is not None:
            path.append(n)
            n = n.parent
        path.reverse()
        return path
    @staticmethod
    def find_path_breadth_first(maze: list, start_pos=(0, 0), end_value=9):
        """BFS from start_pos to the first cell equal to end_value.

        Prints the path (as (x, y) pairs) when found and returns its length;
        returns sys.maxsize when end_value is unreachable. Cells that are
        falsy (e.g. 0) are treated as walls.
        """
        q_open = deque([])
        visited = [[False] * len(maze[0]) for _ in range(len(maze))]
        min_distance = maxsize
        curr_node = Node(start_pos[0], start_pos[1])
        q_open.append(curr_node)
        visited[curr_node.y][curr_node.x] = True
        while len(q_open) > 0:
            curr_node = q_open.popleft()
            if maze[curr_node.y][curr_node.x] == end_value:
                min_distance = curr_node.distance
                break
            # Valid move: stays on the board, lands on a truthy cell, not yet visited.
            is_valid_move = lambda n: 0 <= n.x < len(maze[0]) and 0 <= n.y < len(maze) and maze[n.y][n.x] and not \
                visited[n.y][n.x]
            for node in [Node(parent=curr_node, move='up'),
                         Node(parent=curr_node, move='down'),
                         Node(parent=curr_node, move='left'),
                         Node(parent=curr_node, move='right')]:
                if is_valid_move(node):
                    q_open.append(node)
                    visited[node.y][node.x] = True
        if curr_node and min_distance != maxsize:
            print(str([(node.x, node.y) for node in BFS.get_path(curr_node)]).strip('[]'))
        return min_distance
|
999,868 | a9eaf03c592b6ce0e1cf8f165a7b7fa91d69bb6c | from collections import deque
# Grid cell markers: '3' is a key, '4' is a treasure ('2' is used as a wall below).
KEY = "3"
TREASURE = "4"
def find_dest(M, start, dest):
    """BFS over grid M from `start` ('2' cells are walls) to the first cell
    whose value equals `dest`.

    Returns {'steps': <BFS level count>, 'key': (x, y)}.

    NOTE(review): if `dest` is unreachable, the final return reuses the last
    (x, y) popped from the queue — callers receive a coordinate that is NOT
    the destination; confirm that is intended.
    """
    q = deque()
    q.append(start)
    dirs = [(1, 0), (-1, 0), (0, 1), (0, -1)]
    steps = 0
    visited = set()
    visited.add(start)
    while q:
        # Process one whole BFS level per outer iteration so `steps` counts levels.
        for i in range(len(q)):
            node = q.popleft()
            x, y = node[0], node[1]
            if M[x][y] == dest:
                return {'steps': steps, "key": (x, y)}
            for dir in dirs:
                newX, newY = x+dir[0], y+dir[1]
                # check bounds:
                if newX >= 0 and newX <= len(M)-1 and newY >= 0 and newY <= len(M[0])-1:
                    # check cell:
                    if M[newX][newY] != '2':
                        # check if visited:
                        if (newX, newY) not in visited:
                            q.append((newX, newY))
                            visited.add((newX, newY))
        steps += 1
    return {'steps': steps, "key": (x, y)}
# 3x3 sample maze; presumably '0' marks the start and '1' open ground — confirm against find_dest usage.
m = [["0", "1", "3"], ["1", "1", "4"], ["1", "3", "4"]]
def left(M, dest):
    """Return True while the value `dest` still appears anywhere in grid M."""
    return any(dest in row for row in M)
# Intent appears to be: record the distance to each key (r) and, per key,
# the distances from that key to each treasure (r2), tracking the minimum.
r = []
r2 = {}
min_sum = 10**10
while left(m, KEY):
    s = (0, 0)
    info = find_dest(m, s, KEY)
    steps, key = info["steps"], info["key"]
    m[key[0]][key[1]] = '1'
    r.append(steps)
    m_copy = [row.copy() for row in m]
    r2[key] = []
    while left(m_copy, TREASURE):
        s = key
        # NOTE(review): this inner loop is broken as written —
        #  * info2 is computed but the lines below read `info` instead;
        #  * steps2 is never initialized, so `+=` raises NameError;
        #  * r2['distance'] raises KeyError (only r2[key] exists);
        #  * the condition checks m_copy, which is never modified, so even
        #    with the above fixed the loop would not terminate.
        info2 = find_dest(m, s, TREASURE)
        steps2 += info['steps']
        key2 = info['key']
        m[key2[0]][key2[1]] = '1'
        r2['distance'].append(steps)
    if sum(r2[key]) < min_sum:
        min_sum = sum(r2[key])
print(max(r))
999,869 | 6f38920ac3ee8e2b9a4188e32a5127ede74d1f59 | # -*- coding: utf-8 -*-
import sys
import time
import random
start_time = time.time()
def bubbleSort(array):
    """Sort `array` in place using bubble sort and return the same list."""
    for sweep in range(len(array) - 1):
        # After each sweep the largest remaining value has bubbled to the end.
        for pos in range(len(array) - sweep - 1):
            if array[pos] > array[pos + 1]:
                array[pos], array[pos + 1] = array[pos + 1], array[pos]
    return array
# Preview the input file line by line (hard-coded absolute Windows path).
with open('C://Users//beyza//Desktop//numbers.txt', 'r') as infile:
    data = infile.readlines()
for i in data:
    lines=i.split(',')
    print(lines)
def matrix(file):
    """Parse a file of comma-separated integer rows (one row per line) into a list of lists.

    BUG FIX: the original `open(file).read()` never closed the file handle;
    a context manager guarantees it is released.
    """
    with open(file) as fh:
        contents = fh.read()
    return [list(map(int, line.split(','))) for line in contents.split('\n')]
# Sort every row of the file in place and print before/after.
A=matrix('C://Users//beyza//Desktop//numbers.txt')
for i in A:
    print ("Before sorting:",i)
    bubbleSort(i)
    print("After sorting:", i)
# NOTE(review): redundant — the `with` block above already closed infile.
infile.close()
print("--- %s seconds ---" % (time.time() - start_time))
|
999,870 | 0fad64e46cdc5bdb34ead1634cdb653f5206d095 | import sys
# Make the current directory importable, then call into my_module.
sys.path.append(".")
import my_module
# NOTE(review): none of these arguments (execution, x, a, b, c, d, e) are
# defined anywhere in this script — this call raises NameError as written.
my_module.test(execution,x,a,b,c,d,e)
999,871 | ae3ded9e29d7b0125c45f0ad5173c78f66c8d2ea | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : show_all_jobs.py
@Time : 2021/09/03 11:17:09
@Author : Jeffrey Wang
@Version : 1.0
@Contact : shwangjj@163.com
@Desc : 回显当前运行全部任务示例
输出:
[Every 10 minutes do job() (last run: [never], next run: 2021-09-03 11:29:03),
Every 2 seconds do job() (last run: [never], next run: 2021-09-03 11:19:05)]
[Every 10 minutes do job() (last run: [never], next run: 2021-09-03 11:29:03),
Every 2 seconds do job() (last run: [never], next run: 2021-09-03 11:19:05)]
job()函数调用 ... 2021-09-03 11:19:05.186024
[Every 10 minutes do job() (last run: [never], next run: 2021-09-03 11:29:03),
Every 2 seconds do job() (last run: 2021-09-03 11:19:05, next run: 2021-09-
11:19:07)]
[Every 10 minutes do job() (last run: [never], next run: 2021-09-03 11:29:03),
Every 2 seconds do job() (last run: 2021-09-03 11:19:05, next run: 2021-09-03
11:19:07)]
job()函数调用 ... 2021-09-03 11:19:07.194646
'''
import datetime
import schedule
import time
def job():
    """Periodic task body: print the current timestamp each time the scheduler fires."""
    now = datetime.datetime.now()
    print("job()函数调用 ... {0} ".format(now))
# Register two recurring jobs.
schedule.every(10).minutes.do(job)
schedule.every(2).seconds.do(job)
# Scheduled jobs only fire when run_pending() is polled,
# so loop over it forever with a short sleep.
while True:
    schedule.run_pending()
    time.sleep(1)
# NOTE(review): unreachable — the loop above never exits.
all_jobs = schedule.get_jobs()
print(all_jobs)
|
999,872 | c2175414ec9526f3de9a3caae8917d00ab0875a6 | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField, IntegerField
from wtforms.validators import ValidationError, DataRequired, Email, EqualTo
from app.models import Therapist, Patient, Intervention, Session
class LoginForm(FlaskForm):
    """Therapist sign-in form."""
    username = StringField('Username', validators=[DataRequired()])
    password = PasswordField('Password', validators=[DataRequired()])
    remember_me = BooleanField('Remember Me')
    submit = SubmitField('Sign In')
class RegistrationForm(FlaskForm):
    """Therapist registration form with uniqueness checks on username and first name."""
    username = StringField('Username', validators=[DataRequired()])
    fname = StringField('First Name', validators=[DataRequired()])
    password = PasswordField('Password', validators=[DataRequired()])
    password2 = PasswordField(
        'Repeat Password', validators=[DataRequired(), EqualTo('password')])
    submit = SubmitField('Register')
    def validate_username(self, username):
        # WTForms calls validate_<fieldname> automatically during form.validate().
        therapist = Therapist.query.filter_by(username=username.data).first()
        if therapist is not None:
            raise ValidationError('Please use a different username.')
    def validate_fname(self, fname):
        therapist = Therapist.query.filter_by(fname=fname.data).first()
        if therapist is not None:
            raise ValidationError('Please use a different First Name')
class PatientRegistrationForm(FlaskForm):
    """Patient registration form; usernames must be unique among patients."""
    username = StringField('Username', validators=[DataRequired()])
    fname = StringField('First Name', validators=[DataRequired()])
    lname = StringField('Last Name', validators=[DataRequired()])
    password = PasswordField('Password', validators=[DataRequired()])
    password2 = PasswordField(
        'Repeat Password', validators=[DataRequired(), EqualTo('password')])
    submit = SubmitField('Register')
    def validate_username(self, username):
        # Reject usernames already taken by an existing patient.
        patient = Patient.query.filter_by(username=username.data).first()
        if patient is not None:
            raise ValidationError('Please use a different username.')
class FollowForm(FlaskForm):
    """Assign an intervention session to a patient, identified by username."""
    username=StringField('Username:',validators=[DataRequired()])
    name=StringField('Intervention Session:',validators=[DataRequired()])
    submit=SubmitField('Assign')
class CreateSessionForm(FlaskForm):
    """Form for assigning a new game session to a patient's intervention."""
    therapist_id = IntegerField('Please enter your Therapist ID:', validators=[DataRequired()])
    intervention_id = IntegerField('Please enter ID of the Intervention you would like to assign a session to:', validators=[DataRequired()])
    patient_id = IntegerField('Please enter ID of desired Patient:', validators=[DataRequired()])
    gametype = StringField('Desired Game Type:', validators=[DataRequired()])
    date_started = StringField('Please enter Current Date:', validators=[DataRequired()])
    session_number = StringField('Is this the First, Second or Third session assigned to this intervention?', validators=[DataRequired()])
    submit = SubmitField('Create Session')
    def validate_therapist_id(self, therapist_id):
        """Reject the form when the given therapist ID does not exist.

        BUG FIX: this was named validate_therapistid, which WTForms never
        invokes (inline validators must be named validate_<fieldname>), and
        its error message was copy-pasted from the username validators.
        """
        therapist = Therapist.query.filter_by(id=therapist_id.data).first()
        if therapist is None:
            raise ValidationError('Unknown Therapist ID.')
|
999,873 | 800efa47342b09f166476aaa626667472e70481e | #!/usr/bin/env python2
'''
Find the target from contour detection and the contours nesting.
Usage:
./polarity_find.py <image>
'''
import cv2
import cv
import numpy as np
import random
import argparse
from ptgrey import PTGreyCamera
import flycapture2 as fc2
from time import time
def show_img(img, wait=True, title='bah'):
    """Display `img` scaled down to at most 500px on its longest side.

    Pressing 'q' during the non-blocking poll raises KeyboardInterrupt;
    with wait=True, blocks until 'q' is pressed and then closes all windows.
    """
    # Halve the image until it fits comfortably on screen.
    while max(img.shape[:2]) > 500:
        img = cv2.pyrDown(img)
    #ratio = 500./max(img.shape)
    #cv2.resize(img,
    cv2.imshow(title, img)
    if cv2.waitKey(1)&0xff == ord('q'):
        raise KeyboardInterrupt()
    if wait:
        while True:
            if cv2.waitKey(0)&0xff==ord('q'):
                break
        cv2.destroyAllWindows()
class RANSACFind:
def __init__(self, center_black, number_of_rings, debug=True):
self._center_black = center_black
self._number_of_rings = number_of_rings
self._debug = debug
self._canny_threshold1 = 200
self._canny_threshold2 = 20
self._max_iterations = 1000
#self._max_proc_time = 2 # seconds
def find_target(self, image):
'''
Return the center of the target in pixel coordinates as tuple (x, y).
'''
# resize input image, assuming target takes at least 1/2 of the frame.
self.resize = 400
orig_shape = image.shape[:2]
while min(image.shape[:2]) > self.resize:
image = cv2.pyrDown(image)
ratio = 1.*image.shape[0]/orig_shape[0]
center, radius = self.find_circles(image)
if center is None:
return (None, None)
return (center[0]/ratio, center[1]/ratio), radius/ratio
def find_circles(self, image):
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# blur kernel about 20 for 1000x1000px, 10 for 500x500, 5 for 250x250
blurk = max(image.shape)/50
if ~blurk&1:
blurk += 1
image = cv2.medianBlur(image, blurk)
can = cv2.Canny(image, self._canny_threshold1, self._canny_threshold2)
if self._debug:
print can.shape, 'points:', can.sum()/255
if self._debug:
show_img(can, wait=False, title='canny')
circles = [] # (center, radius, percentage)
MAX_CIRCLES = 10
# get distance matrix (distance to nearest 1)
distance = cv2.distanceTransform(255-can, cv.CV_DIST_L2, 3)
points = [_[::-1] for _ in np.transpose(can.nonzero())]
if len(points) == 0:
return None, None
MIN_RADIUS = 20
MAX_RADIUS = max(can.shape)
for i in xrange(self._max_iterations):
# get 3 points for circle
p = [random.randrange(len(points)) for _ in range(3)]
# make sure we get different points
if len(set(p)) != 3:
continue
rand_points = [points[_] for _ in p]
# make a circle from the points
center, radius = self.get_circle(rand_points)
# if radius is None, there was an error (points on one line)
if radius is None:
continue
# throw too big / too small circles
if radius < MIN_RADIUS or radius > MAX_RADIUS:
continue
#centers[j] = center
#j+=1
#continue
percent = self.verify_circle(distance, center, radius)
if len(circles) == 0 or percent > circles[0][2]:
circles.append((center, radius, percent))
circles.sort(key=lambda _: _[2])
if len(circles) > MAX_CIRCLES:
circles = circles[1:]
if circles[-1][2] > 0.9:
print 'Break!! we found a circle > 0.9! i=', i
break
#centers_x = [_[0] for _ in centers[:j]]
#centers_y = [_[1] for _ in centers[:j]]
#x = (np.median(centers_x), np.median(centers_y)), 10
#print 'average of', j, x
#return x
if len(circles) == 0 or sum([_[2] for _ in circles])==0:
return None, None
# average by value * percent**2
avg_x = sum([_[0][0]*_[2]**2 for _ in circles])/sum([_[2]**2 for _ in circles])
avg_y = sum([_[0][1]*_[2]**2 for _ in circles])/sum([_[2]**2 for _ in circles])
avg_radius = sum([_[1]*_[2]**2 for _ in circles])/sum([_[2]**2 for _ in circles])
avg_center = (avg_x, avg_y)
for (center, radius, percent) in circles:
#print percent, radius, center
cv2.circle(image, tuple(map(int, center)), int(radius), (255, 0, 0), 1)
cv2.circle(image, tuple(map(int, circles[-1][0])), int(circles[-1][1]), (255, 0, 0), 3)
self.verify_circle(distance, circles[-1][0], circles[-1][1])
cv2.circle(image, tuple(map(int, avg_center)), 5, (255, 0, 0), -1)
if self._debug:
show_img(image, wait=False, title='debug2?')
# TODO: find the actual radius. lol.
return (avg_center, avg_radius)
@staticmethod
def get_circle(p):
# http://mathforum.org/library/drmath/view/55239.html
x = np.linalg.det([
[p[0][0]**2+p[0][1]**2, p[0][1], 1],
[p[1][0]**2+p[1][1]**2, p[1][1], 1],
[p[2][0]**2+p[2][1]**2, p[2][1], 1],
])
y = np.linalg.det([
[p[0][0], p[0][0]**2+p[0][1]**2, 1],
[p[1][0], p[1][0]**2+p[1][1]**2, 1],
[p[2][0], p[2][0]**2+p[2][1]**2, 1],
])
bah = 2*np.linalg.det([
[p[0][0], p[0][1], 1],
[p[1][0], p[1][1], 1],
[p[2][0], p[2][1], 1],
])
if bah == 0:
return None, None
x /= bah
y /= bah
radius = np.linalg.norm(p[0]-(x,y))
return (x, y), radius
@staticmethod
def verify_circle(distance, center, radius, debug=False):
    """Score a candidate circle in [0, 1]: the fraction of 50 evenly-spaced
    perimeter samples that lie within `max_dist` of an edge pixel, where
    `distance` is the distance-transform image (distance to nearest edge).
    Samples falling outside the image count as misses."""
    # Tolerance grows with the radius but is capped at 50 px.
    max_dist = min(radius/25, 50)
    count = 0
    inlie = 0
    #incircle = 0
    for a in np.linspace(0, 2*np.pi, 50):
        count += 1
        # Perimeter point at angle a (image y axis points down, hence -sin).
        x, y = center[0] + np.cos(a) * radius, center[1]-np.sin(a)*radius;
        if x<0 or x>=distance.shape[1] or y<0 or y>=distance.shape[0]:
            # Off-image sample: skipped, but still in the denominator.
            continue
        #incircle += 1
        if debug:
            print x, y, distance[y, x]
        #inlie += distance[y, x]
        # NOTE(review): x and y are floats here; float array indexing is
        # rejected by modern NumPy — presumably this ran on an older NumPy.
        # Verify, or cast to int before indexing.
        if distance[y, x] < max_dist:
            inlie += 1
    #if incircle == 0:
    #    print center, radius
    #    return 0
    #return -1.0*inlie/incircle
    return 1.0*inlie/count
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('--shutter', default=10, type=int, help='shutter time (10 ms default for indoor, 1-2 ms is fine for outside)')
    parser.add_argument('--gain', default=0, type=float, help='gain')
    parser.add_argument('--debug', action='store_true', help='debug')
    parser.add_argument('-t', '--threshold', default=100, help='')
    parser.add_argument('cam', nargs='*', help='image file or camera')
    args = parser.parse_args()
    if args.cam:
        # Offline mode: run the detector over the given image files, profiled.
        import cProfile
        finder = PolarityFind(True, 3, args.threshold, debug=args.debug)
        for image in args.cam:
            image = cv2.imread(image)
            #center, size = finder.find_target(image)
            # cProfile.run executes in this namespace, so `center`/`size`
            # assigned inside the string are visible below.
            # NOTE(review): --threshold has no type=, so a user-supplied value
            # arrives as a string — confirm PolarityFind accepts that.
            cProfile.run("center, size = finder.find_target(image)")
            print 'center:', center
            if center is not None:
                # Mark the detected target on the image before showing it.
                cv2.circle(image, tuple(map(int, center)), 10, (0, 255, 255), -1)
                show_img(image)
            else:
                print 'could not find target'
    else:
        # Live mode: continuously grab frames from a Point Grey camera.
        c = PTGreyCamera()
        # set manual values
        c.set_property_manual(fc2.AUTO_EXPOSURE, 0) # exposure = 0, we don't modify this. I'm not sure, but it had no effect.
        c.set_property_manual(fc2.SHUTTER, args.shutter) # 10ms shutter (1/100, hopefully fast enough)
        # if frame_rate is too high, it is set to maximum :)
        c.set_property_manual(fc2.FRAME_RATE, 100) # maximum framerate
        c.set_property_manual(fc2.GAIN, args.gain)
        c.print_infos()
        c.start_capture()
        finder = RANSACFind(True, 3, debug=args.debug)
        try:
            while True:
                img = c.get_frame()
                t = time()
                center, size = finder.find_target(img)
                t = time()-t
                # Per-frame latency and effective frame rate.
                print 'time:', t, 1/t, 'fps'
                if center is not None:
                    cv2.circle(img, (int(center[0]), int(center[1])), 10, (255, 255, 0), -1)
                show_img(img, wait=False, title='img_target')
        except KeyboardInterrupt:
            print 'bye..'
|
999,874 | 21ce4180a954503b27180f9f58d0349d61f1c1e4 | # Generated by Django 3.1.3 on 2020-11-30 14:47
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration for the polls app: creates the Poll table with a
    question, four answer options and a denormalised vote count per option."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Poll',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('question', models.TextField(max_length=250)),
                ('option1', models.CharField(max_length=100)),
                ('option2', models.CharField(max_length=100)),
                ('option3', models.CharField(max_length=100)),
                ('option4', models.CharField(max_length=100)),
                # Vote tallies stored directly on the poll row.
                ('option1_count', models.IntegerField(default=0)),
                ('option2_count', models.IntegerField(default=0)),
                ('option3_count', models.IntegerField(default=0)),
                ('option4_count', models.IntegerField(default=0)),
            ],
        ),
    ]
|
999,875 | 9dfaf1a9bc588467f4c3df73e7b90e6ddd35d5b8 | from django.shortcuts import render, redirect, get_object_or_404
from accounts.forms import *
from django.contrib.auth.models import User
from accounts.models import *
from home.models import *
from django.contrib.auth.forms import UserChangeForm, PasswordChangeForm
from django.contrib.auth import update_session_auth_hash, authenticate, login
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.views.generic.detail import DetailView
from django.views.generic import TemplateView
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
def registration(request):
    """Handle user sign-up: validate the posted form, create the account,
    flash a success message and send the new user to the login page."""
    if request.method != 'POST':
        # Initial visit: show a blank registration form.
        return render(request, 'accounts/reg_form.html', {'form': UserRegisterForm()})
    form = UserRegisterForm(request.POST)
    if not form.is_valid():
        # Re-render with the bound form so field errors are displayed.
        return render(request, 'accounts/reg_form.html', {'form': form})
    form.save()
    username = form.cleaned_data.get('username')
    messages.success(request, f'Your account has been created! You are now able to log in')
    return redirect('accounts:login')
# @login_required
def profile(request):
    """Render the profile page with every registered user listed."""
    return render(request, 'accounts/profile.html', {'users': User.objects.all()})
# @login_required
def change_password(request):
    """Let the logged-in user change their password.

    Fixes two defects in the original:
    - the form was bound to request.POST even on GET, so an empty bound form
      (with validation errors) was created on the initial page load;
    - the "saved" success message was emitted on the failure/GET branch
      instead of after a successful save.
    """
    if request.method == 'POST':
        form = PasswordChangeForm(data=request.POST, user=request.user)
        if form.is_valid():
            form.save()
            # Keep the user logged in after the password change.
            update_session_auth_hash(request, form.user)
            messages.success(request, 'Your new password has been saved')
            return redirect('accounts:profile')
        # Invalid POST: fall through with the bound form so errors render.
    else:
        form = PasswordChangeForm(user=request.user)
    args = {'form': form}
    return render(request, 'accounts/change-password.html', args)
def user_profile(request, pk):
    """Show a single user's profile page together with their posts."""
    profile_user = get_object_or_404(User, pk=pk)
    authored_posts = Post.objects.filter(user=profile_user)
    return render(request, 'accounts/user_profile.html', {
        'user': profile_user,
        'posts': authored_posts,
    })
def profile_settings(request):
    """Let the logged-in user edit account and profile data; both forms must
    validate for the save to happen, otherwise the bound forms re-render."""
    if request.method == 'POST':
        u_form = UserUpdateForm(request.POST, instance=request.user)
        p_form = ProfileUpdateForm(request.POST, request.FILES,
                                   instance=request.user.userprofile)
        if u_form.is_valid() and p_form.is_valid():
            u_form.save()
            p_form.save()
            return redirect('accounts:profile')
        # Invalid POST: fall through with bound forms so errors are shown.
    else:
        u_form = UserUpdateForm(instance=request.user)
        p_form = ProfileUpdateForm(instance=request.user.userprofile)
    return render(request, 'accounts/profile_settings.html', {
        'u_form': u_form,
        'p_form': p_form,
        'users': User.objects.all(),
    })
|
999,876 | 5922154c540e735c69ec884aebfc8d3fbab9af4b | Python 3.8.3 (v3.8.3:6f8c8320e9, May 13 2020, 16:29:34)
[Clang 6.0 (clang-600.0.57)] on darwin
Type "help", "copyright", "credits" or "license()" for more information.
>>> string1= "Hello"
>>> string2="Everyone"
>>> string1+string2
'HelloEveryone'
>>> #Concatenation
>>> string1-string2
Traceback (most recent call last):
File "<pyshell#4>", line 1, in <module>
string1-string2
TypeError: unsupported operand type(s) for -: 'str' and 'str'
>>> string1*string2
Traceback (most recent call last):
File "<pyshell#5>", line 1, in <module>
string1*string2
TypeError: can't multiply sequence by non-int of type 'str'
>>> string1/string2
Traceback (most recent call last):
File "<pyshell#6>", line 1, in <module>
string1/string2
TypeError: unsupported operand type(s) for /: 'str' and 'str'
>>> string1+3
Traceback (most recent call last):
File "<pyshell#7>", line 1, in <module>
string1+3
TypeError: can only concatenate str (not "int") to str
>>> string1*5
'HelloHelloHelloHelloHello'
>>> #ASCII TO CHARACTER & CHARACTER TO ASCII CONVERSION
>>> #ASCII -> American Standard Code for Information Interchange
>>> #'A'->ASCII->Binary
>>> chr(65)
'A'
>>> chr(90)
'Z'
>>> #ord->ordinal
>>> ord('X')
88
>>> ord('@')
64
>>> 'a' > 'b'
False
>>> 'a' < 'b'
True
>>> #Conditional Statements->
>>> #->if statement
>>> #->if-else statement
>>> #->ladder if-else
>>> #->nestedd if-else
>>> #Assignment Operator
>>> x = 5
>>> x = x+1
>>> x
6
>>> a = 5
>>> a+=1
>>> a
6
>>> a-=3
>>> a
3
>>> a *=5
>>> a
15
>>> a /= 5
>>> a
3.0
>>> a%=2
>>> a
1.0
>>> a//=0.5
>>> a
2.0
>>> a **=3
>>> a
8.0
>>> import math
>>> a= math.sqrt(a)
>>> a
2.8284271247461903
>>> help(math)
Help on module math:
NAME
math
MODULE REFERENCE
https://docs.python.org/3.8/library/math
The following documentation is automatically generated from the Python
source files. It may be incomplete, incorrect or include features that
are considered implementation detail and may vary between Python
implementations. When in doubt, consult the module reference at the
location listed above.
DESCRIPTION
This module provides access to the mathematical functions
defined by the C standard.
FUNCTIONS
acos(x, /)
Return the arc cosine (measured in radians) of x.
acosh(x, /)
Return the inverse hyperbolic cosine of x.
asin(x, /)
Return the arc sine (measured in radians) of x.
asinh(x, /)
Return the inverse hyperbolic sine of x.
atan(x, /)
Return the arc tangent (measured in radians) of x.
atan2(y, x, /)
Return the arc tangent (measured in radians) of y/x.
Unlike atan(y/x), the signs of both x and y are considered.
atanh(x, /)
Return the inverse hyperbolic tangent of x.
ceil(x, /)
Return the ceiling of x as an Integral.
This is the smallest integer >= x.
comb(n, k, /)
Number of ways to choose k items from n items without repetition and without order.
Evaluates to n! / (k! * (n - k)!) when k <= n and evaluates
to zero when k > n.
Also called the binomial coefficient because it is equivalent
to the coefficient of k-th term in polynomial expansion of the
expression (1 + x)**n.
Raises TypeError if either of the arguments are not integers.
Raises ValueError if either of the arguments are negative.
copysign(x, y, /)
Return a float with the magnitude (absolute value) of x but the sign of y.
On platforms that support signed zeros, copysign(1.0, -0.0)
returns -1.0.
cos(x, /)
Return the cosine of x (measured in radians).
cosh(x, /)
Return the hyperbolic cosine of x.
degrees(x, /)
Convert angle x from radians to degrees.
dist(p, q, /)
Return the Euclidean distance between two points p and q.
The points should be specified as sequences (or iterables) of
coordinates. Both inputs must have the same dimension.
Roughly equivalent to:
sqrt(sum((px - qx) ** 2.0 for px, qx in zip(p, q)))
erf(x, /)
Error function at x.
erfc(x, /)
Complementary error function at x.
exp(x, /)
Return e raised to the power of x.
expm1(x, /)
Return exp(x)-1.
This function avoids the loss of precision involved in the direct evaluation of exp(x)-1 for small x.
fabs(x, /)
Return the absolute value of the float x.
factorial(x, /)
Find x!.
Raise a ValueError if x is negative or non-integral.
floor(x, /)
Return the floor of x as an Integral.
This is the largest integer <= x.
fmod(x, y, /)
Return fmod(x, y), according to platform C.
x % y may differ.
frexp(x, /)
Return the mantissa and exponent of x, as pair (m, e).
m is a float and e is an int, such that x = m * 2.**e.
If x is 0, m and e are both 0. Else 0.5 <= abs(m) < 1.0.
fsum(seq, /)
Return an accurate floating point sum of values in the iterable seq.
Assumes IEEE-754 floating point arithmetic.
gamma(x, /)
Gamma function at x.
gcd(x, y, /)
greatest common divisor of x and y
hypot(...)
hypot(*coordinates) -> value
Multidimensional Euclidean distance from the origin to a point.
Roughly equivalent to:
sqrt(sum(x**2 for x in coordinates))
For a two dimensional point (x, y), gives the hypotenuse
using the Pythagorean theorem: sqrt(x*x + y*y).
For example, the hypotenuse of a 3/4/5 right triangle is:
>>> hypot(3.0, 4.0)
5.0
isclose(a, b, *, rel_tol=1e-09, abs_tol=0.0)
Determine whether two floating point numbers are close in value.
rel_tol
maximum difference for being considered "close", relative to the
magnitude of the input values
abs_tol
maximum difference for being considered "close", regardless of the
magnitude of the input values
Return True if a is close in value to b, and False otherwise.
For the values to be considered close, the difference between them
must be smaller than at least one of the tolerances.
-inf, inf and NaN behave similarly to the IEEE 754 Standard. That
is, NaN is not close to anything, even itself. inf and -inf are
only close to themselves.
isfinite(x, /)
Return True if x is neither an infinity nor a NaN, and False otherwise.
isinf(x, /)
Return True if x is a positive or negative infinity, and False otherwise.
isnan(x, /)
Return True if x is a NaN (not a number), and False otherwise.
isqrt(n, /)
Return the integer part of the square root of the input.
ldexp(x, i, /)
Return x * (2**i).
This is essentially the inverse of frexp().
lgamma(x, /)
Natural logarithm of absolute value of Gamma function at x.
log(...)
log(x, [base=math.e])
Return the logarithm of x to the given base.
If the base not specified, returns the natural logarithm (base e) of x.
log10(x, /)
Return the base 10 logarithm of x.
log1p(x, /)
Return the natural logarithm of 1+x (base e).
The result is computed in a way which is accurate for x near zero.
log2(x, /)
Return the base 2 logarithm of x.
modf(x, /)
Return the fractional and integer parts of x.
Both results carry the sign of x and are floats.
perm(n, k=None, /)
Number of ways to choose k items from n items without repetition and with order.
Evaluates to n! / (n - k)! when k <= n and evaluates
to zero when k > n.
If k is not specified or is None, then k defaults to n
and the function returns n!.
Raises TypeError if either of the arguments are not integers.
Raises ValueError if either of the arguments are negative.
pow(x, y, /)
Return x**y (x to the power of y).
prod(iterable, /, *, start=1)
Calculate the product of all the elements in the input iterable.
The default start value for the product is 1.
When the iterable is empty, return the start value. This function is
intended specifically for use with numeric values and may reject
non-numeric types.
radians(x, /)
Convert angle x from degrees to radians.
remainder(x, y, /)
Difference between x and the closest integer multiple of y.
Return x - n*y where n*y is the closest integer multiple of y.
In the case where x is exactly halfway between two multiples of
y, the nearest even value of n is used. The result is always exact.
sin(x, /)
Return the sine of x (measured in radians).
sinh(x, /)
Return the hyperbolic sine of x.
sqrt(x, /)
Return the square root of x.
tan(x, /)
Return the tangent of x (measured in radians).
tanh(x, /)
Return the hyperbolic tangent of x.
trunc(x, /)
Truncates the Real x to the nearest Integral toward 0.
Uses the __trunc__ magic method.
DATA
e = 2.718281828459045
inf = inf
nan = nan
pi = 3.141592653589793
tau = 6.283185307179586
FILE
/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/lib-dynload/math.cpython-38-darwin.so
>>> math.sin(90)
0.8939966636005579
>>> math.remainder(5,2)
1.0
>>> math.fabs(12)
12.0
>>> math.fabs(-12)
12.0
>>> math.cos(45)
0.5253219888177297
>>> math.cos(90)
-0.4480736161291701
>>> x = 2
>>> y = 3
>>> x & y
2
>>> 10 & 15
10
>>> #& -<bitwise Operator
>>> x = 5
>>> x &=4
>>> x
4
>>> X |= 4
Traceback (most recent call last):
File "<pyshell#61>", line 1, in <module>
X |= 4
NameError: name 'X' is not defined
>>> x |= 4
>>> x = 10
>>> x |= 5
>>> x
15
>>> 10 | 5
15
>>> x = 10
>>> x <<=1
>>> x
20
>>> y = 10
>>> y >>=1
>>> y
5
>>> |
999,877 | 9bb87049ecc7b2e6de95d442178265af881443bf | # -*- coding: utf-8 -*-
from __future__ import division
from calendar import monthrange
from ConfigParser import RawConfigParser
from datetime import datetime
import argparse
import time
from pyspark import SparkConf
from pyspark.ml.evaluation import BinaryClassificationEvaluator
from pyspark.sql import functions as F
from pyspark.sql import Row, SparkSession
from pyspark.sql.types import DoubleType, IntegerType
def get_samples(spark, data_date):
    """Fetch labelled sample rows (phone, imei, y, data_date) for one
    partition date from ronghui.yhy_model_sample."""
    sql = """
    select
        phone,
        imei,
        y,
        data_date
    from
        ronghui.yhy_model_sample
    where
        data_date = '{}'
    """.format(data_date)
    # Echo the query for the job log.
    print(sql)
    return spark.sql(sql)
def get_credit_scores(spark, alias, data_date):
    """Fetch credit_v8 model scores for one partition date, with the imei
    column renamed to `alias` so the caller controls the join key name."""
    sql = """
    select
        imei {0},
        get_json_object(score, '$.score') score
    from
        ronghui_mart.t_model_scores_orc_bucket
    where
        dt = '{1}'
        and model = 'credit_v8'
    """.format(alias, data_date)
    # Echo the query for the job log.
    print(sql)
    return spark.sql(sql)
def softmax(t):
    """Aggregate one imei's rows into a weight-averaged score.

    `t` is an (imei, rows) pair from groupByKey; each row contributes
    score * total_w, normalised by the sum of total_w and rounded to 6
    decimals. (Despite the name, this is a weighted mean, not a softmax.)
    """
    imei, rows = t[0], t[1]
    weighted_sum = 0.0
    weight_total = 0.0
    for row in rows:
        weighted_sum += row['score'] * row['total_w']
        weight_total += row['total_w']
    return Row(imei=imei, nid_score=round(weighted_sum / weight_total, 6))
if __name__ == '__main__':
    # ---- Spark session bootstrap (config read from ../config) ----
    print('====> Initializing Spark APP')
    localConf = RawConfigParser()
    localConf.optionxform = str
    localConf.read('../config')
    sparkConf = SparkConf()
    for t in localConf.items('spark-config'):
        sparkConf.set(t[0], t[1])
    spark = SparkSession.builder \
        .appName('RLab_ID_Project___Aggregate_the_Bipartite_by_IMEI') \
        .config(conf=sparkConf) \
        .enableHiveSupport() \
        .getOrCreate()
    sc = spark.sparkContext
    sc.setLogLevel('ERROR')
    # ---- CLI arguments ----
    print('====> Parsing local arguments')
    parser = argparse.ArgumentParser()
    parser.add_argument('--query_month', type=str, help='The format should be YYYYmm')
    parser.add_argument('--mode', type=str, choices=['train', 'eval', 'test'])
    parser.add_argument('--print_part1', action='store_true', default=False)
    parser.add_argument('--print_part2_org', action='store_true', default=False)
    parser.add_argument('--print_part2_new', action='store_true', default=False)
    args = parser.parse_args()
    # Last calendar day of the query month, e.g. 201906 -> '30'.
    month_end = str(monthrange(int(args.query_month[:4]), int(args.query_month[4:6]))[1])
    data_date = args.query_month+month_end
    # Epoch seconds at the partition date's last second; used to age the edges.
    date_time = time.mktime(datetime.strptime('{} 23:59:59'.format(data_date), '%Y%m%d %H:%M:%S').timetuple())
    # ---- Load samples and phone<->imei edge weights ----
    print('====> Start computation')
    samples = get_samples(spark, data_date)
    weights = spark.read.csv('/user/ronghui_safe/hgy/nid/weights/{}_{}'.format(args.query_month, args.mode), header=True, inferSchema=True)
    # key format appears to be '<phone_salt>_<imei>_<itime>' — TODO confirm upstream.
    weights = weights.withColumn('phone_salt', F.split(F.col('key'), '_').getItem(0))
    weights = weights.withColumn('imei', F.split(F.col('key'), '_').getItem(1))
    weights = weights.withColumn('itime', F.split(F.col('key'), '_').getItem(2))
    weights = weights.withColumn('itime', F.col('itime').cast(IntegerType()))
    # Edge age in days, then decay the predicted weight exponentially with age.
    weights = weights.withColumn('lasting_days', F.lit(date_time)-F.col('itime'))
    weights = weights.withColumn('lasting_days', F.round(F.col('lasting_days')/F.lit(24*3600), scale=2))
    weights = weights.withColumn('weight', F.pow(F.col('prediction'), F.col('lasting_days'))*F.lit(1e6))
    weights = weights.groupby(['imei', 'phone_salt']).agg(F.sum('weight').alias('weight'))
    #print(weights.describe('weight').show())
    samples = samples.join(weights, on='imei', how='left_outer')
    scores = get_credit_scores(spark, 'imei', data_date)
    scores = scores.withColumn('score', F.col('score').cast(DoubleType()))
    evaluator = BinaryClassificationEvaluator(rawPredictionCol='score', labelCol='y')
    # ---- Part 1: samples with no phone link; evaluate raw credit score AUC ----
    if args.print_part1:
        samples_1 = samples.where(F.isnull(F.col('phone_salt')))
        print('----> Total count of part 1 is {}'.format(samples_1.count()))
        print('----> Distinct phone count of part 1 is {}'.format(samples_1.select('phone').distinct().count()))
        print('----> Distinct imei count of part 1 is {}'.format(samples_1.select('imei').distinct().count()))
        samples_1 = samples_1.join(scores, on='imei', how='inner')
        print('----> Total count of part 1 after join is {}'.format(samples_1.count()))
        print('----> Distinct phone count of part 1 after join is {}'.format(samples_1.select('phone').distinct().count()))
        print('----> Distinct imei count of part 1 after join is {}'.format(samples_1.select('imei').distinct().count()))
        print('----> AUC on part 1 is {:.6f}'.format(evaluator.evaluate(samples_1)))
    # ---- Part 2: samples linked to a phone ----
    samples_2 = samples.where(~F.isnull(F.col('phone_salt')))
    samples_2_org = samples_2.select(['phone', 'imei', 'y']).distinct()
    if args.print_part2_org:
        # Baseline: AUC of the raw credit score on part 2.
        print('----> Total count of part 2 is {}'.format(samples_2.count()))
        print('----> Distinct phone count of part 2 is {}'.format(samples_2.select('phone').distinct().count()))
        print('----> Distinct imei count of part 2 is {}'.format(samples_2.select('imei').distinct().count()))
        samples_2_score = samples_2_org.join(scores, on='imei', how='inner')
        print('----> Total count of part 2 after join is {}'.format(samples_2_score.count()))
        print('----> Distinct phone count of part 2 after join is {}'.format(samples_2_score.select('phone').distinct().count()))
        print('----> Distinct imei count of part 2 after join is {}'.format(samples_2_score.select('imei').distinct().count()))
        print('----> Original AUC on part 2 is {:.6f}'.format(evaluator.evaluate(samples_2_score)))
    if args.print_part2_new:
        # New score: propagate credit scores across the phone<->imei bipartite
        # graph (imei -> shared phone -> connected imeis), weight each path by
        # the product of its edge weights, then take the weighted average per
        # imei (see softmax above) and re-evaluate AUC.
        samples_2 = samples_2.join(weights.select(F.col('phone_salt'), F.col('imei').alias('connected_imei'), F.col('weight').alias('connected_weight')), on='phone_salt', how='inner')
        samples_2 = samples_2.withColumn('w', F.col('weight')*F.col('connected_weight'))
        samples_2 = samples_2.groupby(['imei', 'connected_imei']).agg(F.sum('w').alias('total_w'))
        print('----> Total imei-imei pair count is {}'.format(samples_2.count()))
        #print('----> The stats of total_w is below: ')
        #print(samples_2.where(samples_2.total_w > 0).describe('total_w').show())
        # Replace zero weights with the smallest positive double so the
        # weighted average never divides by zero.
        samples_2 = samples_2.withColumn('total_w', F.when(F.col('total_w') == 0, 4.9e-324).otherwise(F.col('total_w')))
        scores = scores.select(F.col('imei').alias('connected_imei'), 'score')
        samples_2 = samples_2.join(scores, on='connected_imei', how='inner')
        samples_2 = samples_2.rdd.map(lambda row: (row['imei'], row)).groupByKey().map(softmax).toDF()
        samples_2 = samples_2.join(samples_2_org, on='imei', how='inner')
        print('----> Total count of final part 2 is {}'.format(samples_2.count()))
        print('----> Distinct phone count of final part 2 is {}'.format(samples_2.select('phone').distinct().count()))
        print('----> Distinct imei count of final part 2 is {}'.format(samples_2.select('imei').distinct().count()))
        evaluator.setRawPredictionCol('nid_score')
        print('----> New AUC on part 2 is {:.6f}'.format(evaluator.evaluate(samples_2)))
999,878 | 694fa0b722385eeae3420eca2e2eb73f1b1a6619 | from collections import deque
import sys

# Card trick (uses `deque` imported above): repeatedly throw away the top
# card, then move the next card to the bottom; print the last card standing.
readline = sys.stdin.readline
n = int(readline())
cards = deque(range(1, n + 1))
while len(cards) > 1:
    cards.popleft()      # discard the top card
    cards.rotate(-1)     # move the new top card to the bottom
print(cards[0])
999,879 | f1b47b67541089b074a5199891ccf49f793fd7cc | import random
class Carte:
    """A playing card with a point value and a display name."""

    def __init__(self, valeur, nom):
        # Public attributes, read directly throughout the game loop.
        self.valeur = valeur
        self.nom = nom
def pioche(carte):
    # NOTE(review): copies the top card of the global `pioche` list into a
    # local variable that is discarded on return — the `carte` parameter is
    # never read, and the caller never receives the drawn card.
    # NOTE(review): the name `pioche` is later rebound to the draw-pile list
    # itself, shadowing this function before it can be called; calling
    # pioche(...) after that raises TypeError. Rename one of the two.
    carte = Carte(pioche[0].valeur, pioche[0].nom)
    del pioche[0]
# Build the 20-card draw pile: four copies of each of the five creatures.
Ezmo1 = Carte(1, "Ezmo")
Ezmo2 = Carte(1, "Ezmo")
Ezmo3 = Carte(1, "Ezmo")
Ezmo4 = Carte(1, "Ezmo")
Bob1 = Carte(2, "Bob")
Bob2 = Carte(2, "Bob")
Bob3 = Carte(2, "Bob")
Bob4 = Carte(2, "Bob")
Flies1 = Carte(3, "Giant flies")
Flies2 = Carte(3, "Giant flies")
Flies3 = Carte(3, "Giant flies")
Flies4 = Carte(3, "Giant flies")
Sharkonaut1 = Carte(4, "Sharkonaut")
Sharkonaut2 = Carte(4, "Sharkonaut")
Sharkonaut3 = Carte(4, "Sharkonaut")
Sharkonaut4 = Carte(4, "Sharkonaut")
Robots1 = Carte(5, "Robots")
Robots2 = Carte(5, "Robots")
Robots3 = Carte(5, "Robots")
Robots4 = Carte(5, "Robots")
# NOTE(review): this rebinds the name `pioche` (previously a function) to the
# draw-pile list, making the pioche() helper above unreachable.
pioche = [Ezmo1, Ezmo2, Ezmo3, Ezmo4, Bob1, Bob2, Bob3, Bob4, Flies1, Flies2, Flies3, Flies4,
Sharkonaut1, Sharkonaut2, Sharkonaut3, Sharkonaut4, Robots1, Robots2, Robots3, Robots4]
scoreJ1 = 0
scoreJ2 = 0
scoreJ3 = 0
# NOTE(review): CJ1HG .. CJ3BD (each player's four table slots) are never
# defined anywhere in this file — this line raises NameError when run.
cartes = [CJ1HG,CJ2HG,CJ3HG,CJ1HD,CJ2HD,CJ3HD,CJ1BG,CJ2BG,CJ3BG,CJ1BD,CJ2BD,CJ3BD]
condJoueur50 = False
# Game loop (runs until someone reaches the end-of-game score).
while not condJoueur50:
    random.shuffle(pioche)
    defausse = []
    # Deal one card per table slot.
    for i in range(0, len(cartes)):
        # NOTE(review): `pioche` is a list at this point, so this call raises
        # TypeError — presumably the draw helper was intended here.
        pioche(cartes[i])
    # Flip the next card face-up to start the discard pile.
    defausse.append(Carte(pioche[0].valeur, pioche[0].nom))
    del pioche[0]
    cartesJ1 = [CJ1HG,CJ1HD, CJ1BG, CJ1BD]
    cartesJ2 = [CJ2HG,CJ2HD, CJ2BG, CJ2BD]
    cartesJ3 = [CJ3HG,CJ3HD, CJ3BG, CJ3BD]
    print("Main J1:", "\n",cartesJ1[0].valeur, cartesJ1[0].nom, cartesJ1[1].valeur, cartesJ1[1].nom,
    "\n",cartesJ1[2].valeur, cartesJ1[2].nom, cartesJ1[3].valeur, cartesJ1[3].nom, "\n")
    print("CarteHautDefausse :", defausse[0].valeur, defausse[0].nom)
    totalJ1 = cartesJ1[0].valeur + cartesJ1[1].valeur + cartesJ1[2].valeur + cartesJ1[3].valeur
    # NOTE(review): totals for J2 and J3 are computed from cartesJ1 —
    # copy-paste bug; should use cartesJ2 / cartesJ3.
    totalJ2 = cartesJ1[0].valeur + cartesJ1[1].valeur + cartesJ1[2].valeur + cartesJ1[3].valeur
    totalJ3 = cartesJ1[0].valeur + cartesJ1[1].valeur + cartesJ1[2].valeur + cartesJ1[3].valeur
    finManche = False
    dernierTourJ1 = False
    dernierTourJ2 = False
    dernierTourJ3 = False
    # Round loop: one player turn per iteration until the round ends.
    while not finManche:
        if dernierTourJ1 == True:
            finManche = True
            break
        # Draw pile exhausted: recycle the discard pile (keeping its top card)
        # and reshuffle.
        if len(pioche) == 0:
            for i in defausse[1:]:
                pioche.append(Carte(defausse[0].valeur, defausse[0].nom))
                del defausse[0]
            random.shuffle(pioche)
        choixJ1 = input("D = Defausser une carte, P = Piocher, E = Echanger : ")
        if choixJ1 == "D" or choixJ1 == "d":
            carteAdefausser = input("Quelle carte voulez-vous défausser ? 1, 2 3 ou 4 ?")
            # NOTE(review): input() returns a str, so comparisons against the
            # ints 1..4 below are always False — cast with int() first.
            if carteAdefausser == 1 and cartesJ1[0].valeur == defausse[0].valeur:
                cartesJ1[0] = Carte(0, "Vide")
            if carteAdefausser == 2 and cartesJ1[1].valeur == defausse[0].valeur:
                cartesJ1[1] = Carte(0, "Vide")
            if carteAdefausser == 3 and cartesJ1[2].valeur == defausse[0].valeur:
                cartesJ1[2] = Carte(0, "Vide")
            if carteAdefausser == 4 and cartesJ1[3].valeur == defausse[0].valeur:
                cartesJ1[3] = Carte(0, "Vide")
        if choixJ1 == "P" or choixJ1 == "p":
            print("La carte piochée :", pioche[0].valeur, pioche[0].nom)
            choixPJ1 = input("Voulez-vous l'échanger (E) ou la défausser (D) ? : ")
            if choixPJ1 == "E" or choixPJ1 == "e":
                carteEchange = input("Avec quelle carte ? 1, 2, 3, 4 : ")
                # NOTE(review): same str-vs-int comparison issue as above.
                if carteEchange == 1:
                    #defausse.insert(0, Carte(cartesJ1[0].valeur, cartesJ1[0].nom))
                    cartesJ1[0] = Carte(pioche[0].valeur, pioche[0].nom)
                    del pioche[0]
                if carteEchange == 2:
                    #defausse.insert(0, Carte(cartesJ1[0].valeur, cartesJ1[0].nom))
                    cartesJ1[1] = Carte(pioche[0].valeur, pioche[0].nom)
                    del pioche[0]
                if carteEchange == 3:
                    #defausse.insert(0, Carte(cartesJ1[0].valeur, cartesJ1[0].nom))
                    cartesJ1[2] = Carte(pioche[0].valeur, pioche[0].nom)
                    del pioche[0]
                if carteEchange == 4:
                    #defausse.insert(0, Carte(cartesJ1[0].valeur, cartesJ1[0].nom))
                    cartesJ1[3] = Carte(pioche[0].valeur, pioche[0].nom)
                    del pioche[0]
        print("Main J1:", "\n", cartesJ1[0].valeur, cartesJ1[0].nom, cartesJ1[1].valeur, cartesJ1[1].nom,
        "\n", cartesJ1[2].valeur, cartesJ1[2].nom, cartesJ1[3].valeur, cartesJ1[3].nom, "\n")
999,880 | 03b06944ac1fdae2cebeea1aadc296ff3b9ce0b1 | #! python3
# coding: utf-8
import numpy as np
import gensim
from collections import OrderedDict
from sklearn.decomposition import PCA
def load_model(embeddings_file, fasttext=False):
    """Load a word-embedding model, picking the loader by file extension.

    Handles native fastText files (when ``fasttext=True``), binary word2vec
    (.bin/.bin.gz), text word2vec (.txt/.txt.gz/.vec/.vec.gz) and gensim's
    own save format as the fallback. Vectors are L2-normalised in place
    before the model is returned.
    """
    if fasttext:
        emb_model = gensim.models.fasttext.load_facebook_vectors(embeddings_file)
    elif embeddings_file.endswith((".bin.gz", ".bin")):
        # Binary word2vec format.
        emb_model = gensim.models.KeyedVectors.load_word2vec_format(
            embeddings_file, binary=True, unicode_errors="replace"
        )
    elif embeddings_file.endswith((".txt.gz", ".txt", ".vec.gz", ".vec")):
        # Plain-text word2vec format.
        emb_model = gensim.models.KeyedVectors.load_word2vec_format(
            embeddings_file, binary=False, unicode_errors="replace"
        )
    else:
        # Assume gensim's native save format.
        emb_model = gensim.models.Word2Vec.load(embeddings_file)
    # Normalise all vectors to unit length (in place).
    emb_model.init_sims(replace=True)
    return emb_model
def jaccard(list0, list1):
    """Jaccard similarity of two collections: |A ∩ B| / |A ∪ B|.

    Duplicates are ignored (inputs are converted to sets). Returns 0.0 when
    both inputs are empty instead of raising ZeroDivisionError (the original
    divided by len(A) + len(B) - |A ∩ B| == 0 in that case).
    """
    set_0 = set(list0)
    set_1 = set(list1)
    union_size = len(set_0 | set_1)
    if union_size == 0:
        # Both inputs empty: similarity is defined as 0.0 rather than crashing.
        return 0.0
    return len(set_0 & set_1) / union_size
def jaccard_f(word, models, row=10):
    """Compare a word across several models via its nearest-neighbour lists.

    Returns (similarities, associations): ``associations`` maps each model
    name to the word's top-``row`` neighbours; ``similarities[word]`` maps
    each model name (after the first) to the Jaccard overlap between its
    neighbour list and the previous model's list.
    """
    associations = OrderedDict()
    similarities = {word: OrderedDict()}
    previous = None
    for model_name in models:
        model = models[model_name]
        neighbours = [pair[0] for pair in model.most_similar(positive=[word], topn=row)]
        associations[model_name] = neighbours
        if previous is not None:
            similarities[word][model_name] = jaccard(previous[1], neighbours)
        previous = (model_name, neighbours)
    return similarities, associations
def wordvectors(words, emb_model):
    """Stack the embedding vectors of ``words`` into a matrix.

    Returns a (len(words), emb_model.vector_size) float array whose i-th row
    is the vector for words[i]. Raises KeyError for out-of-vocabulary words.
    (Idiom fix: iterate with enumerate instead of range(len(...)).)
    """
    matrix = np.zeros((len(words), emb_model.vector_size))
    for row, word in enumerate(words):
        matrix[row, :] = emb_model[word]
    return matrix
def get_number(word, vocab=None):
    """Return the index of ``word`` in the model vocabulary, or 0 if absent.

    Fix: the original's default ``vocab=None`` made ``word in vocab`` raise
    TypeError; a missing/None vocab now behaves like an empty one and
    returns 0. Callers that pass a real vocab are unaffected.
    """
    if vocab is not None and word in vocab:
        return vocab[word].index
    return 0
999,881 | b202a93d7b0969ce11639b60a4902f22960c68a0 | import numpy as np
from scipy.interpolate import interp2d
from sklearn.base import BaseEstimator, TransformerMixin
# from skimage.feature import hog
class Scaler(BaseEstimator, TransformerMixin):
    """Affine rescaling transformer: maps x to (x / a) - b."""

    def __init__(self, a=100, b=1):
        self.a = a
        self.b = b

    def fit(self, X, y=None):
        # Stateless: nothing to learn from the data.
        return self

    def transform(self, X):
        scaled = X / self.a
        return scaled - self.b
class Flatten(BaseEstimator, TransformerMixin):
    """Collapse each sample to a 1-D vector, preserving the batch axis."""

    def fit(self, X, y=None):
        # Stateless: nothing to learn from the data.
        return self

    def transform(self, X):
        n_samples = X.shape[0]
        return X.reshape(n_samples, -1)
class Preprocessing(BaseEstimator, TransformerMixin):
    """Turn one RGB digit image into a 28x28 inverted grayscale array.

    Pipeline: average the colour channels, resize to 28x28, invert so ink is
    bright on a dark background, then zero out everything below a fixed
    intensity threshold. Returns shape (1, 28, 28).
    Fix: removed a leftover debug ``print(f)`` that dumped the interpolator
    object to stdout on every call.
    """

    def fit(self, X, y=None):
        # Stateless: nothing to learn from the data.
        return self

    def transform(self, X):
        def resize(img, new_W=28, new_H=28):
            """Rescale a 2-D image to (new_H, new_W) via bilinear interpolation."""
            W, H = img.shape
            xrange = lambda x: np.linspace(0, 1, x)
            # NOTE(review): axes are mapped as x<-H, y<-W here; presumably
            # intentional for square inputs — verify for non-square images.
            f = interp2d(xrange(H), xrange(W), img, kind="linear")
            new_img = f(xrange(new_W), xrange(new_H))
            return new_img

        X = np.mean(X, axis=2)            # RGB -> grayscale
        img = -(resize(X) - 255.0)        # invert: digit bright, paper dark
        # Mask the digit strokes and zero out the background.
        mask = img > 115.0
        img[~mask] = 0.0
        return img[np.newaxis]
# class HogFeaturesExtraction(BaseEstimator, TransformerMixin):
# """
# This class will create a new dataset by extracting the hog features of
# the input dataset
#
# param orientations: the number of orientations in which the gradient will be calculated
# param ppc: number of pixels per cell. It's recommended that the value of ppc is not lower than 4, as
# three scales will be concatenated for a better performance (ppc, ppc/2 and ppc/4)
# param cpb: number of cells per block
# """
#
# def __init__(self, orientations=6, ppc=7, cpb=1):
# self.orientations = orientations
# self.ppc = ppc
# self.cpb = cpb
#
# def fit(self, X, y=None):
# return self
#
# def transform(self, X):
# X_hog = np.concatenate([np.concatenate([hog(xi, orientations=self.orientations,
# pixels_per_cell=(ppc, ppc),
# cells_per_block=(self.cpb, self.cpb),
# visualize=False,
# block_norm='L1')[np.newaxis, :] for xi in X],
# axis=0) for ppc in [self.ppc, int(self.ppc / 2), int(self.ppc / 4)]],
# axis=1)
# return X_hog |
999,882 | 6c82588bd680eb33a4634737333b62431c6580e7 | import math
import random
import numpy as np
import csv
np.seterr(all = 'ignore')
#np.random.seed(0)
def tanh(x):
    """Hyperbolic-tangent activation (element-wise)."""
    return np.tanh(x)


# derivative for tanh sigmoid
def dtanh(x):
    """Derivative of tanh evaluated at x: 1 - tanh(x)^2."""
    t = tanh(x)
    return 1 - t * t
def softmax(x):
    """Row-wise softmax using the max-subtraction trick for numerical
    stability; returns the normalised rows stacked into an array."""
    shifted = [np.exp(row - np.amax(row)) for row in x]
    normalised = [row_exp / np.sum(row_exp) for row_exp in shifted]
    return np.array(normalised)
class MLP_NeuralNetwork(object):
    """
    Basic MultiLayer Perceptron (MLP) network with TWO hidden layers,
    adapted from the book 'Programming Collective Intelligence'
    (http://shop.oreilly.com/product/9780596529321.do).
    Layers: input -> hidden1 -> hidden2 -> output; tanh on both hidden
    layers and softmax on the output.
    Expects data as a pair [targets, inputs] of 2-D arrays (see train()).
    NOTE(review): this file is Python 2 (print statements, xrange in demo()).
    Fully pypy compatible.
    """
    def __init__(self, input, hidden1, hidden2, output, iterations, learning_rate, momentum, rate_decay):
        """
        :param input: number of input neurons
        :param hidden1: number of neurons in the first hidden layer
        :param hidden2: number of neurons in the second hidden layer
        :param output: number of output neurons
        :param iterations: number of full-batch training passes
        :param learning_rate: initial SGD step size (decayed each lap)
        :param momentum: factor applied to the previous weight update
        :param rate_decay: per-lap learning-rate decay factor
        """
        # initialize parameters
        self.iterations = iterations
        self.learning_rate = learning_rate
        # Kept so the original rate can be reported after decay (test_against).
        self.learning_rate_init = learning_rate
        self.momentum = momentum
        self.rate_decay = rate_decay
        # initialize arrays
        self.input = input
        self.hidden1 = hidden1
        self.hidden2 = hidden2
        self.output = output
        # Activation placeholders; replaced by numpy arrays in feedForward().
        self.ai = 1
        self.ah1 = 1
        self.ah2 = 1
        self.ao = 1
        # create randomized weights
        # use scheme from 'efficient backprop to initialize weights
        # NOTE(review): under Python 2, (1/2) is integer division == 0, so
        # these "ranges" evaluate to 1.0 instead of 1/sqrt(n) — confirm
        # whether 0.5 was intended before changing training behavior.
        input_range = 1.0 / self.input ** (1/2)
        hidden_range = 1.0 / self.hidden1 ** (1/2)
        self.wi = np.random.normal(loc = 0, scale = input_range, size = (self.input, self.hidden1))
        self.wh = np.random.normal(loc = 0, scale = hidden_range, size = (self.hidden1, self.hidden2))
        self.wo = np.random.uniform(size = (self.hidden2, self.output)) / np.sqrt(self.hidden2)
        # Momentum buffers: previous weight update for each layer, applied
        # (scaled by self.momentum) on the next backPropagate() call.
        self.ci = np.zeros((self.input, self.hidden1))
        self.ch = np.zeros((self.hidden1, self.hidden2))
        self.co = np.zeros((self.hidden2, self.output))
    def feedForward(self, inputs):
        """
        Forward pass: matrix-multiply the whole batch through both hidden
        layers (tanh) and the output layer (softmax), caching each layer's
        activations on self for use by backPropagate().
        :param inputs: 2-D array, one sample per row
        :return: None (activations stored in self.ai/ah1/ah2/ao)
        """
        self.ai = np.array(inputs)
        self.ah1 = tanh(self.ai.dot(self.wi))
        self.ah2 = tanh(self.ah1.dot(self.wh))
        self.ao = softmax(self.ah2.dot(self.wo))
    def backPropagate(self, targets):
        """
        Backward pass over the cached activations from feedForward().
        For the output layer the delta is (output - target); hidden deltas
        are back-propagated through the transposed weights and scaled by
        the tanh derivative. Each weight matrix is then updated with
        learning_rate * gradient plus momentum * previous update.
        :param targets: 2-D array of one-hot target rows
        :return: scalar mean of the negated output deltas (training signal
            printed by train(); not a conventional loss)
        """
        target = np.array(targets)
        output_deltas = -(target - self.ao)
        error = output_deltas.dot(self.wo.T)
        hidden2_deltas = dtanh(self.ah2) * error
        error = hidden2_deltas.dot(self.wh.T)
        hidden1_deltas = dtanh(self.ah1) * error
        ############output ----> hidden_2##############
        change = output_deltas.T.dot(self.ah2).T
        self.wo -= (self.learning_rate * change) + (self.co * self.momentum)
        self.co = change
        ############hidden_2 ----> hidden_1##############
        change = hidden2_deltas.T.dot(self.ah1).T
        self.wh -= (self.learning_rate * change) + (self.ch * self.momentum)
        self.ch = change
        ############hidden_1 ----> input##############
        change = hidden1_deltas.T.dot(self.ai).T
        self.wi -= (self.learning_rate * change) + (self.ci * self.momentum)
        self.ci = change
        return np.mean(-output_deltas)
    def train(self, patterns):
        """Full-batch training: patterns is [targets, inputs]; one forward and
        one backward pass per iteration, decaying the learning rate each lap.
        Note the decay line reduces lr by a constant factor 1/(1 + rate_decay)."""
        print "Begin training"
        for i in range(self.iterations):
            error = 0.0
            self.feedForward(patterns[1])
            error = self.backPropagate(patterns[0])
            print "Error : {}, lap : {}".format(error, i)
            self.learning_rate = self.learning_rate * (self.learning_rate / (self.learning_rate + (self.learning_rate * self.rate_decay)))
    def test_cross(self, test):
        """Cross-validation accuracy: test is [targets, inputs]; prints the
        fraction of rows whose argmax prediction matches the one-hot target."""
        print "Predicting..."
        self.ai = np.array(test[1])
        self.ah1 = tanh(self.ai.dot(self.wi))
        self.ah2 = tanh(self.ah1.dot(self.wh))
        self.ao = softmax(self.ah2.dot(self.wo))
        dic = {}
        c = 0
        e = 0
        for out,check in zip(self.ao,test[0]):
            e += 1
            # argmax via list index of the max value
            n = out.tolist().index(max(out))
            if n == check.tolist().index(max(check)):
                c += 1
        print "Aciertos:", c/float(e)
    def test_against(self):
        """Score predictions on csv/svd_test.csv against the reference labels
        in csv/submit98.csv (second column); prints hit rate over 28000 rows."""
        test = open("csv/svd_test.csv", "r")
        r = csv.reader(test)
        next(r)
        ar = open("csv/submit98.csv","r")
        ta = csv.reader(ar)
        next(ta)
        print "Predicting..."
        output = []
        self.ai = []
        for row in r:
            self.ai.append([float(x) for x in row])
        self.ai = np.array(self.ai)
        self.ah1 = tanh(self.ai.dot(self.wi))
        self.ah2 = tanh(self.ah1.dot(self.wh))
        self.ao = softmax(self.ah2.dot(self.wo))
        e = 0
        for out, csv_out in zip(self.ao, ta):
            n = out.tolist().index(max(out))
            if n == int(csv_out[1]):
                e += 1
        print "{} laps lr = {} momentum = {} decay = {} Aciertos = {}".format(self.iterations, self.learning_rate_init, self.momentum, self.rate_decay, e/28000.0)
        print e
        test.close()
        ar.close()
    def test(self):
        """
        Predict csv/test.csv and write a Kaggle-style (ImageId, Label)
        submission to csv/submit2.csv. Rows whose prediction raises are
        given a random label and counted as errors.
        """
        test = open("csv/test.csv", "r")
        r = csv.reader(test)
        next(r)
        ar = open("csv/submit2.csv","w")
        w = csv.writer(ar)
        print self.wi[0].mean()
        print self.wo[0].mean()
        print "Predicting..."
        output = []
        self.ai = []
        for row in r:
            self.ai.append([int(x) for x in row])
        self.ai = np.array(self.ai)
        self.ah1 = tanh(self.ai.dot(self.wi))
        self.ah2 = tanh(self.ah1.dot(self.wh))
        self.ao = softmax(self.ah2.dot(self.wo))
        w.writerow(("ImageId","Label"))
        c = 1
        e = 0
        dic = {}
        for out in self.ao:
            try:
                n = out.tolist().index(max(out))
                dic.setdefault(n,0)
                dic[n] += 1
                w.writerow((c, n))
            except:
                # Fallback label when argmax fails (e.g. NaN outputs).
                w.writerow((c, np.random.randint(0,9)))
                e += 1
            c += 1
        print "Total errors: ",e
        print dic
        test.close()
        ar.close()
def demo():
    """
    Train the two-hidden-layer MLP on csv/svd_train.csv (first column is the
    digit label, remaining 100 columns are SVD-reduced pixel features) and
    score the result with test_against().
    """
    def load_data():
        # Build one-hot targets from column 0 and float features from the rest.
        train = open("csv/svd_train.csv", "r")
        r = csv.reader(train)
        next(r)
        data = []
        target = []
        print "Prepping data..."
        for row in r:
            aux = [0 for x in xrange(10)]
            aux[int(row[0])] = 1
            target.append(aux)
            data.append([float(x) for x in row[1:]])
        train.close()
        data = np.array(data)
        target = np.array(target)
        #train = [target[:35000],data[:35000]]
        #test = [target[35000:],data[35000:]]
        return [target, data]
    # 101 inputs -> 75 -> 35 -> 10 one-hot digit outputs.
    NN = MLP_NeuralNetwork(101, 75, 35, 10,
                       iterations = 200,
                       learning_rate = 0.5,
                       momentum = 0.05,
                       rate_decay = 0.005)
    train = load_data()
    NN.train(train)
    #NN.test_cross(test)
    #NN.test()
    NN.test_against()
# Entry point: trains on csv/svd_train.csv, scores against csv/submit98.csv.
if __name__ == '__main__':
    demo()
#cross-validation
# 15 laps -> lr -> 0.1 -> Aciertos: 0.072058823529
# 15 laps -> lr -> 0.5 -> Aciertos: 0.052117647058
# 15 laps -> lr -> 0.01 -> Aciertos: 0.046
# 50 laps -> lr -> 0.01 -> Aciertos: 0.182529411765
# 50 laps -> lr -> 0.1 -> Aciertos: 0.300823529412
# 50 laps -> lr -> 0.1 -> Aciertos: 0.325764705882 -> l2_in = 0.01 -> l2_out = 0.01
# 50 laps -> lr -> 0.1 -> Aciertos: 0.328117647059 -> l2_in = 0.1 -> l2_out = 0.1
# 50 laps -> lr -> 0.1 -> Aciertos: 0.042117647058 -> l2_in = 10 -> l2_out = 10
# 50 laps -> lr -> 0.1 -> Aciertos: 0.225352941176 -> l2_in = 0.5 -> l2_out = 0.5
# 50 laps -> lr -> 0.1 -> Aciertos: 0.220764705882 -> l2_in = 0.5 -> l2_out = 0.1
# 50 laps -> lr -> 0.1 -> Aciertos: 0.297705882353 -> l2_in = 0.1 -> l2_out = 0.5
# 50 laps -> lr -> 0.1 -> Aciertos: 0.267235294118 -> l2_in = 0.1 -> l2_out = 0.1 -> hl = 200
# 50 laps -> lr -> 0.1 -> Aciertos: 0.336823529412 -> l2_in = 0.1 -> l2_out = 0.1 -> hl = 150
# 50 laps -> lr -> 0.1 -> Aciertos: 0.350058823529 -> l2_in = 0.1 -> l2_out = 0.1 -> momentum = 0.01
# 50 laps -> lr -> 0.05 -> Aciertos: 0.349882352941 -> l2_in = 0.1 -> l2_out = 0.1 -> momentum = 0.005 -> decay = 0.001
# 50 laps -> lr -> 0.05 -> Aciertos: 0.338588235294 -> l2_in = 0.1 -> l2_out = 0.1 -> momentum = 0.05 -> decay = 0.001
# 50 laps -> lr -> 0.05 -> Aciertos: 0.343823529412 -> l2_in = 0.1 -> l2_out = 0.1 -> momentum = 0.05 -> decay = 0.01
# 50 laps -> lr -> 0.05 -> Aciertos: 0.345882352941 -> l2_in = 0.1 -> l2_out = 0.1 -> momentum = 0.005 -> decay = 0.01
#1 hidden layer
# 50 laps lr = 0.5 momentum = 0.01 decay = 0.001 Aciertos: 0.435428571429
# 100 laps lr = 0.5 momentum = 0.01 decay = 0.001 Aciertos: 0.601928571429
# 100 laps lr = 0.01 momentum = 0.5 decay = 0.0001 Aciertos: 0.8425
# 100 laps lr = 0.05 momentum = 0.5 decay = 0.0001 Aciertos: 0.823642857143
# 100 laps lr = 0.05 momentum = 0.1 decay = 0.0001 Aciertos: 0.737785714286
# 100 laps lr = 0.01 momentum = 0.5 decay = 0.0002 Aciertos = 0.82725
# 100 laps lr = 0.01 momentum = 0.8 decay = 0.0002 Aciertos = 0.844607142857
# 100 laps lr = 0.01 momentum = 1 decay = 0.0002 Aciertos = 0.8265
# 100 laps lr = 0.01 momentum = 1.5 decay = 0.0002 Aciertos = 0.827571428571
# 100 laps lr = 0.01 momentum = 0.8 decay = 0.0005 Aciertos = 0.857142857143
# 150 laps lr = 0.01 momentum = 0.8 decay = 0.0002 Aciertos = 0.771107142857
# 100 laps lr = 0.01 momentum = 0.8 decay = 0.0005 Aciertos = 0.823464285714
#layers: 784 1000 10
#2 hidden layer
# 50 laps lr = 0.3 momentum = 0.05 decay = 0.01 Aciertos = 0.665 784, 250, 100, 10
# 50 laps lr = 0.5 momentum = 0.05 decay = 0.001 Aciertos = 0.651285714286 784, 250, 100, 10
# 50 laps lr = 0.01 momentum = 0.8 decay = 0.0005 Aciertos = 0.114178571429 784, 50, 200, 10
# 50 laps lr = 0.01 momentum = 0.8 decay = 0.0005 Aciertos = 0.128892857143 784, 225, 95, 10
# 50 laps lr = 0.5 momentum = 0.8 decay = 0.0005 Aciertos = 0.416964285714 784, 225, 95, 10
# 50 laps lr = 0.5 momentum = 0.5 decay = 0.05 Aciertos = 0.505321428571 784, 250, 100, 10
# 50 laps lr = 0.2 momentum = 0.05 decay = 0.005 Aciertos = 0.6535 784, 250, 100, 10 |
999,883 | c42b60cdb82b3c4b693a2a89489e551a708107f3 | from math import *
from sklearn.cluster import KMeans
from sklearn.linear_model import LinearRegression
from sklearn import preprocessing
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
import folium
import seaborn as sns
from mpl_toolkits.mplot3d import Axes3D
from sklearn.metrics import silhouette_score
def get_column_value(df, column_name: str):
    """Return the named DataFrame column as a numpy array (copy)."""
    column = df[column_name]
    return np.array(column)
def main():
    """Space-time KMeans clustering of positive hornet sightings.

    Reads 'dataset.csv', clusters (latitude, longitude, days-since-first-
    detection) triples, fits a line through the time-ordered cluster centres,
    shows several diagnostic matplotlib figures, and saves an interactive
    folium map as 'map_cluster<k>.html'.

    Fixes: folium.CircleMarker was called with the misspelled keyword
    'cradius' for the cluster centres, so the intended radius of 6 was
    never applied; local name 'map' shadowed the builtin.
    """
    dataset = pd.read_csv('dataset.csv')
    positive = dataset.loc[dataset['Lab Status'] == 'Positive ID']
    latitude = get_column_value(positive, 'Latitude').tolist()
    longitude = get_column_value(positive, 'Longitude').tolist()
    date = get_column_value(positive, 'Detection Date').tolist()
    date = pd.to_datetime(date)
    # Days elapsed, shifted so the earliest detection is day 0.
    interval = (date - date[0]).days
    interval = interval - np.min(interval)
    data = []
    for i, la in enumerate(latitude):
        data.append([latitude[i], longitude[i], interval[i]])
    data = np.array(data)
    data = data[np.argsort(data[:, 2])]  # chronological order
    data_scale = preprocessing.scale(data)  # z-score features for clustering
    # Elbow method: SSE (inertia) vs k.
    SSE = []
    for k in range(2, 9):
        kmeans = KMeans(n_clusters=k, random_state=0).fit(data_scale)
        SSE.append(kmeans.inertia_)
    X = range(2, 9)
    plt.xlabel('Number of Clusters(k)')
    plt.ylabel('SSE')
    plt.title('SSE vs k')
    plt.plot(X, SSE, 'o-')
    plt.show()
    # Silhouette analysis (on the unscaled data, as before).
    Scores = []
    for k in range(2, 9):
        kmeans = KMeans(n_clusters=k, random_state=0).fit(data)
        Scores.append(silhouette_score(data, kmeans.labels_, metric='euclidean'))
    X = range(2, 9)
    plt.xlabel('Number of Clusters(k)')
    plt.ylabel('Silhouette Coefficient')
    plt.title('Silhouette Coefficient vs k')
    plt.plot(X, Scores, 'o-')
    plt.show()
    # Final clustering with the chosen k.
    cluster_num = 3
    kmeans = KMeans(n_clusters=cluster_num, random_state=0).fit(data_scale)
    label = kmeans.labels_
    centers = []
    label_list = []
    for i in range(cluster_num):
        label_list.append(data[label == i, 0:2].tolist())
        centers.append(np.mean(data[label == i], axis=0).tolist())
    centers = np.array(centers)
    centers_list = np.delete(centers, -1, axis=1).tolist()  # (lat, lon) only
    centers = centers[np.argsort(centers[:, 2])]  # order centres in time
    print(centers)
    ax1 = plt.axes(projection='3d')
    ax1.scatter3D(data[:, 1], data[:, 0], data[:, 2],
                  c=kmeans.labels_, cmap='rainbow')
    ax1.scatter3D(centers[:, 1], centers[:, 0], centers[:, 2], c='black', s=150, alpha=0.5)
    plt.show()
    # Fit latitude = k * longitude + b through the cluster centres.
    x = centers[:, 1].reshape((-1, 1))
    y = centers[:, 0]
    reg = LinearRegression().fit(x, y)
    k = reg.coef_[0]
    b = reg.intercept_
    print("Y = %.5fX + (%.5f)" % (k, b))
    plt.scatter(data[:, 1], data[:, 0], c=label, cmap='rainbow')
    plt.scatter(centers[:, 1], centers[:, 0], c='black', s=150, alpha=0.5)
    data = data[np.argsort(data[:, 1])]
    plt.plot(data[np.argsort(data[:, 1])][:, 1].reshape((-1, 1)),
             reg.predict(data[np.argsort(data[:, 1])][:, 1].reshape((-1, 1))), c='b', linestyle='--')
    plt.xlabel('Longitude')
    plt.ylabel('Latitude')
    plt.title('Linear Regression of Cluster Centers(k=%d)' % cluster_num)
    plt.grid()
    plt.show()
    # Perpendicular-distance diagnostics against the fitted line.
    cluster_foot_x, cluster_foot_y = get_foot_point(centers[-1, 1], centers[-1, 0], k, b)
    print("center-%d distance to line:%.5f" % (cluster_num, get_distance([centers[-1, 1], centers[-1, 0]], [cluster_foot_x, cluster_foot_y])))
    sum_dis = 0
    for i in range(data.shape[0]):
        foot_x, foot_y = get_foot_point(data[i, 1], data[i, 0], k, b)
        sum_dis += get_distance([data[i, 1], data[i, 0]], [foot_x, foot_y])
    print("sum_dis:%.5f" % sum_dis)
    # Interactive folium map: one colour per cluster, centres semi-transparent.
    colors = ['blue', 'green', 'orange', 'pink', 'purple', 'red']
    site_map = folium.Map(location=[48.9938, -122.702], zoom_start=8, tiles="OpenStreetMap")
    for i in range(len(label_list)):
        point_list = label_list[i]
        for point in range(len(point_list)):
            folium.CircleMarker(radius=2.5,
                                location=label_list[i][point],
                                color=colors[i],
                                fill=True,
                                fill_color=colors[i],
                                fill_opacity=1
                                ).add_to(site_map)
    for i in range(len(centers_list)):
        # BUG FIX: keyword was misspelled 'cradius'.
        folium.CircleMarker(radius=6,
                            location=centers_list[i],
                            color=colors[i],
                            fill=True,
                            fill_color=colors[i],
                            fill_opacity=0.3
                            ).add_to(site_map)
    site_map.save('map_cluster%d.html' % cluster_num)
def get_foot_point(point_x, point_y, k, b):
    """Foot of the perpendicular dropped from (point_x, point_y) onto the
    line y = k*x + b; returns the foot's (x, y)."""
    denom = k * k + 1
    x_foot = (point_x + k * (point_y - b)) / denom
    y_foot = k * x_foot + b
    return x_foot, y_foot
def get_distance(origin, destination):
    """Haversine great-circle distance in metres between two (lon, lat)
    points, using a 6371 km Earth radius."""
    lon_d = radians(float(destination[0]))
    lon_o = radians(float(origin[0]))
    lat_d = radians(float(destination[1]))
    lat_o = radians(float(origin[1]))
    half_chord = (
        sin((lat_d - lat_o) / 2) ** 2
        + cos(lat_d) * cos(lat_o) * sin((lon_d - lon_o) / 2) ** 2
    )
    return 2 * asin(sqrt(half_chord)) * 6371 * 1000
# Run the full clustering + mapping pipeline when executed as a script.
if __name__ == "__main__":
    main()
|
999,884 | 9e704d74cb3189c8963800665a0bba393d3083ae | import math
import numpy as np
#Cost Function Bi-objective
# x = Solution variable
# Ftype = ZDT problem option
def ZDT(x, Ftype, pRound = 2):
    """Bi-objective ZDT benchmark functions (Zitzler-Deb-Thiele suite).

    :param x: decision vector; any sequence (now converted with np.asarray,
        so plain Python lists work as well as numpy arrays)
    :param Ftype: which ZDT problem to evaluate (1, 2, 3 or 4)
    :param pRound: number of decimals the objectives are rounded to
    :return: tuple (f1, f2), both rounded to pRound decimals
    :raises ValueError: for an Ftype outside 1-4 (previously this fell
        through and crashed with a confusing NameError on f1/f2)
    """
    x = np.asarray(x, dtype=float)
    n = len(x)
    f1 = x[0]
    if Ftype == 1:
        g = 1 + 9 * np.sum(x[1:]) / (n - 1)
        h = 1 - math.sqrt(f1 / g)
    elif Ftype == 2:
        g = 1 + 9 * np.sum(x[1:]) / (n - 1)
        h = 1 - (f1 / g) ** 2
    elif Ftype == 3:
        g = 1 + 9 * np.sum(x[1:]) / (n - 1)
        # Sine term makes the Pareto front discontinuous.
        h = 1 - math.sqrt(f1 / g) - (f1 / g) * math.sin(10 * math.pi * f1)
    elif Ftype == 4:
        # Rastrigin-style g term: highly multimodal landscape.
        g = 1 + 10 * (n - 1) + np.sum(x[1:] ** 2 - 10 * np.cos(4 * math.pi * x[1:]))
        h = 1 - math.sqrt(f1 / g)
    else:
        raise ValueError("Ftype must be 1, 2, 3 or 4, got {!r}".format(Ftype))
    f2 = g * h
    return round(f1, pRound), round(f2, pRound)
999,885 | a18f051484be9df17d5db76723947a8bf9b288dd |
# ******************************************************
# * Copyright © 2017-2023 - Jordan Irwin (AntumDeluge) *
# ******************************************************
# * This software is licensed under the MIT license. *
# * See: LICENSE.txt for details. *
# ******************************************************
## @module dbr.icon
import wx
## @todo
# Doxygen.
class Icon(wx.Icon):
    ## wx.Icon that infers its bitmap type from the file name suffix.
    def __init__(self, name, desiredWidth=-1, desiredHeight=-1):
        wx.Icon.__init__(self, name, self.GetBitmapType(name), desiredWidth,
                desiredHeight)

    ## Map a file name to a wx bitmap-type constant.
    #
    #  Only ".png" is recognized explicitly; everything else (including an
    #  empty/None name or a name without a known suffix) falls back to
    #  wx.BITMAP_TYPE_ANY so wx auto-detects the format.
    def GetBitmapType(self, filename):
        if not filename:
            return wx.BITMAP_TYPE_ANY
        suffix = filename.split(".")[-1]
        return {"png": wx.BITMAP_TYPE_PNG}.get(suffix, wx.BITMAP_TYPE_ANY)
|
999,886 | a45f2c9415f81c7e09bb1ed16a64651f26391272 | from __future__ import absolute_import
from multipledispatch import Dispatcher
# Main interface to execution; ties the following dispatchers together.
execute = Dispatcher('execute')
# Executes a single operation (node) of the expression.
execute_node = Dispatcher('execute_node')
# Compute from the top of the expression downward.
execute_first = Dispatcher('execute_first')
# Possibly preload data from the client, given a node.
data_preload = Dispatcher('data_preload')
# Default does nothing: registered for (object, object), so any
# (node, data) pair falls through to it unless a more specific
# overload has been registered.
@data_preload.register(object, object)
def data_preload_default(node, data, **kwargs):
    """Fallback preload hook: return *data* unchanged."""
    return data
|
999,887 | a4fbb98901c72367dbcc94e3e145bc44412c83f2 | def count_triplet(arr: [], n, k):
arr = sorted(arr)
count = 0
for i in range(0, n):
value = k - arr[i]
count = count + upper_bound_v2(arr[i + 1:], value, len(arr[i + 1:]) - 1)
return count
# Finding element count lesser than k using sliding window
def upper_bound_v2(arr: list, value, n):
    """Count pairs (i, j) with i < j <= n and arr[i] + arr[j] < value.

    arr must be sorted ascending; n is the index of the LAST element to
    consider. Classic two-pointer sweep: when the current (start, end)
    pair is below the bound, every j in (start, end] also qualifies, so
    end - start pairs are added at once.

    Fixes two defects in the previous version: the right pointer started
    at n - 1, so the last element was never paired, and each qualifying
    window contributed only 1 instead of end - start (it even counted the
    degenerate pair (start, start)). E.g. for arr=[1, 2, 10], value=100
    the old code returned 2; the correct count of pairs is 3.
    """
    start = 0
    end = n
    count = 0
    while start < end:
        if arr[start] + arr[end] >= value:
            end = end - 1
        else:
            count = count + (end - start)
            start = start + 1
    return count
# Finding element count lesser than the k
def upper_bound(arr: list, start: int, end: int, k):
    """Recursive binary search: index of the first element in arr[start..end]
    that is >= k (a C++-style lower bound), or end + 1 when every element is
    smaller. The original's == and > branches were identical, so they are
    folded into a single >= comparison — behavior is unchanged."""
    if start > end:
        return start
    middle = start + (end - start) // 2
    if arr[middle] >= k:
        return upper_bound(arr, start, middle - 1, k)
    return upper_bound(arr, middle + 1, end, k)
if __name__ == '__main__':
    # Ad-hoc smoke checks; the last two are standard "count triplets with
    # sum below k" examples (expected 2 and 4 under that reading).
    print(count_triplet([30, 8, 23, 6, 10, 9, 31, 7, 19, 20, 1, 33, 21, 27, 28, 3, 25, 26], 18, 86))
    print(count_triplet([-2, 0, 1, 3], 4, 2))
    print(count_triplet([5, 1, 3, 4, 7], 5, 12))
|
999,888 | 0b56fdd46b988270c6714dc96b58d907d3bc3006 | def add_to(
v, to, G, color, sum_ins,
sum_out, tot_mas, ver_wei):
"""
Add vertex to community
v - vertex, that we want to add
to - vertex, to which community we want to add v
G - adjacency matrix
color - labels color
sum_ins - sum of edges inside community
sum_out - sum of edges outside community
tot_mas - sum of all edges
ver_wei - sum of edges, that are connected to vertices
"""
com_col = color[to]
sum_tot = sum_ins[com_col] + sum_out[com_col]
k_v = ver_wei[v]
k_v_in = np.sum(G[v, np.where(color == com_col)])
first = (k_v_in / (2 * tot_mas))
second = (sum_tot * k_v) / (2 * (tot_mas ** 2))
add_ans = first - second
k_v_out = k_v - k_v_in
return add_ans, k_v_in, k_v_out |
999,889 | 0b44a59eabeeb3a1362953bcfdc3d35b256492de | #!/usr/bin/python3
# Author: Suzanna Sia
# Standard imports
#import random
import numpy as np
import pdb
import math
import os, sys
# argparser
#import argparse
#from distutils.util import str2bool
#argparser = argparser.ArgumentParser()
#argparser.add_argument('--x', type=float, default=0)
# Custom imports
def construct_tableau(A, b, c):
    """Assemble the initial simplex tableau.

    Layout (A is m x n): first column is the z indicator [1, 0, ..., 0],
    the top row holds -c (negated objective), the body holds A, and the
    last column is [0, b] (right-hand sides). Result shape: (m+1, n+2).
    """
    rhs = np.append(0, b).reshape(1, -1)
    z_col = np.zeros(A.shape[0] + 1)
    z_col[0] = 1
    z_col = z_col.reshape(1, -1)
    body = np.vstack((-c, A))
    return np.hstack((z_col.T, body, rhs.T))
def row_echelon(tableau, pivots):
    """Gauss-Jordan step for each (row, col) pivot: normalize the pivot row
    so the pivot entry becomes 1, then eliminate that column from every
    other row. Mutates *tableau* in place and also returns it."""
    for row, col in pivots:
        tableau[row] = tableau[row] / tableau[row, col]
        for other in range(tableau.shape[0]):
            if other == row:
                continue
            factor = tableau[other, col] / tableau[row, col]
            tableau[other] -= tableau[row] * factor
    return tableau
def find_pivot(tableau):
    """Choose the next pivot: the entering column is the most positive
    reduced cost in the objective row (excluding the z and RHS columns);
    the leaving row minimizes the positive RHS/column ratio. Non-positive
    ratios are masked out with +inf. Returns (row, col)."""
    col = np.argmax(tableau[0][1:-1]) + 1
    ratios = tableau[1:, -1] / tableau[1:, col]
    ratios[ratios <= 0] = np.inf
    row = np.argmin(ratios) + 1  # +1 skips the objective row
    return (row, col)
def get_bfs(tableau):
    """Read the basic feasible solution off a reduced tableau: for every
    column whose objective-row entry is zero (a basic column), take the RHS
    value of its pivot row (located via argmax, since the column is a unit
    vector). Non-basic variables stay 0."""
    solution = np.zeros(tableau.shape[1] - 1)
    basic_cols = np.where(tableau[0] == 0)[0]
    for col in basic_cols:
        pivot_row = np.argmax(tableau[:, col])
        solution[col - 1] = tableau[pivot_row, -1]
    return solution
if __name__ == "__main__":
    # Simplex driver:
    # 1. construct pretableau
    # 2. row_echelon on basis columns
    # 3. identify new basis column and pivot row position, repeat until
    #    no reduced cost in the objective row is positive
    prob=3
    A = np.loadtxt(f'inputs/A{prob}.txt')
    b = np.loadtxt(f'inputs/b{prob}.txt')
    c = np.loadtxt(f'inputs/c{prob}.txt')
    # Initial basis: one pivot column per constraint row (1-indexed rows).
    pivots = np.loadtxt(f'inputs/pivots{prob}.txt')
    pivots = [(i+1, int(p)) for i, p in enumerate(pivots)]
    prett = construct_tableau(A, b, c)
    prett = row_echelon(prett, pivots)
    # Iterate until all reduced costs (row 0, excluding z/RHS) are <= 0.
    while len(np.where(prett[0][1:-1]<=0)[0])!=A.shape[1]:
        new_piv = find_pivot(prett)
        prett = row_echelon(prett, [new_piv])
    bfs = get_bfs(prett)
    objv = prett[0, -1]
    print("bfs:", bfs)
    print("objv:", objv)
|
999,890 | 8ed814b4c13f55deb3a725b98761e5a813e32396 | #!/usr/bin/env python
import matplotlib
matplotlib.use("Agg")
import threading
import matplotlib.pyplot as plt
from scipy.spatial import cKDTree as KDTree
import tf
import rospy
from message_filters import ApproximateTimeSynchronizer
from message_filters import Cache, Subscriber
from visualization_msgs.msg import Marker
from geometry_msgs.msg import PoseWithCovarianceStamped
from nav_msgs.msg import Odometry, Path, OccupancyGrid
from sensor_msgs.msg import PointCloud2
# For exploration services
from bruce_msgs.srv import PredictSLAMUpdate, PredictSLAMUpdateResponse
from bruce_msgs.msg import ISAM2Update
from bruce_msgs.srv import GetOccupancyMap, GetOccupancyMapRequest
from bruce_slam.utils.io import *
from bruce_slam.utils.conversions import *
from bruce_slam.utils.visualization import *
from bruce_slam.slam import SLAM, Keyframe
from bruce_slam import pcl
class SLAMNode(SLAM):
    def __init__(self):
        """Set defaults; the real configuration is read from the ROS
        parameter server in init_node()."""
        super(SLAMNode, self).__init__()
        # Loop-closure (non-sequential scan matching) can be disabled.
        self.enable_slam = True
        # Number of pose samples drawn in get_measurement_probability().
        self.pz_samples = 30
        # Detection rate used to scale point-count thresholds (see predict_sm).
        self.pz_detection_rate = 0.5
        # Reentrant lock guarding state shared across callbacks/services.
        self.lock = threading.RLock()
    def init_node(self, ns="~"):
        """Load parameters, wire up subscribers/publishers/services, and
        configure the ICP backend.

        :param ns: parameter namespace prefix (default: this node's private
            namespace "~")
        """
        # --- keyframe creation thresholds ---
        self.keyframe_duration = rospy.get_param(ns + "keyframe_duration")
        self.keyframe_duration = rospy.Duration(self.keyframe_duration)
        self.keyframe_translation = rospy.get_param(ns + "keyframe_translation")
        self.keyframe_rotation = rospy.get_param(ns + "keyframe_rotation")
        self.enable_slam = rospy.get_param(ns + "enable_slam", True)
        # --- factor noise sigmas ---
        self.prior_sigmas = rospy.get_param(ns + "prior_sigmas")
        self.odom_sigmas = rospy.get_param(ns + "odom_sigmas")
        self.icp_odom_sigmas = rospy.get_param(ns + "icp_odom_sigmas")
        self.point_resolution = rospy.get_param(ns + "point_resolution")
        # --- sequential scan matching (ssm) ---
        self.ssm_params.min_points = rospy.get_param(ns + "ssm/min_points")
        self.ssm_params.max_translation = rospy.get_param(ns + "ssm/max_translation")
        self.ssm_params.max_rotation = rospy.get_param(ns + "ssm/max_rotation")
        self.ssm_params.target_frames = rospy.get_param(ns + "ssm/target_frames")
        # --- non-sequential scan matching (nssm, loop closure) ---
        self.nssm_params.min_st_sep = rospy.get_param(ns + "nssm/min_st_sep")
        self.nssm_params.min_points = rospy.get_param(ns + "nssm/min_points")
        self.nssm_params.max_translation = rospy.get_param(ns + "nssm/max_translation")
        self.nssm_params.max_rotation = rospy.get_param(ns + "nssm/max_rotation")
        self.nssm_params.source_frames = rospy.get_param(ns + "nssm/source_frames")
        self.nssm_params.cov_samples = rospy.get_param(ns + "nssm/cov_samples")
        # --- measurement-probability sampling and PCM outlier rejection ---
        self.pz_samples = rospy.get_param(ns + "pz_samples")
        self.pz_detection_rate = rospy.get_param(ns + "pz_detection_rate")
        self.pcm_queue_size = rospy.get_param(ns + "pcm_queue_size")
        self.min_pcm = rospy.get_param(ns + "min_pcm")
        # Max allowed skew when pairing feature clouds with odometry (s).
        self.feature_odom_sync_max_delay = 0.5
        # Sonar subscription exists only to capture the sensor configuration
        # once (see sonar_callback, which unregisters itself).
        self.sonar_sub = rospy.Subscriber(
            SONAR_TOPIC, OculusPing, self.sonar_callback, queue_size=1
        )
        # Feature cloud + odometry are processed together in fo_callback.
        self.feature_sub = Subscriber(SONAR_FEATURE_TOPIC, PointCloud2)
        self.odom_sub = Subscriber(LOCALIZATION_ODOM_TOPIC, Odometry)
        self.fo_ts = ApproximateTimeSynchronizer(
            [self.feature_sub, self.odom_sub], 20, self.feature_odom_sync_max_delay
        )
        self.fo_ts.registerCallback(self.fo_callback)
        # self.occ_sub = rospy.Subscriber(
        #     MAPPING_OCCUPANCY_TOPIC, OccupancyGrid, self.occ_callback, queue_size=1
        # )
        # --- publishers ---
        self.pose_pub = rospy.Publisher(
            SLAM_POSE_TOPIC, PoseWithCovarianceStamped, queue_size=10
        )
        self.odom_pub = rospy.Publisher(SLAM_ODOM_TOPIC, Odometry, queue_size=10)
        self.traj_pub = rospy.Publisher(
            SLAM_TRAJ_TOPIC, PointCloud2, queue_size=1, latch=True
        )
        self.constraint_pub = rospy.Publisher(
            SLAM_CONSTRAINT_TOPIC, Marker, queue_size=1, latch=True
        )
        self.cloud_pub = rospy.Publisher(
            SLAM_CLOUD_TOPIC, PointCloud2, queue_size=1, latch=True
        )
        self.slam_update_pub = rospy.Publisher(
            SLAM_ISAM2_TOPIC, ISAM2Update, queue_size=5, latch=True
        )
        self.tf = tf.TransformBroadcaster()
        # --- services (exploration interface) ---
        self.predict_slam_update_srv = rospy.Service(
            SLAM_PREDICT_SLAM_UPDATE_SERVICE,
            PredictSLAMUpdate,
            self.predict_slam_update_handler,
        )
        self.get_map_client = rospy.ServiceProxy(
            MAPPING_GET_MAP_SERVICE, GetOccupancyMap
        )
        # self.get_map_client.wait_for_service()
        icp_config = rospy.get_param(ns + "icp_config")
        self.icp.loadFromYaml(icp_config)
        self.configure()
        loginfo("SLAM node is initialized")
    def get_common_factors(self, req_key):
        """
        Get common factors and initials for path candidates
        since last slam update.

        Collects every binary factor touching a key newer than req_key,
        plus current iSAM2 estimates for those newer keys, so candidate
        paths can be evaluated against a shared base graph.
        """
        common_graph = gtsam.NonlinearFactorGraph()
        common_values = gtsam.Values()
        graph = self.isam.getFactorsUnsafe()
        for i in range(graph.size()):
            factor = graph.at(i)
            keys = factor.keys()
            # Only binary (between) factors are collected.
            if keys.size() != 2:
                continue
            key1, key2 = keys.at(0), keys.at(1)
            if gtsam.symbolIndex(key1) > req_key or gtsam.symbolIndex(key2) > req_key:
                common_graph.add(factor)
                # Seed initial values for any newer key not yet inserted.
                if gtsam.symbolIndex(key1) > req_key and not common_values.exists(key1):
                    common_values.insert(key1, self.isam.calculateEstimatePose2(key1))
                if gtsam.symbolIndex(key2) > req_key and not common_values.exists(key2):
                    common_values.insert(key2, self.isam.calculateEstimatePose2(key2))
        return common_graph, common_values
def prune_path(self, path):
"""
Search for the nearest pose in path to current pose.
The plan will not start from the beginning.
"""
while len(path.poses) >= 2:
pose0 = pose322(r2g(path.poses[0].pose))
pose1 = pose322(r2g(path.poses[1].pose))
d0 = np.linalg.norm(g2n(pose0.between(self.current_frame.pose)))
d1 = np.linalg.norm(g2n(pose1.between(self.current_frame.pose)))
if d1 < d0:
path.poses.pop(0)
else:
break
return path
    def get_keyframes(self, path):
        """Generator: walk *path* and yield a tuple
        (key, odometry-from-previous, predicted covariance, pose, ROS msg)
        for every pose far enough (in translation or rotation) from the
        previous keyframe to become a new keyframe itself."""
        current_key = self.current_key
        current_pose = self.current_keyframe.pose
        current_cov = self.current_keyframe.cov
        for pose_msg in path.poses:
            pose = pose322(r2g(pose_msg.pose))
            odom = current_pose.between(pose)
            translation = odom.translation().norm()
            rotation = abs(odom.theta())
            if (
                translation > self.keyframe_translation
                or rotation > self.keyframe_rotation
            ):
                cov = self.propagate_covariance(
                    current_pose, current_cov, pose, self.odom_sigmas
                )
                # Return keyframe information
                yield (
                    current_key,  # key
                    odom,  # odometry from previous pose
                    cov,  # predicted covariance
                    pose,  # pose
                    pose_msg,  # ROS pose msg
                )
            else:
                # Not far enough yet: skip without advancing the keyframe.
                continue
            # Only reached on the keyframe branch (the else continues).
            current_key += 1
            current_pose = pose
            current_cov = cov
###################################################################
    def predict_measurements(self, pose, map_tree):
        """Indices of map points the sonar could observe from *pose*:
        within max range (KD-tree ball query) AND inside the horizontal
        aperture. Returns a (possibly empty) index array into map_tree.data."""
        # Find points that the robot can observe at pose
        # First search for points within max range
        center = np.array([pose.x(), pose.y()])
        idx = map_tree.query_ball_point(center, self.oculus.max_range, eps=0.1)
        if len(idx) == 0:
            return []
        idx = np.array(idx)
        # Second search for points within horizontal aperture
        points_in_FOV = map_tree.data[idx] - center
        bearings = np.arctan2(points_in_FOV[:, 1], points_in_FOV[:, 0]) - pose.theta()
        # Wrap bearings into [-pi, pi] before comparing to the aperture.
        bearings = np.arctan2(np.sin(bearings), np.cos(bearings))
        sel = np.abs(bearings) < self.oculus.horizontal_aperture / 2
        return idx[sel]
    def get_measurement_probability(self, pose, cov, map_tree):
        """Probability that a scan match at *pose* (with covariance *cov*)
        yields enough points against the map."""
        # NOTE(review): short-circuited — this always returns the constant
        # detection rate; the Monte-Carlo sampling below is unreachable.
        # Confirm whether the early return is a deliberate simplification.
        return self.pz_detection_rate
        success = 1
        for _ in range(self.pz_samples):
            s = self.sample_pose(pose, cov)
            idx = self.predict_measurements(s, map_tree)
            if len(idx) > self.nssm_params.min_points / self.pz_detection_rate:
                success += 1
        return success / (1.0 + self.pz_samples)
    def predict_sm(
        self, current_key, odom, pose, cov, map_tree, map_keys, graph, values
    ):
        """Add predicted factors for one future keyframe to (graph, values):
        a sequential between-factor (ICP-grade noise if enough map points
        are visible, plain odometry noise otherwise) and, when the visible
        points mostly belong to a sufficiently old keyframe, a predicted
        loop-closure factor with probability-scaled noise."""
        idx = self.predict_measurements(pose, map_tree)
        # Use icp_model if the robot can observe enough points
        if len(idx) > self.ssm_params.min_points / self.pz_detection_rate:
            model = self.icp_odom_model
        else:
            model = self.odom_model
        factor = gtsam.BetweenFactorPose2(
            X(current_key - 1), X(current_key), odom, model
        )
        graph.add(factor)
        values.insert(X(current_key), pose)
        # Check if nonsequential factor exists
        if len(idx) < self.nssm_params.min_points / self.pz_detection_rate:
            return
        keys, counts = np.unique(np.int32(map_keys[idx]), return_counts=True)
        # Find target key: the keyframe owning most of the visible points.
        matched = np.argmax(counts)
        matched_key = keys[matched]
        if current_key - matched_key > self.nssm_params.min_st_sep:
            pose1 = self.keyframes[matched_key].pose
            odom = pose1.between(pose)
            # Scale noise model based on the probability of
            # obtaining the measurements
            prob = self.get_measurement_probability(pose, cov, map_tree)
            factor = gtsam.BetweenFactorPose2(
                X(matched_key), X(current_key), odom, self.scale_icp_odom_model(prob)
            )
            graph.add(factor)
###################################################################
    def propagate_covariance(self, pose1, cov1, pose2, sigmas):
        """
        Propagate a local SE(2) covariance from pose1 to pose2 and add
        diagonal process noise built from *sigmas*:

        H = [R2^T*R1   rot(-90)*R2^T*(t1 - t2)]
            [   0                 1           ]
        P' = H*P*H^T + Q   (local)
        """
        H = np.identity(3, np.float32)
        R1, t1 = pose1.rotation().matrix(), pose1.translation().vector()
        R2, t2 = pose2.rotation().matrix(), pose2.translation().vector()
        H[:2, :2] = R2.T.dot(R1)
        # rot(-90) applied via the constant [[0, 1], [-1, 0]] matrix.
        H[:2, 2] = np.array([[0, 1], [-1, 0]]).dot(R2.T).dot(t1 - t2)
        cov2 = H.dot(cov1).dot(H.T) + np.diag(sigmas) ** 2
        return cov2
    def scale_icp_odom_model(self, prob):
        """
        Inflate the ICP noise model by 1/prob (lower success probability
        means a weaker, higher-uncertainty constraint):

        p * d^T * R^{-1} * d = d^T * (1/p * R)^{-1} * d
        """
        return self.create_noise_model(
            np.sqrt(1.0 / prob) * np.array(self.icp_odom_sigmas)
        )
    def predict_slam_update_handler(self, req):
        """ROS service handler: lock-guarded wrapper around
        predict_slam_update() returning a PredictSLAMUpdateResponse."""
        resp = PredictSLAMUpdateResponse()
        with self.lock:
            resp.keyframes, resp.isam2_updates = self.predict_slam_update(
                req.key, req.paths, req.return_isam2_update
            )
        return resp
    def predict_slam_update(self, key, paths, return_isam2_update):
        """For each candidate *path*, predict the keyframes it would create
        and, optionally, the serialized ISAM2 graph/values update those
        keyframes would imply. Returns (path_keyframes, isam2_updates)."""
        isam2_updates = []
        path_keyframes = []
        if return_isam2_update:
            # Factors/values shared by every candidate path since *key*.
            common_graph, common_values = self.get_common_factors(key)
        # Points and tree are in global frame
        map_points, map_keys = self.get_points(return_keys=True)
        map_tree = KDTree(map_points)
        for path in paths:
            path = self.prune_path(path)
            if return_isam2_update:
                # Each path extends its own copy of the common base.
                new_graph = gtsam.NonlinearFactorGraph(common_graph)
                new_values = gtsam.Values(common_values)
            # Add keyframes from the plan
            keyframes = Path()
            cov = self.current_keyframe.cov
            for current_key, odom, cov, pose, pose_msg in self.get_keyframes(path):
                keyframes.poses.append(pose_msg)
                if return_isam2_update:
                    self.predict_sm(
                        current_key,
                        odom,
                        pose,
                        cov,
                        map_tree,
                        map_keys,
                        new_graph,
                        new_values,
                    )
            if return_isam2_update:
                # Serialize so the exploration node can deserialize remotely.
                isam2_update = ISAM2Update()
                isam2_update.graph = gtsam.serializeNonlinearFactorGraph(new_graph)
                isam2_update.values = gtsam.serializeValues(new_values)
                isam2_updates.append(isam2_update)
            path_keyframes.append(keyframes)
        return path_keyframes, isam2_updates
    @add_lock
    def sonar_callback(self, ping):
        """
        Subscribe once to configure Oculus property.
        Assume sonar configuration doesn't change much.
        """
        self.oculus.configure(ping)
        # One-shot: stop listening after the first ping configures the sensor.
        self.sonar_sub.unregister()
# @add_lock
# def occ_callback(self, occ_msg):
# x0 = occ_msg.info.origin.position.x
# y0 = occ_msg.info.origin.position.y
# width = occ_msg.info.width
# height = occ_msg.info.height
# resolution = occ_msg.info.resolution
# occ_arr = np.array(occ_msg.data).reshape(height, width)
# occ_arr[occ_arr < 0] = 50
# occ_arr = occ_arr / 100.0
# self.occ = x0, y0, resolution, occ_arr
def get_map(self, frames, resolution=None):
"""
Get map from map server in mapping node.
TODO It's better to integrate mapping into SLAM node
"""
self.lock.acquire()
req = GetOccupancyMapRequest()
req.frames = frames
req.resolution = -1 if resolution is None else resolution
try:
resp = self.get_map_client.call(req)
except rospy.ServiceException as e:
logerror("Failed to call get_map service {}".format(e))
raise
x0 = resp.occ.info.origin.position.x
y0 = resp.occ.info.origin.position.y
width = resp.occ.info.width
height = resp.occ.info.height
resolution = resp.occ.info.resolution
occ_arr = np.array(resp.occ.data).reshape(height, width)
occ_arr[occ_arr < 0] = 50
occ_arr = np.clip(occ_arr / 100.0, 0.0, 1.0)
self.lock.release()
return x0, y0, resolution, occ_arr
    @add_lock
    def fo_callback(self, feature_msg, odom_msg):
        """Main SLAM step: pair a sonar feature cloud with dead-reckoning
        odometry, decide whether the frame becomes a keyframe, run
        sequential (and optionally non-sequential) scan matching, update
        the factor graph, and publish results."""
        # NOTE(review): this method both carries @add_lock and manually
        # acquires/releases self.lock. The RLock makes the double acquire
        # harmless, but the explicit release is skipped if an exception is
        # raised mid-body — confirm add_lock's semantics and drop one.
        self.lock.acquire()
        time = feature_msg.header.stamp
        dr_pose3 = r2g(odom_msg.pose.pose)
        frame = Keyframe(False, time, dr_pose3)
        points = r2n(feature_msg)[:, :2].astype(np.float32)
        if len(points) and np.isnan(points[0, 0]):
            # In case feature extraction is skipped in this frame.
            frame.status = False
        else:
            frame.status = self.is_keyframe(frame)
        frame.twist = odom_msg.twist.twist
        if self.keyframes:
            # Compose dead-reckoning odometry onto the last SLAM keyframe
            # pose to get the new frame's initial pose estimate.
            dr_odom = self.current_keyframe.dr_pose.between(frame.dr_pose)
            pose = self.current_keyframe.pose.compose(dr_odom)
            frame.update(pose)
        if frame.status:
            frame.points = points
            if not self.keyframes:
                # Very first keyframe anchors the graph with a prior.
                self.add_prior(frame)
            else:
                self.add_sequential_scan_matching(frame)
            self.update_factor_graph(frame)
            # Loop closures trigger a second factor-graph update.
            if self.enable_slam and self.add_nonsequential_scan_matching():
                self.update_factor_graph()
        self.current_frame = frame
        self.publish_all()
        self.lock.release()
def publish_all(self):
if not self.keyframes:
return
self.publish_pose()
if self.current_frame.status:
self.publish_trajectory()
self.publish_constraint()
self.publish_point_cloud()
self.publish_slam_update()
    def publish_pose(self):
        """
        Append dead reckoning from Localization to SLAM estimate to achieve realtime TF.

        Publishes a PoseWithCovarianceStamped in the "map" frame, the
        map -> odom transform, and an Odometry message carrying the
        current frame's twist.
        """
        pose_msg = PoseWithCovarianceStamped()
        pose_msg.header.stamp = self.current_frame.time
        pose_msg.header.frame_id = "map"
        pose_msg.pose.pose = g2r(self.current_frame.pose3)
        # Small diagonal placeholder; the (x, y, yaw) sub-block is overwritten
        # with the keyframe's estimated covariance just below.
        cov = 1e-4 * np.identity(6, np.float32)
        # FIXME Use cov in current_frame
        cov[np.ix_((0, 1, 5), (0, 1, 5))] = self.current_keyframe.transf_cov
        pose_msg.pose.covariance = cov.ravel().tolist()
        self.pose_pub.publish(pose_msg)
        # odom -> map correction: SLAM pose composed with the inverse of the
        # dead-reckoning pose for the same frame.
        o2m = self.current_frame.pose3.compose(self.current_frame.dr_pose3.inverse())
        o2m = g2r(o2m)
        p = o2m.position
        q = o2m.orientation
        self.tf.sendTransform(
            (p.x, p.y, p.z),
            [q.x, q.y, q.z, q.w],
            self.current_frame.time,
            "odom",
            "map",
        )
        # Mirror the pose as an Odometry message (twist comes from odometry).
        odom_msg = Odometry()
        odom_msg.header = pose_msg.header
        odom_msg.pose.pose = pose_msg.pose.pose
        odom_msg.child_frame_id = "base_link"
        odom_msg.twist.twist = self.current_frame.twist
        self.odom_pub.publish(odom_msg)
def publish_constraint(self):
"""
Publish constraints between poses in the factor graph,
either sequential or non-sequential.
"""
links = []
for x, kf in enumerate(self.keyframes[1:], 1):
p1 = self.keyframes[x - 1].pose.x(), self.keyframes[x - 1].pose.y()
p2 = self.keyframes[x].pose.x(), self.keyframes[x].pose.y()
links.append((p1, p2, "green"))
for k, _ in self.keyframes[x].constraints:
p0 = self.keyframes[k].pose.x(), self.keyframes[k].pose.y()
links.append((p0, p2, "red"))
if links:
link_msg = ros_constraints(links)
link_msg.header.stamp = self.current_keyframe.time
self.constraint_pub.publish(link_msg)
def publish_trajectory(self):
"""
Publish 3D trajectory as point cloud in [x, y, z, roll, pitch, yaw, index] format.
"""
poses = np.array([g2n(kf.pose3) for kf in self.keyframes])
traj_msg = ros_colorline_trajectory(poses)
traj_msg.header.stamp = self.current_keyframe.time
traj_msg.header.frame_id = "map"
self.traj_pub.publish(traj_msg)
def publish_point_cloud(self):
"""
Publish downsampled 3D point cloud with z = 0.
The last column represents keyframe index at which the point is observed.
"""
all_points = [np.zeros((0, 2), np.float32)]
all_keys = []
for key in range(len(self.keyframes)):
pose = self.keyframes[key].pose
transf_points = self.keyframes[key].transf_points
all_points.append(transf_points)
all_keys.append(key * np.ones((len(transf_points), 1)))
all_points = np.concatenate(all_points)
all_keys = np.concatenate(all_keys)
sampled_points, sampled_keys = pcl.downsample(
all_points, all_keys, self.point_resolution
)
sampled_xyzi = np.c_[sampled_points, np.zeros_like(sampled_keys), sampled_keys]
if len(sampled_xyzi) == 0:
return
if self.save_fig:
plt.figure()
plt.scatter(
sampled_xyzi[:, 0], sampled_xyzi[:, 1], c=sampled_xyzi[:, 3], s=1
)
plt.axis("equal")
plt.gca().invert_yaxis()
plt.savefig("step-{}-map.png".format(self.current_key - 1), dpi=100)
plt.close("all")
cloud_msg = n2r(sampled_xyzi, "PointCloudXYZI")
cloud_msg.header.stamp = self.current_keyframe.time
cloud_msg.header.frame_id = "map"
self.cloud_pub.publish(cloud_msg)
    def publish_slam_update(self):
        """
        Publish the entire ISAM2 instance for exploration server.
        So BayesTree isn't built from scratch.
        """
        update_msg = ISAM2Update()
        update_msg.header.stamp = self.current_keyframe.time
        # Index of the most recent keyframe contained in the serialized graph.
        update_msg.key = self.current_key - 1
        # Full serialization lets the consumer restore the ISAM2 state directly.
        update_msg.isam2 = gtsam.serializeISAM2(self.isam)
        self.slam_update_pub.publish(update_msg)
def offline(args):
    """Replay a rosbag through localization, feature extraction, SLAM and
    mapping without a live ROS graph, then save trajectory and point cloud.

    Relies on the module-level ``node`` (SLAMNode) created in __main__.

    Args:
        args: parsed CLI namespace with .file, .start and .duration.
    """
    from rosgraph_msgs.msg import Clock
    from localization_node import LocalizationNode
    from feature_extraction_node import FeatureExtractionNode
    from mapping_node import MappingNode
    from bruce_slam.utils import io
    # Switch shared utils to offline mode before any message is processed.
    io.offline = True
    node.save_fig = False
    node.save_data = False
    loc_node = LocalizationNode()
    loc_node.init_node(SLAM_NS + "localization/")
    fe_node = FeatureExtractionNode()
    fe_node.init_node(SLAM_NS + "feature_extraction/")
    mp_node = MappingNode()
    mp_node.init_node(SLAM_NS + "mapping/")
    clock_pub = rospy.Publisher("/clock", Clock, queue_size=100)
    for topic, msg in read_bag(args.file, args.start, args.duration, progress=True):
        # Wait until any in-flight callback finishes before feeding the next
        # message, so offline replay stays sequential.
        while not rospy.is_shutdown():
            if callback_lock_event.wait(1.0):
                break
        if rospy.is_shutdown():
            break
        # Dispatch the bag message to the matching subscriber callback.
        if topic == IMU_TOPIC:
            loc_node.imu_sub.callback(msg)
        elif topic == DVL_TOPIC:
            loc_node.dvl_sub.callback(msg)
        elif topic == DEPTH_TOPIC:
            loc_node.depth_sub.callback(msg)
        elif topic == SONAR_TOPIC:
            fe_node.sonar_sub.callback(msg)
            # NOTE(review): a bound method is always truthy, so this condition
            # can never be False -- presumably it was meant to test whether the
            # subscriber is registered; confirm intent.
            if node.sonar_sub.callback:
                node.sonar_sub.callback(msg)
            mp_node.sonar_sub.callback(msg)
        # Re-publish bag time so TF lookups and rviz stay in sync.
        clock_pub.publish(Clock(msg.header.stamp))
        # Publish map to world so we can visualize all in a z-down frame in rviz.
        node.tf.sendTransform((0, 0, 0), [1, 0, 0, 0], msg.header.stamp, "map", "world")
    # Save trajectory and point cloud
    import os
    import datetime
    stamp = datetime.datetime.now()
    name = os.path.basename(args.file).split(".")[0]
    np.savez(
        "run-{}@{}.npz".format(name, stamp),
        poses=node.get_states(),
        points=np.c_[node.get_points(return_keys=True)],
    )
if __name__ == "__main__":
    rospy.init_node("slam", log_level=rospy.INFO)
    # `node` is intentionally module-global: offline() reads it.
    node = SLAMNode()
    node.init_node()
    args, _ = common_parser().parse_known_args()
    if not args.file:
        # No bag file supplied: run live and let ROS drive the callbacks.
        loginfo("Start online slam...")
        rospy.spin()
    else:
        # Bag file supplied: replay it synchronously through the pipeline.
        loginfo("Start offline slam...")
        offline(args)
|
999,891 | 798b3016ea39031b489db417077bb2340d5a0a0f | '''
Akond Rahman
Nov 21 2018
Content Grabber for Chef Analysis
'''
import os
import shutil
def getFileLines(file_para):
    """Return the non-empty lines of *file_para* as a list of strings.

    Bug fix: the original filtered out elements equal to a newline character,
    but str.split on newline never yields an element containing a newline, so
    blank lines slipped through; empty strings are filtered instead.
    Mode 'r' replaces 'rU' ('U' was a no-op on Python 3 and removed in 3.11;
    'r' already performs universal-newline translation on Python 3).
    """
    with open(file_para, 'r') as log_fil:
        file_str = log_fil.read()
    file_lines = [x_ for x_ in file_str.split('\n') if x_ != '']
    return file_lines
def grabContent(tot_fil_lis):
    """Print every (content, path) tuple with separators for manual review.

    tot_fil_lis: list of (file_content, file_name) tuples.
    Output-only helper (Python 2 print statements); the 'DECISION===>:'
    marker leaves space for a human annotator's verdict.
    """
    tot_fil_cnt = len(tot_fil_lis)
    print 'Total files to analyze:', tot_fil_cnt
    print '-'*100
    file_counter = 0
    for file_content_tup in tot_fil_lis:
        file_content , file_name = file_content_tup
        file_counter += 1
        # Banner delimits each file's dump so reviewers can navigate the log.
        print '='*25 + ':'*3 + str(file_counter) + ':'*3 + 'START!' + '='*25
        print file_name
        print '*'*10
        print file_content
        print '*'*10
        print 'DECISION===>:'
        print '*'*10
        print '='*25 + ':'*3 + str(file_counter) + ':'*3 + 'END!!!' + '='*25
        print '-'*100
def getFileContent(path_f):
    """Read *path_f* and return its full contents as one string.

    Mode 'r' replaces the original 'rU': the 'U' flag was a no-op alias on
    Python 3 and was removed entirely in Python 3.11.
    NOTE(review): under Python 2 this drops universal-newline translation;
    confirm the corpus is processed on Python 3.
    """
    with open(path_f, 'r') as myfile:
        data_ = myfile.read()
    return data_
def getLegitFiles(dir_par, typ):
    """Walk *dir_par* and collect (content, path) tuples for IaC files.

    typ == 'CHEF' selects .rb files under a 'cookbooks' path; anything else
    selects .yml/.yaml files whose path mentions playbook/role/inventor*.
    """
    collected = []
    for root_, dirs, files_ in os.walk(dir_par):
        for file_ in files_:
            full_p_file = os.path.join(root_, file_)
            if not os.path.exists(full_p_file):
                continue
            if typ == 'CHEF':
                is_legit = ('cookbooks' in full_p_file) and full_p_file.endswith('.rb')
            else:
                in_scope = ('playbook' in full_p_file) or ('role' in full_p_file) or ('inventor' in full_p_file)
                is_yaml = full_p_file.endswith('.yml') or full_p_file.endswith('.yaml')
                is_legit = in_scope and is_yaml
            if is_legit:
                collected.append((getFileContent(full_p_file), full_p_file))
    return collected
if __name__=='__main__':
    # Earlier Chef-focused run, kept for reference:
    # repo_list = ['cookbook-openstack-compute', 'cookbook-openstack-common', 'cookbook-openstack-network',
    # 'cookbook-openstack-block-storage', 'cookbook-openstack-dashboard', 'cookbook-openstack-identity',
    # 'cookbook-openstack-image', 'cookbook-openstack-telemetry', 'cookbook-openstack-orchestration',
    # 'cookbook-openstack-ops-database', 'compass-adapters',
    # ]
    # the_root_dir = '/Users/akond/SECU_REPOS/ostk-chef/'
    # Whitelist of repos to keep; everything else under the root is deleted.
    repo_list = ['openstack-ansible', 'ironic-lib', 'tripleo-quickstart', 'networking-ovn', 'bifrost',
                 'puppet-openstack-integration', 'openstack-ansible-ops', 'browbeat', 'monasca-vagrant',
                 'fuel-ccp-installer', 'ansible-role-tripleo-modify-image', 'ansible-role-container-registry',
                 'ansible-role-python_venv_build', 'ansible-role-systemd_mount', 'ansible-role-systemd_networkd',
                 'windmill'
    ]
    the_root_dir = '/Users/akond/SECU_REPOS/ostk-ansi/'
    all_file_lis = []
    # One repo directory name per line in this index file.
    repo_file = the_root_dir + 'all.openstack.repos.txt'
    repo_dirs = getFileLines(repo_file)
    for repo_ in repo_dirs:
        repo_ = repo_.replace(' ', '')
        full_dir_p = the_root_dir + repo_ + '/'
        if (os.path.exists(full_dir_p)):
            # NOTE(review): destructive -- repos not in the whitelist are
            # deleted from disk; confirm this is intended before rerunning.
            if repo_ not in repo_list:
                shutil.rmtree( full_dir_p )
            else:
                # _files = getLegitFiles(full_dir_p, 'CHEF')
                _files = getLegitFiles(full_dir_p, 'ANSIBLE')
                for f_ in _files:
                    all_file_lis.append(f_)
    grabContent(all_file_lis)
|
999,892 | 3373e903201fb545dbc7b1b55572572982380b20 | from BeautifulReport import BeautifulReport
import time
import unittest

# Discover every unittest case named test_case.py under ../case and render
# a timestamped BeautifulReport HTML report (timestamp avoids overwrites).
discover = unittest.defaultTestLoader.discover("../case", "test_case.py")
BeautifulReport(discover).report(
    description=u'自动化测试报告',
    filename=time.strftime("%Y-%m-%d %H_%M_%S"),
)
999,893 | 053b6fbe91b21179f49cbf2750679447e6186e46 | # -*- coding: utf-8 -*-
import scrapy
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.remote.remote_connection import LOGGER
import logging
import time
class TgsSpider(scrapy.Spider):
name = 'tgs'
allowed_domains = ['ogbuch.tg.ch']
start_urls = ['http://ogbuch.tg.ch']
def __init__(self):
scrapy.Spider.__init__(self)
self.driver = webdriver.PhantomJS(executable_path='/usr/local/bin/phantomjs')
LOGGER.setLevel(logging.WARNING)
def parse(self, response):
self.driver.get(response.url)
element = WebDriverWait(self.driver, 10).until(
EC.presence_of_element_located((By.XPATH, "//frame[@name='menuFrame']"))
)
self.driver.switch_to.frame(element)
# links = self.driver.find_elements_by_xpath('//a[contains(@href, "parent.aufzu")]')
# links = self.driver.find_elements_by_xpath('//a[(contains(@href, "parent.aufzu")) and not (contains(@href, "register"))]')
# links = self.driver.find_elements_by_xpath('//body/font/nobr/a and .//img[contains(@alt, "RBOG")]')
links = self.driver.find_elements_by_xpath('//body/font/nobr/a')
# self.driver.save_screenshot("weiter.png")
for link in links:
url = link.get_attribute('href')
ref = link.get_attribute('text')
# proper_url = response.urljoin(url)
# print proper_url
print url
print ref
link.click()
time.sleep(2)
self.driver.save_screenshot("weiter0.png")
yield scrapy.Request(url=url, callback=self.entscheide)
def entscheide(self, response):
self.driver.save_screenshot("weiter2.png")
self.__init__()
self.driver.get(response.url)
response.url.click()
self.driver.save_screenshot("weiter3.png")
unstu = self.driver.find_elements_by_xpath('//a[containts(text(), "html")]')
for nr in unstu:
u2 = nr.get_attribute('href')
print u2
# next_rbogs = self.driver.find_elements_by_xpath('')
# for ent in next_rbogs:
# link = ent.get_attribute('href')
# print link
class decision(scrapy.Item):
    # Item holding one scraped court decision.
    # NOTE(review): lowercase class name breaks PascalCase convention, but
    # renaming would break external references, so it is only flagged here.
    url = scrapy.Field()
    # file_urls / files follow scrapy's FilesPipeline naming convention.
    file_urls = scrapy.Field()
    files = scrapy.Field()
    # German for "reference" -- the citation string of the decision.
    referenz = scrapy.Field()
|
999,894 | 099dd0d803946e1d053ed7b0aa8664bb3b098a0c | from ._leap import *
from ._leapros import *
|
999,895 | e3b1d264aca1460d021adb1aa80928da8c4b0f4f | from django.urls import path
from . import views
# Route table for this app: the index page plus a color-parameterized picker.
# <str:color> is passed to views.picker as the `color` keyword argument.
urlpatterns = [
    path('', views.index, name='index'),
    path('bluefire/<str:color>/', views.picker, name='bluefire'),
]
999,896 | 303dda4eda54e4940a392cb0e8103d1deb5449de | x = int(input())
# Read candidate values until one strictly exceeds x, then count how many
# consecutive integers starting at x must be summed to exceed that value.
z = int(input())
if z <= x:
    while z <= x:
        z = int(input())
if z > x:
    step = 0
    total = 0
    while True:
        total += x + step
        step += 1
        if total > z:
            print(step)
            break
else:
    print("0")
999,897 | a42d9ba989db123e703e35d8d02ce6e78878bb23 | import asyncio
class AsyncHTTPHandler:
    '''
    This is just to mimic the socketserver.TCPServer functionality:
    a minimal HTTP/1.1 response writer over an asyncio StreamWriter
    (or any object exposing a bytes write()).
    '''
    def __init__(self, writer):
        self.wfile = writer

    def send_response(self, status):
        # Status line; reason phrase is hard-coded "OK" regardless of status.
        self.wfile.write(f'HTTP/1.1 {status} OK\r\n'.encode())

    def send_header(self, header, value):
        self.wfile.write(f'{header}: {value}\r\n'.encode())

    def end_headers(self):
        # Bug fix: each header line already ends with CRLF, so the header
        # block is terminated by exactly ONE more CRLF. The original wrote
        # b'\r\n\r\n', injecting a stray blank line that a client honoring
        # Content-Length would read as the first two bytes of the body.
        self.wfile.write(b'\r\n')

    def do_GET(self):
        # Canned JSON response with an accurate Content-Length.
        body = b'{"foo": "bar"}'
        self.send_response(200)
        self.send_header('Content-Type', 'application/json')
        self.send_header('Content-Length', len(body))
        self.end_headers()
        self.wfile.write(body)

    async def end_response(self):
        # Flush buffered writes, then close and wait for the transport.
        await self.wfile.drain()
        self.wfile.close()
        await self.wfile.wait_closed()
async def handle(reader, writer):
    # One request per connection: emit the canned GET response, then close.
    # NOTE(review): the request on `reader` is never read -- the response is
    # sent unconditionally; confirm that is acceptable for this mock server.
    handler = AsyncHTTPHandler(writer)
    handler.do_GET()
    await handler.end_response()
async def main():
    # Listen on all interfaces, port 8888, and serve until cancelled.
    server = await asyncio.start_server(handle, '0.0.0.0', 8888)
    async with server:
        await server.serve_forever()
asyncio.run(main())
|
999,898 | 3c1ab82d364f9d9ce83b3ca21ecc5efe2decc1c8 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re, time, json, logging, hashlib, base64, asyncio
from coroweb import get, post
def check_admin(request):
    """Raise APIPermissionError unless the request carries an admin user."""
    user = request.__user__
    if user is None or not user.admin:
        raise APIPermissionError()
@get('/')
async def index(*, page='1'):
    # Render the paginated blog-list template.
    # NOTE(review): `blogs` is not defined anywhere in view -- calling this
    # handler raises NameError; presumably a Blog query/fetch is missing.
    return {
        '__template__': 'blogs.html',
        'page': page,
        'blogs': blogs
    }
|
999,899 | 2587d3f629f6107070698e02067ee8d12a51c61c | import os
from datetime import datetime
# Report when the destination file was last modified, as a local-time datetime.
last_modified = os.stat('destination.loc.txt').st_mtime
print(datetime.fromtimestamp(last_modified))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.