blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0038e8bb52dc6ebb2349f86f36abea352f53deb8 | f92a525a2c91b12d0bd710ce40e44312324a5bd4 | /code/sandpit/echostate.py | 958c2e482c3ec2ee82add7842fad429151f5f54f | [] | no_license | dhuppenkothen/BlackHoleMagic | 735f71ef9120157a912d0d7b78a5fbbc37bcb36b | 17f353fe3fc9eaa5a2083b578ce141a4fedbc80e | refs/heads/master | 2021-01-17T18:08:44.507248 | 2016-11-22T21:03:44 | 2016-11-22T21:03:44 | 27,083,555 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,448 | py | ### This is an attempt at making an echo state network work"
## Some naming conventions (following Giannotis+ 2015)
## y: time series points at time t
## t: times at which the time series is measured
## N: number of hidden units
## v: input weights, of dimension Nx1
## x: hidden units, of length N (could be changed?)
## u: weights of the reservoir units (vector of shape NxN)
## w: readout weights, vector of shape NX1
import numpy as np
import scipy.stats
def sigmoid(x):
    """Elementwise logistic function 1 / (1 + exp(-x)); accepts scalars or arrays."""
    negative_exp = np.exp(-x)
    return 1. / (1. + negative_exp)
class EchoStateNetwork(object):
    def __init__(self, x, y, N, a, r, b=None, topology="scr"):
        """
        Initialization for the echo state network for time series.
        :param x: x-coordinate (time bins)
        :param y: data (K by D, where K is the number of data points, D the dimensionality)
        :param N: number of hidden units.
        :param a: absolute value of input weights
        :param r: weights for forward connections between hidden weights
        :param b: weights for backward connections between hidden weights (for topology="dlrb" only)
        :param topology: reservoir topology (one of "scr", "dlr", "dlrb";
                 see Rodan+Tino, "Minimum Complexity Echo State Network" for details)
        :return:
        """
        ## x-coordinate (time bins)
        self.x = x
        ## y-coordinate (the observed data stream)
        self.y = y
        print("shape of data stream: " + str(self.y.shape))
        ## number of data points
        self.K = self.y.shape[0]
        print("Number of data points: %i"%self.K)
        ## dimensionality of the data (1 for a plain 1-d series)
        if len(self.y.shape) > 1:
            self.D = self.y.shape[1]
        else:
            self.D = 1
        print("Dimensionality of the data: %i"%self.D)
        ## number of hidden (reservoir) units
        self.N = N
        print("Number of hidden units: %i"%self.N)
        ## output (readout) weight matrix, filled in during training
        self.ww = np.zeros((self.N, self.D))
        ## reservoir topology and associated parameters
        self.topology = topology
        self.r = r
        self.b = b
        ## input unit weight magnitude
        self.a = a
        ## initialize input weights
        self.vv = self._initialize_input_weights(self.a)
        ## initialize hidden weights
        self.uu = self._initialize_hidden_weights(self.r, self.b, self.topology)

    def _initialize_input_weights(self, a):
        """
        Initialize input weights.
        Input layer fully connected to reservoir, weights have same absolute value a,
        but signs randomly flipped for each weight.
        :param a: weight magnitude
        :return: vv = (N, D) input layer weight matrix with entries +/- a
        """
        ## probability for the Bernoulli trials that decide each sign
        pr = 0.5
        ## initialize weight matrix with Bernoulli distribution (0/1 draws)
        vv = scipy.stats.bernoulli.rvs(pr, size=(self.N, self.D)).astype(np.float64)
        ## map the draws onto the actual weights: 0 -> -a, 1 -> +a
        vv[vv == 0.] = -a
        vv[vv == 1.] = a
        return vv

    def _initialize_hidden_weights(self, r, b=None, type="scr"):
        """
        Initialize the weights for the connections between the
        hidden units.
        Options for the topology of the reservoir are:
        - SCR (Simple Cycle Reservoir): units organised in a cycle
        - DLR (Delay Line Reservoir): units organised in a line
        - DLRB (Delay Line Reservoir /w backward connection)
        :param r: weight value for forward connections
        :param b: weight value for backward connections (for DLRB)
        :param type: string; "scr", "dlr", "dlrb" (note: shadows the builtin,
                     kept for backward compatibility with keyword callers)
        :return: uu = (N, N) hidden weight matrix
        """
        ## if we're using a DLRB topology, b needs to have a value!
        if type == "dlrb":
            assert(b is not None)
        ## initialize the array to store the weights in
        uu = np.zeros((self.N, self.N))
        ## all three topologies fill the lower subdiagonal (forward line);
        ## assign it directly instead of scanning the full N x N grid with
        ## nested loops (also removes the Python-2-only xrange).
        sub = np.arange(1, self.N)
        uu[sub, sub - 1] = r
        ## if DLRB topology, fill upper subdiagonal with backwards connections
        if type == "dlrb":
            uu[sub - 1, sub] = b
        ## if SCR, fill the connection between the last and the first node
        ## to make the line into a circle
        if type == "scr":
            uu[0, -1] = r
        return uu
| [
"daniela.huppenkothen@nyu.edu"
] | daniela.huppenkothen@nyu.edu |
095083917773966edc0c2b1a6e709c702b1053f9 | 3469703501c75c7190c4d8c1f7529561df2448ba | /example/wallet/get_sub_account_transfer_history.py | 42c33b4acae9e169fd0df11390e8cdae8b91103b | [] | no_license | AbigalChulchill/binance-api-python | b5c745e98b5b4ab9873a7b4aaa8708bf9cf24496 | 544bbc152dd7accd0ac4f13faa7d254382c79e77 | refs/heads/master | 2022-04-20T18:03:31.889472 | 2019-12-20T14:21:12 | 2019-12-20T14:21:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 567 | py | from binance import RequestClient
from binance.constant.test import *
from binance.base.printobject import *
# Authenticated REST client built from the shared test credentials
# (g_api_key / g_secret_key are star-imported from binance.constant.test).
request_client = RequestClient(api_key=g_api_key, secret_key=g_secret_key)
# Master-account query: transfer history of one sub-account; every optional
# filter (time window, paging, limit) is left at its server-side default.
sub_account_transfer_history = request_client.get_sub_account_transfer_history(email = "123@test.com",
                                                  startTime = None, endTime = None, page = None, limit = None)
print("======= Query Sub-account Transfer History(For Master Account) =======")
PrintMix.print_data(sub_account_transfer_history)
print("======================================================================")
"heartbeat0415@yahoo.com"
] | heartbeat0415@yahoo.com |
599f1e8a2cfd0f500544975c8f34cebd3ba01126 | 3fdd9e2f5663c6b07420ff0047e20aa1d4dec0e9 | /uim/codec/writer/encoder/base.py | 4e8c0bc737fcc8fdaaf8c40c63558a5bc29fd7e2 | [
"Apache-2.0"
] | permissive | erdoukki/universal-ink-library | 39150814e1a11a5f15e7e66491784563e28e6de8 | 689ed90e09e912b8fc9ac249984df43a7b59aa59 | refs/heads/main | 2023-08-05T02:22:11.142863 | 2021-08-20T10:12:06 | 2021-08-20T10:12:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,034 | py | # -*- coding: utf-8 -*-
# Copyright © 2021 Wacom Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC
from typing import List
from uim.model.ink import InkModel
class Codec(ABC):
    """
    Abstract codec encoder class.

    Subclasses implement `encode` to serialise an in-memory ink model
    into a concrete file format.
    """
    def encode(self, ink_model: InkModel, *args, **kwargs) -> bytes:
        """
        Encode the Ink Model object into the chosen file codec.
        Parameters
        ----------
        ink_model: `InkModel`
            Universal Ink Model (memory model)
        Returns
        -------
        content - bytes
            File content encoded in bytes (UIM v3.1.0)
        Raises
        ------
        NotImplementedError
            Always, unless overridden by a concrete codec subclass.
        """
        raise NotImplementedError
class CodecEncoder(Codec, ABC):
    """Abstract content parser for the different versions of the Universal Ink Model."""
    @classmethod
    def __encoding__(cls, data_list: List[float], precision: int, resolution: float = 1.,
                     ignore_first: bool = False) -> List[int]:
        """Delta-encode a float sequence as scaled integers.

        Each value is multiplied by resolution and 10**precision, rounded,
        and the successive differences are returned (the first entry is the
        absolute first value, or 0 when ignore_first is set).
        """
        if not data_list:
            return []
        factor: float = 10.0 ** precision
        # Quantise every sample to an integer at the requested precision.
        quantized = [round(factor * (resolution * value)) for value in data_list]
        # First entry is absolute; the rest are consecutive deltas.
        deltas: List[int] = [quantized[0]]
        for previous, current in zip(quantized, quantized[1:]):
            deltas.append(current - previous)
        if ignore_first:
            deltas[0] = 0
        return deltas
| [
"github@wacom.com"
] | github@wacom.com |
50ed8118fb1bb5a1f76605c49a03b902855585eb | 519f0b5308941e693d4768101225b75dbd34a14b | /chemoinformatics/molecule_web_database/_settings.py | 29cdc61edc89e91159f5cf52ee400dc153c31ad9 | [
"MIT"
] | permissive | gorgitko/bioinformatics-chemoinformatics | a3a5502f6b4c8e311098ada2fe1794b2d49af86d | b780783abe8ee7ccda88f92eacf1232c2456d046 | refs/heads/master | 2021-01-17T14:45:41.452341 | 2016-10-19T18:26:16 | 2016-10-19T18:26:16 | 55,147,152 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,539 | py | """
Django settings for molinf project.
Generated by 'django-admin startproject' using Django 1.9.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'cp0#=@+xv16(4(^ni@@oep3n^l@@v@q86wu44m7mb(9xf!!401'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'moldb',
'django_rdkit'
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'molinf.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'molinf.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
# NOTE(review): live PostgreSQL credentials are hard-coded and committed to
# version control — load them from environment variables (or a secrets file
# outside the repo) before any real deployment.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'moldb',
        'USER': 'moldb',
        'PASSWORD': 'monitor',
        'HOST': '127.0.0.1',
        'PORT': '5432',
    }
}
"""
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'moldb.sqlite3'),
}
}
"""
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_ROOT = ''
STATIC_URL = '/static/'
STATICFILES_DIRS = ( os.path.join('static'), )
FILE_UPLOAD_TEMP_DIR = "static/temp/"
FILE_UPLOAD_HANDLERS = (#"django.core.files.uploadhandler.MemoryFileUploadHandler",
"django.core.files.uploadhandler.TemporaryFileUploadHandler",)
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format' : "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
'datefmt' : "%d/%b/%Y %H:%M:%S"
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': 'moldb.log',
'formatter': 'verbose'
},
},
'loggers': {
'moldb': {
'handlers': ['file'],
'level': 'DEBUG',
},
}
}
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
SESSION_SAVE_EVERY_REQUEST = True | [
"fg-42@seznam.cz"
] | fg-42@seznam.cz |
935e63a0c03212250310cdaa61a4e52029199dfb | 09d53368fcc2bb6deedeff7119385f1faeaf81df | /sentence_completion_by_HMM.py | 7199db4fee205e16c710190c9002bf927c567c8e | [] | no_license | Jverma/text-mining | e9372377526939919d743c5d9facc932d8bf0753 | 3af668076ae08dccad6368808bde080906e8edcf | refs/heads/master | 2020-06-08T05:32:07.358236 | 2013-12-03T04:17:28 | 2013-12-03T04:17:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,857 | py | """
A bigram langauge model for completing a sentence using Hidden Markov Methods.
Author : Janu Verma
http://www.math.ksu.edu/~jv291/
Twitter : @januverma
"""
import re
from collections import *
import random
import sys
########## Load the Data #########
"""
The data is a collections of text documents or a corpus of sentences.
We'll store the data as a list of text documents or a list of sentences.
"""
# Read the training corpus: one document/sentence per line.
text = []
# NOTE(review): `input` shadows the Python builtin and the file handle is
# never closed — rename it and use a `with` block when this script is revised.
input = open(sys.argv[1])
for line in input:
    text.append(line)
###############################################################
###### Word Tokenization #####################################
def word_tokens(text):
    """Split *text* on whitespace, lower-case every token, and strip all
    non-alphabetic characters (a token of only digits/punctuation becomes '')."""
    lowered = [token.lower() for token in text.split()]
    return [re.sub('[^a-z]', '', token) for token in lowered]
#############################################################
######### Extracting Bigrams #######
def bigrams(text):
    """Tokenize *text* and return the list of adjacent word pairs.

    Returns a list of (word_i, word_{i+1}) tuples; empty for texts with
    fewer than two tokens.
    """
    words = word_tokens(text)
    # Pair every token with its successor.  (The previous hand-rolled index
    # walk guarded on `bigram != None`, a condition a freshly built tuple
    # can never satisfy, so that branch was dead code.)
    return list(zip(words, words[1:]))
################################ Training
print "training....."
#################################################################
##### Compute all the words in the Training set ########
all_words = []
for line in text:
words = word_tokens(line)
all_words.extend(words)
################################################################
##### Compute the frequencies of occurences of possible transitions ###########
pair_counts = defaultdict(float)
for x in text:
for pairs in bigrams(x):
pair_counts[pairs] = pair_counts[pairs] + 1
#########################################################################
#### Count the total number of possible transactions #####
count_dict = {}
for x in all_words:
count = 0
for pair in pair_counts.keys():
if (pair[0] == x):
count = count + 1
count_dict[x] = count
#####################################################################
##### Compute the transition probabilities #########
transition_prob = defaultdict(float)
for pair in pair_counts.keys():
transition_prob[pair] = pair_counts[pair]/count_dict[pair[0]]
######################################################
######### Compute the most probable next word ####
def next_word(x):
    """Return the highest-probability successor of word *x* under the
    learned bigram transition model, or None when *x* has no successor."""
    best_word = None
    best_prob = 0.0
    for (first, second), prob in transition_prob.items():
        if first == x and prob > best_prob:
            best_word = second
            best_prob = prob
    return best_word
##########################################################################
######## Completion of a sentence ###############################
def complete_sentence(sentence, threshold):
    """Extend `sentence` with the most probable next words until the token
    list is `threshold` tokens long; returns the full token list.

    NOTE(review): raises IndexError on an empty sentence, and once a word
    has no known successor next_word returns None, after which every later
    token is None (the disabled guard below would have stopped that) —
    confirm whether that is intended.
    """
    words = word_tokens(sentence)
    n = len(words)
    ## seed the chain with the final token of the prompt
    last_word = words[n-1]
    i = n
    while (i < threshold): #and (last_word != None):
        last_word = next_word(last_word)
        words.append(last_word)
        i = i + 1
    return words
##################################################
###Validity of a distribution#####################
def distribution(x):
    """Sample a successor of word *x* from its transition distribution.

    Draws a single uniform sample and walks the cumulative probabilities of
    all pairs starting with *x* (inverse-CDF sampling).  The assert is only
    reached when the probabilities for *x* sum to less than the drawn value,
    i.e. when the distribution is invalid.

    Fix: the previous version re-drew `random.random()` on every candidate
    pair, discarding the cumulative subtraction, so it did not sample from
    the intended categorical distribution; the unused `count` is removed.
    """
    guess = random.random()
    for pair in transition_prob.keys():
        if pair[0] == x:
            if guess <= transition_prob[pair]:
                return pair[1]
            guess -= transition_prob[pair]
    assert False, "not a valid prob"
################################################################
######## Query ##############
q = sys.argv[1]
print complete_sentence(q, 5)
| [
"j.verma5@gmail.com"
] | j.verma5@gmail.com |
34040fa92ecd404a88488ce57826c60fb30b8f6a | b5e76345cc260960c032e690a9cc47099632af63 | /perceptron_example.py | 4a7d356d5b0990ef7d658e4633ea570d97e49e62 | [] | no_license | zgmartin/neural | 33213b5853faff1748ed5d28aad9d401c0e9428b | 300c97d948e2575729eedc3f28a97eca7f1cffc0 | refs/heads/master | 2020-04-16T00:17:18.052055 | 2015-05-04T17:35:24 | 2015-05-04T17:35:24 | 35,048,594 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 675 | py | """
Line perception test.
"""
import random
import neural
import numpy
from matplotlib import pyplot
def f(x):
    # Decision boundary for the toy data set: the identity line y = x.
    return x
def is_above(x,y,f):
    """Classify the point (x, y): +1 if it lies strictly above the curve f,
    -1 otherwise (points exactly on the curve count as below)."""
    return 1 if y > f(x) else -1
# Build 2000 labelled samples: random points in [-10, 10]^2 tagged +1/-1
# by which side of the line y = x they fall on.
test_set = []
for n in range(2000):
    x = random.uniform(-10,10)
    y = random.uniform(-10,10)
    test_set.append([x,y,is_above(x,y,f)])
# Train a 2-input perceptron on the samples and show its weights before and
# after training.  NOTE(review): "perception" is presumably a typo for
# "perceptron"; also Python-2 print statements below.
perception = neural.Perceptron(2)
print 'before:', perception
perception.training(test_set)
print 'after:', perception
"""
#plot
x = numpy.linspace(-10,10)
y = map(f,x)
pyplot.plot(x,y)
color = [random.random() for n in range(len(test_set))]
for test in test_set:
pyplot.scatter(test[0], test[1], c=color, alpha=.3)
pyplot.show()
""" | [
"zacharygilmartin@gmail.com"
] | zacharygilmartin@gmail.com |
c704cdb44596a02fbd9574844e661fb5a2ace221 | f5e46c48fc69b04abd3ef0d65f6a9d58273ae333 | /ClassifierTester.py | b218e5d4964f88a398e8c40c5df2afb254d947fd | [] | no_license | karthikmit/toy-chatbot | 3bdb330004bd5c74d7cde3c81cc9eea9224234f0 | 0aa6ec7d643f44903a7f83e406e9b43019b1ff48 | refs/heads/master | 2021-05-07T08:07:33.870013 | 2017-11-03T12:05:47 | 2017-11-03T12:05:47 | 109,254,069 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,654 | py | #ClassifierTester.py
import Classifier
import WordEmbedding
classifier = Classifier.IntentClassifier(WordEmbedding.WordEmbedding("./model.txt"))
data={
"greet": {
"examples" : ["hello","hey there","howdy","hello","hi","hey","hey ho"]
},
"pnr_status_query": {
"examples":[
"What is my pnr status 2436014775",
"what is the pnr status of 2436014789",
"status of pnr 2436014889",
"Please tell my PNr status 2436014775",
"Status of my PNR 2436014775",
"PNR status for 2436014775",
"what is the status of Pnr 2436014775",
"Please tell me the status of my PNR 2436014775"
]
},
"train_status_query": {
"examples": [
"Where is the train 16236 now",
"show me the status of train #16236",
"when is the train 16236 expected at CRLM",
"where is the train 16236 now",
"tell me the train status, 16236",
"live status of train 16236",
"what is the current status of train 16236"
]
},
"flight_status_query": {
"examples": [
"Where is the flight fl1234 now",
"show me the status of flight #fl1235",
"when is the flight fl1236 expected at Bangalore international airport",
"Is the flight fl7896 on time",
"tell me the flight status, fl34556",
"live status of flight fl4567",
"what is the current status of flight fl16236"
]
}
}
# Fit the intent classifier on the labelled examples above, then run a quick
# smoke test: one query per intent (greeting, PNR, train status, flight status).
classifier.train_examples(data);
for text in ["hi", "tell me the pnr status for 2436014234", "what is the status of train 16236", "show me the status of flight fl78967"]:
    print "text : '{0}', predicted_label : '{1}'".format(text, classifier.get_intent(text))
| [
"Karthikeyan.Subbaraj@go-mmt.com"
] | Karthikeyan.Subbaraj@go-mmt.com |
86c61de680717bd8e991c6ec92cababcefa73682 | 285888aa3c2252e7ac61137e57d5ffd5c7fe50b2 | /bookMng/migrations/0020_delete_cart.py | 1f8605f7abadb1ccc55b916b11a52fb6ff65a650 | [] | no_license | JeffreyLum/bookEx | 7c1d4c8fcdaf09914dff579858ba862ef94d3e10 | 031c00bb5a58b86c8ed05fd4e8e04642ef724586 | refs/heads/main | 2023-04-20T08:32:39.263747 | 2021-05-14T03:56:32 | 2021-05-14T03:56:32 | 333,901,258 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 279 | py | # Generated by Django 3.1.5 on 2021-05-04 20:43
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('bookMng', '0019_cart'),
]
operations = [
migrations.DeleteModel(
name='Cart',
),
]
| [
"jeffreylum81@gmail.com"
] | jeffreylum81@gmail.com |
4469aadcb30debab4a9987e742fc570b91900f7f | c2f8356fb3afdbbdfac5631b8d4df9ef0785e10f | /tap_appointment_plus/test/test_unit.py | c1638b196e51f9b83e4e0b1dcd13d5db782c0528 | [] | no_license | isabella232/tap-appointment-plus | f92c82ec06e5ba2162c87ff4c80cb4e48d14df8f | 2826a3ae3d3a26a5d768770a0a089c231dcd27bf | refs/heads/master | 2022-06-17T04:20:02.518208 | 2017-08-14T14:37:55 | 2017-08-14T14:37:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,298 | py | import unittest
import voluptuous
import tap_appointment_plus
import tap_appointment_plus.config
def generate_config():
    """Build a minimal, valid tap configuration used by the unit tests."""
    config = {}
    config['site_id'] = 'abc'
    config['api_key'] = 'def'
    config['start_date'] = '2017-01-01T00:00:00Z'
    config['user_agent'] = 'test <test@fishtownanalytics.com>'
    return config
class TestUnit(unittest.TestCase):
    def test__build_request(self):
        # 'YWJjOmRlZg==' is base64("abc:def"), i.e. HTTP basic auth built
        # from the site_id/api_key pair in generate_config().
        config = generate_config()
        self.assertEqual(
            tap_appointment_plus.build_request(config),
            {'data': {'response_type': 'json'},
             'headers': {'Authorization': 'Basic YWJjOmRlZg==',
                         'User-Agent': 'test <test@fishtownanalytics.com>'}})
    def test__validate_config(self):
        # A config with all four required keys must validate without error.
        valid_config = {
            'site_id': 'abc',
            'api_key': 'def',
            'start_date': '2017-01-01T00:00:00Z',
            'user_agent': 'testing <test@fishtownanalytics.com>'
        }
        tap_appointment_plus.config.validate(valid_config)
        # An empty config is rejected.
        with self.assertRaises(voluptuous.error.Invalid):
            tap_appointment_plus.config.validate({})
        # A non-string start_date is rejected.
        with self.assertRaises(voluptuous.error.Invalid):
            invalid = valid_config.copy()
            invalid['start_date'] = 100
            tap_appointment_plus.config.validate(invalid)
| [
"cmcarthur@rjmetrics.com"
] | cmcarthur@rjmetrics.com |
6507617a5996b2599eeb66af63027b8ce92d7271 | eddd6e1ef8354f9a89bd9c63c27b6cb444bb98e9 | /tool/utils.py | ce589cc87812e68fddf75d59933620d797d9b352 | [] | no_license | chiekubo/FaceDetection1 | dbb917e8846fe72e60b48f31cec4e4b3a3ff141e | 42761188a526c9cbd50058ad3a7a582d8b682d18 | refs/heads/master | 2022-08-01T21:26:52.141932 | 2020-05-29T14:51:02 | 2020-05-29T14:51:02 | 267,883,087 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,036 | py | import numpy as np
def iou(box, boxes, isMin = False):
box_area = (box[2] - box[0]) * (box[3] - box[1])
area = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
xx1 = np.maximum(box[0], boxes[:, 0])
yy1 = np.maximum(box[1], boxes[:, 1])
xx2 = np.minimum(box[2], boxes[:, 2])
yy2 = np.minimum(box[3], boxes[:, 3])
w = np.maximum(0, xx2 - xx1)
h = np.maximum(0, yy2 - yy1)
inter = w * h
if isMin:
ovr = np.true_divide(inter, np.minimum(box_area, area))
else:
ovr = np.true_divide(inter, (box_area + area - inter))
return ovr
def nms(boxes, thresh=0.3, isMin=False):
    """Greedy non-maximum suppression.

    boxes : (n, 5+) array [x1, y1, x2, y2, score, ...]
    thresh: overlap above which a lower-scoring box is suppressed
    isMin : forwarded to iou() — divide overlaps by the smaller area
    Returns the surviving boxes stacked into an array (empty array if none).
    """
    if boxes.shape[0] == 0:
        return np.array([])
    # Process boxes highest score first.
    remaining = boxes[(-boxes[:, 4]).argsort()]
    keep = []
    while remaining.shape[0] > 1:
        best = remaining[0]
        rest = remaining[1:]
        keep.append(best)
        # Discard everything overlapping the winner too strongly.
        remaining = rest[np.where(iou(best, rest, isMin) < thresh)]
    if remaining.shape[0] > 0:
        keep.append(remaining[0])
    return np.stack(keep)
def convert_to_square(bbox):
    """Expand every box of *bbox* to a square of side max(width, height),
    centred on the original box.  Columns beyond the 4 coordinates (e.g.
    the score) are copied through unchanged; empty input yields np.array([])."""
    if bbox.shape[0] == 0:
        return np.array([])
    square = bbox.copy()
    heights = bbox[:, 3] - bbox[:, 1]
    widths = bbox[:, 2] - bbox[:, 0]
    sides = np.maximum(heights, widths)
    # Shift the top-left corner so the square shares the box's centre.
    square[:, 0] = bbox[:, 0] + widths * 0.5 - sides * 0.5
    square[:, 1] = bbox[:, 1] + heights * 0.5 - sides * 0.5
    square[:, 2] = square[:, 0] + sides
    square[:, 3] = square[:, 1] + sides
    return square
def prewhiten(x):
    """Standardise *x* to zero mean and (clamped) unit standard deviation.

    The divisor is never smaller than 1/sqrt(x.size), which keeps flat
    (zero-variance) inputs from blowing up to inf/NaN.
    """
    centered = np.subtract(x, np.mean(x))
    floor = 1.0 / np.sqrt(x.size)
    scale = 1 / np.maximum(np.std(x), floor)
    return np.multiply(centered, scale)
if __name__ == '__main__':
    # Ad-hoc smoke test: run NMS on four overlapping scored boxes.
    # a = np.array([1,1,11,11])
    # bs = np.array([[1,1,10,10],[11,11,20,20]])
    # print(iou(a,bs))
    bs = np.array([[1, 1, 10, 10, 40], [1, 1, 9, 9, 10], [9, 8, 13, 20, 15], [6, 11, 18, 17, 13]])
    # print(bs[:,3].argsort())
    print(nms(bs))
| [
"807940470@qq.com"
] | 807940470@qq.com |
f84892a21094cad7f80bd823cb6494e6bc1bbe17 | 033beb1ecf6dd347a4fe608ef33a2a224776ef69 | /object_learn/object_save_one.py | 69c0aeb900ea1e1ff56dae771d60419ec3704a1b | [
"MIT"
] | permissive | MojaX2/vision_module | b9ab9a87af8b3e29a0cc84cb8dad05b998b56d7f | 607bbcc9a799c47ac2cd6d142d68d0d47a457e36 | refs/heads/master | 2020-04-01T19:36:14.039736 | 2018-10-24T15:42:25 | 2018-10-24T15:42:25 | 153,562,271 | 0 | 0 | null | 2018-10-18T04:10:57 | 2018-10-18T04:10:57 | null | UTF-8 | Python | false | false | 4,290 | py | import argparse
import os
import sys
import shutil
import rospy
import numpy as np
from PIL import Image
from tqdm import *
import cv2
from vision_module.msg import ObjectInfo
NUM_TO_SAVE = 1000
class SaveTrainImage():
    """Subscribe to the object-detection topic and dump cropped object
    images to ~/Documents/<folder>/<obj_id>/ as sequentially numbered JPEGs.
    NOTE(review): Python-2 code (raw_input, print statements)."""
    def __init__(self, folder, num_to_save, image_id, obj_id):
        # Next image number to write; files are named "%03d.jpg".
        self.save_id = int(image_id)
        self.obj_id = obj_id
        self.PATH = os.path.expanduser('~') + \
            "/Documents/{}/{}/".format(folder, self.obj_id)
        # Total number of images to record in this session.
        self.IMAGE_NUM = int(num_to_save)
        # While True the callback only tracks the object's position;
        # saving begins after the main loop flips this to False.
        self.firstID_flag = True
        self.last_pos = None
        self.stop = False
        sub = rospy.Subscriber('/vision_module/object_detection_info',
                               ObjectInfo, self.callback)
        self.make_dir()
    def get_xy_position(self, message):
        '''Get xy position of object with index of obj_index'''
        # NOTE(review): always reads objects[0], so the loop in callback()
        # computes the same position for every detection — confirm whether
        # an index parameter was intended here.
        x_pos = message.objects[0].camera.x
        y_pos = message.objects[0].camera.y
        xy_array = np.array((x_pos, y_pos))
        return xy_array
    def command_input(self, command_str):
        '''Prompt [y/n]: return True on "y", exit the process on "n".'''
        stop = False
        while not stop:
            ans = raw_input(command_str + "[y/n] ")
            if ans == "n":
                self.stop = True
                sys.exit()
            elif ans == "y":
                stop = True
            else:
                print "Invalid answer! Try again...\n"
        return stop
    def make_dir(self):
        # Create the output directory; if the first target file already
        # exists, offer to wipe the whole folder before continuing.
        if os.path.exists(self.PATH):
            if os.path.isfile(self.PATH + "/" + "{0:03}".format(self.save_id) + ".jpg"):
                stop = self.command_input("Files with this name already exist! Remove entire folder? ")
                if stop:
                    shutil.rmtree(self.PATH)
        if not os.path.exists(self.PATH):
            os.makedirs(self.PATH)
        return None
    def save_image(self, message, index, img_name, mode="test"):
        '''Convert the detection's raw BGR byte buffer to an RGB JPEG saved
        at PATH/<img_name>.jpg; mode="save" also advances the image counter.'''
        width = message.objects[index].width
        height = message.objects[index].height
        # NOTE(review): np.fromstring is deprecated — np.frombuffer is the
        # modern equivalent for this read-only decode.
        uint8_data = np.fromstring(message.objects[index].bgr, dtype=np.uint8)
        uint8_data = uint8_data.reshape((height, width, 3))
        uint8_revert = cv2.cvtColor(uint8_data, cv2.COLOR_BGR2RGB)
        img = Image.fromarray(uint8_revert, "RGB")
        img.save(self.PATH + img_name + ".jpg")
        if mode=="save":
            self.save_id += 1
    def callback(self, message):
        # Phase 1 (firstID_flag True): just remember where the object is.
        if self.firstID_flag == True:
            self.last_pos = self.get_xy_position(message)
            # self.save_image(message, 0, "test")
        # Phase 2: save the detection closest to the last known position.
        if self.firstID_flag == False:
            dist_to_last = []
            for obj_index in range(len(message.objects)):
                position = self.get_xy_position(message)
                dist_to_last.append(np.linalg.norm(position - self.last_pos))
            dist_to_last = np.array(dist_to_last)
            # NOTE(review): `dist_to_last == None` compares elementwise on
            # an ndarray; an empty-detections guard such as
            # `if len(dist_to_last) == 0` was presumably intended — confirm.
            if dist_to_last == None:
                return
            save_obj_index = np.argmin(dist_to_last)
            self.save_image(message, save_obj_index, "{0:03}".format(self.save_id), "save")
if __name__ == "__main__":
    # CLI: destination folder, number of images to record, first image id,
    # and the object index used to label the saved crops.
    parser = argparse.ArgumentParser(description='Parameters setting')
    parser.add_argument('-d', '--save_directory', default="TrainImage")
    parser.add_argument('-n', "--save_image_numbers", default=NUM_TO_SAVE, required=False)
    parser.add_argument('-id', "--beginning_image_id", default=0, required=False)
    parser.add_argument('-o', "--object_index", required=True)
    args = parser.parse_args()
    rospy.init_node("save_object_image")
    saveTrainImage = SaveTrainImage(args.save_directory,
                                    args.save_image_numbers,
                                    args.beginning_image_id,
                                    args.object_index)
    # Ask for confirmation before recording starts (exits the process on "n").
    saveTrainImage.command_input("Setting object No {} OK? ".format(saveTrainImage.obj_id))
    while not rospy.is_shutdown():
        # Enable saving in the subscriber callback and report progress in
        # place (the ANSI escape moves the cursor up one line).
        saveTrainImage.firstID_flag = False
        print "Saving image No {}/{}".format(saveTrainImage.save_id, args.save_image_numbers)
        sys.stdout.write("\033[F")
        if saveTrainImage.save_id > (int(args.beginning_image_id) + saveTrainImage.IMAGE_NUM - 1):
            sys.exit()
| [
"zawa0319@gmail.com"
] | zawa0319@gmail.com |
37b40e5f9f9835752e00832a3f9b1efc8166a281 | a8547f73463eef517b98d1085430732f442c856e | /numpy/core/tests/test_longdouble.py | 9f4b9eddeecd2c0ffee07713482ebfef2b9e5381 | [] | no_license | EnjoyLifeFund/macHighSierra-py36-pkgs | 63aece1b692225ee2fbb865200279d7ef88a1eca | 5668b5785296b314ea1321057420bcd077dba9ea | refs/heads/master | 2021-01-23T19:13:04.707152 | 2017-12-25T17:41:30 | 2017-12-25T17:41:30 | 102,808,884 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 101 | py | ../../../../../../Cellar/numpy/1.13.3/lib/python3.6/site-packages/numpy/core/tests/test_longdouble.py | [
"Raliclo@gmail.com"
] | Raliclo@gmail.com |
fcf943c9a12f68cdbce93984752dc88ce47547dc | 3fa4a77e75738d00835dcca1c47d4b99d371b2d8 | /backend/telegram/models/chats/admin_log_event/admin_log_event_action_toggle_admin.py | 8cc707026c5b98d6b7ea0b8a0ae05a57598b13b7 | [
"Apache-2.0"
] | permissive | appheap/social-media-analyzer | 1711f415fcd094bff94ac4f009a7a8546f53196f | 0f9da098bfb0b4f9eb38e0244aa3a168cf97d51c | refs/heads/master | 2023-06-24T02:13:45.150791 | 2021-07-22T07:32:40 | 2021-07-22T07:32:40 | 287,000,778 | 5 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,080 | py | from typing import Optional
from django.db import models, DatabaseError
from telegram import models as tg_models
from core.globals import logger
from ...base import BaseModel
class AdminLogEventActionToggleAdminQuerySet(models.QuerySet):
def update_or_create_action(self, **kwargs) -> Optional['AdminLogEventActionToggleAdmin']:
try:
return self.update_or_create(
**kwargs
)[0]
except DatabaseError as e:
logger.exception(e)
except Exception as e:
logger.exception(e)
return None
class AdminLogEventActionToggleAdminManager(models.Manager):
def get_queryset(self) -> AdminLogEventActionToggleAdminQuerySet:
return AdminLogEventActionToggleAdminQuerySet(self.model, using=self._db)
def update_or_create_action(
self,
*,
db_prev_chat_member: 'tg_models.ChatMember',
db_new_chat_member: 'tg_models.ChatMember',
) -> Optional['AdminLogEventActionToggleAdmin']:
if db_prev_chat_member is None or db_new_chat_member is None:
return None
return self.get_queryset().update_or_create_action(
**{
'prev_participant': db_prev_chat_member,
'new_participant': db_new_chat_member,
}
)
class AdminLogEventActionToggleAdmin(BaseModel):
"""
The admin rights of a user were changed
"""
prev_participant = models.OneToOneField(
'telegram.ChatMember',
on_delete=models.CASCADE,
null=True, blank=True,
related_name="action_toggle_admin_prev",
)
new_participant = models.OneToOneField(
'telegram.ChatMember',
on_delete=models.CASCADE,
null=True, blank=True,
related_name="action_toggle_admin_new",
)
###########################################
# `admin_log_event` : AdminLogEvent this action belongs to
objects = AdminLogEventActionToggleAdminManager()
class Meta:
verbose_name_plural = 'Events (toggle admin)'
| [
"taleb.zarhesh@gmail.com"
] | taleb.zarhesh@gmail.com |
7ec6988b240e9685e8a4607ea508bba1012032bf | ccbe4b62d42fc599adac870c1fdf67366f8bcbad | /lossFuncs.py | 9744b1643c551277375a5b622394e04d665e3a84 | [] | no_license | dogasiyli-boun/keyhandshapediscovery | 17b940c95cd2761fed2c98b9b84f4c4bcc96b19d | 782d66c5bc5213b6ebff3bf16cd14251251efa9b | refs/heads/master | 2023-04-21T10:19:15.898091 | 2021-04-25T21:39:03 | 2021-04-25T21:39:03 | 326,433,973 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 521 | py | from keras import backend as K
def neg_l2_reg(activation):
    """Negative mean-squared-activation penalty, scaled by the regularizer weight.

    NOTE(review): `weight_of_regularizer` is not defined anywhere in this
    module; it must be injected into this module's globals before the
    function is called — TODO confirm with the calling code.
    """
    mean_square = K.mean(K.square(activation))
    return -weight_of_regularizer * mean_square
def neg_l2_reg2(activation, weight_of_regularizer):
    """Element-wise negative L2 penalty (no mean reduction), with explicit weight."""
    squared = K.square(activation)
    return -(weight_of_regularizer * squared)
def penalized_loss(l2_val):
    """Build a Keras loss: mean-squared error plus the mean of `l2_val`."""
    def loss(y_true, y_pred):
        reconstruction = K.mean(K.square(y_pred - y_true))
        penalty = K.mean(l2_val)
        return reconstruction + penalty
    return loss
def only_sparsity_loss(l2_val):
    """Build a Keras loss that consists of the sparsity term only."""
    def loss(y_true, y_pred):
        # The reconstruction term is multiplied by zero on purpose: it keeps
        # y_true / y_pred in the computation graph while contributing nothing
        # to the loss value.
        reconstruction = K.mean(K.square(y_pred - y_true))
        return reconstruction * 0 + K.mean(l2_val)
    return loss
| [
"dogasiyli@gmail.com"
] | dogasiyli@gmail.com |
f6e07d300711214276c1a08dc5305787c2e16965 | aacf985c4cd9d87822e0a2bd82cadc1da5e0d11c | /jyaang_robinliu106/old scripts/fetchHospital.py | 7d3f6f48fbd68aeba137c909aee16794a7385238 | [] | no_license | robinliu106/Project-2-jyaang_robinliu106 | 8d5d5fb49d3d825ee301687a056911b1c336f4ed | f443ea96599349ecbb4e676d430d2f0473a184df | refs/heads/master | 2020-12-24T11:37:21.013880 | 2016-11-14T01:45:43 | 2016-11-14T01:45:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,039 | py | #filename: fetchHospitals.py
import json
import urllib.request #request the JSON url
import datetime
import pymongo
#import dml
#from pymongo import MongoClient
#client = MongoClient('localhost',27017)
#set up mongodb connection
#client = dml.pymongo.MongoClient()
#repo = client.Response
#repo.authenticate("")
startTime = datetime.datetime.now()

url = "https://data.cityofboston.gov/api/views/46f7-2snz/rows.json?accessType=DOWNLOAD"
# Use a context manager so the HTTP response is closed even if read/decode fails
# (the previous version leaked the connection object).
with urllib.request.urlopen(url) as response:
    JSON_response = response.read().decode("utf-8")
JSON_object = json.loads(JSON_response)  # deserialize the JSON document to Python objects

# list of hospitals from the JSON object
hospitalData = JSON_object['data']
# Column 8 and the last column are kept for each row.
# NOTE(review): presumably name and location — confirm against the dataset schema.
hospitalList = [[hospital[8], hospital[-1]] for hospital in hospitalData]
print(hospitalList[0])
endTime = datetime.datetime.now()
| [
"noreply@github.com"
] | noreply@github.com |
15b0120a6df7223e01d2f3afa3879e7993d63438 | 174f848b62fb2ea0a1605e1aab70085ffd27ce50 | /beginning/age.py | 0540185272594c22e444e0b66ab14903a4e2d11f | [] | no_license | unet-echelon/by_of_python_lesson | cd71bd3890d42d49cc128ec1730371bf1b64dbfa | c6c5c917414ac98b6dfb582dc06c26d31ea5b30c | refs/heads/master | 2021-07-11T07:16:20.243347 | 2020-09-14T12:39:13 | 2020-09-14T12:39:13 | 201,041,106 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | #!/usr/bin/env python3
age = 26
name = 'kernel'
# Build each message first, then print it (output is unchanged).
age_message = 'Возраст {0} -- {1} лет'.format(name, age)
print(age_message)
question_message = 'Почему {0} забаляэеться с этим Python?'.format(name)
print(question_message)
"aleguk@ukr.net"
] | aleguk@ukr.net |
82790cca6a6af7a174000f534c965d3708078922 | dc8cf6cdb7e41e049bcd962d25455a9761657a5f | /20mathFn.py | 98c7331cf54db3ab25e94dd7a82fdc95cf77b913 | [] | no_license | Shaheryarsaleem/Python-Demo | 88b13b4c7344eddbbf037360e9c5f5c8943fcb0a | 210ad599708415b90aeb27242c8c3bd461a28c14 | refs/heads/master | 2021-05-19T15:51:41.907176 | 2020-03-31T23:11:58 | 2020-03-31T23:11:58 | 251,951,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py | from math import *
# Demonstration of common math / builtin numeric functions.
value = 4.6
print(floor(value))   # round down -> 4
print(ceil(value))    # round up   -> 5
value = -3.8
print(round(value))   # nearest integer -> -4
print(abs(value))     # magnitude       -> 3.8
print(min(7, 2.1))    # min/max accept mixed int/float arguments
print(max(7, 2.1))
"shaheryarsaleem1@gmail.com"
] | shaheryarsaleem1@gmail.com |
8c2567a4b784ce24de31ec9eec73b66c17516a2d | d07a63ddd41f88ddb31ae451bde35cdda29ee98e | /coordinate_transformation/functions/domain.py | 82089535df4494d64af0c46249aef981e9dafad7 | [] | no_license | krisjuune/pre-post-noisi | 81f9f845f0b1e7a77646550df2769e80d28e242a | ab04f2e35a96ae142d1cc77cd1afc55181e4f2a1 | refs/heads/master | 2023-02-21T08:02:15.732731 | 2020-05-27T11:43:34 | 2020-05-27T11:43:34 | 223,137,623 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,482 | py | from pathlib import Path
import netCDF4 as nc4
import numpy as np
import numpy.ma as ma
def find_nearest(array, value):
    """Return the index of the element of 1-D `array` closest to `value`."""
    distances = np.abs(np.asarray(array) - value)
    return distances.argmin()
def truncate_domain(lat, lon, value, bounds):
    """
    Truncate a gridded field to a lat/lon sub-domain.

    Parameters
    ----------
    lat, lon : 1-D arrays of coordinates.
    value : 2-D array of the field of interest (topography, Moho depth, ...),
        stored either as (lat, lon) or as (lon, lat).
    bounds : sequence of 4 elements in the order
        [max latitude, min latitude, max longitude, min longitude].

    Returns
    -------
    (lat_domain, lon_domain, value_domain)

    Raises
    ------
    ValueError
        If value's leading dimension matches neither lat nor lon.
        (The previous version printed a message and then crashed with
        UnboundLocalError in this case.)
    """
    indx_lat_max = find_nearest(lat, bounds[0])
    indx_lat_min = find_nearest(lat, bounds[1])
    indx_lon_max = find_nearest(lon, bounds[2])
    indx_lon_min = find_nearest(lon, bounds[3])
    # Order each index pair ascending so the slices work regardless of whether
    # the coordinate arrays increase or decrease; this collapses the eight
    # near-identical branches of the original implementation.
    lat_lo, lat_hi = sorted((indx_lat_min, indx_lat_max))
    lon_lo, lon_hi = sorted((indx_lon_min, indx_lon_max))
    lat_domain = lat[lat_lo:lat_hi]
    lon_domain = lon[lon_lo:lon_hi]
    n = len(value)
    if n == len(lat):
        # value is stored as (lat, lon)
        value_domain = value[lat_lo:lat_hi, lon_lo:lon_hi]
    elif n == len(lon):
        # value is stored as (lon, lat)
        value_domain = value[lon_lo:lon_hi, lat_lo:lat_hi]
    else:
        raise ValueError('Array must have same dimensions as lat and lon')
    return (lat_domain, lon_domain, value_domain)
def relative_depth(elevation, reference_value):
    """
    Return depths relative to reference value, given the
    elevation. Elevation dataset must be negative below
    surface. Output dataset is positive downwards.
    """
    # Elevation is negative below the surface, so subtracting it from the
    # reference level yields a depth that grows positive downwards.
    # (This was previously an unimplemented TODO stub that returned the
    # function object itself.)
    return reference_value - elevation
def get_variable(variable, path):
    """
    Import a saved (pickled) variable, given the variable file name and the
    directory path, both as strings. Returns the underlying (unmasked) data.
    """
    # Imports kept function-local so the module can be used without these
    # dependencies loaded at import time.
    import pickle
    import numpy as np
    from pathlib import Path

    path = Path(path)
    # NOTE: pickle.load can execute arbitrary code while unpickling — only
    # use this on files you created yourself.
    with open(path / variable, 'rb') as f:
        data = pickle.load(f)
    # Strip any numpy mask and return the plain data array.
    return np.ma.getdata(data)
| [
"kristiina.joon@gmail.com"
] | kristiina.joon@gmail.com |
3b909fb21f16eefae1b403bb3b13881e9ebbdbf1 | f56a439ecf33d713b3cafb1513cd28e02b11ee54 | /homework20_11_2019/Main.py | 24dd32191f0766e3893da8c803e23a7abcbba98e | [] | no_license | ElenaGavrilovaMMF/Python-Homework | 3348e417a9a3c3a849df2c84e9055b183f23c3c3 | be57d6b95dd6fedaef1975114755b24bc2e8945f | refs/heads/master | 2020-09-11T01:29:24.091372 | 2019-11-21T16:53:55 | 2019-11-21T16:53:55 | 221,895,089 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,834 | py | import random
import ActionBook
import ImportLibrary
# Simulate a library: a random number of visitors either borrow a book
# (search by author, by year, or pick one at random) or donate/return one.
library = ImportLibrary.createLibrary()
# Number of visitors in the library
userCount = random.randint(1, 5)
# Visitor loop
userNumber = 1
while userCount != 0:
    print("Посетитель номер ", userNumber)
    userFunctionality = random.randint(1, 2)
    # 1 - the visitor takes a book, 2 - the visitor donates/returns a book
    if userFunctionality == 1:
        print("Он берет книгу")
        # 1 - search by author, 2 - search by year, 3 - pick a random book on the spot
        userDeleteBook = random.randint(1, 3)
        if len(library.getLibrary()) != 0:
            if userDeleteBook == 1:
                library.searchByAuthor()
            elif userDeleteBook == 2:
                library.searchByYear()
            else:
                # Remove a randomly chosen book from the library.
                libraryList = library.getLibrary()
                index = random.randint(0, len(libraryList)-1)
                bookDeleted = libraryList[index]
                bookDeleted.getString()
                library.deleteLibrary(bookDeleted.getId())
        else:
            print("В библиотеке нет книг")
    else:
        print("Он подарил/принес книгу")
        # Create a new book with a fresh id and add it to the library.
        newID = library.genarateID()
        actionBook = ActionBook.ActionBook()
        newBook = actionBook.createBook(newID)
        library.addLibrary(newBook)
        print("Книга добавлена")
    userNumber += 1
    userCount -= 1
    print("--------------------------------------------------------------\n")
print()
print("Сейчас в библиотеке\n")
library.getLibraryString()
| [
"noreply@github.com"
] | noreply@github.com |
174381d9dbc5ca6653d670f4e06be4f0bf6322c7 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/atbash-cipher/b9b8b95767434aa0871b2c8be48c53bd.py | f0b099a7bd55fe14b4e43829323d85f729c01d42 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 486 | py | from string import ascii_lowercase, digits, maketrans, translate, whitespace, punctuation
def xcode(text, space=None):
    """Apply the Atbash cipher (a<->z, ..., digits unchanged) to `text`.

    Whitespace and punctuation are removed and the input is lowercased.
    If `space` is a positive integer, the output is grouped into chunks of
    that size separated by single spaces.

    Note: ported from Python 2 — `string.maketrans`/`string.translate`
    were removed in Python 3; the built-in `str.maketrans` 3-argument form
    (the third argument is the set of characters to delete) is equivalent.
    """
    xlate = str.maketrans(ascii_lowercase + digits,
                          ascii_lowercase[::-1] + digits,
                          whitespace + punctuation)
    out = text.lower().translate(xlate)
    if space:
        # Join fixed-size chunks with single spaces (no trailing space).
        out = " ".join(out[i:i + space] for i in range(0, len(out), space))
    return out
encode = lambda x: xcode(x, 5)
decode = lambda x: xcode(x)
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
a0a745860debb68c77f7b93110de4c23aa4f70ae | 5c87690a2ef17e204456224e6388d64b2f20ab12 | /test/functional/wallet_import_rescan.py | 9700ec41a880bfc9d5fd36c57927e9fb86ee178c | [
"MIT"
] | permissive | durgeshkmr/Libra-Coin | aee161b81315c9a8163170db370136b09def6712 | c40293ac5c8f289e4c06b46d0c7f3ca76ff591a6 | refs/heads/master | 2020-06-09T02:18:53.830023 | 2019-06-23T14:02:57 | 2019-06-23T14:02:57 | 193,351,213 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,624 | py | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Libra Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test wallet import RPCs.
Test rescan behavior of importaddress, importpubkey, importprivkey, and
importmulti RPCs with different types of keys and rescan options.
In the first part of the test, node 0 creates an address for each type of
import RPC call and sends LIBRA to it. Then other nodes import the addresses,
and the test makes listtransactions and getbalance calls to confirm that the
importing node either did or did not execute rescans picking up the send
transactions.
In the second part of the test, node 0 sends more LIBRA to each address, and the
test makes more listtransactions and getbalance calls to confirm that the
importing nodes pick up the new transactions regardless of whether rescans
happened previously.
"""
from test_framework.test_framework import LibraTestFramework
from test_framework.util import (assert_raises_rpc_error, connect_nodes, sync_blocks, assert_equal, set_node_times)
import collections
import enum
import itertools
# Which import RPC style is exercised: a single-key RPC or importmulti.
Call = enum.Enum("Call", "single multi")
# What key material is imported: a bare address, a public key, or a private key.
Data = enum.Enum("Data", "address pub priv")
# Whether a rescan is requested; late_timestamp requests one with a timestamp
# past the rescan window so no transactions are actually picked up.
Rescan = enum.Enum("Rescan", "no yes late_timestamp")
class Variant(collections.namedtuple("Variant", "call data rescan prune")):
    """Helper for importing one key and verifying scanned transactions."""
    def try_rpc(self, func, *args, **kwargs):
        # When the import is expected to be rejected (rescan on a pruned
        # node), assert the specific RPC error instead of calling through.
        if self.expect_disabled:
            assert_raises_rpc_error(-4, "Rescan is disabled in pruned mode", func, *args, **kwargs)
        else:
            return func(*args, **kwargs)
    def do_import(self, timestamp):
        """Call one key import RPC."""
        rescan = self.rescan == Rescan.yes
        if self.call == Call.single:
            if self.data == Data.address:
                response = self.try_rpc(self.node.importaddress, address=self.address["address"], label=self.label, rescan=rescan)
            elif self.data == Data.pub:
                response = self.try_rpc(self.node.importpubkey, pubkey=self.address["pubkey"], label=self.label, rescan=rescan)
            elif self.data == Data.priv:
                response = self.try_rpc(self.node.importprivkey, privkey=self.key, label=self.label, rescan=rescan)
            assert_equal(response, None)
        elif self.call == Call.multi:
            # importmulti takes a timestamp per key; pushing it one second
            # past the rescan window makes the "late_timestamp" case skip
            # the historical transactions even though a rescan runs.
            response = self.node.importmulti([{
                "scriptPubKey": {
                    "address": self.address["address"]
                },
                "timestamp": timestamp + TIMESTAMP_WINDOW + (1 if self.rescan == Rescan.late_timestamp else 0),
                "pubkeys": [self.address["pubkey"]] if self.data == Data.pub else [],
                "keys": [self.key] if self.data == Data.priv else [],
                "label": self.label,
                "watchonly": self.data != Data.priv
            }], {"rescan": self.rescan in (Rescan.yes, Rescan.late_timestamp)})
            assert_equal(response, [{"success": True}])
    def check(self, txid=None, amount=None, confirmations=None):
        """Verify that listtransactions/listreceivedbyaddress return expected values."""
        txs = self.node.listtransactions(label=self.label, count=10000, skip=0, include_watchonly=True)
        assert_equal(len(txs), self.expected_txs)
        addresses = self.node.listreceivedbyaddress(minconf=0, include_watchonly=True, address_filter=self.address['address'])
        if self.expected_txs:
            assert_equal(len(addresses[0]["txids"]), self.expected_txs)
        if txid is not None:
            # Exactly one transaction with this txid must be listed.
            tx, = [tx for tx in txs if tx["txid"] == txid]
            assert_equal(tx["label"], self.label)
            assert_equal(tx["address"], self.address["address"])
            assert_equal(tx["amount"], amount)
            assert_equal(tx["category"], "receive")
            assert_equal(tx["label"], self.label)
            assert_equal(tx["txid"], txid)
            assert_equal(tx["confirmations"], confirmations)
            assert_equal("trusted" not in tx, True)
            address, = [ad for ad in addresses if txid in ad["txids"]]
            assert_equal(address["address"], self.address["address"])
            assert_equal(address["amount"], self.expected_balance)
            assert_equal(address["confirmations"], confirmations)
            # Verify the transaction is correctly marked watchonly depending on
            # whether the transaction pays to an imported public key or
            # imported private key. The test setup ensures that transaction
            # inputs will not be from watchonly keys (important because
            # involvesWatchonly will be true if either the transaction output
            # or inputs are watchonly).
            if self.data != Data.priv:
                assert_equal(address["involvesWatchonly"], True)
            else:
                assert_equal("involvesWatchonly" not in address, True)
# List of Variants for each way a key or address could be imported.
IMPORT_VARIANTS = [Variant(*variants) for variants in itertools.product(Call, Data, Rescan, (False, True))]

# List of nodes to import keys to. Half the nodes will have pruning disabled,
# half will have it enabled. Different nodes will be used for imports that are
# expected to cause rescans, and imports that are not expected to cause
# rescans, in order to prevent rescans during later imports picking up
# transactions associated with earlier imports. This makes it easier to keep
# track of expected balances and transactions.
ImportNode = collections.namedtuple("ImportNode", "prune rescan")
IMPORT_NODES = [ImportNode(*fields) for fields in itertools.product((False, True), repeat=2)]

# Rescans start at the earliest block up to 2 hours before the key timestamp.
# (Value in seconds.)
TIMESTAMP_WINDOW = 2 * 60 * 60
class ImportRescanTest(LibraTestFramework):
    """Functional test driving every import Variant against every ImportNode."""
    def set_test_params(self):
        # Node 0 mines/sends, node 1 generates the keys, the rest import them.
        self.num_nodes = 2 + len(IMPORT_NODES)
    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()
    def setup_network(self):
        extra_args = [["-addresstype=legacy"] for _ in range(self.num_nodes)]
        for i, import_node in enumerate(IMPORT_NODES, 2):
            if import_node.prune:
                extra_args[i] += ["-prune=1"]
        self.add_nodes(self.num_nodes, extra_args=extra_args)
        # Import keys
        self.start_nodes(extra_args=[[]] * self.num_nodes)
        super().import_deterministic_coinbase_privkeys()
        self.stop_nodes()
        self.start_nodes()
        for i in range(1, self.num_nodes):
            connect_nodes(self.nodes[i], 0)
    def import_deterministic_coinbase_privkeys(self):
        # Deliberately a no-op: the base-class import is done once in
        # setup_network(), not again by the framework.
        pass
    def run_test(self):
        # Create one transaction on node 0 with a unique amount for
        # each possible type of wallet import RPC.
        for i, variant in enumerate(IMPORT_VARIANTS):
            variant.label = "label {} {}".format(i, variant)
            variant.address = self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress(variant.label))
            variant.key = self.nodes[1].dumpprivkey(variant.address["address"])
            variant.initial_amount = 10 - (i + 1) / 4.0
            variant.initial_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.initial_amount)
        # Generate a block containing the initial transactions, then another
        # block further in the future (past the rescan window).
        self.nodes[0].generate(1)
        assert_equal(self.nodes[0].getrawmempool(), [])
        timestamp = self.nodes[0].getblockheader(self.nodes[0].getbestblockhash())["time"]
        set_node_times(self.nodes, timestamp + TIMESTAMP_WINDOW + 1)
        self.nodes[0].generate(1)
        sync_blocks(self.nodes)
        # For each variation of wallet key import, invoke the import RPC and
        # check the results from getbalance and listtransactions.
        for variant in IMPORT_VARIANTS:
            variant.expect_disabled = variant.rescan == Rescan.yes and variant.prune and variant.call == Call.single
            expect_rescan = variant.rescan == Rescan.yes and not variant.expect_disabled
            variant.node = self.nodes[2 + IMPORT_NODES.index(ImportNode(variant.prune, expect_rescan))]
            variant.do_import(timestamp)
            if expect_rescan:
                variant.expected_balance = variant.initial_amount
                variant.expected_txs = 1
                variant.check(variant.initial_txid, variant.initial_amount, 2)
            else:
                variant.expected_balance = 0
                variant.expected_txs = 0
                variant.check()
        # Create new transactions sending to each address.
        for i, variant in enumerate(IMPORT_VARIANTS):
            variant.sent_amount = 10 - (2 * i + 1) / 8.0
            variant.sent_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.sent_amount)
        # Generate a block containing the new transactions.
        self.nodes[0].generate(1)
        assert_equal(self.nodes[0].getrawmempool(), [])
        sync_blocks(self.nodes)
        # Check the latest results from getbalance and listtransactions.
        for variant in IMPORT_VARIANTS:
            if not variant.expect_disabled:
                variant.expected_balance += variant.sent_amount
                variant.expected_txs += 1
                variant.check(variant.sent_txid, variant.sent_amount, 1)
            else:
                variant.check()
if __name__ == "__main__":
    # Standard functional-test entry point.
    ImportRescanTest().main()
| [
"durgeshkmr4u@gmail.com"
] | durgeshkmr4u@gmail.com |
f6078be3422fecd7716db41f29450c62290212b6 | 6bf5526a561d825d89ee9f1b739221a2b32d1cb7 | /leads/migrations/0004_lead_organization.py | c735baac3f406923c25722abdb282b61df882bcc | [] | no_license | fdelacruz/djcrm | bac994d939da776fff8fe70c763743f6b7bd3bbb | 16e0c339e3b1e1e37c1c48c34c58b7263203f01d | refs/heads/main | 2023-07-06T21:19:39.561097 | 2021-08-11T21:24:24 | 2021-08-11T21:24:24 | 391,398,994 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 521 | py | # Generated by Django 3.1.4 on 2021-08-02 02:12
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django; applied migrations should not be edited by hand.

    dependencies = [
        ('leads', '0003_auto_20210801_2057'),
    ]

    operations = [
        migrations.AddField(
            model_name='lead',
            name='organization',
            # default=3 is a one-off backfill value (the UserProfile pk used to
            # populate existing rows); preserve_default=False removes it from
            # the model state after the migration runs.
            field=models.ForeignKey(default=3, on_delete=django.db.models.deletion.CASCADE, to='leads.userprofile'),
            preserve_default=False,
        ),
    ]
| [
"helloquico@gmail.com"
] | helloquico@gmail.com |
16608f71543a2fe1d5b8b46899fd4afec3deff67 | 4104ef21c5383458ef0005179b77f582ae87844c | /web/migrations/0009_auto_20150728_1334.py | 9321d918ca4040ea4a21f263470e94b1ce9fb7e9 | [] | no_license | kodiers/quests | 8580d4cacd5685e08989f28fc6825117b17ea146 | 006bfbd354c75f6baeac020112cf36adcee9b016 | refs/heads/master | 2021-01-18T22:25:00.676362 | 2016-05-23T23:08:18 | 2016-05-23T23:08:18 | 37,993,239 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,497 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    # Auto-generated by Django; applied migrations should not be edited by hand.

    dependencies = [
        ('web', '0008_organizers_show_on_main_page'),
    ]

    operations = [
        migrations.CreateModel(
            name='EventsPhotos',
            fields=[
                ('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),
                ('title', models.TextField(null=True, blank=True, verbose_name='Title')),
                # NOTE(review): 'Descrition' is a typo, but it is a recorded
                # migration string — fix it in a follow-up migration, not here.
                ('description', models.TextField(null=True, blank=True, verbose_name='Descrition')),
                ('date', models.DateField(null=True, blank=True, verbose_name='Date')),
                ('image', models.ImageField(upload_to='images')),
            ],
            options={
                'verbose_name_plural': 'Event photos',
                'verbose_name': 'Event photo',
            },
        ),
        # NOTE(review): null=True has no effect on ManyToManyField (Django
        # warns about it); kept because this migration has been applied.
        migrations.AlterField(
            model_name='events',
            name='registered_players',
            field=models.ManyToManyField(null=True, related_name='regitered_players', blank=True, to=settings.AUTH_USER_MODEL, verbose_name='Registered users'),
        ),
        migrations.AddField(
            model_name='events',
            name='event_photos',
            field=models.ManyToManyField(null=True, to='web.EventsPhotos', blank=True, verbose_name='Event photos'),
        ),
    ]
| [
"kodiers@gmail.com"
] | kodiers@gmail.com |
f818b6399f525cc1aa9a6badce808a7bf8216776 | 717e6aeb996ff3b78f623c35d16b0cb64d9c83f4 | /A_Pyecharts/bar/Learn2.py | ff4841b777b6e412133a2b04ba44ba70ae05e767 | [] | no_license | yanghuizhi/Pyecharts_Learn_YHZ | 61297747207ceecb9d74915499f8cd028eefcd52 | 8e6b580df7224f2f98c715d36b122452453ad3fc | refs/heads/master | 2021-03-25T17:45:08.090987 | 2020-03-30T07:16:27 | 2020-03-30T07:16:27 | 247,636,939 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,022 | py | # !/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: yanghuizhi
# Time: 2020/3/15 12:42 下午
from pyecharts.charts import Bar
import pyecharts.options as opts
from pyecharts.commons.utils import JsCode
from pyecharts.faker import Faker
from pyecharts.globals import ThemeType
"""
pyecharts Bar 主要配置学习
"""
def bar_border_radius():
    """Build a demo pyecharts Bar chart exercising the main configuration options."""
    c = (
        Bar(
            init_opts=opts.InitOpts(
                animation_opts=opts.AnimationOpts(  # animation settings for the bars
                    animation_delay=500,
                    animation_easing="cubicOut"
                ),
                theme=ThemeType.MACARONS,  # chart color theme
                page_title="Bar Learn",  # title of the generated HTML page
            )
        )
        # .reversal_axis()  # would flip the X and Y axes
        .add_xaxis(["草莓", "芒果", "葡萄", "雪梨", "西瓜", "柠檬", "车厘子"])
        .add_yaxis("A", Faker.values(),
                   category_gap="50%",  # gap between bar categories
                   markpoint_opts=opts.MarkPointOpts(),
                   is_selected=True  # whether series "A" is initially shown
                   )
        .set_global_opts(
            title_opts=opts.TitleOpts(  # chart title and subtitle
                title="Bar-参数使用例子",
                subtitle="副标题"
            ),
            toolbox_opts=opts.ToolboxOpts(),  # toolbox configuration
            yaxis_opts=opts.AxisOpts(position="right", name="Y轴"),  # put the Y axis on the right
            datazoom_opts=opts.DataZoomOpts(),  # data-zoom (pan/zoom) control
        )
        .set_series_opts(
            itemstyle_opts={  # color gradient for the series "A" bars
                "normal": {
                    "color": JsCode("""
                    new echarts.graphic.LinearGradient(
                    0, 0, 0, 1, [{
                    offset: 0,
                    color: 'rgba(0, 244, 255, 1)'}, {
                    offset: 1,
                    color: 'rgba(0, 77, 167, 1)'}], false)"""
                    ),
                    "barBorderRadius": [6, 6, 6, 6],
                    "shadowColor": 'rgb(0, 160, 221)',
                }
            },
            # mark points for the series maximum and minimum
            markpoint_opts=opts.MarkPointOpts(
                data=[
                    opts.MarkPointItem(type_="max", name="最大值"),
                    opts.MarkPointItem(type_="min", name="最小值"),
                ]
            ),
            # mark lines for the series maximum and minimum
            markline_opts=opts.MarkLineOpts(
                data=[
                    opts.MarkLineItem(type_="min", name="最小值"),
                    opts.MarkLineItem(type_="max", name="最大值")
                ]
            )
        )
    )
    return c
# Render the chart to a local HTML file; the file name can be customized.
bar_border_radius().render("页面_Learn2.html")
"347818169@qq.com"
] | 347818169@qq.com |
4ccc6cc3eecf18aac8437a276cf90f38ab1eeac1 | 838c763669c3a889c6a7fd82b6a3904f446b09c1 | /reader.py | d45880f3c8be16a36b10146551de420feccfe41f | [
"MIT"
] | permissive | adamdempsey90/cinnamon | 0d569949bcf09161a70b685805e0dc8dc7ee7629 | baf9d62c52f77e717f987e37981451c69ce1a0b5 | refs/heads/master | 2021-08-19T04:54:24.809545 | 2020-04-19T18:00:37 | 2020-04-19T18:00:37 | 166,311,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,238 | py | import h5py
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
class Sim():
    """One simulation output snapshot loaded from an HDF5 file.

    Reads coordinates and primitive fields (density, pressure, velocities)
    from ``<base><num>.h5`` and derives energies, sound speed, temperature,
    entropy and (for dims > 1) vorticity.
    """
    def __init__(self, num, base='test_', with_ghost=False, dims=2):
        """Load output number `num`; strip 3 ghost cells per side unless `with_ghost`."""
        with h5py.File(base + str(num) + '.h5', 'r') as file:
            f = file['/Data']
            self.time = float(f['time'][...])
            # x1 direction: cell edges, centers, widths, domain length.
            self.xm1 = f['xm1'][...]
            nx1 = len(self.xm1) - 1 - 6
            self.nx1 = nx1
            if not with_ghost:
                self.xm1 = self.xm1[3:-3]
            self.xc1 = .5 * (self.xm1[1:] + self.xm1[:-1])
            self.dx1 = np.diff(self.xm1)
            self.Lx1 = self.xm1[-1] - self.xm1[0]
            shape = (nx1 + 6,)
            if dims > 1:
                self.xm2 = f['xm2'][...]
                nx2 = len(self.xm2) - 1 - 6
                self.nx2 = nx2
                if not with_ghost:
                    self.xm2 = self.xm2[3:-3]
                self.xc2 = .5 * (self.xm2[1:] + self.xm2[:-1])
                self.dx2 = np.diff(self.xm2)
                shape = shape + (nx2 + 6,)
                self.Lx2 = self.xm2[-1] - self.xm2[0]
            if dims > 2:
                self.xm3 = f['xm3'][...]
                nx3 = len(self.xm3) - 1 - 6
                self.nx3 = nx3
                if not with_ghost:
                    self.xm3 = self.xm3[3:-3]
                self.xc3 = .5 * (self.xm3[1:] + self.xm3[:-1])
                self.dx3 = np.diff(self.xm3)
                self.Lx3 = self.xm3[-1] - self.xm3[0]
                shape = shape + (nx3 + 6,)
            # Data is stored flat with the fastest index last, so the array
            # shape is the reverse of (nx1, nx2, nx3) including ghosts.
            shape = tuple([x for x in shape[::-1]])
            self.dims = dims
            self.shape = shape
            self.gamma = float(f['Gamma'][...])
            self.rho = f['Density'][...].reshape(*shape)
            self.pres = f['Pressure'][...].reshape(*shape)
            self.vx1 = f['Vx1'][...].reshape(*shape)
            self.ke = .5 * self.rho * self.vx1**2
            self.vx2 = f['Vx2'][...].reshape(*shape)
            self.ke += .5 * self.rho * self.vx2**2
            self.vx3 = f['Vx3'][...].reshape(*shape)
            self.ke += .5 * self.rho * self.vx3**2
            try:
                self.scalar = f['Scalar1'][...].reshape(*shape)
            except KeyError:
                # Bug fix: was a bare `except:`; h5py raises KeyError when
                # the optional Scalar1 dataset is absent.
                pass
        if dims == 1:
            self.extent = (self.xm1.min(), self.xm1.max())
        elif dims == 2:
            self.extent = (self.xm1.min(), self.xm1.max(), self.xm2.min(),
                    self.xm2.max())
        if not with_ghost:
            # Strip the 3 ghost cells on each side of every field.
            # NOTE(review): dims == 3 is not handled here — TODO confirm.
            if dims == 1:
                self.rho = self.rho[3:-3]
                self.pres = self.pres[3:-3]
                self.vx1 = self.vx1[3:-3]
                self.vx2 = self.vx2[3:-3]
                self.vx3 = self.vx3[3:-3]
                self.ke = self.ke[3:-3]
                try:
                    self.scalar = self.scalar[3:-3]
                except AttributeError:
                    pass
            elif dims == 2:
                self.rho = self.rho[3:-3, 3:-3]
                self.pres = self.pres[3:-3, 3:-3]
                self.vx1 = self.vx1[3:-3, 3:-3]
                self.vx2 = self.vx2[3:-3, 3:-3]
                self.vx3 = self.vx3[3:-3, 3:-3]
                self.ke = self.ke[3:-3, 3:-3]
                try:
                    self.scalar = self.scalar[3:-3, 3:-3]
                except AttributeError:
                    pass
        # Derived thermodynamic quantities.
        self.S = self.pres / self.rho**self.gamma
        self.intenergy = self.pres / (self.gamma - 1)
        self.energy = self.ke + self.intenergy
        self.cs = np.sqrt(self.gamma * self.pres / self.rho)
        self.temp = self.intenergy * self.gamma / self.rho
        delad = 1. - 1. / self.gamma
        self.entropy = np.log(self.temp * self.pres**(-delad))
        if dims > 1:
            # z-vorticity: d(vx2)/dx1 - d(vx1)/dx2 (assumes uniform spacing).
            self.vort = np.gradient(self.vx2, self.dx1[0], axis=1, edge_order=2) - np.gradient(self.vx1, self.dx2[0], axis=0, edge_order=2)
        if self.nan_check():
            print('NaN detected!')
            return

    def nan_check(self):
        """Return True if any of the primary fields contains a NaN."""
        func = lambda x: np.any(np.isnan(x))
        return func(self.vx1) | func(self.rho) | func(self.pres) | func(self.energy)

    def plot(self, **kargs):
        """Dispatch to the 1-D or 2-D plotting routine based on self.dims."""
        if self.dims == 1:
            return self.plot1D(**kargs)
        elif self.dims == 2:
            return self.plot2D(**kargs)

    def plot1D(self, val='rho', func=None, shift=0, scale=1, ax=None, ylbl='', **kargs):
        """Line plot of (getattr(self, val) - shift) / scale, or func(self)."""
        first = ax is None
        if first:
            fig, ax = plt.subplots(figsize=(8, 6))
        fig = ax.get_figure()
        if func is not None:
            q = func(self)
        else:
            q = (getattr(self, val) - shift) / scale
        line, = ax.plot(self.xc1, q, **kargs)
        ax.set_ylabel(ylbl, fontsize=20)
        ax.minorticks_on()
        ax.tick_params(labelsize=20)
        ax.text(.05, .05, '$t={:.2f}$'.format(self.time), transform=ax.transAxes, fontsize=20)
        fig.tight_layout()
        return fig, ax, line

    def plot2D(self, val='rho', func=None, norm=None, shift=0, scale=1, ax=None, ylbl='',
            cmap='viridis', conts=None, figsize=None, **kargs):
        """Image plot of a 2-D field; returns (fig, ax, colorbar-or-None, image)."""
        first = ax is None
        if first:
            if figsize is None:
                fig, ax = plt.subplots(figsize=(4 * self.Lx2 / self.Lx1, 4))
            else:
                fig, ax = plt.subplots(figsize=figsize)
        fig = ax.get_figure()
        if func is not None:
            q = func(self)
        else:
            q = (getattr(self, val) - shift) / scale
        if norm is None:
            norm = colors.Normalize()
        img = ax.imshow(q, origin='lower', extent=self.extent, norm=norm, cmap=cmap, aspect='equal', **kargs)
        if first:
            cb = _create_colorbar(ax, norm, cmap=cmap)
        else:
            # Only attach a colorbar to axes we created ourselves.
            cb = None
        ax.minorticks_on()
        ax.tick_params(labelsize=20)
        ax.text(.05, .05, '$t={:.2f}$'.format(self.time), transform=ax.transAxes, fontsize=20)
        if conts is not None:
            cont = ax.contour(q, levels=conts, origin='lower', extent=self.extent, norm=norm, colors='k', **kargs)
        fig.tight_layout()
        return fig, ax, cb, img

    def plotavg(self, val='rho', axis=1, func=None, norm=1, shift=0, ax=None, ylbl='', **kargs):
        """Plot the field averaged along `axis` (axis=1 averages over x1)."""
        if ax is None:
            fig, ax = plt.subplots(figsize=(8, 6))
        fig = ax.get_figure()
        if func is not None:
            q = func(self).mean(axis=axis)
        else:
            q = (getattr(self, val).mean(axis=axis) - shift) / norm
        # Arrays are stored (x2, x1): averaging over axis 1 leaves x2.
        if axis == 1 or axis == -1:
            x = self.xc2
        else:
            x = self.xc1
        # (removed a leftover debug print of the array shapes here)
        ax.plot(x, q, **kargs)
        ax.minorticks_on()
        ax.tick_params(labelsize=20)
        return fig, ax

    def contour(self, val='rho', colorbar=False, func=None, norm=None, ax=None, ylbl='', **kargs):
        """Contour plot of a field; returns (fig, ax, colorbar-or-None, contour set)."""
        # Bug fix: the default used to be a single shared colors.Normalize()
        # instance created at import time; Normalize caches vmin/vmax, so the
        # shared default leaked scaling between unrelated plots.
        if norm is None:
            norm = colors.Normalize()
        if ax is None:
            fig, ax = plt.subplots(figsize=(6, 6))
        fig = ax.get_figure()
        if func is not None:
            q = func(self)
        else:
            q = getattr(self, val)
        levels = kargs.pop('levels', None)
        if levels is None:
            cont = ax.contour(q, origin='lower', extent=self.extent, norm=norm, **kargs)
        else:
            cont = ax.contour(q, levels=levels, origin='lower', extent=self.extent, norm=norm, **kargs)
        if colorbar:
            # NOTE(review): the colorbar does not receive the contour cmap —
            # it falls back to _create_colorbar's default; confirm intent.
            cb = _create_colorbar(ax, norm)
        else:
            cb = None
        ax.minorticks_on()
        return fig, ax, cb, cont

    def presconts(self, conts, streams=True, ax=None, **kargs):
        """Pressure image with density contours and (optionally) streamlines."""
        # Bug fix: when an axis was passed in, `fig` was never assigned and
        # the final return raised NameError; plot() handles ax=None itself.
        fig, ax, _, _ = self.plot('pres', ax=ax, **kargs)
        # Bug fix: was passing an invalid `clrbar=False` keyword that was
        # forwarded to ax.contour (TypeError); `colorbar` already defaults off.
        self.contour('rho', levels=conts, ax=ax, colors='k')
        xlims = ax.get_xlim()
        ylims = ax.get_ylim()
        if streams:
            ax.streamplot(self.xc1, self.xc2, self.vx1, self.vx2, color='k')
        ax.set_xlim(xlims)
        ax.set_ylim(ylims)
        return fig, ax

    def sum(self, axes=None, **kargs):
        """2x2 summary panel: density, velocity, pressure, specific energy."""
        if axes is None:
            fig, axes = plt.subplots(2, 2, figsize=(8, 8))
        fig = axes.flatten()[0].get_figure()
        self.plot(val='rho', ax=axes[0, 0], ylbl='Density')
        self.plot(val='vx1', ax=axes[0, 1], ylbl='Velocity')
        self.plot(val='pres', ax=axes[1, 0], ylbl='Pressure')
        self.plot(func=lambda x: x.pres / (x.gamma - 1) / x.rho, ax=axes[1, 1], ylbl='Energy')
        fig.tight_layout()
        return fig, axes
def _create_colorbar(ax, norm, cax=None, log=False, cmap='viridis', **kargs):
    """Attach a horizontal colorbar above `ax` (or into `cax`) and return it."""
    import matplotlib
    import matplotlib.cm
    from mpl_toolkits.axes_grid1 import make_axes_locatable

    labelsize = kargs.pop('labelsize', 12)
    if cax is None:
        # Carve a thin strip off the top of the host axes for the colorbar.
        cax = make_axes_locatable(ax).append_axes('top', size='3%', pad=.05)
    cmap = matplotlib.cm.get_cmap(cmap)
    cb = matplotlib.colorbar.ColorbarBase(
        ax=cax, cmap=cmap, norm=norm, orientation='horizontal', **kargs)
    # Ticks and label go above the bar so they do not collide with the plot.
    cb.ax.xaxis.set_ticks_position('top')
    cb.ax.xaxis.set_label_position('top')
    cb.ax.tick_params(labelsize=labelsize)
    return cb
class Animation():
    """Animate 2-D Sim snapshots as an image sequence saved to mp4."""
    def __init__(self, sim_kargs=None, **kargs):
        """`sim_kargs` are forwarded to Sim(); `kargs` to Sim.plot()."""
        # Bug fix: `sim_kargs={}` was a shared mutable default argument.
        self.sim_kargs = {} if sim_kargs is None else sim_kargs
        self.kargs = kargs
        self.fixbar = False
        self.fig, self.ax, self.cb, self.img = Sim(0, **self.sim_kargs).plot(**kargs)
    def update(self, i):
        """Redraw the image with the data from output number `i`."""
        fld = Sim(i, **self.sim_kargs)
        try:
            func = self.kargs['func']
            d = func(fld)
        except KeyError:
            try:
                val = self.kargs['val']
            except KeyError:
                # Default to density and remember the choice for later frames.
                val = 'rho'
                self.kargs['val'] = 'rho'
            d = getattr(fld, val)
        if not self.fixbar:
            # Rescale the colorbar to this frame's data range.
            # NOTE(review): Colorbar.set_clim/draw_all are removed in recent
            # matplotlib releases — confirm the pinned matplotlib version.
            self.cb.set_clim([d.min(), d.max()])
            self.cb.draw_all()
        self.img.set_data(d)
        # Replace the time-stamp annotation.
        self.ax.texts[0].remove()
        self.ax.text(.05, .05, '$t={:.2f}$'.format(fld.time), transform=self.ax.transAxes, fontsize=20)
    def animate(self, irange, fixbar=False, fname='mov', frames=None):
        """Save len(irange) frames to `fname`.mp4 (the `frames` argument is ignored)."""
        self.fixbar = fixbar
        import matplotlib.animation as animation
        frames = len(irange)
        anim = animation.FuncAnimation(self.fig, self.update, frames=frames, repeat=False)
        anim.save('{}.mp4'.format(fname), writer=animation.FFMpegWriter())
class Animation1D():
    """Animate 1-D Sim snapshots as a line plot saved to mp4."""
    def __init__(self, sim_kargs=None, **kargs):
        """`sim_kargs` are forwarded to Sim(); `kargs` to Sim.plot()."""
        # Bug fix: `sim_kargs={}` was a shared mutable default argument.
        self.sim_kargs = {} if sim_kargs is None else sim_kargs
        self.kargs = kargs
        self.fig, self.ax, self.line = Sim(0, **self.sim_kargs).plot(**kargs)
    def update(self, i):
        """Redraw the line with the data from output number `i`."""
        fld = Sim(i, **self.sim_kargs)
        try:
            func = self.kargs['func']
            d = func(fld)
        except KeyError:
            try:
                val = self.kargs['val']
            except KeyError:
                # Default to density and remember the choice for later frames.
                val = 'rho'
                self.kargs['val'] = 'rho'
            d = getattr(fld, val)
        self.line.set_ydata(d)
        self.ax.autoscale()
        self.ax.relim()
        # Replace the time-stamp annotation.
        self.ax.texts[0].remove()
        self.ax.text(.05, .05, '$t={:.2f}$'.format(fld.time), transform=self.ax.transAxes, fontsize=20)
    def animate(self, irange, fname='mov', frames=None):
        """Save len(irange) frames to `fname`.mp4 (the `frames` argument is ignored)."""
        import matplotlib.animation as animation
        frames = len(irange)
        anim = animation.FuncAnimation(self.fig, self.update, frames=frames, repeat=False)
        anim.save('{}.mp4'.format(fname), writer=animation.FFMpegWriter())
| [
"adamdemps@gmail.com"
] | adamdemps@gmail.com |
73938db0d5ed5dc6e4c025daa028c93ba6f042ef | bff30e238a322a390a35fffc3a0e44210c3e7436 | /Motor_Sarah.py | 301fd7112cf1aaa24eba54205120217bae9dd0af | [] | no_license | Timothysit/swc_invivo_analysis | 8dc00ca255d231ca8a2a1a1ca6716988c4966e4b | ae8eeeef802a95aac081a6d6d15cedc6a7044c44 | refs/heads/master | 2020-03-30T19:56:44.054399 | 2018-10-05T14:42:21 | 2018-10-05T14:42:21 | 151,566,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,335 | py |
#Analysis of motor movement
#Input is csv file (motorFile) out put is also csv file
#MotorFile = pd.read_csv('studentCourse_command.csv')
import pandas as pd
import numpy as np
# making a mini dataframe with fake data
# fake_motor_data = {'command':[0,10,15,-5,-10,-15,0], 'time':[0,1,2,3,4,5,6]}
# motor_df = pd.DataFrame(fake_motor_data, columns = ['command','time' ])
# print(motor_df)
commandfile = pd.DataFrame({'command': np.array([0, 1, 2, 3, 7, -7, 8, 9, -2, -20, -25])})
def motor_direction(commandfile):
    """Derive a motor-direction table from a DataFrame of position commands.

    `commandfile` must have a 'command' column.  For each consecutive pair
    of commands, direction is True when the command increases.  The result
    (command, direction, time) is written to direction_motor.csv and
    returned as a DataFrame.
    """
    commands = commandfile['command']
    print(commands)
    cmd = commands.values
    print(cmd)
    # Element-wise comparison of each command with its successor.
    increasing = cmd[:-1] < cmd[1:]
    print(increasing)
    n_steps = np.size(increasing)
    print("length of direction array is:", n_steps)
    # One time tick per direction sample.
    timeline = np.arange(n_steps)
    print(timeline)
    frame = pd.DataFrame({"command": cmd[:-1], "direction": increasing, "time": timeline})
    print(frame)
    frame.to_csv("direction_motor.csv", sep=",")
    return frame
motor_direction(commandfile)
| [
"saraholesen@hotmail.com"
] | saraholesen@hotmail.com |
e6e5336ab077801216fea9feeba853f708c60dda | 77e1db762bd012e0447b8275df6e24012a3aa82f | /android-unlock-patterns/android-unlock-patterns.py | 3633050be42eecf7bd09c207660bb66f41a5212c | [] | no_license | PigsGoMoo/LeetCode | 1e9d37507b04a238347bcc693e9be3d24313a46e | a5c9bc433ac6c54bebb83b9640273216512f41b8 | refs/heads/main | 2023-06-28T12:58:57.516464 | 2021-08-03T19:49:07 | 2021-08-03T19:49:07 | 362,915,227 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,516 | py | class Solution:
def __init__(self):
# We'll make our map of variables that are skipped
# We access it by using self.skip[x][y] where x and y are the numbers we're connecting.
self.skip = [[0] * 10 for _ in range(10)]
# Now set the values
self.skip[1][3] = self.skip[3][1] = 2
self.skip[3][9] = self.skip[9][3] = 6
self.skip[7][9] = self.skip[9][7] = 8
self.skip[1][7] = self.skip[7][1] = 4
self.skip[1][9] = self.skip[9][1] = self.skip[3][7] = self.skip[7][3] = self.skip[2][8] = \
self.skip[8][2] = self.skip[4][6] = self.skip[6][4] = 5
# Initialize visited
self.visited = set()
self.visited.add(0)
def make_pattern(self, start, remain):
# Base cases
if remain < 0:
return 0
elif remain == 0:
return 1
# Add to visited
self.visited.add(start)
ans = 0
# Loop through and add all possible numbers to combo
for i in range(10):
# If we haven't added that number yet and we've visted the cross value
if i not in self.visited and self.skip[start][i] in self.visited:
# Call recursively
ans += self.make_pattern(i, remain - 1)
# Remove from visited after looping through all possible values
self.visited.remove(start)
# Return
return ans
def numberOfPatterns(self, m: int, n: int) -> int:
# Brute force way is to backtrack every solution
# But then we need to find a way to keep track of the jumps that are not allowed
# We can map out the numbers that are skipped
# Another possible optimization is that the patterns for 1,3,7,9 are all the same - just rotated 90 degrees
# Same with 2, 4, 6, 8. So we can just find one and multiply by 4 for both of those
# And then do one last search starting at 5
ans = 0
# Search through each length character
for i in range(m - 1, n):
# 1, 3, 7, 9 are symmetrical, so we just need to figure out the ans for 1 and multiply by 4
ans += self.make_pattern(1, i) * 4
# 2, 4, 6, 8 also symmetrical
ans += self.make_pattern(2, i) * 4
# Lastly, solve for 5
ans += self.make_pattern(5, i)
return ans
| [
"33019839+PigsGoMoo@users.noreply.github.com"
] | 33019839+PigsGoMoo@users.noreply.github.com |
8e1cc89b07d89b78e4641642cd8d6beed7f2f9d7 | 1df24101037f5dd88fd756acabad52d3cb005960 | /manage.py | 42312add1dfebbd3c874160abb428ef4b6c144b3 | [] | no_license | Caiseyann/CodeWars_API | 9324cb0b627ba93407baa66729ac0da0ce690be6 | dfdee24a84ab5cc06d9c4bac95189a8038312689 | refs/heads/master | 2022-12-18T20:36:18.762783 | 2020-10-02T06:52:23 | 2020-10-02T06:52:23 | 299,637,614 | 0 | 0 | null | 2020-09-29T14:10:26 | 2020-09-29T14:10:25 | null | UTF-8 | Python | false | false | 656 | py | from app import create_app,db
from flask_script import Manager,Server
from app.models import User
from flask_migrate import Migrate, MigrateCommand
# Creating app instance
# Build the Flask app with the 'development' configuration profile.
app = create_app('development')
# Flask-Script CLI entry point and Flask-Migrate database migrations.
manager = Manager(app)
migrate = Migrate(app,db)
# `python manage.py server` runs the dev server; `python manage.py db ...`
# exposes the Alembic migration commands.
manager.add_command('server',Server)
manager.add_command('db',MigrateCommand)
@manager.command
def test():
    """Run the unit tests."""
    # Discover and run every test module under the local ./tests package.
    import unittest
    tests = unittest.TestLoader().discover('tests')
    unittest.TextTestRunner(verbosity=2).run(tests)
@manager.shell
def make_shell_context():
    # Names pre-imported into the `python manage.py shell` session.
    return dict(app = app,db = db,User = User)
if __name__ == '__main__':
    manager.run()
"msyokimutua@gmail.com"
] | msyokimutua@gmail.com |
62b4bf8f303857f60e9a27468b20de57f71a6cc0 | 7a923ba531688401d023e299116109ebff4a40cc | /GuessingGame.py | 969f590aeaf5e8e2d22da2d22327e5c119f9ea81 | [] | no_license | artoyebi/pythontask3 | 248962c2fa6e404abc789378882f7c376b47b351 | 8bf964f8078dc0519eff56548ef0cce86dd4857f | refs/heads/master | 2022-04-22T09:35:19.761182 | 2020-04-19T03:44:57 | 2020-04-19T03:44:57 | 256,905,960 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,964 | py | import random
def randint(mini, maxi):
    """Return a random secret number in the inclusive range [mini, maxi]."""
    secretNumber = random.randint(mini, maxi)
    return secretNumber
def _play(level, maxi, guess_limit):
    """Run one guessing round: `guess_limit` tries at a number in [1, maxi].

    Extracted helper: the three difficulty branches were identical except
    for the number range and the allowed guess count.
    """
    print(f"Difficulty Level {level}\nGuess A Number Between 1 and {maxi}")
    guess_count = 0
    secretNumber = randint(1, maxi)
    while guess_count < guess_limit:
        print(f"Guesses Left: {guess_limit - guess_count}")
        guess = int(input("Guess: "))
        guess_count += 1
        if guess == secretNumber:
            print("You Guessed Right!")
            break
        else:
            print("That Was Wrong!")
    else:
        # while/else: runs only when the loop exhausted without a break.
        print(f"Your {guess_limit} guesses were wrong!")
        print(f"Correct Number is {secretNumber}\nGame Over!")
print("Number Guessing Game")
level = input("Please Choose Between Difficulty Level 1-3: ")
if level == "1":
    _play(1, 10, 6)
elif level == "2":
    _play(2, 20, 4)
elif level == "3":
    _play(3, 50, 3)
else:
    print("You've Not Selected Any Level")
| [
"61976986+artoyebi@users.noreply.github.com"
] | 61976986+artoyebi@users.noreply.github.com |
861ed03b50fc4b4ecb334880a08e25954ec6486a | be8d396b1622e7a765865a5e6046588229b62089 | /model/test_currency.py | 269f0710f80c292119be7afe286be2852d14fe21 | [
"Apache-2.0"
] | permissive | nimakaviani/demo-crypto-bot | 68983d799e50b646f3ac3d47ea8c963264acf34b | 59d61cdec675a038d7c0c46f71893a145df59b97 | refs/heads/master | 2021-08-31T22:39:58.752507 | 2017-12-17T07:49:41 | 2017-12-23T06:07:57 | 114,571,875 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 213 | py | import pytest
@pytest.fixture
def currency():
from model.account import Currency
return Currency("test-currency", 10)
def test_update(currency):
currency.update(15)
assert currency.value == 25
| [
"nkavian@us.ibm.com"
] | nkavian@us.ibm.com |
dc73cf6e9989804e2dc0f3c2877e0fff14501cb2 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02851/s504076203.py | 4b1b8ee55fac378d844e2fa9052da7b00644e874 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,688 | py | def getN():
return int(input())
def getNM():
    # Read one whitespace-separated line of ints; returns a lazy map object.
    return map(int, input().split())
def getList():
    # Read one whitespace-separated line of ints as a list.
    return list(map(int, input().split()))
def getArray(intn):
    # Read `intn` lines, one int per line.
    return [int(input()) for i in range(intn)]
def input():
    # Shadows the builtin input(): faster raw reads from sys.stdin for
    # competitive programming, with the trailing newline stripped.
    return sys.stdin.readline().rstrip()
def rand_N(ran1, ran2):
    # Test-data helper: one uniform random int in [ran1, ran2].
    return random.randint(ran1, ran2)
def rand_List(ran1, ran2, rantime):
    # Test-data helper: `rantime` uniform ints in [ran1, ran2] (dups allowed).
    return [random.randint(ran1, ran2) for i in range(rantime)]
def rand_ints_nodup(ran1, ran2, rantime):
    """Return a sorted list of `rantime` distinct random ints in [ran1, ran2].

    Perf fix: membership was tracked in a list (O(n) per draw); a set gives
    O(1) duplicate rejection with the same draw sequence and distribution.
    Caller must ensure ran2 - ran1 + 1 >= rantime or this loops forever
    (same contract as before).
    """
    ns = set()
    while len(ns) < rantime:
        # Adding a duplicate to a set is a no-op, matching the old
        # "append only if not already present" behaviour.
        ns.add(random.randint(ran1, ran2))
    return sorted(ns)
def rand_query(ran1, ran2, rantime):
    """Return `rantime` distinct sorted [lo, hi] query pairs in [ran1, ran2]."""
    r_query = []
    while len(r_query) < rantime:
        # Each candidate is a sorted pair of two distinct values.
        candidate = rand_ints_nodup(ran1, ran2, 2)
        if candidate not in r_query:
            r_query.append(candidate)
    return sorted(r_query)
from collections import defaultdict, deque, Counter
from sys import exit
from decimal import *
import heapq
import math
from fractions import gcd
import random
import string
import copy
from itertools import combinations, permutations, product
from operator import mul, itemgetter
from functools import reduce
from bisect import bisect_left, bisect_right
import sys
sys.setrecursionlimit(1000000000)
mod = 10 ** 9 + 7
#############
# Main Code #
#############
# Translated from Japanese: "those whose remainders average to 1" —
# count contiguous subarrays whose (sum mod K) equals their length.
N, K = getNM()
A = getList()
# Subtract 1 (mod K) from every element: a subarray now qualifies iff its
# adjusted sum is congruent to 0 mod K.
A = [(i % K) - 1 for i in A]
# In-place prefix sums.
for i in range(N - 1):
    A[i + 1] += A[i]
A.insert(0, 0)
# Normalize non-negative prefix sums mod K.
# NOTE(review): negative prefix values are left un-normalized here —
# confirm they cannot cause missed matches for the intended constraints.
A = [i % K if i >= 0 else i for i in A]
# Bucket prefix indices by prefix value: equal prefixes bound subarrays
# whose adjusted sum is 0 mod K.
num = defaultdict(list)
for i in range(N + 1):
    num[A[i]].append(i)
cnt = 0
# For equal prefix values, count index pairs less than K apart
# (the subarray length must be < K for "remainder == length" to hold).
for key, opt in num.items():
    for i in range(len(opt)):
        index = bisect_right(opt, opt[i] + K - 1)
        cnt += index - i - 1
print(cnt)
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
9f08b88ac5f5532156b9eae0a46fa0324b93ce00 | 3dd946915cdef29f7043c151deebc68200e3075c | /taxi/settings.py | cfa1e334cf53b1d657ea920ecf8f7697933080e8 | [] | no_license | PierceAndy/rpc-taxi-booking-sim | a3926bdf52b7f0130fe9bbe4a5ba3cd72fc30256 | 1cb1331d03e56f3cae19240662f6042fa3d2ea26 | refs/heads/master | 2021-06-19T05:49:09.490511 | 2019-06-01T08:29:09 | 2019-06-01T08:33:53 | 129,953,917 | 0 | 0 | null | 2021-06-10T20:26:33 | 2018-04-17T19:19:10 | Python | UTF-8 | Python | false | false | 1,166 | py | """
Django settings for taxi project.
Generated by 'django-admin startproject' using Django 2.0.3.
"""
import os
# Project root: two directory levels above this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# The secret key is kept out of version control in secret_key.txt,
# read once at import time (the file must exist in the working directory).
with open('secret_key.txt') as f:
    SECRET_KEY = f.read().strip()
DEBUG = False
ALLOWED_HOSTS = [
    'localhost',
    '127.0.0.1'
]
INSTALLED_APPS = [
    "django.contrib.admindocs",
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# Hardening flags: cookies only over HTTPS, browser-side protections on.
CSRF_COOKIE_SECURE = True
SESSION_COOKIE_SECURE = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_BROWSER_XSS_FILTER = True
# NOTE(review): secure cookies are enabled but SSL redirect is off —
# confirm TLS is terminated upstream, otherwise cookies are never sent.
SECURE_SSL_REDIRECT = False
X_FRAME_OPTIONS = 'DENY'
ROOT_URLCONF = "taxi.urls"
TEMPLATES = []
WSGI_APPLICATION = 'taxi.wsgi.application'
# No database or password validators configured for this project.
DATABASES = {}
AUTH_PASSWORD_VALIDATORS = []
# Internationalization / localization.
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Singapore'
USE_I18N = True
USE_L10N = True
USE_TZ = True
| [
"pierceanderson.fu+github@gmail.com"
] | pierceanderson.fu+github@gmail.com |
39d76c5ac93ca62d22d8102da6ae57b798a0abc1 | 41b59a9c8381fa3a92f5d2c37c91261afb9c82c4 | /QCDEventShape/2017/MC/test/Run_QCD_test_76x_data_cfg.py | a70d898a68603a21318aca5063817cdfeee12de7 | [] | no_license | Sumankkundu/ChargedParticle | c6d4f90b55df49321df2ecd758bb1f39db896f8c | eb5bada24b37a58ded186d6e5d2d7bd00898fefe | refs/heads/master | 2023-07-15T03:34:33.377203 | 2021-08-31T05:01:32 | 2021-08-31T05:01:32 | 231,091,587 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,329 | py | import FWCore.ParameterSet.Config as cms
process = cms.Process("Test")
## switch to uncheduled mode
#process.options.allowUnscheduled = cms.untracked.bool(True)
#process.Tracer = cms.Service("Tracer")
process.load("PhysicsTools.PatAlgos.producersLayer1.patCandidates_cff")
process.load("PhysicsTools.PatAlgos.selectionLayer1.selectedPatCandidates_cff")
# source
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(#'/store/mc/Spring14dr/TTJets_MSDecaysCKM_central_Tune4C_13TeV-madgraph-tauola/AODSIM/PU20bx25_POSTLS170_V5-v1/00000/00B6F8B6-90F1-E311-B72C-0025905A6092.root'
'/store/data/Run2017F/JetHT/MINIAOD/17Nov2017-v1/70000/FEA2ED14-5CDF-E711-ACA6-02163E012AF0.root',
'/store/data/Run2017F/JetHT/MINIAOD/17Nov2017-v1/70000/FE211553-36DF-E711-BAB7-02163E019BD0.root',
#'/store/data/Run2017F/JetHT/MINIAOD/17Nov2017-v1/70000/FE155D02-00DF-E711-BA34-02163E011A55.root',
#'/store/data/Run2017F/JetHT/MINIAOD/17Nov2017-v1/70000/FE08F446-63DF-E711-A338-A4BF0112BCF8.root',
#'/store/data/Run2015D/JetHT/MINIAOD/PromptReco-v4/000/258/750/00000/28938773-BD72-E511-A479-02163E01432A.root',
#'/store/data/Run2015D/JetHT/MINIAOD/PromptReco-v4/000/258/159/00000/0075E33B-3B6C-E511-BCC8-02163E01455C.root'
#'/store/data/Run2015D/JetHT/MINIAOD/PromptReco-v4/000/258/159/00000/0CE8F23E-3B6C-E511-B68A-02163E013744.root',
#'/store/data/Run2015D/JetHT/MINIAOD/PromptReco-v4/000/258/159/00000/36DC8060-3B6C-E511-BC73-02163E0143DD.root',
#'/store/data/Run2015D/JetHT/MINIAOD/PromptReco-v4/000/258/159/00000/50A3A073-3B6C-E511-A997-02163E0144CD.root'
#'/store/data/Run2015D/JetHT/MINIAOD/16Dec2015-v1/00000/301A497D-70B0-E511-9630-002590D0AFA8.root',
#'/store/data/Run2015D/JetHT/MINIAOD/16Dec2015-v1/00000/7210C351-67B0-E511-A34C-7845C4FC37AF.root'
#'/store/data/Run2015D/JetHT/MINIAOD/16Dec2015-v1/00000/745E2A4F-67B0-E511-9DA3-0090FAA57620.root',
#'/store/data/Run2015D/JetHT/MINIAOD/16Dec2015-v1/00000/7E46D250-67B0-E511-BB96-0025905C3E66.root'
)
)
#process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(100) )
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
#process.load("Configuration.StandardSequences.Geometry_cff")
#process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_condDBv2_cff")
process.load("Configuration.Geometry.GeometryRecoDB_cff")
#process.GlobalTag.globaltag = cms.string('POSTLS170_V5')
process.load("Configuration.StandardSequences.MagneticField_cff")
from Configuration.AlCa.GlobalTag import GlobalTag
#process.GlobalTag = GlobalTag(process.GlobalTag,'GR_P_V56::All')
#process.GlobalTag = GlobalTag(process.GlobalTag,'GR_R_44_V11::All')
#process.GlobalTag = GlobalTag(process.GlobalTag,'74X_dataRun2_Prompt_v1')
#process.GlobalTag = GlobalTag(process.GlobalTag,'94X_dataRun2_ReReco_EOY17_v6')
#process.GlobalTag = GlobalTag(process.GlobalTag,'94X_dataRun2_ReReco_EOY17_v2')
process.GlobalTag = GlobalTag(process.GlobalTag,'94X_dataRun2_v6')
from PhysicsTools.PatAlgos.tools.coreTools import *
# produce PAT Layer 1
process.load("PhysicsTools.PatAlgos.patSequences_cff")
process.MessageLogger = cms.Service("MessageLogger",
cout = cms.untracked.PSet(
default = cms.untracked.PSet( ## kill all messages in the log
limit = cms.untracked.int32(0)
),
FwkJob = cms.untracked.PSet( ## but FwkJob category - those unlimitted
limit = cms.untracked.int32(-1)
)
),
categories = cms.untracked.vstring('FwkJob'),
destinations = cms.untracked.vstring('cout')
)
#process.load("HLTrigger.HLTcore.hltPrescaleRecorder_cfi")
#ak5 PF & Gen Jets
#from RecoJets.JetProducers.ak5PFJets_cfi import ak5PFJets
#from RecoJets.JetProducers.ak5GenJets_cfi import ak5GenJets
#from RecoMET.METProducers.PFMET_cfi import pfMet
#process.ak5PFJets = ak5PFJets.clone(src = 'packedPFCandidates')
#process.ak5GenJets = ak5GenJets.clone(src = 'packedGenParticles')
# Select candidates that would pass CHS requirements
#process.chs = cms.EDFilter("CandPtrSelector", src = cms.InputTag("packedPFCandidates"), cut = cms.string("fromPV"))
#makes chs ak5 jets (instead of ak4 that are default in miniAOD )
#process.ak5PFJetsCHS = ak5PFJets.clone(src = 'chs')
# Output ROOT file for every histogram/tree booked via TFileService.
process.TFileService=cms.Service("TFileService",
                                 fileName=cms.string("Test_Data_QCD_char_2017.root")
)
print "test1"
# Event-shape analyzer: consumes the standard miniAOD collections.
# Commented-out parameters are alternative/disabled inputs kept for reference.
process.analyzeBasicPat = cms.EDAnalyzer("QCDEventShape",
#   photonSrc = cms.untracked.InputTag("cleanPatPhotons"),
#   electronSrc = cms.untracked.InputTag("cleanPatElectrons"),
#   muonSrc = cms.untracked.InputTag("cleanPatMuons"),
#   tauSrc = cms.untracked.InputTag("cleanPatTaus"),
    jetSrc = cms.InputTag("slimmedJets"),
    metSrc = cms.InputTag("slimmedMETs"),
    genSrc = cms.untracked.InputTag("packedGenParticles"),
    pfSrc = cms.InputTag("packedPFCandidates"),
    bits = cms.InputTag("TriggerResults","","HLT"),
    prescales = cms.InputTag("patTrigger"),
    objects = cms.InputTag("selectedPatTrigger"),
    vertices = cms.InputTag("offlineSlimmedPrimaryVertices"),
    bsSrc = cms.InputTag("offlineBeamSpot"),
    genjetSrc = cms.InputTag("slimmedGenJets"),
    pileupSrc =cms.InputTag("slimmedAddPileupInfo"),
    ak5pfJetSrc = cms.InputTag("ak5PFJets"),
    ak5genJetSrc = cms.InputTag("ak5GenJets"),
    evtinfo =cms.InputTag("generator"),
    rho = cms.InputTag('fixedGridRhoAll'),
    # LHE products and PDF reweighting inputs (unused for data runs).
    LHEEventProductInputTag = cms.InputTag('externalLHEProducer'),
    LHERunInfoProductInputTag = cms.InputTag('externalLHEProducer'),
    PDFCTEQWeightsInputTag = cms.InputTag('pdfWeights:CT14'),
    PDFMMTHWeightsInputTag = cms.InputTag('pdfWeights:MMHT2014lo68cl'),
    PDFNNPDFWeightsInputTag = cms.InputTag('pdfWeights:NNPDF30'),
#ak5PFJetCHSSrc = cms.InputTag("ak5PFJetsCHS")
    RootFileName = cms.untracked.string('pythia8_test_13tev.root'),
    # Analysis switches: this job runs on data (no MC truth, no gen jets).
    GenJET = cms.untracked.bool(False),
    HistFill = cms.untracked.bool(True),
    MonteCarlo = cms.untracked.bool(False),
    ParticleLabel = cms.untracked.bool(False),
    Reconstruct =cms.untracked.bool(True),
# EtaRange = cms.untracked.double(5.0),
# PtThreshold = cms.untracked.double(12.0),
    # Kinematic selection for charged-particle event shapes.
    EtaRange = cms.untracked.double(3.0),
    PtThreshold = cms.untracked.double(55.0), #effective is 21
    LeadingPtThreshold = cms.untracked.double(150.0), #effective is 81
# scaleFactorsFile = cms.FileInPath('CondFormats/JetMETObjects/data/Summer15_V0_MC_JER_AK4PFchs.txt'),
# resolutionsFile = cms.FileInPath('CondFormats/JetMETObjects/data/Summer15_V0_MC_JER_AK4PFchs.txt'),
# scaleFactorsFile = cms.FileInPath('Fall15_25nsV2_MC_SF_AK4PFchs.txt'),
# resolutionsFile = cms.FileInPath('Fall15_25nsV2_MC_PtResolution_AK4PFchs.txt'),
# scaleFactorsFile = cms.FileInPath('Fall15_25nsV2_MC_SF_AK4PFchs.txt'),
# resolutionsFile = cms.FileInPath('Fall15_25nsV2_MC_PtResolution_AK4PFchs.txt'),
)
#process.ak5PFJets = ak5PFJets.clone(src = 'packedPFCandidates')
#process.analyzeBasicPat.append("keep *_ak5PFJets_*_EX")
#process.analyzeBasicPat.append("keep *_ak5PFJetsCHS_*_EX")
# Single-path schedule: run only the analyzer.
process.p = cms.Path(process.analyzeBasicPat)
print "test2"
#process.p = cms.Path(process.ak5PFJets*process.ak5GenJets*process.analyzeBasicPat)
| [
"skundu91phys@gmail.com"
] | skundu91phys@gmail.com |
a4764a0a2e167cbde191dc1e9237743b359681a0 | a151cb067175a2e262fcee632e28382344a4fe07 | /support/proxyControl.py | 33aecfec2d243303baf2e4a467649b520c1964bd | [
"Apache-2.0"
] | permissive | mspieth/hdhr_browser_recorder | 9d92c15b1bc884da480a53b926dbea11efa229dd | 46c2af1dffe3d2ca8f505f97db57e97a3e9d711c | refs/heads/master | 2021-06-21T06:50:10.856287 | 2019-12-22T23:10:43 | 2019-12-22T23:10:43 | 194,482,825 | 1 | 0 | Apache-2.0 | 2021-06-11T17:56:55 | 2019-06-30T06:24:11 | Python | UTF-8 | Python | false | false | 14,328 | py | from io import BytesIO
import os
import socket
import struct
import asyncio
from contextlib import suppress
from . import crc32c
import logging
logger = logging.getLogger(__name__)
HDHOMERUN_DISCOVER_UDP_PORT = 65001
HDHOMERUN_CONTROL_TCP_PORT = 65001
HDHOMERUN_MAX_PACKET_SIZE = 1460
HDHOMERUN_MAX_PAYLOAD_SIZE = 1452
HDHOMERUN_TYPE_DISCOVER_REQ = 0x0002
HDHOMERUN_TYPE_DISCOVER_RPY = 0x0003
HDHOMERUN_TYPE_GETSET_REQ = 0x0004
HDHOMERUN_TYPE_GETSET_RPY = 0x0005
HDHOMERUN_TYPE_UPGRADE_REQ = 0x0006
HDHOMERUN_TYPE_UPGRADE_RPY = 0x0007
HDHOMERUN_TAG_DEVICE_TYPE = 0x01
HDHOMERUN_TAG_DEVICE_ID = 0x02
HDHOMERUN_TAG_GETSET_NAME = 0x03
HDHOMERUN_TAG_GETSET_VALUE = 0x04
HDHOMERUN_TAG_GETSET_LOCKKEY = 0x15
HDHOMERUN_TAG_ERROR_MESSAGE = 0x05
HDHOMERUN_TAG_TUNER_COUNT = 0x10
HDHOMERUN_TAG_DEVICE_AUTH_BIN = 0x29
HDHOMERUN_TAG_BASE_URL = 0x2A
HDHOMERUN_TAG_DEVICE_AUTH_STR = 0x2B
HDHOMERUN_DEVICE_TYPE_WILDCARD = 0xFFFFFFFF
HDHOMERUN_DEVICE_TYPE_TUNER = 0x00000001
HDHOMERUN_DEVICE_ID_WILDCARD = 0xFFFFFFFF
verbose = 0
config = {
'FriendlyName': 'HDHRBrowserRecorderProxy',
'Manufacturer': 'Silicondust',
'ModelNumber': 'HDTC-2USA',
'FirmwareName': 'hdhomeruntc_atsc',
'TunerCount': 1,
'FirmwareVersion': '20190410',
'DeviceID': '12345687',
'DeviceAuth': 'test1234',
'BaseURL': '',
'LineupURL': '',
}
info = {
'/tuner0/lockkey': 'none',
'/tuner0/channelmap': 'us-cable',
'/tuner0/target': 'udp://127.0.0.1:55555',
'/tuner0/channel': 'none',
}
def get_set(d, k, v, lockkey):
    """Get or set d[k] and return the resulting value wrapped in a list.

    v is None  -> pure read of d[k].
    otherwise  -> store v under k.
    `lockkey` is accepted for call-signature compatibility but not enforced.
    """
    if v is None:
        current = d[k]
    else:
        d[k] = v
        current = v
    return [current]
def get_only(d, k):
    # Read-only accessor: look up *k* in mapping *d* (raises KeyError if absent).
    return d[k]
def get_tuner_status(tuner):
    """Return an hdhomerun_config-style status line for tuner *tuner*."""
    # Example of the real device's format:
    # 'ch=qam:33 lock=qam256 ss=83 snq=90 seq=100 bps=38807712 pps=0'
    channel_info = info[f'/tuner{tuner}/channel']
    logger.info(f'Channel {channel_info} status')
    if info[f'/tuner{tuner}/channel'] != 'none':
        mod, channel = channel_info.split(':')
        # NOTE(review): the parsed modulation is discarded and the status is
        # always reported as plain 'qam' with fixed signal-quality numbers —
        # confirm this simplification is intentional for the simulator.
        mod = 'qam'
        return f'ch={mod}:{channel} lock={mod} ss=83 snq=90 seq=100 bps=38807712 pps=0'
    # Idle tuner: everything zeroed.
    return f'ch=none lock=none ss=0 snq=0 seq=0 bps=0 pps=0'
def get_tuner_streaminfo(tuner):
    """Return a one-line program listing for the tuned channel, or None if idle."""
    channel_info = info[f'/tuner{tuner}/channel']
    logger.info(f'Channel {channel_info} status')
    if info[f'/tuner{tuner}/channel'] != 'none':
        mod, channel = channel_info.split(':')
        # Map the tuned value back to a virtual channel number.
        # NOTE(review): assumes `channel` encodes a frequency (100 kHz units,
        # 45 MHz base, 6 MHz spacing) — confirm against the frontend encoding.
        channel = (int(channel)//100000 - 450) // 60
        return f"{channel}: {channel} My Channel\n"
    return None
# Dispatch table for the get/set control protocol: each handler is invoked
# as item(value, lockkey) by process_packet() — value is None for a GET.
get_set_values = {
    '/sys/model': lambda v, lk: get_only(config, 'ModelNumber'),
    '/tuner0/channelmap': lambda v, lk: get_only(info, '/tuner0/channelmap'),
    '/tuner0/lockkey': lambda v, lk: get_set(info, '/tuner0/lockkey', v, lk),
    '/tuner0/target': lambda v, lk: get_set(info, '/tuner0/target', v, lk),
    '/tuner0/channel': lambda v, lk: get_set(info, '/tuner0/channel', v, lk),
    '/tuner0/status': lambda v, lk: get_tuner_status(0),
    '/tuner0/streaminfo': lambda v, lk: get_tuner_streaminfo(0),
}
# BUG FIX: the 'help' handler previously took a single argument, but every
# call site passes two (value, lockkey), so asking for 'help' raised
# TypeError. It now matches the common handler signature.
get_set_values['help'] = lambda v, lk: [x for x in get_set_values.keys()]
def set_config(new_config):
    # Replace the module-level device configuration wholesale (no merging,
    # no validation of required keys).
    global config
    config = new_config
def hdhomerun_validate_device_id(device_id: int) -> bool:
    """Return True when the 32-bit device id has a valid embedded checksum.

    Odd nibble positions (7, 5, 3, 1) are substituted through the lookup
    table, even positions are taken verbatim; a valid id XORs to zero.
    """
    lookup_table = [0xA, 0x5, 0xF, 0x6, 0x7, 0xC, 0x1, 0xB, 0x9, 0x2, 0x8, 0xD, 0x4, 0x3, 0xE, 0x0]
    checksum = 0
    for shift in range(28, -4, -4):
        nibble = (device_id >> shift) & 0x0F
        if (shift // 4) % 2 == 1:
            checksum ^= lookup_table[nibble]
        else:
            checksum ^= nibble
    return checksum == 0
def retrieve_type_and_payload(packet):
    """Split a raw HDHomeRun packet into (type, payload).

    Returns False (not a tuple!) on a length or CRC mismatch, so callers
    must check the result before unpacking it.
    """
    # Layout: 4-byte header | payload | 4-byte CRC trailer.
    header = packet[:4]
    checksum = packet[-4:]
    payload = packet[4:-4]
    # Header is big-endian: 16-bit packet type, 16-bit payload length.
    packet_type, payload_length = struct.unpack('>HH', header)
    if payload_length != len(payload):
        logger.info('Bad packet payload length')
        return False
    # The trailing CRC covers header + payload.
    if checksum != struct.pack('>I', crc32c.cksum(header + payload)):
        logger.info('Bad checksum')
        return False
    return packet_type, payload
def create_packet(packet_type, payload):
    """Frame *payload* as an HDHomeRun packet: type/length header + CRC trailer."""
    # Big-endian 16-bit type and 16-bit payload length.
    header = struct.pack('>HH', packet_type, len(payload))
    data = header + payload
    # CRC covers header + payload, appended big-endian.
    checksum = crc32c.cksum(data)
    packet = data + struct.pack('>I', checksum)
    return packet
def process_packet(packet, client, log_prefix=''):
    """Decode one HDHomeRun control packet and build the reply packet.

    packet     -- raw request bytes (header + payload + CRC trailer)
    client     -- (host, port) tuple of the requester, used for logging
    log_prefix -- tag prepended to every log line

    Returns the encoded reply (bytes), or False when the request is
    malformed or of an unhandled type.
    """
    decoded = retrieve_type_and_payload(packet)
    if not decoded:
        # BUG FIX: retrieve_type_and_payload() returns False on a bad length
        # or checksum; the old code unpacked the result unconditionally,
        # raising TypeError on any malformed packet.
        logger.info(log_prefix + 'Malformed packet received from ' + client[0])
        return False
    packet_type, request_payload = decoded
    logger.info(log_prefix + f'Processing Packet {packet_type}')
    if packet_type == HDHOMERUN_TYPE_DISCOVER_REQ:
        logger.info(log_prefix + 'Discovery request received from ' + client[0])
        # Device Type (tuner)
        response_payload = struct.pack('>BBI', HDHOMERUN_TAG_DEVICE_TYPE, 0x04, HDHOMERUN_DEVICE_TYPE_TUNER)
        # Device ID
        response_payload += struct.pack('>BBI', HDHOMERUN_TAG_DEVICE_ID, 0x04, int(config['DeviceID'], 10))
        # Advertise the HTTP base URL.
        # NOTE(review): the first copy is tagged HDHOMERUN_TAG_GETSET_NAME,
        # which looks like a copy/paste slip — confirm against the protocol.
        response_payload += struct.pack('>BB{0}s'.format(len(config['BaseURL'])), HDHOMERUN_TAG_GETSET_NAME,
                                        len(config['BaseURL'].encode('utf-8')), config['BaseURL'].encode('utf-8'))
        response_payload += struct.pack('>BB{0}s'.format(len(config['BaseURL'])), HDHOMERUN_TAG_BASE_URL,
                                        len(config['BaseURL'].encode('utf-8')), config['BaseURL'].encode('utf-8'))
        # Tuner count
        response_payload += struct.pack('>BBB', HDHOMERUN_TAG_TUNER_COUNT, 0x01, config['TunerCount'])
        return create_packet(HDHOMERUN_TYPE_DISCOVER_RPY, response_payload)
    # TODO: Implement request types
    if packet_type == HDHOMERUN_TYPE_GETSET_REQ:
        logger.info(log_prefix + 'Get set request received from ' + client[0])
        get_set_name = None
        get_set_value = None
        payload_io = BytesIO(request_payload)
        lockkey = ''
        # Walk the TLV (tag / length / value) entries of the payload.
        while True:
            header = payload_io.read(2)
            if not header:
                break
            tag, length = struct.unpack('>BB', header)
            if length > 127:
                # Two-byte length encoding: low 7 bits + (msb << 7).
                header_extra = payload_io.read(1)
                length_msb, = struct.unpack('>B', header_extra)
                if length_msb > 127:
                    logger.info(log_prefix + 'Unable to determine tag length, the correct way to determine '
                                             'a length larger than 127 must still be implemented.')
                    return False
                length = (length & 127) + (length_msb << 7)
            logger.info(f'Tag {tag}')
            # TODO: Implement other tags
            if tag == HDHOMERUN_TAG_GETSET_NAME:
                # NUL-terminated string value.
                get_set_name, zero = struct.unpack('>{0}sB'.format(length - 1), payload_io.read(length))
            elif tag == HDHOMERUN_TAG_GETSET_VALUE:
                get_set_value, zero = struct.unpack('>{0}sB'.format(length - 1), payload_io.read(length))
            elif tag == HDHOMERUN_TAG_GETSET_LOCKKEY:
                # 32-bit lock key accompanying a set request.
                lockkey, = struct.unpack('>I'.format(length), payload_io.read(length))
                logger.info(f'lockkey {lockkey}')
            else:
                # Unknown tag: log its raw bytes and keep scanning.
                logger.warning(f'Unknown tag {tag} length {length}')
                p = struct.unpack('>{0}s'.format(length), payload_io.read(length))
                logger.info(f' data {p}')
                continue
        logger.info(f'Name {get_set_name} Value {get_set_value}')
        if get_set_name is None:
            return False
        else:
            name = get_set_name.decode('utf-8')
            logger.info(f'Name is {name}')
            if name in get_set_values:
                item = get_set_values[name]
                if get_set_value is None:
                    # GET: callable handlers are invoked as item(value, lockkey);
                    # plain entries are returned as-is.
                    if callable(item):
                        get_set_value = item(None, None)
                    else:
                        get_set_value = item
                    if get_set_value is not None:
                        if not isinstance(get_set_value, list):
                            get_set_value = [get_set_value]
                        try:
                            get_set_value_orig = get_set_value
                            get_set_value = [x.encode('utf-8') for x in get_set_value_orig]
                        except AttributeError:
                            logger.exception(f'bad data {get_set_value_orig}')
                else:
                    # SET: forward the decoded value (and lock key) to the handler,
                    # then echo the raw value back.
                    if callable(item):
                        item(get_set_value.decode('utf-8'), lockkey)
                    if not isinstance(get_set_value, list):
                        get_set_value = [get_set_value]
                response_payload = b''
                if get_set_value is not None and None not in get_set_value:
                    logger.info(f'Value is {get_set_value}')
                    for x in get_set_value:
                        # Each value is emitted as a NUL-terminated string TLV.
                        response_payload += struct.pack('>BB{0}sB'.format(len(x)), HDHOMERUN_TAG_GETSET_VALUE,
                                                        len(x) + 1, x, 0)
                return create_packet(HDHOMERUN_TYPE_GETSET_RPY, response_payload)
    logger.info(log_prefix + 'Unhandled request %02x received from %s' % (packet_type, client[0]))
    return False
class HDHomeRunTcpServerProtocol(asyncio.Protocol):
    """One-shot asyncio protocol answering HDHomeRun control requests over TCP."""
    def __init__(self, *cwargs, **kwargs):
        super().__init__(*cwargs, **kwargs)
        self.log_prefix = 'TCP Server - '
    def connection_made(self, transport):
        # Remember the peer for logging and keep the transport for replies.
        self.peername = transport.get_extra_info('peername')
        logger.info(self.log_prefix + 'Connection from {}'.format(self.peername))
        self.transport = transport
    def data_received(self, data):
        # Treat each read as one complete control packet.
        # NOTE(review): assumes the whole packet arrives in a single TCP
        # segment — confirm framing; TCP does not preserve message boundaries.
        response_packet = process_packet(data, self.peername)
        if response_packet:
            logger.info(self.log_prefix + f'Sending TCP reply over TCP to {self.peername}')
            self.transport.write(response_packet)
        else:
            logger.info(self.log_prefix + 'No valid TCP request received, nothing to send to client')
        # One request per connection: always close after replying.
        logger.info(self.log_prefix + 'Close the client socket')
        self.transport.close()
class HDHomeRunUdpProtocol(asyncio.DatagramProtocol):
    """Datagram protocol answering HDHomeRun discovery/control packets over UDP."""
    def __init__(self, loop):
        super().__init__()
        self.transport = None
        self.loop = loop
        self.log_prefix = 'UDP Server - '
        # Resolved once the endpoint goes away; HDHomeRunUdpServer awaits it.
        self.on_connection_lost = loop.create_future()
    def connection_made(self, transport):
        # logger.info('connected')
        self.transport = transport
    def connection_lost(self, ex):
        # ex is None on a clean close (e.g. transport.close() from stop()).
        if ex is not None:
            self.transport.close()
        self.on_connection_lost.set_result(True)
    def datagram_received(self, data, addr):
        # Each datagram is one complete request; reply to the sender address.
        response_packet = process_packet(data, addr)
        if response_packet:
            logger.info(self.log_prefix + f'Sending UDP reply over udp to {addr}')
            self.transport.sendto(response_packet, addr)
        else:
            logger.info(self.log_prefix + 'No discovery request received, nothing to send to client')
class HDHomeRunTcpServer:
    """Background task hosting the TCP control server until stop() is awaited."""
    def __init__(self, bind_addr='192.168.1.1', bind_port=HDHOMERUN_DISCOVER_UDP_PORT):
        # Must be constructed while an event loop is running
        # (asyncio.create_task requires one).
        self._task = asyncio.create_task(self._serve(bind_addr, bind_port))
        self._transport = None
        self._protocol = None
        self._server = None
    async def _serve(self, bind_addr, bind_port):
        loop = asyncio.get_running_loop()
        try:
            server = await loop.create_server(
                lambda: HDHomeRunTcpServerProtocol(),
                bind_addr, bind_port)
            logger.info(f'HDHR TCP Server listening on {bind_addr, bind_port}')
            async with server:
                await server.serve_forever()
        except asyncio.CancelledError:
            # Normal shutdown path: stop() cancels this task; swallowing the
            # cancellation lets `await self._task` in stop() return cleanly.
            logger.info('HDHR TCP Server cancelled')
        logger.info('HDHR TCP Server closed')
    async def stop(self):
        # logger.info('tcp stop 1')
        # self._transport.close()
        self._task.cancel()
        # logger.info('tcp stop 2')
        await self._task
        # logger.info('tcp stop 3')
        self._task = None
class HDHomeRunUdpServer:
    """Background task hosting the UDP discovery endpoint until stop() is awaited."""
    def __init__(self, bind_addr='192.168.1.1', bind_port=HDHOMERUN_DISCOVER_UDP_PORT):
        # Must be constructed while an event loop is running.
        self._task = asyncio.create_task(self._serve(bind_addr, bind_port))
        self._transport = None
        self._protocol = None
        self._server = None
    async def _serve(self, bind_addr, bind_port):
        loop = asyncio.get_running_loop()
        # logger.info('_serve')
        # Broadcast is always enabled so discovery packets sent to the
        # subnet broadcast address are received.
        is_broadcast = True # bind_addr.endswith('.255')
        try:
            self._server = loop.create_datagram_endpoint(lambda _loop=loop: HDHomeRunUdpProtocol(_loop),
                                                         local_addr=(bind_addr, bind_port),
                                                         allow_broadcast=is_broadcast)
            self._transport, self._protocol = await self._server
            # logger.info('_serve create done')
            sock_addr, sock_port = self._transport.get_extra_info("sockname")
            logger.info(f'HDHR UDP Server listening on {sock_addr, sock_port}')
            # Runs until the protocol reports the endpoint closed (see stop()).
            await self._protocol.on_connection_lost
        except asyncio.CancelledError:
            logger.info('HDHR UDP Server cancelled')
        logger.info('HDHR UDP Server closed')
    async def stop(self):
        # logger.info('udp stop 1')
        # Closing the transport triggers connection_lost(), which resolves
        # on_connection_lost and lets _serve (and thus _task) finish.
        self._transport.close()
        # logger.info('udp stop 2')
        await self._task
        # logger.info('udp stop 3')
        self._task = None
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--port_type')
    parser.add_argument('--verbose', type=int, default=0)
    args = parser.parse_args()
    verbose = args.verbose
    async def _serve_forever():
        # Constructed inside a running loop: the server __init__ methods
        # call asyncio.create_task(), which requires an active event loop.
        if args.port_type == 'tcp':
            server = HDHomeRunTcpServer()
        else:
            server = HDHomeRunUdpServer()
        await server._task
    try:
        # BUG FIX: the old code called tcp_server()/udp_server(), neither of
        # which exists anywhere in this module (immediate NameError). Run
        # the corresponding server class instead.
        asyncio.run(_serve_forever())
    except KeyboardInterrupt:
        exit(0)
| [
"mspieth@digivation.com.au"
] | mspieth@digivation.com.au |
b273a522405da407d7497a6ed50c640a6b3b2718 | 435a186432a75738f2f50e23376e88939cf483eb | /examine_plugin.py | 786af0cc202637b572f397b79b3ce4e380a0602c | [] | no_license | dwink/gst-examples | 1f5f3fc3ae571175b233b7748af6c036ad1394d8 | 1d0e29056442bb8d1b6584f03cbf09bdb286d688 | refs/heads/master | 2021-03-12T20:27:14.358893 | 2014-06-06T12:00:31 | 2014-06-06T12:00:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 868 | py | #!/usr/bin/env python3
# Examine a Plugin
import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst
Gst.init(None)
def main():
print("Using Gst: {}.{}.{}.{}".format(*Gst.version()))
registry = Gst.Registry.get()
playback = registry.find_plugin("playback")
if playback is not None:
print("{}: {} {} {} {}".format(
playback.get_name(),
playback.get_version(),
playback.get_source(),
playback.get_package(),
playback.get_version())
)
playbin = Gst.ElementFactory.make("playbin", None)
# This utility function allows you to set properties
# using the same syntax as gst-launch.
Gst.util_set_object_arg(playbin, "flags", "video+buffering")
print(playbin.get_property("flags"))
if __name__ == "__main__":
main()
| [
"dave@dwink.net"
] | dave@dwink.net |
3313cca37c36ff5633dd7b08a512e1906a874299 | ac73eca2536edba153903328a2d0be7479f2d396 | /reverse.py | 780fb3839e87668b021a31b342cb336f43c4e21f | [] | no_license | fcamilalima/exercicios-python | cf7e63e8734172258da0efd4a330a1f2e4ecdb06 | eb32bd6b6e3e98a025804076af1348593826486b | refs/heads/main | 2023-07-31T06:30:22.558663 | 2021-09-30T23:50:02 | 2021-09-30T23:50:02 | 410,785,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | def reverse(filename):
f =open(filename).read().split('\n')
g=f[::-1]
for i in g:
print(i)
return g
reverse('she.txt')
| [
"noreply@github.com"
] | noreply@github.com |
967aae2c9b64081141856dee77e6af6a40952980 | d637643039a2bdb97aef3485be24c6fc9bdc7177 | /RpiArduinoCommunication/RPiArduino/readSerialData.py | 9786df6cd2696e9fd0c5cfaddd787b210ab4b18a | [] | no_license | BusraOzer/InternetofThingsExamples | d3a437ca45112bbe05555c168aee596ffb714138 | b4857cc33db86b68140a5dfc24551f3c185734de | refs/heads/master | 2021-06-21T02:33:50.411855 | 2017-08-14T19:05:29 | 2017-08-14T19:05:29 | 100,300,236 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 198 | py | import serial
#Arduino'nun dosya yolu ve 9600 bps
#Eger bu dosya yolunu ogrenmek istiyorsaniz ls /dev/tty* i kullanabilirsiniz
ser=serial.Serial('/dev/ttyUSB0',9600)
while 1:
print(ser.readline())
| [
"busraozer.64@gmail.com"
] | busraozer.64@gmail.com |
0c8cb171fb64ea11da3f0d44dcaede3ef4a1c947 | 3f8f0ea5b5bbf2e6b0d569db078200d07b4f3b10 | /DjangoApp/anubis/models.py | 5ada46652cce7ae02e237a8dd6f37b513812fe62 | [] | no_license | Lsmuel/ETEtapa1_SamuelHurtado_007D | 8a401a3797eafc4743eaed9f0a5ded4fa0dbeba1 | e0df2df933caf5b277fac9d29ba0d1a4f4280784 | refs/heads/main | 2023-06-26T11:04:44.731804 | 2021-07-06T21:50:11 | 2021-07-06T21:50:11 | 381,492,965 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 747 | py | from django.db import models
# Create your models here.
class Colaboradores(models.Model):
rut = models.CharField(primary_key=True, max_length=10, verbose_name='rut')
image = models.ImageField(upload_to='images/', null=True, blank = True)
nombreComp = models.CharField(max_length=40, verbose_name='Nombre completo')
telefono = models.CharField(max_length=9, verbose_name='Numero de telefono')
direccion = models.CharField(max_length=30, verbose_name='Direccion')
email = models.CharField(max_length=30, verbose_name='Email')
contrasenia = models.CharField(max_length=30, verbose_name='Contraseña')
pais = models.CharField(max_length=15, verbose_name='Pais')
def __str__(self):
return(self.nombreComp) | [
"aamacanitan@gmail.com"
] | aamacanitan@gmail.com |
93989c16fa5b859400d6592f55c2c95cc9606f81 | 723a064ec3e0273e30ccab9b1a3c8acb58aa7b45 | /beat/Descarga.py | 55a7c9a05fab77fad362070bcf78feb528fef7ee | [] | no_license | jorge11696/7.-CELERY-PYTHON | c3a3131b12a442b7847c691ebaf7eba06db67232 | 904eebe5bf3957d0f9c6158818fda382259cbc73 | refs/heads/master | 2022-12-06T06:20:27.371703 | 2020-08-15T11:06:25 | 2020-08-15T11:06:25 | 287,726,655 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,612 | py | from celery import Celery
from celery.schedules import crontab
from bs4 import BeautifulSoup
from urllib.request import Request, urlopen
import pprint
import json
import re
dl = Celery()
pp = pprint.PrettyPrinter(indent=4)
@dl.on_after_configure.connect
def setup_periodic_tasks(sender, **kwargs):
sender.add_periodic_task(10.0, descarga.s(), name='Descarga agenda Platzi')
@dl.task
def descarga():
print("Descargando")
req = Request('https://platzi.com/agenda',headers={'User-Agent': 'Mozilla/63.0'})
page = urlopen(req).read()
soup = BeautifulSoup( page, "lxml" )
agenda = soup.find_all('script')[26].text # Cambia algunas veces...
schedule = re.findall( r'scheduleItems: (.+?),\n', agenda )
datos = json.loads(schedule[0])
eventos = {}
for i in ['agenda_all','agenda_calendar']:
for j in datos[i]['agenda_items']:
este_dato = datos[i]['agenda_items'][j]
eventos[ este_dato['course'] ] = { "comienzo" : este_dato['start_time'],
"final" : este_dato['end_time'],
"tipo": este_dato['agenda_item_type'] }
for j in datos[i]['agenda_courses']:
este_dato = datos[i]['agenda_courses'][j]
course= datos[i]['agenda_courses'][j]['id']
eventos[course]['titulo'] = este_dato['title']
eventos[course]['URL'] = este_dato['url']
eventos[course]['lanzamiento'] = este_dato['launch_date']
# Imprime los resultados.
f=open("cursos.json","w")
f.write(json.dumps(eventos))
| [
"jorgewebempresa@gmail.com"
] | jorgewebempresa@gmail.com |
1d056c81396c0deb5c63b84b521470c9728e0747 | ce0e9c993482fcb4f20a9151a15a43864ffcf02e | /wtformsexer/venv/bin/easy_install | 957a86eb4257b9abe8fa8cc6bcb155150312634b | [] | no_license | apandora-seertech/pythonexer | e95c5b89b0e8ecb1239236c1bfdd73083b6f5253 | 75bcaafe79e699bc881375cd1a2c5192f3509d0d | refs/heads/master | 2021-05-29T19:57:09.281328 | 2015-10-23T14:57:36 | 2015-10-23T14:57:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 282 | #!/Users/andrewandora/flasktraining/wtformstraining/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"andrew.andora@seer-technologies.com"
] | andrew.andora@seer-technologies.com | |
12e7a08649d0b1c145f0beb96fbbc6ffee75a2d7 | 7f68e9cabee3d78cd9b312d6cc701a30e4a6b1c0 | /QODE/Applications/component_tests/test-Lanczos/degenerate-case/testLanczos.py | e42495363a706af58055316f6ca4fa9bbcccfc6c | [] | no_license | sskhan67/GPGPU-Programming- | b855ea4255d7917972e0bb25d3b262ae14b574ea | 9daa6ede30c8753ea7d4feb4b06bcc802578af53 | refs/heads/master | 2022-07-03T12:35:52.984388 | 2020-05-12T04:49:32 | 2020-05-12T04:49:32 | 256,408,543 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,591 | py | # (C) Copyright 2018 Anthony D. Dutoi
#
# This file is part of Qode.
#
# Qode is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Qode is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Qode. If not, see <http://www.gnu.org/licenses/>.
#
import sys
import time
import numpy
import qode
def io_setup(argv):
debug = False
numpy.set_printoptions(linewidth=200)
mat = numpy.load("input-output/Be2_5_0ang_h_spin.npy")
vecs = [ numpy.random.random( mat.shape[0] ) for _ in range(10) ]
#vecs = [ qode.util.basis_vector(i,mat.shape[0]) for i in range(10) ]
thresh = 1e-14
psize = 10
if debug:
print(mat.shape[0])
print(mat)
print(vecs)
return mat,vecs,thresh,psize
def control(mat,n):
debug = False
eigenvals,eigenvecs = numpy.linalg.eigh(mat)
if debug:
print(eigenvals)
diag = eigenvecs.T * mat * eigenvecs
print(diag)
print("Lowest eigenvalues from full diagonalization: {}".format(sorted(eigenvals)[:n]))
def experiment(mat,vecs,thresh,psize):
traits = qode.math.numpy_space.real_traits(len(vecs[0]))
S = qode.math.linear_inner_product_space(traits)
H = S.lin_op(mat)
v = [ S.member(vec) for vec in vecs ]
eigenpairs = qode.math.lanczos.lowest_eigen(H,v,thresh,psize)
print("Lowest eigenvalues from Lanczos extraction: {}".format(list(zip(*eigenpairs))[0]))
def main(argv):
debug = False
t0 = time.time()
#
mat,vecs,thresh,psize = io_setup(argv)
t1 = time.time()
#
if debug:
print("Skipping full LAPACK diagonalization.")
t2 = t1
else:
control(mat,len(vecs))
t2 = time.time()
#
experiment(mat,vecs,thresh,psize)
t3 = time.time()
#
tmat,tdiag,tlanc,ttot = (t1-t0)/60 , (t2-t1)/60 , (t3-t2)/60 , (t3-t0)/60
print("matrix build: {} min".format(tmat))
print("full diagonalization: {} min".format(tdiag))
print("lanczos extraction: {} min".format(tlanc))
print("Total: {} min".format(ttot))
if __name__ == "__main__": main(sys.argv[1:])
| [
"sayedkhan@dhcp-10-11-207-87.wireless.pacific.edu"
] | sayedkhan@dhcp-10-11-207-87.wireless.pacific.edu |
ffffee1166d50a0e214b19fc1af993b35be523e3 | b6739df812fbc321812310277407f6d90f1de8e7 | /Session2/SVM_with_sklearn.py | b6736252ed01b46d373b3dc900075dfd02a174b4 | [] | no_license | SonDuongHUST/DSLab_Training | eacb22511c47869c9c9ac44aa1624d6114dd5696 | 48cb4ef1ce009120b03e9fd53a0d70726f07f2b1 | refs/heads/master | 2022-09-26T17:24:53.021873 | 2020-06-09T07:25:09 | 2020-06-09T07:25:09 | 257,641,048 | 0 | 0 | null | 2020-04-22T00:17:51 | 2020-04-21T15:42:05 | Python | UTF-8 | Python | false | false | 1,732 | py |
import numpy as np
from sklearn.svm import LinearSVC
def load_data(path):
def sparse_to_dense(sparse_r_d, vocab_size):
r_d = [0.0 for i in range(vocab_size)]
ind_tfidf = sparse_r_d.split()
for index_tfidf in ind_tfidf:
index = int(index_tfidf.split(":")[0])
tfidf = float(index_tfidf.split(':')[1])
r_d[index] = tfidf
return np.array(r_d)
with open(path) as f:
d_lines = f.read().splitlines()
with open('words_idfs.txt') as f:
vocab_size = len(f.read().splitlines())
# self._data = []
# self._label_count = defaultdict(int)
x = []
label_data = []
for data_id, d in enumerate(d_lines):
features = d.split('<fff>')
label, doc_id = int(features[0]), int(features[1])
# self._label_count[label] += 1
r_d = sparse_to_dense(sparse_r_d=features[2], vocab_size=vocab_size)
# r_d là mảng các giá trị của văn bản d
# self._data.append(Member(r_d=r_d, label=label, doc_id=doc_id))
x.append(r_d)
label_data.append(label)
return np.array(x), np.array(label_data)
def compute_accuracy(predicted_y, test_y):
matchs= np.equal(predicted_y, test_y)
accuracy= np.sum(matchs.astype(float)/ test_y.size)
return accuracy
def classifying_with_linear_SVMs():
train_x, train_y= load_data("20news-train-tfidf.txt")
classifier= LinearSVC(
C=10.0,
tol=0.001,
verbose=True
)
classifier.fit(train_x,train_y)
test_x, test_y= load_data('20news-test-tfidf.txt')
predicted_y= classifier.predict(test_x)
accuracy= compute_accuracy(predicted_y, test_y)
print("accuracy: {}".format(accuracy))
classifying_with_linear_SVMs() | [
"duonghongson1999@gmail.com"
] | duonghongson1999@gmail.com |
a9826ba4a43ea9f0095b716f50ec0537f5ef5130 | c7ca46fd2c84cb82ff270144de9565c104858484 | /src/borg/testsuite/benchmark.py | 1e70a101f2f92f9f5b88df7fc86c0ef881bc6f88 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | aeveris/borg | 07f232124bf72034ecbd6162f0eb1ac731f37bc1 | 4041bdf1690c1988557827b5af80a9ebb760139c | refs/heads/master | 2023-02-14T21:31:43.134214 | 2021-01-11T15:51:49 | 2021-01-11T15:51:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,863 | py | """
Do benchmarks using pytest-benchmark.
Usage:
py.test --benchmark-only
"""
import os
import pytest
from .archiver import changedir, cmd
@pytest.fixture
def repo_url(request, tmpdir, monkeypatch):
monkeypatch.setenv('BORG_PASSPHRASE', '123456')
monkeypatch.setenv('BORG_CHECK_I_KNOW_WHAT_I_AM_DOING', 'YES')
monkeypatch.setenv('BORG_DELETE_I_KNOW_WHAT_I_AM_DOING', 'YES')
monkeypatch.setenv('BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK', 'yes')
monkeypatch.setenv('BORG_KEYS_DIR', str(tmpdir.join('keys')))
monkeypatch.setenv('BORG_CACHE_DIR', str(tmpdir.join('cache')))
yield str(tmpdir.join('repository'))
tmpdir.remove(rec=1)
@pytest.fixture(params=["none", "repokey"])
def repo(request, cmd, repo_url):
cmd('init', '--encryption', request.param, repo_url)
return repo_url
@pytest.fixture(scope='session', params=["zeros", "random"])
def testdata(request, tmpdir_factory):
count, size = 10, 1000*1000
p = tmpdir_factory.mktemp('data')
data_type = request.param
if data_type == 'zeros':
# do not use a binary zero (\0) to avoid sparse detection
def data(size):
return b'0' * size
elif data_type == 'random':
def data(size):
return os.urandom(size)
else:
raise ValueError("data_type must be 'random' or 'zeros'.")
for i in range(count):
with open(str(p.join(str(i))), "wb") as f:
f.write(data(size))
yield str(p)
p.remove(rec=1)
@pytest.fixture(params=['none', 'lz4'])
def archive(request, cmd, repo, testdata):
archive_url = repo + '::test'
cmd('create', '--compression', request.param, archive_url, testdata)
return archive_url
def test_create_none(benchmark, cmd, repo, testdata):
result, out = benchmark.pedantic(cmd, ('create', '--compression', 'none', repo + '::test', testdata))
assert result == 0
def test_create_lz4(benchmark, cmd, repo, testdata):
result, out = benchmark.pedantic(cmd, ('create', '--compression', 'lz4', repo + '::test', testdata))
assert result == 0
def test_extract(benchmark, cmd, archive, tmpdir):
with changedir(str(tmpdir)):
result, out = benchmark.pedantic(cmd, ('extract', archive))
assert result == 0
def test_delete(benchmark, cmd, archive):
result, out = benchmark.pedantic(cmd, ('delete', archive))
assert result == 0
def test_list(benchmark, cmd, archive):
result, out = benchmark(cmd, 'list', archive)
assert result == 0
def test_info(benchmark, cmd, archive):
result, out = benchmark(cmd, 'info', archive)
assert result == 0
def test_check(benchmark, cmd, archive):
repo = archive.split('::')[0]
result, out = benchmark(cmd, 'check', repo)
assert result == 0
def test_help(benchmark, cmd):
result, out = benchmark(cmd, 'help')
assert result == 0
| [
"tw@waldmann-edv.de"
] | tw@waldmann-edv.de |
19f120d58d3c87b2f9b24e49df5aa497f9961c58 | 11addfaa34cf33a50cdc2cc9616fd17e62337c87 | /support/scripts/w2adoc.py | 876edfa1debacbc8c0913fddcca866bf43594b4f | [] | no_license | sergey-yurchenko/BIK-App-Test | 27d5aeac9eff6591ca260ac8cec248592babdc35 | d3752d3a20695ac69a06b59ed9224f01bfb0134f | refs/heads/master | 2023-06-24T18:30:26.862307 | 2021-07-16T09:42:01 | 2021-07-16T09:42:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 717 | py | import os, sys
import subprocess
from typing import List
params = ""
cwd = os.getcwd()
file_list = os.listdir(cwd)
tdir = "adoc"
print("test")
for filename in file_list:
print(filename)
if filename.endswith(".docx"):
file_path = '"'+os.sep.join([cwd, filename])+'"'
adoc_path = '"'+os.sep.join([tdir, filename.replace(".docx", ".adoc")])\
+'"'
print(file_path)
print(adoc_path)
if not os.path.isdir(tdir):
os.mkdir(tdir)
params = "--wrap=none --atx-headers --extract-media=EXTRACTEDMEDIA \
-f docx -t asciidoc -o "+\
adoc_path+" "+file_path
subprocess.run("pandoc "+params, shell=True)
| [
"helms@dias.de"
] | helms@dias.de |
11dfab65b6124613750e7a9b42287f134b836d34 | 3c748e18e4955bf50f7f487d8733eb51cc0d18f8 | /jobscraper/bin/pip3.7 | af5e4a932993c427fb769f175329634d0506f073 | [
"MIT"
] | permissive | pyrish/scrapers | d1bf12d6fc1eb010278dc3b631afe51712e14f57 | c1f4e82dc7a054dfcd81d047406f78dd60b363b1 | refs/heads/master | 2021-04-15T14:00:20.132728 | 2019-10-06T05:32:46 | 2019-10-06T05:32:46 | 126,647,389 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | 7 | #!/Users/mariano/Desktop/scrapers/jobscraper/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"mariano.vazquez@live.com"
] | mariano.vazquez@live.com |
37f2a774f750224dc8e9aa45726c5e4d43d15a98 | 78cb6dadc7599e01b078682b175f21be673ed199 | /289. Game of Life.py | 8f9a3ce969e8b1a9a93adcdce1c8c56a5b245d10 | [] | no_license | AlexWufan/leetcode-python | 5cf5f13dbc7d1e425fde646df618e50c488fa79f | 435323a9fcea6a4d09266785e88fb78735e0cc3e | refs/heads/master | 2021-01-13T00:49:49.870468 | 2018-04-13T18:44:19 | 2018-04-13T18:44:19 | 51,347,271 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 205 | py | class Solution(object):
def gameOfLife(self, board):
"""
:type board: List[List[int]]
:rtype: void Do not return anything, modify board in-place instead.
"""
tbc | [
"mengnanszw@gmail.com"
] | mengnanszw@gmail.com |
bb3d996477ea3256b3518ad93eab79a70b2c47b7 | b8de48551cbadf1738f3eafeb738b80dbcfc4494 | /students/jjakub/lessons/lesson05/assignment/database.py | eb7a6eb2f1b38f347d204479fb034ccbed6fad20 | [] | no_license | UWPCE-PythonCert-ClassRepos/py220BV201901 | 41a6d4a73109ca4b5b22675972397e7445e6b8bd | 23ceab6bcb0b9f086d72dfad9b021ac0b53095e8 | refs/heads/master | 2020-04-15T19:22:48.976024 | 2019-03-18T14:16:23 | 2019-03-18T14:16:23 | 164,947,759 | 0 | 6 | null | 2019-03-18T14:16:24 | 2019-01-09T22:25:01 | Python | UTF-8 | Python | false | false | 5,133 | py | # Database
# pylint: disable=too-many-locals
""""
Module to handle database operations
"""
import csv
import datetime
import logging
import sys
from pathlib import Path
from pymongo import MongoClient
MONGO = MongoClient(host='127.0.0.1', port=27017)
DB = MONGO['HPN_database']
PRDCT = DB["product"]
CUST = DB["customer"]
RNTL = DB["rental"]
DB.PRDCT.create_index("product_id")
DB.CUST.create_index("user_id")
LOG_FORMAT = "%(asctime)s %(filename)s:%(lineno)-3d %(levelname)s %(message)s"
FORMATTER = logging.Formatter(LOG_FORMAT)
LOG_FILE = datetime.datetime.now().strftime("%Y-%m-%d") + ".log"
FILE_HANDLER = logging.FileHandler(LOG_FILE)
FILE_HANDLER.setFormatter(FORMATTER)
LOGGER = logging.getLogger()
LOGGER.addHandler(FILE_HANDLER)
LOGGER.setLevel("INFO")
def import_data(directory_name, product_file, customer_file, rental_file):
""" import csv files into database """
file_list = [product_file, customer_file, rental_file]
for file in file_list:
success_cnt, error_cnt = 0, 0
try:
data = open(Path(directory_name, file), "r", encoding="utf-8-sig")
header = next(csv.reader(data))
for row in csv.reader(data):
if file == file_list[0]:
DB.PRDCT.insert_one({header[0]:row[0],
header[1]:row[1],
header[2]:row[2],
header[3]:row[3]})
if DB.PRDCT.acknowledged:
success_cnt += 1
LOGGER.info("Item added to product collection: %s", row[0])
else:
error_cnt += 1
LOGGER.error("Prodcut collection insert error: %s", row[0])
prdct_success, prdct_error = success_cnt, error_cnt
if file == file_list[1]:
DB.CUST.insert_one({header[0]:row[0],
header[1]:row[1],
header[2]:row[2],
header[3]:row[3],
header[4]:row[4],
header[5]:row[5]})
if DB.CUST.acknowledged:
success_cnt += 1
LOGGER.info("Item added to customer collection: %s", row[0])
else:
error_cnt += 1
LOGGER.error("Customer collection insert error: %s", row[0])
cust_success, cust_error = success_cnt, error_cnt
if file == file_list[2]:
DB.RNTL.insert_one({header[0]:row[0],
header[1]:row[1]})
if DB.RNTL.acknowledged:
success_cnt += 1
LOGGER.info("Item added to rental collection: %s", row[0])
else:
error_cnt += 1
LOGGER.error("Rental collection insert error: %s", row[0])
rntl_success, rntl_error = success_cnt, error_cnt
except FileNotFoundError:
LOGGER.error("File does not exist: %s", Path(directory_name, file))
sys.exit(1)
return (prdct_success, cust_success, rntl_success), (prdct_error, cust_error, rntl_error)
def show_available_products():
""" list products with available inventory """
result = {}
for doc in DB.PRDCT.find({"quantity_available": {"$gt": "0"}}):
result[doc["product_id"]] = {"description" : doc["description"],
"product_type" : doc["product_type"],
"quantity_available" : doc["quantity_available"]}
return result
def show_rentals(product_id):
""" show information for customers that have rented a product """
user_list, result = [], {}
for doc in DB.RNTL.find({"product_id": product_id}):
user_list.append(doc["user_id"])
if user_list == []:
LOGGER.info("Product ID not found: %s", product_id)
else:
for doc in DB.CUST.find({"user_id": {"$in": user_list}}):
result[doc["user_id"]] = {"name" : doc["name"],
"address" : doc["address"],
"phone_number" : doc["phone_number"],
"email" : doc["email"]}
LOGGER.info("Customer data reported: %s", doc["user_id"])
return result
def delete_all():
""" delete all data from collections """
prdct_result = DB.PRDCT.delete_many({})
LOGGER.info("Items deleted from product collection: %s", prdct_result.deleted_count)
cust_result = DB.CUST.delete_many({})
LOGGER.info("Items deleted from customer collection: %s", cust_result.deleted_count)
rntl_result = DB.RNTL.delete_many({})
LOGGER.info("Items deleted from rental collection: %s", cust_result.deleted_count)
return prdct_result.deleted_count, cust_result.deleted_count, rntl_result.deleted_count
| [
"Jason.Jakubiak@t-mobile.com"
] | Jason.Jakubiak@t-mobile.com |
093eb01cb9bf4ae11cfb9818c5f70388bfd695f6 | 7bd4b4559ff802e3d7025023b71ea8e3820f906d | /download_free_PTtorrents_v2_debug_py2.py | a42c521b48863beccd3fa9e8e5a42368910472fd | [] | no_license | monokoo/ptscripts | 27da300cd6c4142f001bd5405966b90f0be64d37 | 0cce42dcd88689767b2abb83eef57f944192cdb6 | refs/heads/master | 2020-05-25T19:16:08.503194 | 2019-05-13T07:06:07 | 2019-05-13T07:06:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,807 | py | import bs4
import requests
import re
import os
# Complete the variables below:
# Some examples:
#site_name = "M-TEAM"
#site_url = "https://tp.m-team.cc/torrents.php"
#site_cookie = "c_lang_folder=cht; tp=I2ODOGYNDFmZDdhYWGI2ODU3ZDA1ZU3ZDAYxNDFmZDdhYWRhZmRlOA%3D%3D"
#url_half = "https://tp.m-team.cc/download.php?id="
site_name = "xxxxx"
site_url = "https://xxxxxxxxx/torrents.php"
site_cookie = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
url_half = "https://xxxxxxxxxxxxxxxxx/download.php?id="
# Check the download url, especially when you are using a https(SSL) url.
# Some torrents' download pages url could be "https://tp.m-team.cc/download.php?id=xxxxx&https=1", in this case, you need to assign the variable of "url_last". Examples:
# url_last = "&https=1"
url_last = ""
# If you couldn't downlaod the torrents to your directory where the *.py script is, you could just define the variables below. Make sure the format of your path because of the difference between Windows and Linux.
# Example of windows: monitor_path = r'C:\\Users\\DELL-01\\Desktop\\' Don't forget the last '\\'
# Example of Linux and MacOS: monitor_path = r'/home/user/Downloads/' Don't forget the last '/'
monitor_path = r''
# Other informations for safer sites. Complete it if you cannot download torrents.
# Some examples:
#user_agent = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36'
#referer = 'https://tp.m-team.cc/login.php'
#host = 'tp.m-team.cc'
user_agent = ''
referer = ''
host = ''
##^^^^^^^^^^^^^^^^^^^^^^^ YOU ONLY NEED TO ASSIGN VARIABLES SHOWS BEFORE ^^^^^^^^^^^^^^^^^^^^^^^^^^^##
# Using Session to keep cookie
cookie_dict = {"cookie":site_cookie}
s = requests.Session()
s.cookies.update(cookie_dict)
my_headers = {'User-Agent':user_agent, 'Referer':referer, 'Host':host}
pattern = r'id=(\d+)'
#####
class Torrents():
'''
Define a torrent
'''
def __init__(self,torrent):
self.torrent = torrent
def __str__(self):
return '{}_{}.torrent'.format(site_name,self.torrent[0])
def DL(self):
'''
A function to download a free torrent.
'''
down_url = url_half + self.torrent[0] + url_last
if self.torrent[1]:
res = s.get(down_url)
#vvvvvvvvvvvvvvvvvvvvv# Command for debug #vvvvvvvvvvvvvvvvv# Command for debug #vvvvvvvvvvvvvvvvvvvvvv#
print('\n\nPrinting the download statements: ')
try:
print('Downloading' + self.__str__())
except:
print('Cannot print the torrent name.')
try:
print('Writing torrent to your path ...')
#^^^^^^^^^^^^^^^^^^^^^# Command for debug #^^^^^^^^^^^^^^^^^# Command for debug #^^^^^^^^^^^^^^^^^^^^^^#
with open(monitor_path + self.__str__(),'wb') as f:
f.write(res.content)
#vvvvvvvvvvvvvvvvvvvvv# Command for debug #vvvvvvvvvvvvvvvvv# Command for debug #vvvvvvvvvvvvvvvvvvvvvv#
except:
print('Cannot write torrent file in your path!!')
#^^^^^^^^^^^^^^^^^^^^^# Command for debug #^^^^^^^^^^^^^^^^^# Command for debug #^^^^^^^^^^^^^^^^^^^^^^#
else:
pass
#####
class Page():
'''
Getting a torrent page with all torrents in it
'''
def __init__(self):
self.torrent_list = []
self.raw_list = []
# Requesting page information of torrents by session
if user_agent or referer or host:
res = s.get(site_url, headers=my_headers)
else:
res = s.get(site_url)
soup = bs4.BeautifulSoup(res.text,'lxml')
self.raw_list = soup.select('.torrentname')
#vvvvvvvvvvvvvvvvvvvvv# Command for debug #vvvvvvvvvvvvvvvvv# Command for debug #vvvvvvvvvvvvvvvvvvvvvv#
print('\n\nThe website shows: ')
try:
print(soup)
except:
print('Cannot print soup')
print('\n\nThe torrents informations(raw_list) shows below: ')
try:
print(raw_list)
except:
print('Cannot print raw_list')
#^^^^^^^^^^^^^^^^^^^^^# Command for debug #^^^^^^^^^^^^^^^^^# Command for debug #^^^^^^^^^^^^^^^^^^^^^^#
def __str__(self):
return self.raw_list
def Free(self):
free_state = []
# Check free and add states
for entry in self.raw_list:
#if entry.find(class_='pro_free' 'pro_free2up'):
if entry.find(class_='pro_free') or entry.find(class_='pro_free2up'):
details = entry.a['href']
torrent_id = re.search(pattern, details).group(1)
free_state.append((torrent_id,True))
else:
details = entry.a['href']
torrent_id = re.search(pattern, details).group(1)
free_state.append((torrent_id,False))
#vvvvvvvvvvvvvvvvvvvvv# Command for debug #vvvvvvvvvvvvvvvvv# Command for debug #vvvvvvvvvvvvvvvvvvvvvv#
print("\n\nThe torrents' free state tuples list shows below: ")
try:
print(free_state)
except:
print('Cannot print the free_tuple_list')
#^^^^^^^^^^^^^^^^^^^^^# Command for debug #^^^^^^^^^^^^^^^^^# Command for debug #^^^^^^^^^^^^^^^^^^^^^^#
return free_state
#####
#####
task = Page() ## The site would inform you that you have loged in this site when you run Page() at the very beginning.
task = Page() ## So just run this command again to make sure that you can get the informations of torrents page.
task_list = task.Free()
#####
for torrent in task_list:
torrent_name = str(Torrents(torrent))
if os.path.isfile(torrent_name) == False:
Torrents(torrent).DL()
else:
continue
##### | [
"csx921124@gmail.com"
] | csx921124@gmail.com |
aab84e3b0cfa096c303cef34bb53750dc3f1ee59 | 618acf7dbdbadfd277530fdf028b08352037ab0d | /train_at.py | 684b8f66a3386c32c694b377abbf81df7e33569a | [] | no_license | lilujunai/Knowledge-Distillation-Zoo | cbf9e71778dbbb7dd977c5d8ec5ce347b7c291f8 | b7bce273d9ee85331a75f5423d7b65c8834b04df | refs/heads/master | 2020-12-11T21:13:00.838982 | 2019-02-16T09:06:14 | 2019-02-16T09:06:14 | 233,961,529 | 0 | 1 | null | 2020-01-15T00:12:48 | 2020-01-15T00:12:47 | null | UTF-8 | Python | false | false | 8,895 | py | from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
import torchvision.datasets as dst
import argparse
import os
import time
from util import AverageMeter, accuracy, transform_time
from util import load_pretrained_model, save_checkpoint
from network import define_tsnet
# Command-line interface for attention-transfer (AT) distillation training.
# AT matches spatial attention maps (sum of |activation|^p over channels)
# between a frozen teacher and a trainable student.
parser = argparse.ArgumentParser(description='attention transfer (sum of absolute values raised to the power of p)')
# various path
parser.add_argument('--save_root', type=str, default='./results', help='models and logs are saved here')
parser.add_argument('--img_root', type=str, default='./datasets', help='path name of image dataset')
parser.add_argument('--s_init', type=str, required=True, help='initial parameters of student model')
parser.add_argument('--t_model', type=str, required=True, help='path name of teacher model')
# training hyper parameters
parser.add_argument('--print_freq', type=int, default=10, help='frequency of showing training results on console')
parser.add_argument('--epochs', type=int, default=200, help='number of total epochs to run')
parser.add_argument('--batch_size', type=int, default=128, help='The size of batch')
parser.add_argument('--lr', type=float, default=0.1, help='initial learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=1e-4, help='weight decay')
parser.add_argument('--num_class', type=int, default=10, help='number of classes')
# NOTE(review): --cuda is an int used as a boolean flag (0/1), not a device id.
parser.add_argument('--cuda', type=int, default=1)
# net and dataset choosen
parser.add_argument('--data_name', type=str, required=True, help='name of dataset')# cifar10/cifar100
parser.add_argument('--t_name', type=str, required=True, help='name of teacher')
parser.add_argument('--s_name', type=str, required=True, help='name of student')
# hyperparameter lambda
# Weight of the attention-transfer MSE term relative to cross-entropy.
parser.add_argument('--lambda_at', type=float, default=100.0)
# Exponent applied to absolute activations when forming attention maps
# (consumed by attention_map elsewhere in this file -- confirm).
parser.add_argument('--p', type=float, default=2.0, help='power of absolute values')
def main():
	"""Entry point: train a student with attention-transfer distillation.

	Loads pretrained teacher and student weights, freezes the teacher,
	then trains the student on CIFAR-10/100 with a loss of
	cross-entropy + lambda_at * MSE(attention maps). A checkpoint of the
	student is written after every epoch (teacher weights only at epoch 1).
	"""
	global args
	args = parser.parse_args()
	print(args)
	# Ensure the checkpoint output directory exists before training starts.
	if not os.path.exists(os.path.join(args.save_root,'checkpoint')):
		os.makedirs(os.path.join(args.save_root,'checkpoint'))
	if args.cuda:
		cudnn.benchmark = True
	print('----------- Network Initialization --------------')
	# Build student and load its initialization weights.
	snet = define_tsnet(name=args.s_name, num_class=args.num_class, cuda=args.cuda)
	checkpoint = torch.load(args.s_init)
	load_pretrained_model(snet, checkpoint['net'])
	# Build teacher, load its weights, and freeze it: eval mode plus
	# requires_grad=False so no teacher gradients are ever computed.
	tnet = define_tsnet(name=args.t_name, num_class=args.num_class, cuda=args.cuda)
	checkpoint = torch.load(args.t_model)
	load_pretrained_model(tnet, checkpoint['net'])
	tnet.eval()
	for param in tnet.parameters():
		param.requires_grad = False
	print('-----------------------------------------------')
	# initialize optimizer (only student parameters are optimized)
	optimizer = torch.optim.SGD(snet.parameters(),
								lr = args.lr,
								momentum = args.momentum,
								weight_decay = args.weight_decay,
								nesterov = True)
	# define loss functions
	if args.cuda:
		criterionCls = torch.nn.CrossEntropyLoss().cuda()
		criterionAT = torch.nn.MSELoss().cuda()
	else:
		criterionCls = torch.nn.CrossEntropyLoss()
		criterionAT = torch.nn.MSELoss()
	# define transforms; mean/std are per-channel statistics for each dataset
	if args.data_name == 'cifar10':
		dataset = dst.CIFAR10
		mean = (0.4914, 0.4822, 0.4465)
		std = (0.2470, 0.2435, 0.2616)
	elif args.data_name == 'cifar100':
		dataset = dst.CIFAR100
		mean = (0.5071, 0.4865, 0.4409)
		std = (0.2673, 0.2564, 0.2762)
	else:
		raise Exception('invalid dataset name...')
	# Standard CIFAR augmentation: reflect-pad to 40x40, random 32x32 crop, flip.
	train_transform = transforms.Compose([
		transforms.Pad(4, padding_mode='reflect'),
		transforms.RandomCrop(32),
		transforms.RandomHorizontalFlip(),
		transforms.ToTensor(),
		transforms.Normalize(mean=mean,std=std)
	])
	test_transform = transforms.Compose([
		transforms.CenterCrop(32),
		transforms.ToTensor(),
		transforms.Normalize(mean=mean,std=std)
	])
	# define data loader (download=True fetches the dataset on first run)
	train_loader = torch.utils.data.DataLoader(
		dataset(root = args.img_root,
				transform = train_transform,
				train = True,
				download = True),
		batch_size=args.batch_size, shuffle=True, num_workers=4, pin_memory=True)
	test_loader = torch.utils.data.DataLoader(
		dataset(root = args.img_root,
				transform = test_transform,
				train = False,
				download = True),
		batch_size=args.batch_size, shuffle=False, num_workers=4, pin_memory=True)
	for epoch in range(1, args.epochs+1):
		epoch_start_time = time.time()
		# Step the learning-rate schedule (adjust_lr is defined elsewhere in this file).
		adjust_lr(optimizer, epoch)
		# train one epoch
		nets = {'snet':snet, 'tnet':tnet}
		criterions = {'criterionCls':criterionCls, 'criterionAT':criterionAT}
		train(train_loader, nets, optimizer, criterions, epoch)
		epoch_time = time.time() - epoch_start_time
		print('one epoch time is {:02}h{:02}m{:02}s'.format(*transform_time(epoch_time)))
		# evaluate on testing set
		print('testing the models......')
		test_start_time = time.time()
		test(test_loader, nets, criterions)
		test_time = time.time() - test_start_time
		print('testing time is {:02}h{:02}m{:02}s'.format(*transform_time(test_time)))
		# save model
		print('saving models......')
		save_name = 'at_r{}_r{}_{:>03}.ckp'.format(args.t_name[6:], args.s_name[6:], epoch)
		save_name = os.path.join(args.save_root, 'checkpoint', save_name)
		# The (constant) teacher weights are stored only in the first checkpoint
		# to avoid duplicating them in every epoch's file.
		if epoch == 1:
			save_checkpoint({
				'epoch': epoch,
				'snet': snet.state_dict(),
				'tnet': tnet.state_dict(),
			}, save_name)
		else:
			save_checkpoint({
				'epoch': epoch,
				'snet': snet.state_dict(),
			}, save_name)
def train(train_loader, nets, optimizer, criterions, epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
cls_losses = AverageMeter()
at_losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
snet = nets['snet']
tnet = nets['tnet']
criterionCls = criterions['criterionCls']
criterionAT = criterions['criterionAT']
snet.train()
end = time.time()
for idx, (img, target) in enumerate(train_loader, start=1):
data_time.update(time.time() - end)
if args.cuda:
img = img.cuda()
target = target.cuda()
_, rb1_s, rb2_s, rb3_s, output_s = snet(img)
_, rb1_t, rb2_t, rb3_t, output_t = tnet(img)
cls_loss = criterionCls(output_s, target)
at_loss = criterionAT(attention_map(rb3_s), attention_map(rb3_t).detach()) * args.lambda_at
loss = cls_loss + at_loss
prec1, prec5 = accuracy(output_s, target, topk=(1,5))
cls_losses.update(cls_loss.item(), img.size(0))
at_losses.update(at_loss.item(), img.size(0))
top1.update(prec1.item(), img.size(0))
top5.update(prec5.item(), img.size(0))
optimizer.zero_grad()
loss.backward()
optimizer.step()
batch_time.update(time.time() - end)
end = time.time()
if idx % args.print_freq == 0:
print('Epoch[{0}]:[{1:03}/{2:03}] '
'Time:{batch_time.val:.4f} '
'Data:{data_time.val:.4f} '
'Cls:{cls_losses.val:.4f}({cls_losses.avg:.4f}) '
'AT:{at_losses.val:.4f}({at_losses.avg:.4f}) '
'prec@1:{top1.val:.2f}({top1.avg:.2f}) '
'prec@5:{top5.val:.2f}({top5.avg:.2f})'.format(
epoch, idx, len(train_loader), batch_time=batch_time, data_time=data_time,
cls_losses=cls_losses, at_losses=at_losses, top1=top1, top5=top5))
def test(test_loader, nets, criterions):
cls_losses = AverageMeter()
at_losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
snet = nets['snet']
tnet = nets['tnet']
criterionCls = criterions['criterionCls']
criterionAT = criterions['criterionAT']
snet.eval()
end = time.time()
for idx, (img, target) in enumerate(test_loader, start=1):
if args.cuda:
img = img.cuda()
target = target.cuda()
with torch.no_grad():
_, _, _, rb3_s, output_s = snet(img)
_, _, _, rb3_t, output_t = tnet(img)
cls_loss = criterionCls(output_s, target)
at_loss = criterionAT(attention_map(rb3_s), attention_map(rb3_t).detach()) * args.lambda_at
prec1, prec5 = accuracy(output_s, target, topk=(1,5))
cls_losses.update(cls_loss.item(), img.size(0))
at_losses.update(at_loss.item(), img.size(0))
top1.update(prec1.item(), img.size(0))
top5.update(prec5.item(), img.size(0))
f_l = [cls_losses.avg, at_losses.avg, top1.avg, top5.avg]
print('Cls: {:.4f}, AT: {:.4f}, Prec@1: {:.2f}, Prec@5: {:.2f}'.format(*f_l))
def adjust_lr(optimizer, epoch):
scale = 0.1
lr_list = [args.lr] * 100
lr_list += [args.lr*scale] * 50
lr_list += [args.lr*scale*scale] * 50
lr = lr_list[epoch-1]
print('epoch: {} lr: {}'.format(epoch, lr))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def attention_map(fm, eps=1e-5):
am = torch.pow(torch.abs(fm), args.p)
am = torch.sum(am, dim=1, keepdim=True)
norm = torch.norm(am, dim=(2,3), keepdim=True)
am = torch.div(am, norm+eps)
return am
if __name__ == '__main__':
main() | [
"huyibo871079699@163.com"
] | huyibo871079699@163.com |
ac9c78bb4e9952ff2ffcdc6feeb5a0e862a80e4b | 43a506c479f6a63c2bca2065db9f1febf890764c | /code/3/nums.py | d664655c0d7a984dc0ede630eb26025d4999b778 | [] | no_license | gui-rangel/fss16gui | de49d99a97b94519813d8f2e801e19ed80235023 | 44485a2e3c616304f9456dbba021d4a111be802a | refs/heads/master | 2020-11-28T16:38:11.381105 | 2016-12-08T17:27:43 | 2016-12-08T17:27:43 | 65,944,194 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 947 | py | def max(x,y) : return x if x>y else y
def min(x,y) : return x if x<y else y
class Num:
def __init__(i,name):
i.mu,i.n,i.m2,i.up,i.lo,i.name,i.pos = 0,0,0,-10e32,10e32,name,0
def add(i,x,count):
i.n += 1
x = float(x)
if x > i.up: i.up=x
if x < i.lo: i.lo=x
delta = x - i.mu
i.mu += delta/i.n
i.m2 += delta*(x - i.mu)
i.pos = count
return x
def sub(i,x):
i.n = max(0,i.n - 1)
delta = x - i.mu
i.mu = max(0,i.mu - delta/i.n)
i.m2 = max(0,i.m2 - delta*(x - i.mu))
def sd(i):
return 0 if i.n <= 2 else (i.m2/(i.n - 1))**0.5
def norm(i,x):
tmp= (x - i.lo) / (i.up - i.lo + 10**-32)
if tmp > 1: return 1
elif tmp < 0: return 0
else: return tmp
def dist(i,x,y):
return i.norm(x) - i.norm(y)
def furthest(i,x) :
return i.up if x <(i.up-i.lo)/2 else i.lo
def show(i):
print "{} - Mean: {}; Standard Deviation: {}".format(i.name,i.mu,i.sd())
| [
"guilherangel@hotmail.com"
] | guilherangel@hotmail.com |
d5415f607dd31adae279661a33d4bee445418136 | 76084379c92ba50a7dd273072c828e1fb886ac66 | /s3iotools/io/dataframe.py | 23a9451c51516f2e9c1369c1b027ddbd284b631c | [
"MIT"
] | permissive | MacHu-GWU/s3iotools-project | 19a08698b3f41fdb165a5df266860afdfe82d10e | 6e8a12d30792464c6ffa13cfb105578aed9f67da | refs/heads/master | 2020-04-25T18:05:13.116604 | 2019-05-20T13:00:02 | 2019-05-20T13:00:02 | 172,972,132 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,796 | py | # -*- coding: utf-8 -*-
"""
s3 IO tools.
"""
import attr
import pandas as pd
from six import string_types, StringIO, BytesIO, PY3
from ..compat import gzip_compress, gzip_decompress
@attr.s
class S3Dataframe(object):
"""
S3 object backed pandas DataFrame.
"""
s3_resource = attr.ib(default=None)
bucket_name = attr.ib(
validator=attr.validators.optional(
attr.validators.instance_of(string_types)
),
default=None,
)
_bucket = attr.ib(default=None)
key = attr.ib(
validator=attr.validators.optional(
attr.validators.instance_of(string_types)
),
default=None,
)
_object = attr.ib(default=None)
df = attr.ib(
validator=attr.validators.optional(
attr.validators.instance_of(pd.DataFrame)
),
default=None,
)
@property
def bucket(self):
"""
access the ``s3.Bucket`` instance.
Ref: https://boto3.readthedocs.io/en/latest/reference/services/s3.html#bucket
"""
if self._bucket is None:
self._bucket = self.s3_resource.Bucket(self.bucket_name)
return self._bucket
@property
def object(self):
"""
access the ``s3.Object`` instance.
Ref: https://boto3.readthedocs.io/en/latest/reference/services/s3.html#object
"""
if self._object is None:
self._object = self.bucket.Object(self.key)
return self._object
def prepare_args(self, bucket, key, kwargs, default_kwargs):
if bucket is None:
bucket = self.bucket
if key is None:
key = self.key
extra_kwargs = default_kwargs.copy()
extra_kwargs.update(kwargs)
return bucket, key, extra_kwargs
to_csv_kwargs_default = {
"encoding": "utf-8",
"index": False,
}
def to_csv(self, bucket=None, key=None, gzip_compressed=False, **to_csv_kwargs):
"""
Save a dataframe to a s3 object in csv format.
It will overwrite existing one.
:param bucket: :class:`s3.Bucket`, optional if self.bucket_name is defined
:param key: str, optional if self.key is defined
:param gzip_compressed: bool
:param to_csv_kwargs: key word arguments for :meth:`pandas.DataFrame.to_csv`
:return: s3.Bucket.put_object() response
"""
bucket, key, kwargs = self.prepare_args(
bucket, key, to_csv_kwargs, self.to_csv_kwargs_default)
body = self.df.to_csv(**kwargs)
if PY3:
body = body.encode("utf-8")
if gzip_compressed is True:
body = gzip_compress(body)
response = bucket.put_object(Body=body, Key=key)
return response
read_csv_kwargs_default = {
"encoding": "utf-8"
}
def read_csv(self, bucket=None, key=None, gzip_compressed=False, **read_csv_kwargs):
"""
Read dataframe data from a s3 object in csv format.
:param bucket: :class:`s3.Bucket`, optional if self.bucket_name is defined
:param key: str, optional if self.key is defined
:param gzip_compressed: bool
:param read_csv_kwargs: key word arguments for :meth:`pandas.read_csv`
:return: s3.Object.get() response
"""
bucket, key, kwargs = self.prepare_args(
bucket, key, read_csv_kwargs, self.read_csv_kwargs_default)
obj = bucket.Object(key)
response = obj.get()
body = response["Body"].read()
if gzip_compressed:
body = gzip_decompress(body)
self.df = pd.read_csv(StringIO(body.decode("utf-8")), **kwargs)
return response
to_json_kwargs_default = {
"force_ascii": False,
}
def to_json(self, bucket=None, key=None, gzip_compressed=False, **to_json_kwargs):
"""
Save a dataframe to a s3 object in csv format.
It will overwrite existing one.
:param bucket: :class:`s3.Bucket`, optional if self.bucket_name is defined
:param key: str, optional if self.key is defined
:param gzip_compressed: bool
:param to_json_kwargs: key word arguments for :meth:`pandas.DataFrame.to_json`
:return: s3.Bucket.put_object() response
"""
bucket, key, kwargs = self.prepare_args(
bucket, key, to_json_kwargs, self.to_json_kwargs_default)
body = self.df.to_json(**kwargs)
if PY3:
body = body.encode("utf-8")
if gzip_compressed is True:
body = gzip_compress(body)
response = bucket.put_object(Body=body, Key=key)
return response
read_json_kwargs_default = {
"encoding": "utf-8"
}
def read_json(self, bucket=None, key=None, gzip_compressed=False, **read_json_kwargs):
"""
Read dataframe data from a s3 object in json format.
:param bucket: :class:`s3.Bucket`, optional if self.bucket_name is defined
:param key: str, optional if self.key is defined
:param gzip_compressed: bool
:param read_json_kwargs: key word arguments for :meth:`pandas.read_json`
:return: s3.Object.get() response
"""
bucket, key, kwargs = self.prepare_args(
bucket, key, read_json_kwargs, self.read_json_kwargs_default)
obj = bucket.Object(key)
response = obj.get()
body = response["Body"].read()
if gzip_compressed:
body = gzip_decompress(body)
self.df = pd.read_json(StringIO(body.decode("utf-8")), **kwargs)
return response
write_table_kwargs_default = {
}
class ParquestCompression:
gzip = "gzip"
snappy = "snappy"
brotli = "brotli"
lz4 = "lz4"
zstd = "zstd"
none = None
def to_parquet(self, bucket=None, key=None, compression=None, **write_table_kwargs):
"""
Save a dataframe to a s3 object in parquet format.
It will overwrite existing one.
:param bucket: :class:`s3.Bucket`, optional if self.bucket_name is defined
:param key: str, optional if self.key is defined
:param gzip_compressed: bool
:param to_json_kwargs: key word arguments for :meth:`pyarrow.parquet.write_table_kwargs`
:return: s3.Bucket.put_object() response
"""
import pyarrow
from pyarrow import parquet
bucket, key, kwargs = self.prepare_args(
bucket, key, write_table_kwargs, self.write_table_kwargs_default)
buffer = BytesIO()
parquet.write_table(
pyarrow.Table.from_pandas(self.df),
buffer,
compression=compression,
**write_table_kwargs
)
body = buffer.getvalue()
response = bucket.put_object(Body=body, Key=key)
return response
read_table_kwargs_default = {}
def read_parquet(self, bucket=None, key=None, **read_table_kwargs):
"""
Read dataframe data from a s3 object in parquet format.
:param bucket: :class:`s3.Bucket`, optional if self.bucket_name is defined
:param key: str, optional if self.key is defined
:param read_table_kwargs: key word arguments for :meth:`pyarrow.parquet.read_table`
:return: s3.Object.get() response
"""
from pyarrow import parquet
bucket, key, kwargs = self.prepare_args(
bucket, key, read_table_kwargs, self.read_table_kwargs_default)
obj = bucket.Object(key)
response = obj.get()
# boto3 StreamingBody has not implemented closed attribute
buffer = BytesIO()
buffer.write(response["Body"].read())
self.df = parquet.read_table(buffer, **read_table_kwargs).to_pandas()
return response
| [
"husanhe@gmail.com"
] | husanhe@gmail.com |
716e24f833117790ab5298a92011c308a6ea8355 | 56be7f6b6a1243c532af9ea98310ccea165a1e66 | /day18/课件/day18mysite/app01/migrations/0002_publisher.py | fe58096c46afa0d01cbc643d0689ddf1f3992ce0 | [] | no_license | 214031230/Python21 | 55b0405ec4ad186b052cde7ebfb3f4bb636a3f30 | d7fc68d3d23345df5bfb09d4a84686c8b49a5ad7 | refs/heads/master | 2021-05-26T06:00:53.393577 | 2019-01-09T02:29:04 | 2019-01-09T02:29:04 | 127,778,172 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 569 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-08-19 04:19
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app01', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Publisher',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=24)),
],
),
]
| [
"21403123@qq.com"
] | 21403123@qq.com |
fd9ff5a0d7cc102326adcbb8f214243b7df59c30 | 6bb599997d835672dc50c06ce59a517076835e35 | /cvety/admin.py | aec3dfb8cdc0fa8a13b81037369cd73339302a91 | [] | no_license | lookonkz/svadba | 3f5e8cae894b5d0391d669e85058ee670ec3577e | ba7407d1a17bd92a6e3bf873f3223e47fe1d27aa | refs/heads/master | 2021-09-06T04:33:44.867624 | 2018-02-02T10:55:29 | 2018-02-02T10:55:29 | 119,970,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 193 | py | from django.contrib import admin
from .models import Cvety
# Register your models here.
@admin.register(Cvety)
class AdminCvety(admin.ModelAdmin):
list_display = ["name", "id", "price"]
| [
"user1@iMac-user1.local"
] | user1@iMac-user1.local |
282fb2a3fc530178816655ec27adeb0f02038d26 | f81e953240152d3ee6f09b121d1afd3b2c6c21c5 | /setup.py | cae21648e4cfcfd5116a37e8246eafa0ada5d545 | [] | permissive | rasca/django-cuit | 0d9c54556d37f252fe5d899447803cbdcbf12926 | 94ccaad1ff2a1c17f82bbb734c48f94b3f022727 | refs/heads/master | 2021-05-16T03:00:36.884341 | 2019-01-03T00:43:13 | 2019-01-03T00:43:13 | 1,658,060 | 2 | 3 | BSD-3-Clause | 2019-01-03T00:43:14 | 2011-04-24T23:24:46 | Python | UTF-8 | Python | false | false | 458 | py | from distutils.core import setup
setup(
name='django-cuit',
version='0.1.dev',
author='Ivan Raskovsky (rasca)',
author_email='raskovsky@gmail.com',
packages=['cuit',],
license='BSD',
description='django app for verification of Argentinian AFIP CUITs',
long_description=open('README.rst').read(),
keywords = 'django ar cuit afip',
url = 'http://github.com/rasca/django-cuit',
install_requires = ['Django>=1.0'],
)
| [
"raskovsky+git@gmail.com"
] | raskovsky+git@gmail.com |
95ac8d6e1cd48ef413e647eba9db8996cf4f9756 | 204ec78fcebcea9e1e1da4905cf3fad0a514b01f | /pyocd/commands/commander.py | dda3def7d561c8cd2b153987fadbdc9bd23fe83c | [
"Apache-2.0"
] | permissive | ARMmbed/pyOCD | 659340bf8753aa8e15a72890b8bea64dff2c2f42 | d4cdcf7e532cae17caad866839287bbe1e0d952b | refs/heads/master | 2023-05-31T13:45:15.797588 | 2020-10-12T13:55:47 | 2020-10-12T13:55:47 | 190,203,829 | 3 | 1 | Apache-2.0 | 2019-07-05T11:05:40 | 2019-06-04T13:09:56 | Python | UTF-8 | Python | false | false | 8,726 | py | # pyOCD debugger
# Copyright (c) 2015-2020 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import logging
import os
import traceback
from ..core.helpers import ConnectHelper
from ..core import (exceptions, session)
from ..utility.cmdline import convert_session_options
from ..commands.repl import (PyocdRepl, ToolExitException)
from ..commands.execution_context import CommandExecutionContext
LOG = logging.getLogger(__name__)
## Default SWD clock in Hz.
DEFAULT_CLOCK_FREQ_HZ = 1000000
class PyOCDCommander(object):
"""! @brief Manages the commander interface.
Responsible for connecting the execution context, REPL, and commands, and handles connection.
@todo Replace use of args from argparse with something cleaner.
"""
def __init__(self, args, cmds=None):
"""! @brief Constructor."""
# Read command-line arguments.
self.args = args
self.cmds = cmds
self.context = CommandExecutionContext(no_init=self.args.no_init)
self.context.command_set.add_command_group('commander')
self.session = None
self.exit_code = 0
def run(self):
"""! @brief Main entry point."""
try:
# If no commands, enter interactive mode.
if self.cmds is None:
if not self.connect():
return self.exit_code
# Print connected message, unless not initing.
if not self.args.no_init:
try:
# If the target is locked, we can't read the CPU state.
if self.session.target.is_locked():
status = "locked"
else:
try:
status = self.session.target.get_state().name.capitalize()
except (AttributeError, KeyError):
status = "<no core>"
# Say what we're connected to.
print("Connected to %s [%s]: %s" % (self.context.target.part_number,
status, self.session.board.unique_id))
except exceptions.TransferFaultError:
pass
# Run the REPL interface.
console = PyocdRepl(self.context)
console.run()
# Otherwise, run the list of commands we were given and exit. We only connect when
# there is a command that requires a connection (most do).
else:
self.run_commands()
except ToolExitException:
self.exit_code = 0
except exceptions.TransferError:
print("Error: memory transfer failed")
# Use get_current() in case our session hasn't been created yet.
if session.Session.get_current().log_tracebacks:
traceback.print_exc()
self.exit_code = 2
except exceptions.CommandError as e:
print("Error:", e)
self.exit_code = 1
finally:
# Ensure the session is closed.
if self.session is not None:
self.session.close()
return self.exit_code
def run_commands(self):
"""! @brief Run commands specified on the command line."""
did_connect = False
for args in self.cmds:
# Extract the command name.
cmd = args[0].lower()
# Handle certain commands without connecting.
needs_connect = (cmd not in ('list', 'help', 'exit'))
# For others, connect first.
if needs_connect and not did_connect:
if not self.connect():
return self.exit_code
did_connect = True
# Merge commands args back to one string.
# FIXME this is overly complicated
cmdline = " ".join('"{}"'.format(a) for a in args)
# Invoke action handler.
result = self.context.process_command_line(cmdline)
if result is not None:
self.exit_code = result
break
def connect(self):
"""! @brief Connect to the probe."""
if (self.args.frequency is not None) and (self.args.frequency != DEFAULT_CLOCK_FREQ_HZ):
self.context.writei("Setting SWD clock to %d kHz", self.args.frequency // 1000)
options = convert_session_options(self.args.options)
# Set connect mode. The --connect option takes precedence when set. Then, if --halt is set
# then the connect mode is halt. If connect_mode is set through -O then use that.
# Otherwise default to attach.
if hasattr(self.args, 'connect_mode') and self.args.connect_mode is not None:
connect_mode = self.args.connect_mode
elif self.args.halt:
connect_mode = 'halt'
elif 'connect_mode' in options:
connect_mode = None
else:
connect_mode = 'attach'
# Connect to board.
self.session = ConnectHelper.session_with_chosen_probe(
blocking=(not self.args.no_wait),
project_dir=self.args.project_dir,
config_file=self.args.config,
user_script=self.args.script,
no_config=self.args.no_config,
pack=self.args.pack,
unique_id=self.args.unique_id,
target_override=self.args.target_override,
connect_mode=connect_mode,
frequency=self.args.frequency,
options=options,
option_defaults=dict(
auto_unlock=False,
resume_on_disconnect=False,
))
if self.session is None:
self.exit_code = 3
return False
self._post_connect()
result = self.context.attach_session(self.session)
if not result:
self.exit_code = 1
return result
def _post_connect(self):
"""! @brief Finish the connect process.
The session is opened. The `no_init` parameter passed to the constructor determines whether the
board and target are initialized.
If an ELF file was provided on the command line, it is set on the target.
@param self This object.
@param session A @ref pyocd.core.session.Session "Session" instance.
@retval True Session attached and context state inited successfully.
@retval False An error occurred when opening the session or initing the context state.
"""
assert self.session is not None
assert not self.session.is_open
# Open the session.
try:
self.session.open(init_board=not self.args.no_init)
except exceptions.TransferFaultError as e:
if not self.session.target.is_locked():
self.context.writei("Transfer fault while initing board: %s", e)
if self.session.log_tracebacks:
self.context.write(traceback.format_exc())
return False
except exceptions.Error as e:
self.context.writei("Exception while initing board: %s", e)
if self.session.log_tracebacks:
self.context.write(traceback.format_exc())
return False
# Set elf file if provided.
if self.args.elf:
self.session.target.elf = os.path.expanduser(self.args.elf)
# Handle a device with flash security enabled.
if not self.args.no_init and self.session.target.is_locked():
self.context.write("Warning: Target is locked, limited operations available. Use 'unlock' "
"command to mass erase and unlock, then execute 'reinit'.")
return True
| [
"flit@me.com"
] | flit@me.com |
ffc316b832df3622880682ce1b1a0180196bb06f | 07006aa772d01e9099c0be3c0932e282ee209716 | /env/bin/django-admin | 53e0b21381ede57ca3cb9d67e77578718cd3533f | [] | no_license | trankmichael/merv | a1c80b26210ca653a70db3800c86e4be6ceb96d6 | a5bfdb83e5c0115f9dde4407b96a7a609873fd17 | refs/heads/master | 2021-01-16T02:27:22.218861 | 2016-05-13T23:58:46 | 2016-05-13T23:58:46 | 56,453,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 286 | #!/home/mumbosauce/merv/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(execute_from_command_line())
| [
"trankmichael@gmail.com"
] | trankmichael@gmail.com | |
fde1300aaf4414e664e6205cc6827f757613ff87 | 60f446736d8866350a52e0921c4bcbb265797780 | /util/mq.py | 768656c3ef5388569dc4af8c7a779fe5408f8179 | [] | no_license | yaeSakura/guest_sign | 9932dc6d1ea365ac273c4117c0a62e1d6d520723 | 5aaea0213f1b7951f71d3dfb3bd523b8447ee344 | refs/heads/master | 2022-12-15T14:13:38.245792 | 2018-02-28T03:26:07 | 2018-02-28T03:26:07 | 123,223,311 | 0 | 0 | null | 2022-12-07T23:46:01 | 2018-02-28T03:23:29 | Python | UTF-8 | Python | false | false | 4,859 | py | #! /usr/bin/env python
# -*- coding:utf-8 -*-
import pika
import logging
import threading
logger = logging.getLogger(__name__)
class Consumer():
EXCHANGE = 'location'
EXCHANGE_TYPE = 'direct'
# PUBLISH_INTERVAL = 1
QUEUE = 'text'
ROUTING_KEY = 'hola'
def __init__(self, event=None, process_func=None):
credentials = pika.PlainCredentials("heziqi", "heziqi")
conn_params = pika.ConnectionParameters("47.95.208.252",
virtual_host="test",
credentials=credentials,
connection_attempts=3,
heartbeat=3600)
self._process_func = process_func
self._connection = None
self._channel = None
self._closing = False
self._consumer_tag = None
self._conn_params = conn_params
def connect(self):
print('Connecting to ', self._conn_params.host)
if self._connection:
self._connection.connect()
else:
self._connection = pika.SelectConnection(parameters=self._conn_params,
on_open_callback=self.on_connection_open,
on_close_callback=self.on_connection_closed,
stop_ioloop_on_close=False)
return self._connection
def on_connection_open(self, unused_connection):
print('Connection opend')
self.open_channel()
def on_connection_closed(self,connection, reply_code, reply_text):
print('Connection closed, reopening in 1 seconds: (%s) %s' % (reply_code, reply_text))
self._connection.add_timeout(1, self.reconnect)
def open_channel(self):
print('Create a new channel')
self._connection.channel(on_open_callback=self.on_channel_open)
def on_channel_open(self, channel):
print('Channel opened')
self._channel = channel
self.setup_exchange(self.EXCHANGE)
def setup_exchange(self, exchange_name):
print('Declaring exchange ', exchange_name)
self._channel.exchange_declare(callback=self.on_exchange_declareok,
exchange=exchange_name,
exchange_type=self.EXCHANGE_TYPE,
passive=False,
durable=True,
auto_delete=False)
def on_exchange_declareok(self, unused_frame):
print('Exchange declared')
self.setup_queue(self.QUEUE)
def setup_queue(self, queue_name):
print('Declarig queue ', queue_name)
self._channel.queue_declare(callback=self.on_queue_declareok,
queue=queue_name)
def on_queue_declareok(self, method_frame):
print('Binding {} to {} with {}'.format(self.EXCHANGE,
self.QUEUE,
self.ROUTING_KEY))
self._channel.queue_bind(callback=self.on_bindok,
queue=self.QUEUE,
exchange=self.EXCHANGE,
routing_key=self.ROUTING_KEY)
def on_bindok(self, unused_frame):
print('Queue bound')
self.start_consuming()
def start_consuming(self):
print('Issuing consumer related RPC commands')
self._consumer_tag = self._channel.basic_consume(consumer_callback=self.on_message,
queue=self.QUEUE)
def on_message(self, unused_channel, basic_deliver, properties, body):
print('Recevied message # {} from {}:{}'.format(
basic_deliver.delivery_tag, properties.app_id, body
))
if self._process_func:
self._process_func(body)
self.acknowledge_message(basic_deliver.delivery_tag)
def acknowledge_message(self, delivery_tag):
print('Acknowlegding message {}'.format(delivery_tag))
self._channel.basic_ack(delivery_tag)
def run(self):
self._connection = self.connect()
self._connection.ioloop.start()
def async_start_consume(f=None):
t = threading.Thread(target=start_consume, args=(f,))
t.daemon = True
t.start()
def start_consume(f=None):
consumer = Consumer(process_func=f)
consumer.run()
def main():
async_start_consume()
if __name__ == "__main__":
LOG_FORMAT = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) '
'-35s %(lineno) -5d: %(message)s')
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)
main() | [
"1668488211@qq.com"
] | 1668488211@qq.com |
ff3f382ed538349416ae4f342bfd129c0b9ba0e3 | 5277e5dffd03bc73e2de330072015c122c8b09c6 | /Method 8/ganuggets2.py | b941866c13789b30737ac3f2a0210df6c3dcec81 | [] | no_license | notesby/CARDALG | 5d6d67cea6fc91a56b0653414bd24014d1d949b1 | b28baa65fea107b947d51ab1956861e1a1852f7b | refs/heads/master | 2022-09-19T12:44:26.369511 | 2020-03-02T18:29:26 | 2020-03-02T18:29:26 | 176,398,678 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,337 | py | import numpy as np
import math
import random
from copy import copy
def encodeKey(index, value):
    """Serialize an (index, value) pair as the string "index,value"."""
    return str(index) + "," + str(value)
def decodeKey(key):
    """Parse an "index,value" string (see encodeKey) back into a list of ints."""
    return [int(part) for part in key.split(",")]
def getExpr(size):
    """Build a python expression (string) that maps a coordinate vector
    xi[0..n-1] in the original dimensions onto the corresponding index of
    the flattened 1-D data array.

    size : shape of the unflattened data (e.g. np.shape(data))

    e.g. size=[3, 4] -> "xi[0]*4+xi[1]"; for a single dimension the result
    keeps a harmless leading "+" ("+xi[0]") for historical compatibility.
    """
    ndims = len(size)
    terms = []
    if ndims > 1:
        # each axis contributes xi[axis-1] scaled by the extents to its right
        for axis in range(1, ndims):
            term = "xi[{}]".format(axis - 1)
            for extent in size[axis:]:
                term += "*{}".format(extent)
            terms.append(term)
        lastAxis = ndims - 1
    else:
        lastAxis = 0
    expression = "+".join(terms)
    expression += "+xi[{}]".format(lastAxis)
    return expression
def getNeighbors(cell, coords, size):
    """Map cell-relative offsets to positions in the flattened data array.

    cell   : coordinates of the reference cell in the original dimensions
    coords : list of offsets; each offset is either a sequence with one
             component per dimension, or a plain int (legacy 1-D form that
             is added to every component and bounds-checked against size[0])
    size   : shape of the unflattened data (e.g. np.shape(data))

    Returns one flattened index per offset; -1 marks neighbors that fall
    outside the grid bounds.

    Improvement over the original: the flattened index is computed with
    direct stride arithmetic instead of eval() on a string built by
    getExpr() — same results, no eval, and the strides are hoisted out of
    the per-offset loop.
    """
    ndims = len(size)
    # strides[k] = number of flattened cells spanned by one step along axis k
    strides = [1] * ndims
    for k in range(ndims - 2, -1, -1):
        strides[k] = strides[k + 1] * size[k + 1]
    newCoords = []
    for coord in coords:
        xi = []
        outOfBounds = False
        for i, c in enumerate(cell):
            if type(coord) != int:
                v = c + coord[i]
                bound = size[i]
            else:
                # legacy scalar offset: applied to every component, checked
                # against the first dimension only (original behavior kept)
                v = c + coord
                bound = size[0]
            if v >= bound or v < 0:
                outOfBounds = True
            else:
                xi.append(v)
        if outOfBounds:
            newCoords.append(-1)
        else:
            newCoords.append(sum(v * s for v, s in zip(xi, strides)))
    return newCoords
#returns the values of the neighbors of a certain cell
#data = flattened array of the data
#neighbors = the positions of neighbors of a certain cell
#paddingtype = 0 => don't get values,1=> fill with padding value, 2 => don't fill and return empty dict
#paddingvalue = the values to fill when the padding type equals 1
def getNeighborsValue(data,neighbors,paddingType = 0,paddingValue=0):
values = {}
for i,n in enumerate(neighbors):
val = None
if n >= 0 and n < len(data):
val = data[n]
else:
if paddingType == 0: continue
elif paddingType == 1:
val = paddingValue
elif paddingType == 2:
values = None
break
if val != None:
values[i] = val
return values
# Yields, one per iteration, a list with the index of each dimension.
def multiDimensionalGenerator(size):
    """Yield every index vector (a list of ints) of an array of shape
    `size`, in descending lexicographic order (the last axis varies
    fastest), e.g. size=[2, 2] yields [1, 1], [1, 0], [0, 1], [0, 0].

    Bug fix: the original counter machinery yielded a spurious trailing
    [-1] index for 1-D shapes (e.g. size=[3] produced [2], [1], [0], [-1]),
    which made callers such as getNeighborhood read stack[-1] twice via
    python's negative indexing. Shapes with a zero-sized axis now yield
    nothing instead of garbage indices.
    """
    if any(int(s) <= 0 for s in size):
        return
    # start at the highest index on every axis
    current = [int(s) - 1 for s in size]
    while True:
        yield list(current)
        # advance: decrement the rightmost axis that is not yet at 0,
        # resetting every exhausted axis back to its maximum on the way
        axis = len(size) - 1
        while axis >= 0 and current[axis] == 0:
            current[axis] = int(size[axis]) - 1
            axis -= 1
        if axis < 0:
            return
        current[axis] -= 1
def manhattanDistance(arr):
    """L1 norm: sum of the absolute component values of a vector."""
    return sum(abs(component) for component in arr)
def vonNeumann(radious, distance):
    """Von Neumann neighborhood: keep only the offsets whose Manhattan
    distance from the center cell does not exceed `distance`."""
    def within(offset):
        return manhattanDistance(offset) <= distance
    return getNeighborhood(radious, within)
def moore(radious):
    """Moore neighborhood: every offset within the given radii is accepted."""
    return getNeighborhood(radious, lambda offset: True)
def getNeighborhood(radious, expression):
    """Enumerate the offset vectors inside the given per-dimension radii,
    keeping only those accepted by the predicate.

    radious    : per-dimension radius; axis k ranges over [-radious[k], radious[k]]
    expression : predicate receiving one candidate offset (numpy array)

    Returns the accepted offsets sorted lexicographically by dimension.
    """
    dims = len(radious)
    axes = [np.arange(-r, r + 1, 1) for r in radious]
    # single (..., dims) array holding every offset combination
    grid = np.stack(np.meshgrid(*axes), axis=dims)
    offsets = []
    for idx in multiDimensionalGenerator(np.shape(grid)[:-1]):
        candidate = grid[tuple(idx)]
        if expression(candidate):
            offsets.append(candidate)
    # repeated stable sorts, from the last axis down to the first, leave
    # the list in lexicographic order over all dimensions
    for axis in range(dims - 1, -1, -1):
        offsets.sort(key=lambda off: off[axis])
    return offsets
def dictToMat(dic):
    """Flatten a two-level dict {label: {key: row}} into a matrix: one entry
    per inner row, copied and with the outer label appended as last column."""
    rows = []
    for label, inner in dic.items():
        for row in inner.values():
            entry = copy(row)
            if type(entry) is np.ndarray:
                entry = np.append(entry, label)
            else:
                entry.append(label)
            rows.append(entry)
    return rows
def getDomain(data):
    """Collect the observed value domain of each attribute (column) across
    the learning subproblems.

    data : iterable of rows (every column is treated as an attribute)

    Returns {column-index: sorted list of distinct observed values}.

    Improvement over the original: values are accumulated in sets (O(1)
    membership) instead of scanning a list per value (O(n) each, O(n^2)
    overall per column); output is identical.
    """
    observed = {}
    for row in data:
        for j, col in enumerate(row):
            observed.setdefault(j, set()).add(col)
    # sort once at the end, exactly as the original did per column
    return {j: sorted(vals) for j, vals in observed.items()}
def binarize(data, domain):
    """Binarize the learning subproblems with threshold encoding: every
    attribute column expands into one 0/1 flag per domain value (1 when the
    attribute is >= that value); the last column (the class) is kept as-is.

    data   : iterable of rows
    domain : {column-index: sorted list of threshold values} (see getDomain)
    """
    binarized = []
    for row in data:
        encoded = []
        lastCol = len(row) - 1
        for j, col in enumerate(row):
            if j == lastCol:
                # class column passes through untouched
                encoded.append(col)
            else:
                for threshold in domain[j]:
                    encoded.append(1 if col >= threshold else 0)
        binarized.append(encoded)
    return binarized
def getLearningProblem(data, neighborhood, paddingType, paddingValue):
    """Extract the learning subproblems from a sequence of CA states.

    data         : array whose first axis indexes time steps; the remaining
                   axes form the cell grid
    neighborhood : list of cell-relative offsets (e.g. from moore/vonNeumann)
    paddingType/paddingValue : forwarded to getNeighborsValue for neighbors
                   that fall outside the grid

    Returns {class-value: {str(neighbor-values): neighbor-values}}, where a
    cell's state at step t is the target class and the values are its
    neighborhood at step t-1; duplicate samples collapse via the str() key.
    """
    problem = {}
    dataSize = np.shape(data)
    stateSize = dataSize[1:]  # shape of one CA state (grid only)
    noStates = dataSize[0]    # number of time steps
    for iState in range(1,noStates):
        # neighborhood values are read from the *previous* state, flattened
        currentState = data[iState-1].reshape(-1)
        for cell in multiDimensionalGenerator(stateSize):
            index = tuple([iState]+cell)
            cls = data[index]  # the cell's next-state value = target class
            if cls not in problem:
                problem[cls] = {}
            neighbors = getNeighbors(cell,neighborhood,stateSize)
            values = getNeighborsValue(currentState,neighbors,paddingType,paddingValue)
            # getNeighborsValue returns None when paddingType == 2 and some
            # neighbor is out of bounds; such samples are skipped entirely
            if values != None:
                values = [values[key] for key in values]
                problem[cls][str(values)] = values
    return problem
#individual format (ant,cons) where:
# ant = [[attrInd,val],...,[attrInd,val]]
# cons = [attrInd,val]
def getNumberOfAttributes(ant):
    """Count the active attributes of an antecedent, i.e. the
    [attrIndex, value] pairs whose value is not the -1 "disabled" marker."""
    return sum(1 for attr in ant if attr[1] != -1)
def antInterestignessDegree(totalInfoGain, noOfAttr, domainCardinality):
    """Interestingness degree of a rule antecedent.

    totalInfoGain     : summed infoGain over the antecedent's attributes
    noOfAttr          : number of active attributes in the antecedent
    domainCardinality : cardinality of the goal attribute's domain

    An empty antecedent is maximally interesting (1); otherwise the average
    gain is normalized by log2 of the goal domain size and inverted.
    """
    if not noOfAttr:
        return 1
    averageGain = totalInfoGain / noOfAttr
    return 1 - averageGain / math.log2(domainCardinality)
def consInterestignessDegree(consAttr, noEvents, beta):
    """Interestingness degree of a rule consequent: rarer goal values score
    higher, via (1 - P(goal = value)) ** (1/beta).

    consAttr : (goal attribute index, value) pair
    noEvents : nested event-count structure with counts under
               noEvents[attr][attr][val][val] and the grand total under
               noEvents["totalNoEvents"]
    beta     : exponent damping factor
    """
    goalAttr, goalVal = consAttr[0], consAttr[1]
    favourable = noEvents[goalAttr][goalAttr][goalVal][goalVal]
    total = noEvents["totalNoEvents"]
    # probability(favourable, total) inlined: it is just the ratio
    return math.pow(1 - favourable / total, 1 / beta)
#returns the infoGain of an antecedent attribute with a given goal attribute
#attAnt = the antecedent attribute (pair of attr index and value)
#attCons = the consequent attribute (pair of attr index and value)
#domain = the domain of the attributes
#noEvents = tha total number of events for the probability calculation
def infoGain(attAnt,attCons,domain,noEvents):
    """Information gain of antecedent attribute `attAnt` with respect to
    goal attribute `attCons`: entropy of the goal minus its conditional
    entropy given the antecedent.  Both terms come from the sibling `info`
    helper; attributes are [index, value] pairs."""
    return info(domain,noEvents,attCons) - info(domain,noEvents,attCons,attAnt)
#returns the entropy of the goal attribute or the entropy ot he goal attribute given antecedent attribute
#domain = the domain of the attributes
#noEvents = tha total number of events for the probability calculation
#attCons = the consequent attribute (pair of attr index and value)
#attAnt = the antecedent attribute (pair of attr index and value)
def info(domain,noEvents,attCons,attAnt = None):
    """Entropy (in bits) of the goal attribute `attCons`, or its conditional
    entropy given antecedent attribute `attAnt` when one is supplied.

    Counts are read from the nested noEvents structure produced by
    calculateNoEvents; probabilities use the global "totalNoEvents" count.
    NOTE(review): the unconditional branch calls log2(pr) without guarding
    pr == 0, and the conditional branch divides by probA — both assume every
    domain value actually occurs in the data; confirm against callers.
    """
    res = 0
    if attAnt == None:
        # Plain entropy: -sum p * log2(p) over the goal attribute's values.
        for val in domain[attCons[0]]:
            noFavEvents = noEvents[attCons[0]][attCons[0]][val][val]
            totalNoEvents = noEvents["totalNoEvents"]
            pr = probability(noFavEvents,totalNoEvents)
            res += (pr*math.log2(pr))
        res = res * -1
    else:
        # Conditional entropy: weight each antecedent value's entropy of the
        # goal by the probability of that antecedent value.
        for val in domain[attAnt[0]]:
            totalNoEvents = noEvents["totalNoEvents"]
            noFavEvents = 0
            # Total occurrences of this antecedent value, summed over all
            # goal attributes/values it was counted against.
            for gAttr in noEvents[attAnt[0]]:
                for gVal in noEvents[attAnt[0]][gAttr]:
                    noFavEvents += noEvents[attAnt[0]][gAttr][gVal][val]
            prAntAtt = probability(noFavEvents,totalNoEvents)
            sumCondInfo = 0
            for cVal in domain[attCons[0]]:
                probCA = probability(noEvents[attAnt[0]][attCons[0]][cVal][val],totalNoEvents)
                probA = probability(noFavEvents,totalNoEvents)
                condProb = probCA / probA
                if (condProb>0):
                    sumCondInfo += (condProb*math.log2(condProb))
            sumCondInfo *= -1
            res += sumCondInfo * prAntAtt
    return res
def probability(noFavEvents,noEvents):
    """Relative frequency noFavEvents / noEvents.

    NOTE(review): raises ZeroDivisionError when noEvents is 0; callers pass
    the "totalNoEvents" figure from calculateNoEvents, which starts at 1.
    """
    return noFavEvents/noEvents
#Calculate the number of events given each possible value of the goal attributes indexes specified
#goalAttributes = an array with the goal attributes
#domain = the domain of the attributes
#dataset = the dataset where the data that will be processed
def calculateNoEvents(goalAttributes,domain,dataset):
    """Count co-occurrences of every (attribute value, goal value) pair.

    Returns a nested dict noEventsC[attr][goal][goalValue][value] -> count,
    plus noEventsC["totalNoEvents"] = product of all dataset dimensions
    except the last (i.e. the number of cells per attribute).
    """
    noEventsC = {}
    # Total number of events = product of all but the trailing (attribute)
    # dimension of the dataset.
    noEvents = 1
    for val in np.shape(dataset)[:-1]:
        noEvents = noEvents*val
    # Pre-fill the counting structure with zeros for every combination.
    for attr in domain:
        noEventsC[attr] = {}
        for g in goalAttributes:
            noEventsC[attr][g] = {}
            for gval in domain[g]:
                noEventsC[attr][g][gval] = {}
                for val in domain[attr]:
                    noEventsC[attr][g][gval][val] = 0
    size = np.shape(dataset)
    # One pass over every cell: the last index component identifies the
    # attribute, and the same row's goal-attribute cell supplies gval.
    for index in multiDimensionalGenerator(size):
        ind = tuple(index)
        val = dataset[ind]
        attr = index[-1]
        for g in goalAttributes:
            ind2 = tuple(index[:-1]+[g])
            gval = dataset[ind2]
            noEventsC[attr][g][gval][val] += 1
    noEventsC["totalNoEvents"] = noEvents
    return noEventsC
#Returns the accuracy of the antecedent with the consequent
#ant = the array of attributes
#cons = the attribute
#dataset = the data that will be processed
def predictionAccuracy(ant,cons,dataset):
    """Per-goal-value accuracy of the rule antecedent over the dataset.

    A row matches when every active gene ([attrIndex, value] with value
    != -1) equals the row's cell.  For each goal value the matching count is
    normalized as (count - 1/2) / totalMatches.
    NOTE(review): the -1/2 term is presumably a continuity-style correction;
    confirm against the paper this GA implements.
    """
    acCount = {}
    aCount = 0
    size = np.shape(dataset)[:-1]
    for index in multiDimensionalGenerator(size):
        ind = tuple(index)
        vAnt = True
        row = dataset[ind]
        # Inactive genes (value -1) always match.
        for att in ant:
            vAnt = vAnt and ((row[att[0]] == att[1]) if att[1] != -1 else True)
        if row[cons[0]] not in acCount:
            acCount[row[cons[0]]] = 0
        if vAnt:
            acCount[row[cons[0]]] += 1
            aCount += 1
    for key in acCount:
        if aCount > 0:
            acCount[key] = (acCount[key] - 1/2)/aCount
    return acCount
def predictionAccuracy2(ant,cons,dataset):
    """Raw accepted/rejected counts per goal value for the rule antecedent.

    Rows whose cells satisfy every active gene of `ant` are counted under
    "accepted", the rest under "rejected", both keyed by the row's value of
    the goal attribute cons[0].  Used as input to f1score.
    """
    acCount = {"accepted":{},"rejected":{}}
    aCount = 0
    size = np.shape(dataset)[:-1]
    for index in multiDimensionalGenerator(size):
        ind = tuple(index)
        vAnt = True
        row = dataset[ind]
        # Inactive genes (value -1) always match.
        for att in ant:
            vAnt = vAnt and ((row[att[0]] == att[1]) if att[1] != -1 else True)
        if row[cons[0]] not in acCount["accepted"]:
            acCount["accepted"][row[cons[0]]] = 0
        if row[cons[0]] not in acCount["rejected"]:
            acCount["rejected"][row[cons[0]]] = 0
        if vAnt:
            acCount["accepted"][row[cons[0]]] += 1
            aCount += 1
        else:
            acCount["rejected"][row[cons[0]]] += 1
    return acCount
def f1score(acc):
    """Per-class F1 score from the accepted/rejected counts produced by
    predictionAccuracy2.

    For each class key present in acc["accepted"]:
      recall    = accepted[k] / (accepted[k] + rejected[k])
      precision = accepted[k] / (total accepted over all classes)
      f1        = 2 * precision * recall / (precision + recall)

    Fixes two defects of the previous version: the harmonic mean was
    missing its factor of 2 (it returned half the F1), and a class with
    zero accepted and zero rejected rows raised ZeroDivisionError.
    """
    accepted = acc["accepted"]
    rejected = acc["rejected"]
    total_accepted = sum(accepted.values())
    f1 = {}
    for key, hits in accepted.items():
        # Rows of this class seen at all (matched or not).
        support = hits + rejected.get(key, 0)
        recall = hits / support if support else 0
        precision = hits / total_accepted if total_accepted else 0
        if precision + recall:
            f1[key] = 2 * precision * recall / (precision + recall)
        else:
            f1[key] = 0
    return f1
#Returns the fitnes of an individual
def gafitness(w1,w2,beta,ant,cons,domain,noEvents,dataset):
    """Fitness of an individual (ant, cons), using predictionAccuracy.

    For every candidate value of the goal attribute the fitness blends
    interestingness (antecedent + consequent, weighted w1) with accuracy
    (weighted w2); the best value is returned and — side effect — written
    into cons[1].
    """
    bestGoalValue = 0
    noAttr = 0
    noAttr = getNumberOfAttributes(ant)
    consInt = {}
    sumInfoGain= {}
    antInt = {}
    acc = predictionAccuracy(ant,cons,dataset)
    for val in domain[cons[0]]:
        consInt[val] = consInterestignessDegree([cons[0],val],noEvents,beta)
        if val not in sumInfoGain:
            sumInfoGain[val] = 0
        # Info gain summed over active genes only.
        for attr in ant:
            if attr[1] != -1:
                sumInfoGain[val] += infoGain(attr,[cons[0],val],domain,noEvents)
        antInt[val] = antInterestignessDegree(sumInfoGain[val],noAttr,len(domain[cons[0]]))
        fit = ((w1*(antInt[val] + consInt[val]) / 2) + (w2 * acc[val])) / (w1 + w2)
        if fit > bestGoalValue:
            bestGoalValue = fit
            cons[1] = val
    return bestGoalValue
#Returns the fitnes of an individual
def gafitness2(w1,w2,beta,ant,cons,domain,noEvents,dataset):
    """Variant of gafitness that scores accuracy with the per-class F1
    (predictionAccuracy2 + f1score) instead of predictionAccuracy.

    Side effect: writes the best-scoring goal value into cons[1].
    """
    bestGoalValue = 0
    noAttr = 0
    noAttr = getNumberOfAttributes(ant)
    consInt = {}
    sumInfoGain= {}
    antInt = {}
    acc = predictionAccuracy2(ant,cons,dataset)
    acc = f1score(acc)
    for val in domain[cons[0]]:
        consInt[val] = consInterestignessDegree([cons[0],val],noEvents,beta)
        if val not in sumInfoGain:
            sumInfoGain[val] = 0
        # Info gain summed over active genes only.
        for attr in ant:
            if attr[1] != -1:
                sumInfoGain[val] += infoGain(attr,[cons[0],val],domain,noEvents)
        antInt[val] = antInterestignessDegree(sumInfoGain[val],noAttr,len(domain[cons[0]]))
        fit = ((w1*(antInt[val] + consInt[val]) / 2) + (w2 * acc[val])) / (w1 + w2)
        if fit > bestGoalValue:
            bestGoalValue = fit
            cons[1] = val
    return bestGoalValue
def initialize(populationSize,antMinSize,antMaxSize,objAttrInd,domain,seed=-1):
    """Build the initial random population of [antecedent, consequent] pairs.

    Each antecedent has one [attrIndex, value] gene per attribute, -1 meaning
    "inactive"; a random subset of genes is activated with random values.
    The consequent is [objAttrInd, random value from that attribute's range].
    A seed of -1 leaves the RNG unseeded.
    """
    if seed != -1:
        random.seed(seed)
    population = []
    for _ in range(populationSize):
        antSize = random.randint(antMinSize, antMaxSize)
        # Start fully inactive, then activate up to antSize random genes
        # (the same gene may be drawn twice, so fewer may end up active).
        genome = [[attrInd, -1] for attrInd in range(len(domain))]
        for _ in range(antSize):
            attr = random.randint(0, len(domain) - 1)
            genome[attr][1] = random.randint(-1, max(domain[attr]))
        goal = random.randint(min(domain[objAttrInd]), max(domain[objAttrInd]))
        population.append([genome, [objAttrInd, goal]])
    return population
def countActiveGenes(ant):
    """Number of active genes (value != -1) in an antecedent.

    Mirrors getNumberOfAttributes; kept separately for the mutation
    operators below.
    """
    return sum(1 for gen in ant if gen[1] != -1)
def insertCondition(ant,antMaxSize,domain):
    """With probability 1 - active/antMaxSize, walk the genome and activate
    inactive genes (drawing a value from the attribute's domain) with
    per-gene probability 0.2, never exceeding antMaxSize active genes.
    Mutates `ant` in place.

    NOTE(review): `prob` is recomputed inside the loop but the outer gate was
    already decided, so that update has no further effect here.
    """
    active = countActiveGenes(ant)
    prob = 1-(active/antMaxSize)
    if random.random() < prob:
        for gen in ant:
            if random.random() < .2 and active < antMaxSize:
                if gen[1] == -1:
                    ind = random.randint(0,len(domain[gen[0]])-1)
                    gen[1] = domain[gen[0]][ind]
                    active += 1
                    prob = 1-(active/antMaxSize)
def removeCondition(ant,antMaxSize,domain):
    """With probability active/antMaxSize, walk the genome and deactivate
    active genes (set value to -1) with per-gene probability 0.2, always
    leaving at least one gene active.  Mutates `ant` in place.

    NOTE(review): as in insertCondition, the in-loop `prob` update has no
    further effect because the outer gate was already decided.
    """
    active = countActiveGenes(ant)
    prob = (active/antMaxSize)
    if random.random() < prob:
        for gen in ant:
            if active > 1:
                if random.random() < .2:
                    if gen[1] != -1:
                        gen[1] = -1
                        active -= 1
                        prob = (active/antMaxSize)
def tournament(fitnessTbl,k):
    """Tournament selection: sample k+1 random entries (with replacement)
    from the fitness table and return the one with the highest fitness
    (index 1 of each entry)."""
    best = None
    for _ in range(k + 1):
        candidate = random.randint(0, len(fitnessTbl) - 1)
        if best is None or fitnessTbl[candidate][1] > fitnessTbl[best][1]:
            best = candidate
    return fitnessTbl[best]
def crossover(parents,population,crossprob):
    """Uniform crossover over consecutive pairs of selected parents.

    `parents` holds fitness-table entries whose element 0 indexes into
    `population`.  For each pair, two children are created: every gene is
    swapped between the parents with probability `crossprob`.  Genes and
    consequents are copied element-wise so the children share no lists with
    their parents.  Returns the list of new [antecedent, consequent] pairs.
    """
    offsprings = []
    for i in range(1,len(parents),2):
        p1 = population[parents[i-1][0]][0]
        p2 = population[parents[i][0]][0]
        # Children start with a copy of their respective parent's consequent.
        child1 = [[],[population[parents[i-1][0]][1][0],population[parents[i-1][0]][1][1]]]
        child2 = [[],[population[parents[i][0]][1][0],population[parents[i][0]][1][1]]]
        for j in range(len(p1)):
            if random.random() < crossprob:
                child1[0].append([p2[j][0],p2[j][1]])
                child2[0].append([p1[j][0],p1[j][1]])
            else:
                child1[0].append([p1[j][0],p1[j][1]])
                child2[0].append([p2[j][0],p2[j][1]])
        offsprings.append(child1)
        offsprings.append(child2)
    return offsprings
def mutate(ant,domain,mutationRate):
    """In place, re-draw each gene's value uniformly from its attribute's
    domain with probability mutationRate."""
    for gene in ant:
        if random.random() <= mutationRate:
            choices = domain[gene[0]]
            gene[1] = choices[random.randint(0, len(choices) - 1)]
def removePopulation(population,fitnessTbl,populationSize):
    """Survivor selection: keep the current best individual (elitism) plus
    up to populationSize tournament winners.

    Side effect: winning entries are removed from `fitnessTbl`.
    NOTE(review): because the elite is prepended before the selection loop,
    up to populationSize + 1 individuals can be returned.
    """
    newPopulation = []
    newPopulation = [population[fitnessTbl[0][0]] ]  # elitism: always keep the best
    for i in range(populationSize):
        elite = tournament(fitnessTbl,2)
        newPopulation.append(population[elite[0]])
        fitnessTbl.remove(elite)
        if len(fitnessTbl) <= 0: break
    return newPopulation
def ganuggets(populationSize,noOffsprings,antMinSize,antMaxSize,beta,w1,w2,mutationRate,crossprob,dataset,domain,goalAttr,noEvents,seed,maxIter = 0):
    """GA-Nuggets main loop: evolve rule individuals grouped per goal value.

    Each generation: group the population by predicted goal value, select
    parents by tournament within each group, create offspring by crossover,
    mutate them, re-evaluate fitness, and shrink each group back with
    removePopulation.  Fitness-table entries are [populationIndex, fitness,
    goalValue].

    Returns (sorted fitnessTbl, population, fitnessHistory) where
    fitnessHistory maps goal value -> best fitness per generation.
    NOTE(review): fitGoalReached is never set to True here, so the loop
    always runs maxIter generations.
    """
    population = initialize(populationSize,antMinSize,antMaxSize,goalAttr,domain,seed)
    fitnessTbl = []
    for i in range(len(population)):
        fit = gafitness(w1,w2,beta,population[i][0],population[i][1],domain,noEvents,dataset)
        fitnessTbl.append([i,fit,population[i][1][1]])
    it = 0
    fitGoalReached = False
    fitnessHistory = {}
    while it < maxIter and not fitGoalReached:
        print(it)
        it += 1
        fitnessTbl = sorted(fitnessTbl,key = lambda x: x[1],reverse = True)
        # Group the (sorted) fitness entries by their goal value.
        groupedFitness = {}
        for fit in fitnessTbl:
            if fit[2] not in groupedFitness:
                groupedFitness[fit[2]] = []
            if fit[2] not in fitnessHistory:
                fitnessHistory[fit[2]] = []
            groupedFitness[fit[2]].append(fit)
        parents = {}
        offsprings = []
        for key in groupedFitness:
            if len(groupedFitness[key]) > 0:
                # Record the group's best fitness for this generation.
                fitnessHistory[key].append(groupedFitness[key][0][1])
                if key not in parents:
                    parents[key] = []
                for i in range(noOffsprings*2):
                    best = tournament(groupedFitness[key],2)
                    parents[key].append(best)
                offsprings += crossover(parents[key],population,crossprob)
        # Mutation plus condition insertion/removal on every child genome.
        for child in offsprings:
            mutate(child[0],domain,mutationRate)
            insertCondition(child[0],antMaxSize,domain)
            removeCondition(child[0],antMaxSize,domain)
        population = population+offsprings
        # Re-evaluate the enlarged population.
        fitnessTbl = []
        for i in range(len(population)):
            fit = gafitness(w1,w2,beta,population[i][0],population[i][1],domain,noEvents,dataset)
            fitnessTbl.append([i,fit,population[i][1][1]])
        fitnessTbl = sorted(fitnessTbl,key = lambda x: x[1],reverse = True)
        groupedFitness = {}
        for fit in fitnessTbl:
            if fit[2] not in groupedFitness:
                groupedFitness[fit[2]] = []
            groupedFitness[fit[2]].append(fit)
        # Shrink each goal-value group back towards populationSize survivors.
        temPop = []
        for key in groupedFitness:
            if len(groupedFitness[key]) > 0:
                temPop += removePopulation(population,groupedFitness[key],populationSize)
        population = temPop
        fitnessTbl = []
        for i in range(len(population)):
            fit = gafitness(w1,w2,beta,population[i][0],population[i][1],domain,noEvents,dataset)
            fitnessTbl.append([i,fit,population[i][1][1]])
    fitnessTbl = sorted(fitnessTbl,key = lambda x: x[1],reverse = True)
    return fitnessTbl,population,fitnessHistory
def populationPostprocessing(population):
    """Group the active genes of every individual by its consequent value.

    Returns {goalValue: [["attrIndex,value"], ...]} where every active gene
    (value != -1) becomes a single-element clause list.
    """
    rules = {}
    for genome, cons in population:
        clauses = rules.setdefault(cons[1], [])
        for attr_index, value in genome:
            if value != -1:
                clauses.append(["{},{}".format(attr_index, value)])
    return rules
def binarizedToDomain(rules,domain):
keys = list(domain.keys())
oRules = {}
for cls in rules:
if cls not in oRules:
oRules[cls] = {}
prop = []
for clause in rules[cls]:
expr = []
for term in clause:
col = 0
ind,val = decodeKey(term)
bottom = 0
for key in keys:
if ind >= bottom and ind < (bottom + len(domain[key])):
col = key
ind -= bottom
break
bottom += len(domain[key])
relational = (">=" if val== 1 else "<")
t = "A[{}] {} {}".format(col,relational,domain[col][ind])
expr.append(t)
prop.append("({})".format(" or ".join(expr)))
oRules[cls] = " and ".join(prop)
return oRules | [
"hector2_10@hotmail.com"
] | hector2_10@hotmail.com |
2eb4ef768fc0f3d2c3c93937b8793072ef95e285 | 3dd9de9b7bd4c555a8426d59edf81ad60c6a6496 | /LuggageTrackerWebApp/migrations/0001_initial.py | 52589f5e690ed0fe4fe3d3a2a0ef11355f964c4b | [] | no_license | kpatel1293/LuggageTracker | 7eddeaf53f88b18d572a53ba23ccf1ef105ccfcd | f72292df41dc4d5b7bf2318a80df8cf0ba040a68 | refs/heads/master | 2023-05-21T12:59:21.503574 | 2021-06-09T00:30:50 | 2021-06-09T00:30:50 | 364,411,565 | 0 | 0 | null | 2021-06-09T00:30:50 | 2021-05-04T23:24:08 | JavaScript | UTF-8 | Python | false | false | 2,444 | py | # Generated by Django 3.2.3 on 2021-05-29 21:05
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial schema for the luggage tracker: Airport and Luggage records
    plus a Blocks table chaining luggage transactions by hash.

    Generated by Django; the choice/default strings (including the
    'Retrived' / 'Awaiting Signture' spellings) are kept exactly as stored
    in the database.
    """

    initial = True
    dependencies = [
    ]
    operations = [
        # Reference data: airports identified by IATA code.
        migrations.CreateModel(
            name='Airport',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.TextField(blank=True)),
                ('municipality', models.CharField(max_length=150)),
                ('iatacode', models.CharField(max_length=10, verbose_name='IATA Code')),
            ],
        ),
        # Tracked luggage items with status/signature workflow fields.
        migrations.CreateModel(
            name='Luggage',
            fields=[
                ('tag_id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('description', models.TextField(blank=True)),
                ('time_stamp', models.TimeField(default=django.utils.timezone.now, verbose_name='Time Last Scanned')),
                ('origin_airport', models.CharField(max_length=150)),
                ('destination_airport', models.CharField(max_length=150)),
                ('status', models.CharField(choices=[('Checked In', 'Checked In'), ('In Transit', 'In Transit'), ('Arrived At Destination', 'Arrived At Destination'), ('Retrived', 'Retrived')], default='Checked In', max_length=30)),
                ('flagged', models.CharField(choices=[('N', 'N'), ('Y', 'Y')], default='N', max_length=1)),
                ('digital_signature', models.CharField(choices=[('Awaiting Signture', 'Awaiting Signture'), ('Missing Luggage', 'Missing Luggage'), ('Delayed', 'Delayed'), ('Approved', 'Approved'), ('Disapproved', 'Disapproved')], default='Awaiting Signture', max_length=50)),
            ],
        ),
        # Blockchain blocks, each tied 1:1 to a Luggage transaction.
        migrations.CreateModel(
            name='Blocks',
            fields=[
                ('index', models.IntegerField(primary_key=True, serialize=False)),
                ('timestamp', models.FloatField()),
                ('prevHash', models.CharField(max_length=150)),
                ('nonce', models.IntegerField(default=0)),
                ('hash_curr', models.CharField(max_length=150)),
                ('transactions', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='LuggageTrackerWebApp.luggage')),
            ],
        ),
    ]
| [
"kpatel1293@gmail.com"
] | kpatel1293@gmail.com |
29ad851548a733c2004a6f60535eca99397759b5 | 64943ee24949f6eb957515778f1b03f7bf17dabd | /rabbitmq/src/config.py | 757a2fb037c4459c92a880a487a92094c9287192 | [
"Apache-2.0"
] | permissive | upworkroozbeh/k8-test-data | d8a1c716fd6cf308805a42c686a205402d679d03 | c877639cee5c5b129a71cb7214564387d8d66406 | refs/heads/master | 2022-12-30T15:28:09.595343 | 2020-10-20T18:32:11 | 2020-10-20T18:32:11 | 305,796,737 | 0 | 0 | Apache-2.0 | 2020-10-20T18:19:19 | 2020-10-20T18:19:18 | null | UTF-8 | Python | false | false | 2,164 | py | import os
def get_envar(k, required=True):
    """Read environment variable `k`.

    Returns its value, or None when it is unset and not required; when a
    required variable is missing or empty, terminate the process with an
    explanatory message.
    """
    value = os.environ.get(k)
    if required and not value:
        exit(f"{k} not supplied")
    return value
class Config(object):
    """Application settings resolved from environment variables at import
    time: MinIO object-store credentials plus RabbitMQ connection details.

    Required variables abort the process via get_envar when missing;
    optional ones fall back to the defaults set below.
    """
    DEBUG = True
    # --- MinIO object store ---
    MINIO_ENDPOINT = get_envar("MINIO_ENDPOINT", required=True)
    MINIO_ACCESS_KEY_ID = get_envar("MINIO_ACCESS_KEY_ID", required=True)
    MINIO_SECRET_ACCESS_KEY = get_envar("MINIO_SECRET_ACCESS_KEY", required=True)
    MINIO_SECURE = get_envar("MINIO_SECURE", required=True)
    # Only the exact string "True" enables secure mode.
    if MINIO_SECURE == "True":
        MINIO_SECURE = True
    else:
        MINIO_SECURE = False
    MINIO_BUCKET = get_envar("MINIO_BUCKET", required=True)
    # --- RabbitMQ connection ---
    MQ_USERNAME = get_envar("MQ_USERNAME", required=True)
    MQ_PASSWORD = get_envar("MQ_PASSWORD", required=True)
    MQ_HOST = get_envar("MQ_HOST", required=True)
    MQ_PORT = get_envar("MQ_PORT", required=True)
    # Kept as strings because they are interpolated into MQ_URL below.
    MQ_CONNECTION_ATTEMPTS = get_envar("MQ_CONNECTION_ATTEMPTS", required=False)
    if MQ_CONNECTION_ATTEMPTS is None:
        MQ_CONNECTION_ATTEMPTS = str(3)
    MQ_HEART_BEAT = get_envar("MQ_HEART_BEAT", required=False)
    if MQ_HEART_BEAT is None:
        MQ_HEART_BEAT = str(600)
    MQ_EXCHANGE = get_envar("MQ_EXCHANGE", required=False)
    if MQ_EXCHANGE is None:
        MQ_EXCHANGE = ''
    MQ_EXCHANGE_TYPE = get_envar("MQ_EXCHANGE_TYPE", required=False)
    MQ_QUEUE = get_envar("MQ_QUEUE", required=True)
    MQ_ROUTING_KEY = get_envar("MQ_ROUTING_KEY", required=True)
    MQ_PROTO = get_envar("MQ_PROTO", required=False)
    if MQ_PROTO is None:
        MQ_PROTO = 'amqp://'
    # Default vhost is "/", URL-encoded.
    MQ_VHOST = get_envar("MQ_VHOST", required=False)
    if MQ_VHOST is None:
        MQ_VHOST = '%2F'
    MQ_PUBLISH_INTERVAL = get_envar("MQ_PUBLISH_INTERVAL", required=False)
    if MQ_PUBLISH_INTERVAL is None:
        MQ_PUBLISH_INTERVAL = 0.1
    # AMQP URL assembled from the pieces above, e.g.
    # amqp://user:pass@host:port/%2F?connection_attempts=3&heartbeat=600
    MQ_URL = (
        MQ_PROTO
        + MQ_USERNAME
        + ":"
        + MQ_PASSWORD
        + "@"
        + MQ_HOST
        + ":"
        + MQ_PORT
        + "/"
        + MQ_VHOST
        + "?connection_attempts="
        + MQ_CONNECTION_ATTEMPTS
        + "&heartbeat="
        + MQ_HEART_BEAT
    )
| [
"samarth@smalldaytech.com"
] | samarth@smalldaytech.com |
269e9e1b86bc64707b0de01397627cdcbd9ee4d4 | cb4ccf95b004425f6ae4957420b1a11a2b1e4357 | /PIC/data_create.py | c2da8c7960cf92496e693a8ec9a9042ff9dd51f4 | [] | no_license | Jaymondyu/J | bda6a35e86f29052d63c4c9e361fcaa013e04ffc | 473b7c16f4b7990e1c82ef64ffd53f066b44b457 | refs/heads/master | 2020-06-03T17:36:14.824264 | 2019-09-25T09:48:52 | 2019-09-25T09:48:52 | 191,665,585 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,393 | py |
import cv2
import os
def generate(dirname):
    """Capture face images from the default webcam and save them to dirname.

    Detected faces are cropped from the grayscale frame, resized to 200x200
    and written as numbered .pgm files until 31 images are collected or the
    user presses 'q'.
    """
    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    # eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
    # Use the tree-based cascade below when the subject wears glasses.
    # (Loaded but unused in the rest of this function.)
    eye_cascade = cv2.CascadeClassifier('haarcascade_eye_tree_eyeglasses.xml')
    # Create the output directory if needed.
    if not os.path.isdir(dirname):
        os.makedirs(dirname)
    # Open the webcam and start collecting face crops.
    camera = cv2.VideoCapture(0)
    count = 0
    while True:
        ret, frame = camera.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.3, 5)
        for (x, y, w, h) in faces:
            # Draw the detection rectangle on the preview frame.
            img = cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
            # Resize the face crop to a fixed 200x200 before saving.
            # (Fix: a stray no-op expression `200 * 200` was removed here.)
            f = cv2.resize(gray[y:y + h, x:x + w], (200, 200))
            cv2.imwrite(dirname + '/%s.pgm' % str(count), f)
            print(count)
            count += 1
        cv2.imshow("camera", frame)
        if cv2.waitKey(100) & 0xff == ord("q"):
            break
        # Stop once enough images have been captured.
        if count > 30:
            break
    camera.release()
    cv2.destroyAllWindows()
if __name__ == "__main__":
    generate("C:/Users/Administrator/Desktop/mygit/J/PIC/MingwangLiu")
    # Destination folder on this machine for the generated face images.
| [
"18875001129@163.com"
] | 18875001129@163.com |
ad2d96ef0ca49def732a77dd51abf52f2273c665 | db00cfab06c47708a792fef92c9878e4e4dc646a | /src/practices/practice/heap/heapq.py | 7018e6b48783d3f76ff9529c6aad9c458607f314 | [
"Apache-2.0"
] | permissive | rahul38888/coding_practice | cf001f5e95f22d56623f453bb6c9ebd0d4c2695b | 8445c379310aa189147c4805c43bed80aa9e9fac | refs/heads/main | 2023-08-16T10:16:45.896939 | 2021-09-30T03:35:17 | 2021-09-30T03:35:17 | 390,193,422 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,163 | py | from heapq import heapify, heappop, heappush
import math
class MinHeap:
    """Thin wrapper around heapq exposing a classic min-heap interface.

    The heap lives in self.heap (a plain list kept in heap order).
    Empty-heap accessors return None instead of raising.
    """
    def __init__(self):
        self.heap = []
    def setHeap(self, heap):
        # Adopt the caller's list and establish the heap invariant.
        # (Fix: the previous version assumed the input was already a valid
        # heap and silently misbehaved otherwise.)
        self.heap = heap
        heapify(self.heap)
    def getParent(self, i):
        """Index of the parent of node i in the implicit binary tree."""
        return math.floor((i - 1) / 2)
    def getLeftChild(self, i):
        return 2 * i + 1
    def getRighChild(self, i):
        # (Method name kept as-is for backward compatibility.)
        return 2 * i + 2
    def getMin(self):
        """Smallest element, or None when the heap is empty."""
        if not self.heap:
            return None
        return self.heap[0]
    def insertKey(self, k):
        heappush(self.heap, k)
    def decreaseKey(self, i, val):
        """Overwrite the element at index i and restore the invariant (O(n))."""
        if not self.heap:
            return None
        self.heap[i] = val
        heapify(self.heap)
    def extractMin(self):
        """Pop and return the smallest element, or None when empty.
        (Fix: the previous version raised IndexError on an empty heap,
        inconsistently with getMin.)"""
        if not self.heap:
            return None
        return heappop(self.heap)
    def deleteKey(self, i):
        """Remove the element at index i by sinking it to -inf and popping."""
        self.decreaseKey(i, -float('inf'))
        self.extractMin()
    def __repr__(self):
        # Show the backing list so ad-hoc prints are informative.
        return "MinHeap({})".format(self.heap)
if __name__ == '__main__':
    # Ad-hoc smoke test; note the list below already satisfies the min-heap
    # invariant, and each print shows the wrapper object's repr.
    a = [1, 3, 6, 5, 9, 8]
    min_heap = MinHeap()
    min_heap.setHeap(a)
    print(str(min_heap))
    min_heap.insertKey(2)
    print(str(min_heap))
    min_heap.decreaseKey(3, 0)
    print(str(min_heap))
    min_heap.extractMin()
    print(str(min_heap))
    min_heap.deleteKey(4)
print(str(min_heap)) | [
"rahul.s@timesinternet.in"
] | rahul.s@timesinternet.in |
ff59967efcded29c65ebd694f14b298509f754d9 | d2f76d0f61a11f34d9fc3f9d3adc659ff5d718cf | /clase1/test_dr_lothar.py | 172894db1e0ac4db5f9ed2e13469bc0ecec922b1 | [
"MIT"
] | permissive | martinadwek/python_course | 2b37c6c6438123a71b3ff7c8e0a59a39dcdbaf26 | a2c29bed2c616e1382dd5768588af1e27312ce52 | refs/heads/main | 2023-01-09T19:49:52.228308 | 2020-11-11T21:52:29 | 2020-11-11T21:52:29 | 301,562,045 | 0 | 0 | MIT | 2020-10-11T20:21:29 | 2020-10-05T23:14:56 | null | UTF-8 | Python | false | false | 744 | py | import unittest
from clase1.dr_lothar import dr_lothar, dr_lothar_rec
class DrLotharTestCase(unittest.TestCase):
    """Unit tests for the iterative dr_lothar implementation."""
    def test_even_number(self):
        self.assertEqual(8, dr_lothar(6))
    def test_odd_number(self):
        self.assertEqual(7, dr_lothar(3))
    def test_leq_zero_number(self):
        # Non-positive input must be rejected.
        with self.assertRaises(ValueError):
            dr_lothar(-1)
class DrLotharRecTestCase(unittest.TestCase):
    """Unit tests for the recursive dr_lothar_rec implementation
    (second argument seeds the accumulator with 0)."""
    def test_even_number(self):
        self.assertEqual(8, dr_lothar_rec(6, 0))
    def test_odd_number(self):
        self.assertEqual(7, dr_lothar_rec(3, 0))
    def test_leq_zero_number(self):
        # Non-positive input must be rejected.
        with self.assertRaises(ValueError):
            dr_lothar_rec(-1, 0)
if __name__ == '__main__':
unittest.main()
| [
"matiasdwek@gmail.com"
] | matiasdwek@gmail.com |
8b3e2dbf5c46821979c1ca48b41a984d33e0cfe9 | c1f00241c37d1dfabdd45ed8f6740f993b5f0e88 | /tools/time_conversion.py | 521501b9dd4ba79157e809626d804b6eff7f4e9e | [] | no_license | gxf8934/mobileManage | 9550c90f216b5b6489aa03b363df436233f2bd60 | 395b1b668969cca1c17dd84bcace1a1b0bbac5cb | refs/heads/master | 2022-09-29T08:01:16.849450 | 2020-06-08T06:59:16 | 2020-06-08T06:59:16 | 265,202,406 | 0 | 1 | null | 2020-06-05T09:00:36 | 2020-05-19T09:22:37 | HTML | UTF-8 | Python | false | false | 1,143 | py | # -*- coding:utf-8 -*-
# author:yangcong
# datetime:2020/5/14 11:19 上午
# software: PyCharm
import time
def timestamp_to_date(time_stamp):
    """Format a Unix timestamp as a local-time 'YYYY-MM-DD HH:MM:SS' string.

    Returns None when time_stamp is None.
    """
    if time_stamp is None:
        return None
    return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time_stamp))
def date_to_timestamp(date:str):
    """Parse a local-time 'YYYY-MM-DD HH:MM:SS' string into a Unix timestamp.

    Returns None when date is None; otherwise a float (time.mktime result).
    """
    if date is None:
        return None
    parsed = time.strptime(str(date), "%Y-%m-%d %H:%M:%S")
    return time.mktime(parsed)
def date_now():
    """Current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    now = time.localtime()
    return time.strftime("%Y-%m-%d %H:%M:%S", now)
if __name__ == '__main__':
    # Manual smoke test of the three helpers above.
    print(time.time())
    t = timestamp_to_date(time.time())
    print(t)
    t1 = date_to_timestamp('2020-05-13 16:51:09')
    print(t1)
    print(time.time())
    print(date_now())
| [
"18335598934@163.com"
] | 18335598934@163.com |
8bcaa7d786ed39bcb15ec478a9664265c2efe24b | 164203a8a6e1dad22906dfef189ed7aacb975ff8 | /RNNBiLSTMCRFATTENTION/filesplit.py | 74930093ced4229f9effc5c916cbbc31fc8b87c9 | [] | no_license | JohnSkyReal/chapter-structure-recognition | f8701cafe27fcf36d0b7466061b74e59f7c9ed50 | 2efcac6c4e3f7a2ccabfbcf0bb87318c62813862 | refs/heads/master | 2021-04-23T00:19:08.146249 | 2020-03-31T07:27:10 | 2020-03-31T07:27:10 | 249,883,391 | 1 | 1 | null | 2020-03-31T07:27:12 | 2020-03-25T04:03:04 | Python | UTF-8 | Python | false | false | 765 | py | import random
# Split CORPUS.txt (sentences separated by blank lines) into 10 random
# folds: for fold i, <i>.txt receives num//folder sentences never used by
# an earlier fold, and <i>_rest.txt receives everything else.
# NOTE(review): `id` shadows the builtin of the same name, and the `trash`
# membership test is O(n) on a list — a set would be faster for big corpora.
fr=open('CORPUS.txt', encoding='utf-8')
sentences=fr.read().strip().split('\n\n')
fr.close()
random.shuffle(sentences)
num=len(sentences)
folder =10
trash=[]
for i in range(folder):
    # Draw unique sentence indices not already consumed by previous folds.
    chosen=set()
    while len(chosen)<num//folder:
        id=random.randint(0, num-1)
        while id in trash:
            id = random.randint(0, num-1)
        chosen.add(id)
    trash.extend(chosen)
    # Fold file: the selected sentences.
    fw = open(str(i)+'.txt','w', encoding='utf-8')
    for id in chosen:
        fw.write(sentences[id])
        fw.write('\n\n')
    fw.close()
    # Complement file: every sentence not in this fold.
    fw = open(str(i) + '_rest.txt', 'w', encoding='utf-8')
    for id in range(num):
        if id not in chosen:
            fw.write(sentences[id])
            fw.write('\n\n')
fw.close() | [
"noreply@github.com"
] | noreply@github.com |
fd156e1a6725e04bb2ae33ba8277a84892e0647c | 113e791a999542241da600a03d3225051c947cdb | /packages/examples/c/lib/shared/wscript | 8cfae402c907ba0cdd8be3c7e49c364ce00f29d1 | [
"MIT"
] | permissive | michelm/beehive | d1d2c310c4f4e55b53598069e4acc233a6a77418 | b1fc63b3c7227112f54f2d6dc2f9c0298e327a32 | refs/heads/master | 2016-08-08T15:06:12.460266 | 2013-10-02T20:18:52 | 2013-10-02T20:18:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 205 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
def build(bld):
    """waf build rule: compile src/*.c into the 'cshlib-example' shared
    library, using ./include locally and exporting it to dependent tasks."""
    bld.shlib(
        target='cshlib-example',
        source=bld.path.ant_glob('src/*.c'),
        includes=['./include'],
        export_includes=['./include']
    )
| [
"michel.mooij@gmail.com"
] | michel.mooij@gmail.com | |
def7d53c45ac5636dbf465c21bd23e68f4c0277e | f2a678afb6152de57635c503ed532a205664b413 | /items/migrations/0001_initial.py | a97ac753a607b444c4082ac3d4068767d265ff07 | [] | no_license | phrac/onepercentgame | 4d4a6247d5f587a65faef8d05a22a2522de3e6f8 | 113d01a3c6641d90f1ce4674ec565ed40ee7c093 | refs/heads/master | 2016-09-16T00:49:36.489216 | 2015-08-30T21:30:51 | 2015-08-30T21:30:51 | 41,644,531 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 565 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial schema: a single Item table with a name and a monetary
    cash_value (up to 9 digits before the decimal point, 2 after)."""

    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Item',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=32)),
                ('cash_value', models.DecimalField(max_digits=11, decimal_places=2)),
            ],
        ),
    ]
| [
"derek@disflux.com"
] | derek@disflux.com |
a87f3be260a2ffc2d5615f2787bf040826b4e15c | e0797607cd1331f651b7bb0d5601786a8fbb0428 | /math/reverse_integer.py | c786aea993476a89d1aac4e70dddd64f85b75d3c | [] | no_license | samshaq19912009/Leetcode_in_Python_my_practise | c5b19babd381a311797e3a822971331e70ad51ae | 38619be61c62b91a227b335f7b5d951bcb0531d9 | refs/heads/master | 2021-01-17T17:19:54.011368 | 2016-07-25T17:43:01 | 2016-07-25T17:43:01 | 61,684,947 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 464 | py | """
Reverse digits of an integer.
Example1: x = 123, return 321
Example2: x = -123, return -321
Pay attention to overflow!!!!!!
"""
class Solution:
    def reverse_int(self, x):
        """Reverse the decimal digits of a 32-bit signed integer.

        Returns 0 when the reversed value overflows the 32-bit range
        (LeetCode "Reverse Integer" contract).
        """
        sign = 1 if x > 0 else -1
        x = abs(x)
        ans = 0
        while x > 0:
            # Peel off the lowest digit with integer arithmetic.
            # (Fix: the original `x = x / 10` is float division under
            # Python 3, which turned `ans` into a corrupted float.)
            x, digit = divmod(x, 10)
            ans = 10 * ans + digit
        if ans > 2147483647:
            return 0
        return ans * sign
| [
"sccnju@gmail.com"
] | sccnju@gmail.com |
c8da836db2a3042a010ac8a9517324364b250c36 | a72f488701be69d46551c2dc0b4382cb2cb9d1a4 | /seiya/analysis/job.py | 97ecd674c57dff27fd7f0b0480d787ffe54ea1f8 | [] | no_license | Pinzi123/info | 90694050bd313a0fdd562d2717fdd5b39cac2b5d | 2f00eefe3b2785999ae9bb46144a1f6c3d546ca1 | refs/heads/master | 2020-04-04T23:51:59.445009 | 2018-11-06T12:52:12 | 2018-11-06T12:52:12 | 156,374,903 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,490 | py | from io import BytesIO
from sqlalchemy import func, Float, select, and_
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from seiya.db import engine, Session, JobModel
def count_top10():
    """Top 10 cities by number of job postings.

    Returns a list of {'city': ..., 'count': ...} dicts, highest count first.
    NOTE(review): the textual order_by('count desc') form is deprecated in
    modern SQLAlchemy — confirm against the pinned version.
    """
    session = Session()
    rows = session.query(
        JobModel.city,
        func.count(JobModel.city).label('count')
    ).group_by(JobModel.city).order_by('count desc').limit(10)
    return [row._asdict() for row in rows]
def salary_top10():
    """Top 10 cities by average salary.

    The salary is the midpoint of the lower/upper bounds, averaged per city
    and cast to Float; rows with non-positive bounds are filtered out.
    Returns a list of {'city': ..., 'salary': ...} dicts, highest first.
    """
    session = Session()
    rows = session.query(
        JobModel.city,
        func.avg(
            (JobModel.salary_lower + JobModel.salary_upper) / 2
        ).cast(Float).label('salary')
    ).filter(
        and_(JobModel.salary_lower > 0, JobModel.salary_upper > 0)
    ).group_by(JobModel.city).order_by('salary desc').limit(10)
    return [row._asdict() for row in rows]
def _hot_tags():
    """Tag frequencies over all job rows, most frequent first.

    Reads the tags column via pandas, splits each space-separated tag string
    into individual tags, drops empty and '""' placeholder entries, and
    returns a pandas Series indexed by tag.
    """
    df = pd.read_sql(select([JobModel.tags]), engine)
    # Split each space-separated tag string into its own Series and stack
    # them all, e.g. "a b c", "d e" -> a, b, c, d, e.
    df = pd.concat([pd.Series(row['tags'].split(' '))
                    for _, row in df.iterrows()]).reset_index()
    del df['index']
    df.columns = ['tag']
    df = df[df['tag'] != '""']
    df = df[df['tag'] != '']
    return df.groupby(['tag']).size().sort_values(ascending=False)
def hot_tags():
    """Tag frequencies as a list of {'tag': ..., 'count': ...} dicts,
    most frequent first (delegates to _hot_tags)."""
    return [{'tag': tag, 'count': count}
            for tag, count in _hot_tags().items()]
def hot_tags_plot(format='png'):
    """Bar chart of the ten most frequent tags, returned as image bytes.

    `format` is passed straight to matplotlib's savefig ('png' by default).
    Fix: the chart is now drawn on a dedicated figure which is closed after
    saving; the previous version drew on the implicit current figure and
    never closed it, so repeated calls accumulated figures (and memory).
    """
    mpl.rcParams['font.sans-serif'] = ['SimHei']
    mpl.rcParams['axes.unicode_minus'] = False
    mpl.rcParams['figure.figsize'] = 10, 5
    s = _hot_tags()
    fig = plt.figure()
    plt.bar(s.index[:10], s.values[:10])
    img = BytesIO()
    plt.savefig(img, format=format)
    plt.close(fig)
    return img.getvalue()
def experience_stat():
    """Posting counts per experience range, rendered as 'lower-upper年'.

    Returns a list of {'experience': ..., 'count': ...} dicts, highest
    count first.
    """
    session = Session()
    rows = session.query(
        func.concat(
            JobModel.experience_lower, '-', JobModel.experience_upper, '年'
        ).label('experience'),
        func.count('experience').label('count')
    ).group_by('experience').order_by('count desc')
    return [row._asdict() for row in rows]
def education_stat():
    """Posting counts per required education level.

    Returns a list of {'education': ..., 'count': ...} dicts, highest
    count first.
    """
    session = Session()
    rows = session.query(
        JobModel.education,
        func.count(JobModel.education).label('count')
    ).group_by('education').order_by('count desc')
    return [row._asdict() for row in rows]
def salary_by_city_and_education():
    """Average salary midpoint grouped by (city, education), cities in
    descending order; rows with non-positive salary bounds are excluded.

    NOTE(review): the averaged column is labelled '' (empty string), so each
    returned dict carries an empty-string key for the salary value —
    confirm whether a real label such as 'salary' was intended.
    """
    session = Session()
    rows = session.query(
        JobModel.city,
        JobModel.education,
        func.avg(
            (JobModel.salary_lower + JobModel.salary_upper) / 2
        ).cast(Float).label('')
    ).filter(
        and_(JobModel.salary_lower > 0, JobModel.salary_upper > 0)
    ).group_by(JobModel.city, JobModel.education).order_by(JobModel.city.desc())
    return [row._asdict() for row in rows]
| [
"995537534@qq.com"
] | 995537534@qq.com |
911dc1a4721c02884df0423246b265a52c5c38e8 | b914ee0f23ddafa487e5cb35c35c3d41517a47a8 | /Ornek2_7.py | 8874dbd3f5b9f090d4b56a7557a26203c85a08b6 | [] | no_license | suacalis/VeriBilimiPython | 0dc45402b09936c82cecca9d6447873d24d1b241 | 85dbfa98ccf1a6a4e8916d134dc1ad41f99535ad | refs/heads/main | 2023-08-24T17:03:48.627831 | 2021-09-28T07:28:21 | 2021-09-28T07:28:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | '''
Örnek 2.7:
Kullanıcı tarafından girilen yarıçap (r) değerine göre bir dairenin çevresini
hesaplayan programı kodlayalım.
'''
import math #needed for math.pi
import easygui #needed for enterbox(), msgbox()
# Ask the user for the circle's radius (enterbox returns a string) and
# compute the circumference 2*pi*r.  The UI strings are intentionally
# left in Turkish.
r = easygui.enterbox("Dairenin yarıçapı.:")
r = float(r) #convert the entered text to a float
Cevre = 2*math.pi*r
easygui.msgbox(msg=Cevre,title="Dairenin Çevresi") | [
"noreply@github.com"
] | noreply@github.com |
313e8b9405659fc705d953a76b6a1501f1f048b7 | d1ec5016fc946a5b9558cb29ff6eb2579ac246bb | /utils.py | bf2c024dd31803c0ee71ecc6d8594a997427654c | [] | no_license | marilynle/Utils | 41f53642b38796902c17ab179e7eee5e3e791f92 | 777e73b3948cd1cb0e103aafa4d3aafc4cd9d2fc | refs/heads/master | 2021-01-03T03:42:19.091807 | 2016-12-09T06:53:05 | 2016-12-09T06:53:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,942 | py | import cv2
import numpy as np
import os
def batch(data, batch_size=32):
    """Draw a random batch (sampled without replacement) from data.

    When data is a list of parallel arrays, the same random indices are
    applied to every array (keeping e.g. samples and labels aligned) and a
    list of subsets is returned; when data is a single numpy array, its
    subset is returned directly.

    Parameters
    ----------
    data : list(numpy.ndarray) or numpy.ndarray
        A list of parallel arrays, or one array of data.
    batch_size : int
        Number of elements to sample.

    Returns
    -------
    list or numpy.ndarray
        The sampled subset(s), selected with shared random indices.

    Raises
    ------
    RuntimeError
        If data is neither a list nor a numpy array.
    """
    if type(data) == list:
        indices = np.random.choice(len(data[0]), batch_size, False)
        return [x[indices] for x in data]
    if type(data) == np.ndarray:
        indices = np.random.choice(len(data), batch_size, False)
        return data[indices]
    raise RuntimeError("Invalid type for data, must be a list or numpy array")
def load_image(path, color=True, resize=False,
               height=100, width=100, maintain_aspect_ratio=True,
               crop=False, padding="White"):
    """Loads a single image
    If resize and crop, it will be cropped
    If resize and not crop and maintain_aspect_ratio it will be padded
    If resize and not maintain_aspect_ratio it will be squashed/stretched
    Parameters
    ----------
    path : str
        The full or relative path of the image
    color : bool
        True for loading the image in color, or False for grayscale
    resize : bool
        True to resize the images when loading them
    height : int
        The height to load the picture at (only if resize == True)
    width : int
        The width to load the picture at (only if resize == True)
    maintain_aspect_ratio : bool
        True to maintain aspect ratio (will be padded) (only if resize == True)
    crop : bool
        If True, it will fit the shortest side and crop the rest (only if resize == True)
    padding : str
        "White" or "Black"
    Returns
    -------
    numpy.ndarray
        The image in a numpy array
    """
    # cv2 flag: True -> 1 (IMREAD_COLOR), False -> 0 (IMREAD_GRAYSCALE).
    img = cv2.imread(path, color)
    if resize:
        if maintain_aspect_ratio:
            if crop:
                # NOTE(review): both ratios are divided by `height`; the second
                # divisor looks like it should be `width` — confirm intended fit.
                if img.shape[0] / float(height) < img.shape[1] / float(height):
                    img = resize_image(img, height=height)
                else:
                    img = resize_image(img, width=width)
            else:
                # Fit the longer side so the shorter side is padded afterwards.
                if img.shape[0] / float(height) < img.shape[1] / float(width):
                    img = resize_image(img, width=width)
                else:
                    img = resize_image(img, height=height)
        else:
            # Stretch/squash to the exact target size.
            img = resize_image(img, height=height, width=width)
        # Center on a height x width canvas, cropping or padding the overshoot.
        img = crop_or_pad(img, height, width, padding)
    return img
def save_image(img, path):
    """Saves a single image
    Parameters
    ----------
    img : numpy.ndarray
        The image to save
    path : str
        The path and name to save the image to
    Returns
    -------
    None
    """
    # OpenCV infers the output format from the file extension in `path`.
    cv2.imwrite(path, img)
def load_image_names(path):
    """Return the sorted file names of all images in a directory.

    Only regular files with a .jpg, .jpeg, .png or .tiff extension
    (case-insensitive) are included.

    Parameters
    ----------
    path : str
        The full or relative path of the directory

    Returns
    -------
    list(str)
        A sorted list of matching file names.
    """
    valid_ext = ('.jpg', '.jpeg', '.png', '.tiff')
    names = []
    for entry in os.listdir(path):
        full = os.path.join(path, entry)
        if os.path.isfile(full) and entry.lower().endswith(valid_ext):
            names.append(entry)
    names.sort()
    return names
def load_images_in_directory(path, color=True,
                             height=100, width=100, maintain_aspect_ratio=True,
                             crop=False, padding="White"):
    """Loads all images in a directory
    Will look for all .jpeg, .jpg, .png, and .tiff files
    Parameters
    ----------
    path : str
        The full or relative path of the image
    color : bool
        True for loading the image in color, or False for grayscale
    height : int
        The height to load the picture at
    width : int
        The width to load the picture at
    maintain_aspect_ratio : bool
        True to maintain aspect ratio (will be padded)
    crop : bool
        If True, it will fit the shortest side and crop the rest
    padding : str
        "White" or "Black"
    Returns
    -------
    numpy.ndarray
        An array of all the images of shape [num_images, height, width, channels]
    """
    img_names = load_image_names(path)
    # Preallocate the batch array so every image is forced to the same
    # size and dtype (uint8) by load_image(resize=True).
    result = np.empty([len(img_names), height, width, 3 if color else 1], dtype=np.uint8)
    for i, x in enumerate(img_names):
        result[i] = load_image(os.path.join(path, x), color=color,
                               resize=True, height=height, width=width,
                               maintain_aspect_ratio=maintain_aspect_ratio, crop=crop,
                               padding=padding)
    return result
def resize_image(img, height=0, width=0):
    """Resize an image to a desired height and width
    Parameters
    ----------
    img : numpy.ndarray
        The image to resize
    height : int
        The max height you want the image to be. If 0, it is calculated from Width
    width : int
        The max width you want the image to be. If 0, it is calculated from Height
    Returns
    -------
    numpy.ndarray
        The resized image
    """
    # If both are zero, we don't know what to resize it to!
    if (height == 0 and width == 0):
        raise ValueError("Height and Width can't both be 0!")
    elif (height < 0 or width < 0):
        raise ValueError("Height or Width can't be below 0")
    elif (height == 0):
        # We need to calculate the scale from the width
        scale = float(width) / img.shape[1]
    elif (width == 0):
        # we need to calculate the scale from the height
        scale = float(height) / img.shape[0]
    else:
        # In this case, the image will not maintain aspect ratio
        # NOTE(review): the interpolation choice below only looks at the
        # height comparison; width is ignored for that decision — confirm.
        if img.shape[0] > height:
            return cv2.resize(img, (width, height), interpolation=cv2.INTER_AREA)
        else:
            return cv2.resize(img, (width, height), interpolation=cv2.INTER_CUBIC)
    # If the scale factor is larger:
    # INTER_CUBIC when enlarging, INTER_AREA when shrinking.
    if scale > 1:
        return cv2.resize(img, None, fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
    else:
        return cv2.resize(img, None, fx=scale, fy=scale, interpolation=cv2.INTER_AREA)
def crop_or_pad(img, height, width, padding="White"):
    """Center `img` on a height x width canvas, cropping or padding as needed.

    Parameters
    ----------
    img : numpy.ndarray
        Image of shape (h, w, channels); channels is 3 (color) or anything
        else (treated as 1-channel grayscale), matching the original logic.
    height : int
        The desired height of the returned image
    width : int
        The desired width of the returned image
    padding : str
        The fill color, "White" or "Black"

    Returns
    -------
    numpy.ndarray
        uint8 array of shape (height, width, channels)

    Raises
    ------
    ValueError
        If `padding` is neither "White" nor "Black".
    """
    # Offsets of the image inside the canvas; negative means cropping.
    start_y = int(round((height - img.shape[0]) / 2.0))
    start_x = int(round((width - img.shape[1]) / 2.0))
    # Region of the source image that will be copied.
    img_start_y = 0
    img_start_x = 0
    img_end_y = img.shape[0]
    img_end_x = img.shape[1]
    if start_y < 0:
        # Image taller than the canvas: crop vertically around the center.
        img_start_y -= start_y
        img_end_y = img_start_y + height
        start_y = 0
        end_y = height
    else:
        end_y = start_y + img.shape[0]
    if start_x < 0:
        # Image wider than the canvas: crop horizontally around the center.
        img_start_x -= start_x
        img_end_x = img_start_x + width
        start_x = 0
        end_x = width
    else:
        end_x = start_x + img.shape[1]
    # Validate padding and build the canvas once (the original duplicated
    # this per channel count and misspelled "padding" in one error message).
    if padding == "White":
        fill = 255
    elif padding == "Black":
        fill = 0
    else:
        raise ValueError("Unknown parameter for padding, " + padding + ". Must be Black or White")
    channels = 3 if img.shape[2] == 3 else 1
    array = np.full((height, width, channels), fill, dtype=np.uint8)
    # Insert the (possibly cropped) image into the canvas.
    array[start_y:end_y, start_x:end_x] = img[img_start_y:img_end_y, img_start_x:img_end_x]
    return array
def display_images(imgs):
    """Show every image in `imgs` in its own window, one at a time.

    BUG FIX: the original iterated over the undefined name `img`
    (the parameter is `imgs`), raising NameError on every call.

    Parameters
    ----------
    imgs : iterable of numpy.ndarray
        Images, e.g. as returned by load_images_in_directory.

    Returns
    -------
    None
    """
    for i, x in enumerate(imgs):
        cv2.imshow("img" + str(i), x)
        # Block until a key is pressed, then close before the next image.
        cv2.waitKey()
        cv2.destroyAllWindows()
"joshua.greaves@googlemail.com"
] | joshua.greaves@googlemail.com |
fb4f7bf460cee7a3d927c2ecf3c4e57ffb78b877 | 981e949fd9bed8fff1c1a2e9965dfe2fad98735d | /food and entertainment/IndustriesDemand.py | 7b6255228ce866f885fd3aee54dcb746d6ebe162 | [] | no_license | ErikaNicole/Covid19-Environmental-and-Economic-Analysis | 41d25a5ae66138e949c8cbfa9bc09b7c29308969 | 3a83a7b151025f7e9b63c223a8292cda86a5bfaf | refs/heads/master | 2022-12-08T15:51:18.517674 | 2020-09-18T20:45:07 | 2020-09-18T20:45:07 | 295,363,681 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,036 | py | # Using Online Job Adverts as indicators of Industry's Status
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as md
import seaborn as sns
import datetime
sns.set_style('whitegrid')
# - - - - - - - - - - - - - - - - - - - - - - -
# 1. Collect OnlineJobAdverts Postings
df = pd.read_excel(r'data/OnlineJobAdverts.xlsx')
# 2. Clean for use
# Focus on Catering and Hospitality
# NOTE(review): rows 12-13 are assumed to hold the Catering & Hospitality
# series and row 1 the month labels — confirm against the spreadsheet layout.
Catering_Hospitality = df[12:14]
Months = df[1:2].to_numpy()
Fig = plt.figure()
plt.title("2019 to 2020 Catering and Hospitality Job Adverts Postings")
# Format the x axis as dates (YYYY-MM-DD).
x = md.DateFormatter('%Y-%m-%d')
ax = plt.gca()
ax.xaxis.set_major_formatter(x)
# Columns start at index 2; 2020 data only runs to column 38 (partial year).
plt.plot(Months[0, 2:], Catering_Hospitality.to_numpy()[0,2:]) # 2019
plt.plot(Months[0, 2:38], Catering_Hospitality.to_numpy()[1,2:38]) # 2020
# - - - - - - - - - - - - - - - - - - - - - - -
# 2. Online Food Delivery Market
# https://www.statista.com/outlook/374/100/online-food-delivery/worldwide#market-users
| [
"63018077+ErikaNicole@users.noreply.github.com"
] | 63018077+ErikaNicole@users.noreply.github.com |
a1a6c37bea117ebdfa4eb7defc5ef80e649c0bad | fe948e8b11ab1b8a26e6c8c4b4973c97f747691c | /letor1.py | 7cdbdad514dcb2b5c48589d7ba7b98472caf8100 | [] | no_license | khansamad47/LETOR | 582898933ddc4f793fa7613150f605b6fab037ac | 47e28eb0c8b1a12d54568d6747b11b6a8d8f9c33 | refs/heads/master | 2020-12-24T11:38:09.029600 | 2016-11-06T21:28:31 | 2016-11-06T21:28:31 | 73,020,362 | 0 | 0 | null | 2016-11-06T21:32:13 | 2016-11-06T21:32:11 | null | UTF-8 | Python | false | false | 3,711 | py | import numpy as np
import pandas as pd
import logging
import os
import mimetypes
import re
#File logging
# Root logger at DEBUG, mirrored to 'log_LETOR.txt' and to the console,
# both using the same timestamped format.
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
filename='LETOR'
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
fh = logging.FileHandler('log_'+filename+'.txt')
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(ch)
class Data_Reader(object):
    __doc__="""
    Data_Reader
    Class for reading a LETOR "Querylevelnorm" style text file into a
    pandas DataFrame (one row per query/document pair: relevance label,
    query id and 46 features).
    Notes
    -----
    **Attributes**
    filename : str
        The name of the input file containing the data to be read.
        Default is "Querylevelnorm.txt"
    input_filepath : str
        The path of the folder where the above input file is to be read from.
        Default is current working directory
    df_out : pandas.DataFrame
        Parsed contents, populated by file_parser() during __init__.
    """
    __slots__=('_filename','_input_filepath','df_out')
    #Initializing Data_Reader Class
    def __init__(self,**kwargs):
        self._filename=kwargs.pop('filename',"Querylevelnorm.txt")
        self._input_filepath=kwargs.pop('input_filepath',os.getcwd())
        self.df_out=self.file_parser()
        if kwargs:
            raise TypeError("Unexpected **kwargs: %r" % kwargs)
    #Setting property attributes for class instance variables
    @property
    def filename(self):
        return self._filename
    @filename.setter
    def filename(self,filename):
        # BUG FIX: this setter referenced the non-existent attribute
        # `self.filepath` (an AttributeError under __slots__ whenever the
        # setter ran); the public attribute is `input_filepath`.
        if not os.path.isfile(os.path.join(self.input_filepath,filename)):
            logging.debug("%s does not exist in this folder: %s!" %(filename,self.input_filepath))
        # NOTE(review): this logs "not xlsx" for every non-text/plain file as
        # well (the else branch) — the branch logic looks inverted; confirm.
        if mimetypes.guess_type(os.path.join(self.input_filepath,filename))[0] == 'text/plain':
            logging.debug("%s file does not appear to be in xlsx format" %filename)
        else:
            fileloc=self.input_filepath+"\\"+filename
            logging.debug("%s file does not appear to be in xlsx format" %fileloc)
        self._filename=filename
    @filename.deleter
    def filename(self):
        del self._filename
    @property
    def input_filepath(self):
        return self._input_filepath
    @input_filepath.setter
    def input_filepath(self,value):
        if type(value)!=str:
            logging.debug("Please set the attribute in str form!")
        self._input_filepath=value
    @input_filepath.deleter
    def input_filepath(self):
        del self._input_filepath
    def file_parser(self):
        """Parse the LETOR text file into a DataFrame.

        Each line looks like "2 qid:10 1:0.03 ... 46:0.1 #docid = ...";
        the regex grabs the leading relevance label plus every value that
        follows a ':' (the qid and the 46 features).
        """
        # NOTE(review): the file is opened by name only — input_filepath is
        # not joined in here, so parsing relies on the CWD; confirm intended.
        input_file=self.filename
        # NOTE(review): the lookahead (?=\s+) needs whitespace after each
        # value, so a line with no trailing whitespace would drop its last
        # feature — confirm the data always has a trailing space/comment.
        pattern = r'(^\d+)|((?<=:)([\d.]+)(?=\s+))'
        feature_cols=["feature_"+str(x) for x in range(1,47)]
        col_names=["relevance_lbl","qid"]+feature_cols
        # First pass just counts lines so the DataFrame can be preallocated.
        with open(input_file) as f:
            numlines = len(f.readlines())
            f.close()
        df=pd.DataFrame(index=np.arange(numlines),columns=col_names)
        with open(input_file,'r') as rdr:
            for line_number, line in enumerate(rdr):
                init_list = [match.group(0) for match in re.finditer(pattern,line)]
                df.loc[line_number]=init_list
        return df
| [
"amandp0@gmail.com"
] | amandp0@gmail.com |
9e1b9a64af56a30e72495aa962585606ff16f5c4 | 58ee73dcb5d50e1aaf3d52cf05547c95725d3d93 | /proj/urls.py | d213d5b5a91242764f6a7a9fb3d20d667c9470c2 | [] | no_license | Zanudaaa/kolkhoz | d3ae28c79db9dfce6c920b1d700d3a8eba9813e3 | c3a5f7adbc5112be4033ddbdcfbd2f5655a1c7e4 | refs/heads/master | 2020-09-12T06:41:26.549041 | 2019-11-18T19:00:18 | 2019-11-18T19:00:18 | 222,256,999 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,039 | py | """proj URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls import url, include
from accounts.views import (login_view, register_view, logout_view)
# Route table: admin site, the main app's URLs, and session auth endpoints.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('main.urls')),
    path('login/', login_view, name='login'),
    path('logout/', logout_view, name='logout'),
    path('reg/', register_view, name='register')
]
| [
"Zanudaaa099@gmail.com"
] | Zanudaaa099@gmail.com |
480e50e61a4de904119a1f25a2d1f7019506f66c | a05ce7d365917c2ae6430e522381792a011d5979 | /atomtoolbox/gap.py | 3041ff805d1ffc5c30b69e00b8a5786062da505f | [
"MIT"
] | permissive | eschmidt42/AtomToolBox | 9d41770e9ebf545ac6cefce5c81226c14ee024c2 | 25b80675dcaa332fc0c0d702b60ea61dfe2501a0 | refs/heads/master | 2021-09-14T21:45:57.408383 | 2018-05-20T09:15:26 | 2018-05-20T09:15:26 | 113,569,858 | 1 | 0 | null | 2018-02-10T14:37:36 | 2017-12-08T12:00:18 | null | UTF-8 | Python | false | false | 4,937 | py | import sklearn
from sklearn.gaussian_process import kernels
import numpy as np
def get_GAP_matrix_v1(Phi, Phi_list, t, idx, L, beta=1., kernel=None):
    """Solve for the GAP weight vector via the sparse-projection formulation.

    BUG FIX: the original ignored its `t`, `beta` and `kernel` arguments —
    it read an undefined global `t_e` (NameError on every call), reset
    `beta` to 1 and unconditionally rebuilt an RBF kernel. Leftover debug
    prints of intermediate shapes were removed.

    Parameters
    ----------
    Phi : numpy.ndarray
        Per-atom feature matrix (rows = atoms).
    Phi_list : list(numpy.ndarray)
        Unused here; kept for interface parity with get_GAP_matrix_v2.
    t : numpy.ndarray
        Target values (e.g. energies), one per configuration.
    idx : numpy.ndarray
        Row indices of Phi selected as inducing ("sparse") points.
    L : numpy.ndarray
        Atom-to-configuration summation matrix.
    beta : float
        Noise precision.
    kernel : callable or None
        Kernel k(X, Y=...) -> Gram matrix; defaults to an RBF kernel.

    Returns
    -------
    numpy.ndarray
        Weight vector, one entry per inducing point.
    """
    if kernel is None:
        kernel = kernels.RBF(length_scale=1.)
    y = np.copy(t).ravel()
    X_S = Phi[idx, :]
    C_NpNp = kernel(Phi)
    C_NpS = kernel(Phi, Y=X_S)
    C_SNp = C_NpS.T
    C_SS = kernel(X_S, Y=X_S)
    C_SS_inv = np.linalg.inv(C_SS)
    # Diagonal correction term of the projected-process approximation.
    Lambda = np.diag(np.diag(L.dot(C_NpNp.dot(L.T)) - L.dot(C_NpS.dot(C_SS_inv.dot(C_SNp.dot(L.T))))))
    Q0 = np.linalg.inv(Lambda + 1./beta * np.eye(Lambda.shape[0]))
    Q1 = np.linalg.inv(C_SS + C_SNp.dot(L.T).dot(Q0.dot(L.dot(C_NpS))))
    Q2 = C_SNp.dot(L.T.dot(Q0.dot(y)))
    return Q1.dot(Q2)
def get_kvec_v1(single_Phi, idx, Phi, kernel=None):
    """Kernel vector between one atom's features and the inducing rows of Phi."""
    inducing = Phi[idx, :]
    return kernel(single_Phi, inducing)
def get_GAP_matrix_v2(Phi, Phi_list, t, kernel=None,
                      sigma_E=.001, sigma_W=1.):
    # Bartok et al. 2015: each configuration's covariance is the sum of the
    # pairwise kernel values over all of its atoms.
    n_cfg = len(Phi_list)
    C_SS = np.empty((n_cfg, n_cfg))
    for i, cfg_i in enumerate(Phi_list):
        for j, cfg_j in enumerate(Phi_list):
            C_SS[i, j] = kernel(cfg_j, Y=cfg_i).sum()
    C_SS = sigma_W**2 * C_SS + sigma_E*np.eye(n_cfg)
    C_ST = np.copy(C_SS)
    C_TS = C_ST.T
    # Per-configuration noise grows with the number of atoms.
    Lambda_TT = np.diag([len(cfg) * sigma_E for cfg in Phi_list])
    Lambda_TT_inv = np.linalg.inv(Lambda_TT)
    lhs = np.linalg.inv(C_SS + C_ST.dot(Lambda_TT_inv.dot(C_TS)))
    rhs = C_ST.dot(Lambda_TT_inv.dot(t))
    return lhs.dot(rhs)
def get_kvec_v2(single_Phi, Phi_list, kernel=None):
    """Summed kernel between one configuration and each reference configuration."""
    sums = []
    for ref in Phi_list:
        sums.append(kernel(single_Phi, Y=ref).sum())
    return np.array(sums)
def get_atom_contribution_info(Phi, Phi_list, decimals=5):
    """Map atoms to configurations via rounded, de-duplicated feature rows.

    BUG FIX: the original immediately shadowed the `decimals` parameter
    with a hard-coded 5, so callers could never change the rounding.

    Parameters
    ----------
    Phi : numpy.ndarray
        Stacked per-atom features (all configurations).
    Phi_list : list(numpy.ndarray)
        The same features, one array per configuration.
    decimals : int
        Rounding precision used before string comparison of rows.

    Returns
    -------
    (L, unique, idx, idx_inv, unique_idx_map)
        L : (num_configs, num_atoms) 0/1 membership matrix,
        unique : unique rounded rows (as strings),
        idx / idx_inv : np.unique index and inverse arrays,
        unique_idx_map : row-tuple -> first-occurrence index.
    """
    mod = lambda x: np.around(x, decimals=decimals)
    # Rounded string rows make float rows comparable/hashable.
    rPhi = mod(Phi).astype("str")
    rPhi_list = [mod(v).astype("str") for v in Phi_list]
    unique, idx, idx_inv = np.unique(rPhi, axis=0, return_inverse=True, return_index=True)
    unique_idx_map = {tuple(unique[v, :]): idx[v] for v in range(unique.shape[0])}
    L = np.zeros((len(Phi_list), idx_inv.shape[0]))
    for i in range(L.shape[0]):
        # Columns of L flagged 1 are the atoms belonging to configuration i.
        _ix = np.array([unique_idx_map[tuple(v)] for v in rPhi_list[i]])
        L[i, _ix] = 1.
    return L, unique, idx, idx_inv, unique_idx_map
class GAPRegressor(sklearn.base.BaseEstimator, sklearn.base.TransformerMixin):
    """Gaussian Approximation Potential regressor (kinds "v1" and "v2").

    NOTE(review): these class-level attributes act as shared defaults until
    fit() shadows them with instance attributes; fit() also creates
    attributes (L, unique, ...) outside __init__, which breaks sklearn's
    clone/get_params conventions — confirm before relying on sklearn tooling.
    """
    Q = None
    X = None
    X_list = None
    kernel = None
    kernel_spec = None
    # Names accepted as kernel[0] in the (name, kwargs) kernel spec.
    implemented_kernel = set(["rbf",])
    def __init__(self, beta=1., sigma_E=0.001, sigma_W=1., kernel=None,
                 kind="v1", decimals=5):
        assert kind in ["v1","v2"], "Unknown 'kind' value (%s)!" %kind
        self.kind = kind
        self.beta = beta
        self.sigma_E = sigma_E
        self.sigma_W = sigma_W
        # kernel is a (name, kwargs) pair, materialized lazily in fit().
        self.kernel = kernel
        self.decimals = decimals
    def _initialize_kernel(self):
        # Turn the (name, kwargs) spec into an sklearn kernel object self.k.
        assert not self.kernel is None, "'kernel' needs to be provided!"
        assert isinstance(self.kernel, (tuple,list)) and len(self.kernel)==2, "Kernel needs to be provided as a list or tuple of two elements!"
        assert self.kernel[0] in self.implemented_kernel, "Specified kernel is none of the implemented kernels (%s)" %self.implemented_kernel
        if self.kernel[0] == "rbf":
            self.k = kernels.RBF(**self.kernel[1])
        else:
            raise NotImplementedError
    def fit(self,X_list,y):
        """Fit the weight vector Q from per-configuration features and targets y."""
        self._initialize_kernel()
        assert isinstance(X_list, list) and all([isinstance(v,np.ndarray) for v in X_list]),\
            "'X_list' needs to be provided and be be a list of np.ndarrays with a constant number of columns."
        self.X_list = X_list
        self.X = np.vstack(X_list)
        if self.kind == "v1":
            # v1 needs the atom->configuration membership matrix L.
            self.L, self.unique, self.idx, self.idx_inv, self.unique_idx_map = get_atom_contribution_info(self.X, self.X_list, decimals=self.decimals)
            self.Q = get_GAP_matrix_v1(self.X, self.X_list, y, self.idx, self.L,
                                       beta=self.beta, kernel=self.k)
        elif self.kind == "v2":
            self.Q = get_GAP_matrix_v2(self.X, self.X_list, y,
                                       sigma_W=self.sigma_W, sigma_E=self.sigma_E,
                                       kernel=self.k)
    def predict(self,X,y=None):
        """Predict for one configuration (ndarray) or several (list of ndarrays)."""
        if self.kind == "v1":
            k = get_kvec_v1(X, self.idx, self.X, kernel=self.k)
            return k.dot(self.Q).sum()
        elif self.kind == "v2":
            if isinstance(X,np.ndarray):
                k = get_kvec_v2(X, self.X_list, kernel=self.k)
                return k.dot(self.Q)
            elif isinstance(X,list):
                ks = [get_kvec_v2(_X, self.X_list, kernel=self.k) for _X in X]
                return np.array([k.dot(self.Q) for k in ks])
"11818904+eschmidt42@users.noreply.github.com"
] | 11818904+eschmidt42@users.noreply.github.com |
c5982e7ba0e42bb7a2188f72b2377db8bf9a6e8b | 339f3b1e0949fcccdd178e1ebfc0ac4ea6828de6 | /user.py | 672f0b9db55df5325afc1a37d1c3d7b9b8e52089 | [] | no_license | eduincarreno/Ejercicios_Python | bdb3df0298cc579ab581e35215b6e5ee79958f77 | 189d95b283579eb13b712a81b1299de062b498b9 | refs/heads/main | 2023-01-01T21:18:46.785448 | 2020-10-26T03:31:55 | 2020-10-26T03:31:55 | 307,252,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 471 | py |
def validar_usuario(nombre_usuario):
    """Validate a username: 6-12 characters, alphanumeric only.

    Prints a Spanish message for each rule that fails.
    Returns True when valid, False otherwise. (The original implicitly
    returned None on failure; False is equally falsy for callers, so this
    is backward-compatible.)
    """
    longitud = len(nombre_usuario)
    # NOTE(review): isalnum() also accepts all-letter or all-digit strings,
    # so "must contain letters AND numbers" is not actually enforced.
    nameuser = nombre_usuario.isalnum()
    if not nameuser:
        print("El usuario debe tener letras y números")
    if longitud < 6:
        print("El usuario debe contener mínimo 6 caracteres")
    if longitud > 12:
        print("El usuario debe contener máximo 12 caracteres")
    if 6 <= longitud <= 12 and nameuser:
        return True
    return False
| [
"noreply@github.com"
] | noreply@github.com |
375e958549538b806a2321d2c1da2b6620981bcf | ca0068d83ebeef49d24e5480b3e74012bbbf44a0 | /tests/test_security_api_key_cookie_description.py | 2cd3565b43ad688307d6f27ce8b4edd84d58cb60 | [
"MIT"
] | permissive | chbndrhnns/fastapi | e66d9485ed59b6018b10a01c1c2832419e91fac8 | b83709add4c5e0cc3dbcb5ecbbd019767c408abd | refs/heads/master | 2023-03-03T23:31:37.436961 | 2022-09-01T09:59:45 | 2022-09-01T09:59:45 | 245,459,912 | 0 | 0 | MIT | 2023-02-24T19:03:36 | 2020-03-06T15:53:14 | Python | UTF-8 | Python | false | false | 1,973 | py | from fastapi import Depends, FastAPI, Security
from fastapi.security import APIKeyCookie
from fastapi.testclient import TestClient
from pydantic import BaseModel
app = FastAPI()
api_key = APIKeyCookie(name="key", description="An API Cookie Key")
class User(BaseModel):
    # Minimal response model: only the username is echoed back.
    username: str
# Dependency: reads the "key" cookie via APIKeyCookie and wraps it in a User.
def get_current_user(oauth_header: str = Security(api_key)):
    user = User(username=oauth_header)
    return user
# Route under test: returns the user derived from the cookie value.
@app.get("/users/me")
def read_current_user(current_user: User = Depends(get_current_user)):
    return current_user
client = TestClient(app)
openapi_schema = {
"openapi": "3.0.2",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/users/me": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
}
},
"summary": "Read Current User",
"operationId": "read_current_user_users_me_get",
"security": [{"APIKeyCookie": []}],
}
}
},
"components": {
"securitySchemes": {
"APIKeyCookie": {
"type": "apiKey",
"name": "key",
"in": "cookie",
"description": "An API Cookie Key",
}
}
},
}
def test_openapi_schema():
    # The generated schema must expose the cookie scheme with its description.
    response = client.get("/openapi.json")
    assert response.status_code == 200, response.text
    assert response.json() == openapi_schema
def test_security_api_key():
    # A request with the "key" cookie is authenticated; its value is the username.
    response = client.get("/users/me", cookies={"key": "secret"})
    assert response.status_code == 200, response.text
    assert response.json() == {"username": "secret"}
def test_security_api_key_no_key():
    # Missing cookie -> 403 from the security dependency.
    response = client.get("/users/me")
    assert response.status_code == 403, response.text
    assert response.json() == {"detail": "Not authenticated"}
| [
"noreply@github.com"
] | noreply@github.com |
d3bef1bd1a41795e43168785fee2248e21cf3b84 | 54869fe38624f1c4338f8dc5dd5f0d89aa17c9e4 | /p12.py | 2ba9841678ede63d750d74eebd96ce2c57a59db1 | [] | no_license | qoqosz/Advent-of-Code-2020 | d3b3512eb3b59b7404189ad094a2cc3b8ddb07be | e27928482e8fc9f30aea3fed21e11f8d8743431d | refs/heads/master | 2023-02-05T22:17:45.767109 | 2020-12-24T21:20:57 | 2020-12-24T21:21:06 | 318,346,507 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,941 | py | # Part 1
with open('p12.txt') as f:
    steps = [x.strip() for x in f]
# Compass headings keyed by angle (degrees counter-clockwise from East)
# and back, plus unit vectors for each heading.
direction = {0: 'E', 90: 'N', 180: 'W', 270: 'S'}
direction_inv = {v: k for k, v in direction.items()}
coords = {'E': [1, 0], 'N': [0, 1], 'W': [-1, 0], 'S': [0, -1]}
def turn(orientation, instruction):
    """Rotate the ship heading by an 'L<deg>'/'R<deg>' instruction; return the new compass letter."""
    degrees = int(instruction[1:])
    if instruction[0] == 'L':
        delta = degrees
    else:
        delta = -degrees
    return direction[(direction_inv[orientation] + delta) % 360]
def move(pos, action):
    """Advance pos by an N/S/E/W/F action; 'F' uses the module-level orientation."""
    heading = action[0]
    if heading == 'F':
        heading = orientation
    dx, dy = coords[heading]
    amount = int(action[1:])
    return [pos[0] + amount * dx, pos[1] + amount * dy]
# Replay part 1: turns change the heading, everything else moves the ship;
# answer is the Manhattan distance from the origin.
orientation = 'E'
pos = [0, 0]
for step in steps:
    cmd = step[0]
    if cmd in ['L', 'R']:
        orientation = turn(orientation, step)
    else:
        pos = move(pos, step)
print(sum(map(abs, pos)))
# Part 2
def move_ship(pos, action, waypoint):
    """Move the ship toward the waypoint int(action[1:]) times (action like 'F10')."""
    repeats = int(action[1:])
    for _ in range(repeats):
        pos = [p + w for p, w in zip(pos, waypoint)]
    return pos
def move_waypoint(step, waypoint):
    # Waypoint translation is identical to ship translation (N/S/E/W steps).
    return move(waypoint, step)
def turn_waypoint(orientation, step):
    """Rotate the waypoint vector about the ship by the L/R instruction in step.

    Rotation by k*90 degrees counter-clockwise:
      90  -> (x, y) -> (-y, x)
      180 -> (x, y) -> (-x, -y)
      270 -> (x, y) -> (y, -x)
    """
    x, y = orientation[0], orientation[1]
    degrees = int(step[1:])
    if step[0] == 'R':
        degrees = -degrees
    delta = degrees % 360
    if delta == 90:
        return [-y, x]
    if delta == 180:
        return [-x, -y]
    if delta == 270:
        return [y, -x]
    return orientation
# Replay part 2: L/R rotate the waypoint, F moves the ship toward it,
# N/S/E/W translate the waypoint itself.
waypoint = [10, 1]
pos = [0, 0]
for step in steps:
    cmd = step[0]
    if cmd == 'F':
        pos = move_ship(pos, step, waypoint)
    elif cmd in ['L', 'R']:
        waypoint = turn_waypoint(waypoint, step)
    else:
        waypoint = move_waypoint(step, waypoint)
print(sum(map(abs, pos)))
| [
"lukasz@bednarski.me"
] | lukasz@bednarski.me |
d52a217c41ef8b70b1df00bbde2336982e48d35b | 2f88b736f7c20081a2685b954d6c3bf2b5c3d50a | /article/admin.py | d34f999e1e936249ff99ff09c203b5e8430237f4 | [] | no_license | resaUndefined/android | 417b24a0dae35057365220b345b39fcab8cfbe8b | 21ceec9f6c4e5358a9555e09f1f0de5cde41b8f3 | refs/heads/master | 2021-01-23T18:44:37.538509 | 2017-09-14T13:40:36 | 2017-09-14T13:40:36 | 102,804,843 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 467 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from article.models import Category,Post
class PostAdmin(admin.ModelAdmin):
    """Admin configuration for Post: list columns, filters, search and slug autofill."""
    list_display = ('title','category','created_on','last_modified','image')
    list_filter = ('category','created_on')
    search_fields = ('title','content')
    # Slug is pre-filled from the title while typing in the admin form.
    prepopulated_fields = {'slug' : ('title',)}
# Register your models here.
admin.site.register(Category)
admin.site.register(Post,PostAdmin)
| [
"resamuhammad96@gmail.com"
] | resamuhammad96@gmail.com |
f137282ec9b0f4d61f2f93dd1ba8b3c222be31fb | eb19e02e86a40f483e70b826aff8dc0b735e9827 | /supproject/models/siyou/model.py | 9793405bf79b248d478b01465d111e31804bebf4 | [] | no_license | decadeGuo/python | 22206950cf7432c9f505bf1c8fef5576f6315e3b | be277417df0b59490fc519c987750815c73986e1 | refs/heads/master | 2020-03-23T14:10:03.447349 | 2018-07-19T06:41:32 | 2018-07-19T06:41:32 | 141,660,696 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,839 | py | #coding:utf-8
from django.db import models
class Clear(models.Model):
    """Record of a privileged action performed on a user account."""
    uid = models.IntegerField(default=0)
    p_id = models.IntegerField(default=0)
    c_id = models.IntegerField(default=0)
    l_id = models.IntegerField(default=0)
    explain = models.CharField(max_length=200)
    add_time = models.IntegerField(default=0)
    type = models.IntegerField(default=0) # 0: clear data, 1: answer permission, 2: supervisor qualification, 3: WeChat
    class Meta:
        db_table = 'yh_clear'
class UserManage(models.Model):
    """Per-user management/status record."""
    uid = models.IntegerField(default=0)
    username = models.CharField(max_length=100)
    type = models.IntegerField(default=0)
    add_time = models.IntegerField(default=0)
    update_time = models.IntegerField(default=0)
    status = models.IntegerField(default=0)
    class Meta:
        db_table='yh_user_manage'
class QuickLogin(models.Model):
    """Quick-login account table."""
    username = models.CharField(max_length=20)
    password = models.CharField(max_length=20)
    status = models.IntegerField(default=1)
    type = models.IntegerField(default=0)
    remark = models.CharField(max_length=1000)
    name = models.CharField(max_length=20)
    position = models.CharField(max_length=20)
    class Meta:
        db_table='quick_login'
class LiuYan(models.Model):
    """User message ("liuyan" / 留言) entry."""
    uid = models.IntegerField(default=0)
    name = models.CharField(max_length=50)
    content = models.CharField(max_length=300)
    add_time = models.IntegerField(default=0)
    class Meta:
        db_table='liuyan'
class Game(models.Model):
    """Game play record."""
    type = models.IntegerField(default=0) # 1: "sharp eyes" (spot-the-difference) game
    uid = models.IntegerField(default=0)
    time = models.IntegerField(default=0) # time spent playing
    dw = models.CharField(max_length=500)
    add_time = models.DateTimeField(auto_now=True)
    class Meta:
        db_table='game'
| [
"guodongbo@tbkt.cn"
] | guodongbo@tbkt.cn |
b26fa7154acf019d6ab62274c28e669627516f3a | b864557087e9a292ac45717d5e2487cefdf1ddba | /leetcode/168.py | fc152c041070d477e7fbd64d97c11fd0e51d5d41 | [] | no_license | gohdong/algorithm | 737ec8830b1d1754f52f3956022c9a063ba4976f | 5ea8f88fc1b3a30470053f459e528bf5c00da766 | refs/heads/master | 2023-08-06T09:30:04.670020 | 2021-10-07T09:28:39 | 2021-10-07T09:28:39 | 385,096,857 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 518 | py | class Solution:
def convertToTitle(self, columnNumber: int) -> str:
answer = ""
while columnNumber:
if columnNumber % 26:
answer = chr(64+columnNumber % 26) + answer
columnNumber = columnNumber - columnNumber % 26
else:
answer = "Z" + answer
columnNumber = columnNumber - 26
columnNumber = columnNumber // 26
return answer
# Demo: 2147483647 is the max 32-bit int; expected output is "FXSHRXW".
solution = Solution()
print(solution.convertToTitle(2147483647))
| [
"aldehf420@naver.com"
] | aldehf420@naver.com |
4e8fd8831a4f09be6415f01c5e318a134881b2d3 | bdbd9f2a63da5dd9b1394849587740cd21121bba | /bonus/visualizer.py | 2f5fb87e0a9b0d261b8c04c436b6ac791cacd4ae | [] | no_license | logn777/Lem_in42 | 7f6a3694c8694abada18192d899abf2d6a7f80f6 | 316ab6e9ec531d855310ba71cc58bb0afc59d749 | refs/heads/master | 2022-11-26T23:20:24.369928 | 2020-08-04T20:10:33 | 2020-08-04T20:10:33 | 284,273,141 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,535 | py | import sys
import json
import argparse
import contextlib
import pytweening
import time
import networkx as nx
import matplotlib
import matplotlib.pyplot as plt
#
import matplotlib.animation as animation
col_path = ['orange', 'magenta', 'cyan', 'brown', 'blue', '#f08c00', '#308bc0',
'#f9c030', '#23f012', '#497663', '#ec5952', '#db8fb0', '#afc58c',
'#08ea07', '#3e60f3', '#9d5d80', '#701488', '#a78923', '#d461f8',
'#0628c4', '#2f8bdc', '#1abf73', '#04edc1', '#dffe5d', '#fbfbad',
'#b26258', '#d2881e', '#95d6ae']
class Lemin:
    """Holds the lem-in graph, the parsed ant moves, and drawing state.

    NOTE(review): these class-level lists are shared by all instances
    (mutable class attributes); the module only ever creates one instance
    (`loops`), so it works, but confirm before instantiating more.
    """
    nodes_light = []
    pos = []
    nodes_white = []
    def __init__(self, name=None, G=None, debug=None, pos=None):
        # NOTE(review): the constructor parameters are accepted but ignored;
        # every field below is hard-coded — confirm whether that is intended.
        self.name = "Graph"
        self.G = nx.Graph(name=self.name)
        self.draw_grey = False
        self.debug = 0
        self.pos = "kamada"
        self.connections = []
        self.nodes = []
        self.start = None
        self.end = None
        self.num_ants = 0
        self.max_moves = None
        self.max_flow = None
        self.ants = {}
        self.antmoves = []
        self.paths = []
        self.nodes_colors = []
        self.edges_colors = []
    def add_room(self, line, start_end):
        """Add a room ("name x y" line); start_end is 1/-1/0 for ##start/##end/plain."""
        self.nodes.append(line)
        n = line.split(' ')
        if start_end == -1 and 'red' not in self.nodes_colors:
            self.G.add_node(n[0], weight=2)
            self.end = n[0]
            self.nodes_colors.append('red')
        elif start_end == 1 and 'green' not in self.nodes_colors:
            self.G.add_node(n[0], weight=2)
            self.start = n[0]
            self.nodes_colors.append('green')
        else:
            self.G.add_node(n[0], weight=2)
            self.nodes_colors.append('grey')
    def add_edge(self, line):
        """Add a tunnel from a "room1-room2" line."""
        self.connections.append(line)
        n = line.split('-')
        self.G.add_edge(n[0], n[1], capacity=1, weight=1)
        self.edges_colors.append("grey")
    def add_ant(self, line):
        """Append each ant's destination room from an "Lx-room Ly-room ..." line."""
        for move in line.split(" "):
            a = move.split("-")
            if a[0] not in self.ants:
                self.ants[a[0]] = [a[1]]
            else:
                self.ants[a[0]].append(a[1])
    def draw_graph(self):
        draw_graph_nodes(self.G, self.paths, self.pos, col_path, self.draw_grey)
        draw_graph_edges(self.G, self.paths, self.pos, col_path, self.draw_grey)
    def read_input(self, argfile):
        """Parse a lem-in trace from a file object: ant count, rooms, links, moves."""
        start_end = 0
        lines = [line.rstrip("\n") for line in argfile]
        num_lines = len(lines)
        if self.debug >= 2:
            print("num_lines: " + str(num_lines))
        n = 0
        tmp_list = []
        for line in lines:
            if line == "":
                pass
            elif n == 0 and line.isdigit():
                # First line is the number of ants.
                self.num_ants = int(line)
                print("num_ants: " + str(self.num_ants))
            elif line[0] == '#':
                # ##start / ##end mark the next room; other comments reset the flag.
                if line == "##start":
                    start_end = 1
                elif line == "##end":
                    start_end = -1
                else:
                    start_end = 0
            elif line.count(" ") == 2:
                self.add_room(line, start_end)
            elif "L" not in line and "-" in line:
                self.add_edge(line)
            elif "L" in line and "-" in line:
                # One line of ant moves == one animation frame of lit rooms.
                self.antmoves.append(line)
                self.add_ant(line)
                str_h = line.split(" ")
                for s in str_h:
                    tmp_list.append(s.split("-")[1])
                self.nodes_light.append(list(tmp_list))
                tmp_list.clear()
            n += 1
        tmp = []
        # Ants that moved on the very first turn; their room sequences
        # (minus the final room) are taken as the paths to color.
        if (len(self.antmoves) > 0):
            for move in self.antmoves[0].split(" "):
                tmp.append(move.split("-")[0])
        self.paths.append([self.start, self.end])
        for ant in tmp:
            self.paths.append(self.ants[ant][:-1])
        self.pos = nx.kamada_kawai_layout(self.G)
loops = Lemin()
def animate(num_in):
    # FuncAnimation frame callback: frame 0 is a no-op so the initial
    # layout stays visible for one interval.
    if (num_in == 0):
        return
    num = num_in - 1
    # Rooms occupied by ants on this turn of the replay.
    it_nodes = loops.nodes_light[num]
    if (num != 0):
        # Repaint the previously lit rooms back to white.
        nx.draw_networkx_nodes(loops.G, loops.pos, loops.nodes_white,
                               node_color='white', node_size=5)
    nx.draw_networkx_nodes(loops.G,loops.pos, it_nodes,
                           node_color='black', node_size=5)
    # NOTE(review): after the first frame nodes_white aliases an entry of
    # nodes_light, so clear() here empties that history entry just before
    # the name is rebound — looks unintended; confirm.
    loops.nodes_white.clear()
    loops.nodes_white = it_nodes
def draw_graph_nodes(G, paths, pos, col_path, draw_grey):
    # paths[0] is [start, end]; paths[1:] are the colored ant paths.
    n = 0
    flag = False
    for node in G.nodes:
        # NOTE(review): flag is unconditionally reset to False at the end of
        # every iteration, so this grey branch can never fire — dead code?
        if flag and draw_grey:
            nx.draw_networkx_nodes(G, pos, nodelist=[node],
                                   node_color='gray',
                                   node_size=2)
        if node == paths[0][0]:
            # Start room: big green marker with a white center dot.
            nx.draw_networkx_nodes(G, pos, nodelist=[node],
                                   node_color='green', node_size=50)
            nx.draw_networkx_nodes(G, pos, nodelist=[node],
                                   node_color='white', node_size=5)
        elif node == paths[0][1]:
            # End room: big red marker with a white center dot.
            nx.draw_networkx_nodes(G, pos, nodelist=[node],
                                   node_color='red', node_size=50)
            nx.draw_networkx_nodes(G, pos, nodelist=[node],
                                   node_color='white', node_size=5)
        for i in range(1, len(paths)):
            if node in paths[i]:
                # Room on path i gets that path's color.
                nx.draw_networkx_nodes(G, pos, nodelist=[node],
                                       node_color=col_path[i + 1], node_size=20)
                nx.draw_networkx_nodes(G, pos, nodelist=[node],
                                       node_color='white', node_size=5)
                flag = False
                break
            else:
                flag = True
        flag = False
        n += 1
        if n == len(G.nodes):
            break
def draw_graph_edges(G, paths, pos, col_path, draw_grey):
    """Draw each edge of G: coloured when it lies on (or touches) an ant
    path, otherwise grey when draw_grey is set.

    paths[0] is (start, end); paths[1:] are the per-ant node lists.
    """
    total_edges = len(G.edges)
    for drawn, edge in enumerate(G.edges, start=1):
        u, v = edge[0], edge[1]
        # First ant path whose node set covers this edge (directly, or with
        # one endpoint on the start/end pair), if any.
        hit = next(
            (i for i in range(1, len(paths))
             if (u in paths[i] and v in paths[i])
             or (u in paths[0] and v in paths[i])
             or (u in paths[i] and v in paths[0])),
            None)
        if hit is not None:
            nx.draw_networkx_edges(G, pos, edgelist=[edge],
                                   edge_color=col_path[hit + 1])
        elif draw_grey and len(paths) > 1:
            # Edge belongs to no path: render it as a faint grey background link.
            nx.draw_networkx_edges(G, pos, edgelist=[edge],
                                   edge_color='gray', alpha=0.5)
        if drawn == total_edges:
            break
def main():
    """Read the lem-in map from stdin, draw it, and save the ant animation."""
    plt.ion()  # interactive mode so drawing calls do not block
    loops.read_input(sys.stdin)
    fig = plt.figure()
    loops.draw_graph()
    # One frame per recorded ant-move line, plus the initial empty frame.
    # (The original assigned this twice; the duplicate line is removed.)
    frames = len(loops.nodes_light)
    anim = animation.FuncAnimation(fig, animate, frames=(frames + 1), interval=500)
    anim.save('animation.gif')
if __name__ == '__main__':
    main()
| [
"rmaple@oa-h5.msk.21-school.ru"
] | rmaple@oa-h5.msk.21-school.ru |
ba5e2c97e8f9ce3548daee04347b0d86a35a4d9e | 26d84a3b5716a9d27f3c14da36c07d8a5130790e | /condition_simulation.py | 794684ffebb0f1e083ef6938744c4b2b151ac339 | [] | no_license | allhailjustice/SynTEG | a217fded4582fa788086af928fa46dea1001a291 | 7a602f5629236773a95055d12e0761232cafc1a4 | refs/heads/master | 2023-08-23T01:30:33.661434 | 2021-10-25T14:38:13 | 2021-10-25T14:38:13 | 283,507,903 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,359 | py | import tensorflow as tf
import numpy as np
import time
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ['CUDA_VISIBLE_DEVICES'] = "6"
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
tf.config.experimental.set_memory_growth = True
checkpoint_directory = "training_checkpoints_gan_new"
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
num_code = 1276
batchsize = 10000
max_num_visit = 200
Z_DIM = 128
G_DIMS = [256, 256, 512, 512, 512, 512, num_code]
D_DIMS = [256, 256, 256, 128, 128, 128]
class PointWiseLayer(tf.keras.layers.Layer):
    """Element-wise x * y + bias with a trainable, L2-regularised bias.

    Used to merge a hidden activation with a Dense projection of the
    condition vector (feature-wise multiplicative conditioning).
    """
    def __init__(self, num_outputs):
        super(PointWiseLayer, self).__init__()
        self.num_outputs = num_outputs
    def build(self, input_shape):
        # NOTE(review): Layer.add_variable is the deprecated TF2 alias of
        # add_weight -- consider migrating.
        self.bias = self.add_variable("bias",
                                      shape=[self.num_outputs],regularizer=tf.keras.regularizers.l2(1e-5))
    def call(self, x, y):
        return x * y + self.bias
class Generator(tf.keras.Model):
    """Conditional MLP generator: (noise z, condition) -> sigmoid code scores.

    Each hidden stage is Dense -> BatchNorm -> PointWiseLayer (multiplicative
    mix with a Dense projection of the condition) -> ReLU; the final Dense
    layer of width G_DIMS[-1] (= num_code) uses a sigmoid.
    """
    def __init__(self):
        super(Generator, self).__init__()
        # One Dense layer per hidden width in G_DIMS (last entry is the output).
        self.dense_layers = [tf.keras.layers.Dense(dim,
                                                   kernel_regularizer=tf.keras.regularizers.l2(1e-5),
                                                   bias_regularizer=tf.keras.regularizers.l2(1e-5))
                             for dim in G_DIMS[:-1]]
        self.batch_norm_layers = [tf.keras.layers.BatchNormalization(epsilon=1e-5,center=False, scale=False)
                                  for _ in G_DIMS[:-1]]
        self.output_layer = tf.keras.layers.Dense(G_DIMS[-1], activation=tf.nn.sigmoid,
                                                  kernel_regularizer=tf.keras.regularizers.l2(1e-5),
                                                  bias_regularizer=tf.keras.regularizers.l2(1e-5))
        # Per-stage projections of the condition vector, fed to PointWiseLayer.
        self.condition_layer = [tf.keras.layers.Dense(dim,
                                                      kernel_regularizer=tf.keras.regularizers.l2(1e-5),
                                                      bias_regularizer=tf.keras.regularizers.l2(1e-5))
                                for dim in G_DIMS[:-1]]
        self.pointwiselayer = [PointWiseLayer(dim) for dim in G_DIMS[:-1]]
    def call(self, x, condition, training):
        for i in range(len(G_DIMS[:-1])):
            h = self.dense_layers[i](x)
            x = tf.nn.relu(self.pointwiselayer[i](self.batch_norm_layers[i](h, training=training), self.condition_layer[i](condition)))
        x = self.output_layer(x)
        return x
class Discriminator(tf.keras.Model):
    """Conditional critic: scores a multi-hot code vector given a condition.

    Layer normalisation (not batch norm) is used so samples stay independent,
    and a "sparsity" feature is appended to the input before the MLP.
    """
    def __init__(self):
        super(Discriminator, self).__init__()
        self.dense_layers = [tf.keras.layers.Dense(dim, activation=tf.nn.relu,
                                                   kernel_regularizer=tf.keras.regularizers.l2(1e-5),
                                                   bias_regularizer=tf.keras.regularizers.l2(1e-5))
                             for dim in D_DIMS]
        self.layer_norm_layers = [tf.keras.layers.LayerNormalization(epsilon=1e-5,center=False, scale=False)
                                  for _ in D_DIMS]
        # Single unnormalised score output (Wasserstein critic).
        self.output_layer = tf.keras.layers.Dense(1, kernel_regularizer=tf.keras.regularizers.l2(1e-5),
                                                  bias_regularizer=tf.keras.regularizers.l2(1e-5))
        # Per-stage projections of the condition vector, fed to PointWiseLayer.
        self.condition_layer = [tf.keras.layers.Dense(dim,
                                                      kernel_regularizer=tf.keras.regularizers.l2(1e-5),
                                                      bias_regularizer=tf.keras.regularizers.l2(1e-5))
                                for dim in D_DIMS]
        self.pointwiselayer = [PointWiseLayer(dim) for dim in D_DIMS]
    def call(self, x, condition):
        # (2x)^15 / ((2x)^15 + 1) is ~0 for x < 0.5 and ~1 for x > 0.5, so the
        # sum is a smooth, differentiable count of "active" codes.
        a = (2 * x) ** 15
        sparsity = tf.reduce_sum(a / (a + 1), axis=-1, keepdims=True)
        x = tf.concat((x, sparsity), axis=-1)
        for i in range(len(D_DIMS)):
            x = self.dense_layers[i](x)
            x = self.pointwiselayer[i](self.layer_norm_layers[i](x), self.condition_layer[i](condition))
        x = self.output_layer(x)
        return x
def train():
    """Train the conditional WGAN-GP on pre-encoded visit records.

    Reads (code-index, condition) pairs from a TFRecord file, alternates one
    discriminator step and one generator step per batch, and checkpoints
    every 50 epochs.
    """
    # Each record: 36 code indices per visit plus a 256-d condition vector.
    feature_description = {
        'word': tf.io.FixedLenFeature([36], tf.int64),
        'condition': tf.io.FixedLenFeature([256], tf.float32)
    }
    def _parse_function(example_proto):
        parsed = tf.io.parse_single_example(example_proto, feature_description)
        return parsed['word'], parsed['condition']
    dataset_train = tf.data.TFRecordDataset('condition_vector_2.tfrecord')
    parsed_dataset_train = dataset_train.map(_parse_function, num_parallel_calls=4)
    parsed_dataset_train = parsed_dataset_train.batch(batchsize, drop_remainder=True).prefetch(5)
    # Discriminator uses a 5x larger learning rate than the generator.
    generator_optimizer = tf.keras.optimizers.Adam(4e-6)
    discriminator_optimizer = tf.keras.optimizers.Adam(2e-5)
    generator = Generator()
    discriminator = Discriminator()
    checkpoint = tf.train.Checkpoint(generator_optimizer=generator_optimizer, generator=generator,
                                     discriminator_optimizer=discriminator_optimizer, discriminator=discriminator)
    # checkpoint.restore(checkpoint_prefix+'-13')
    @tf.function
    def d_step(word, condition):
        # One critic update; returns (total loss, Wasserstein distance, reg loss).
        real = word
        z = tf.random.normal(shape=[batchsize, Z_DIM])
        epsilon = tf.random.uniform(
            shape=[batchsize, 1],
            minval=0.,
            maxval=1.)
        with tf.GradientTape() as disc_tape:
            synthetic = generator(z, condition, False)
            # Random interpolation between real and fake for the gradient penalty.
            interpolate = real + epsilon * (synthetic - real)
            real_output = discriminator(real, condition)
            fake_output = discriminator(synthetic, condition)
            w_distance = (-tf.reduce_mean(real_output) + tf.reduce_mean(fake_output))
            with tf.GradientTape() as t:
                t.watch([interpolate, condition])
                interpolate_output = discriminator(interpolate, condition)
            w_grad = t.gradient(interpolate_output, [interpolate, condition])
            # WGAN-GP: penalise the critic's gradient norm away from 1
            # (here w.r.t. both the interpolate and the condition).
            slopes = tf.sqrt(tf.reduce_sum(tf.square(w_grad[0]), 1)+tf.reduce_sum(tf.square(w_grad[1]), 1))
            gradient_penalty = tf.reduce_mean((slopes - 1.) ** 2)
            reg_loss = tf.reduce_sum(discriminator.losses)
            disc_loss = 10 * gradient_penalty + w_distance + reg_loss
        gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables)
        discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))
        return disc_loss, w_distance, reg_loss
    @tf.function
    def g_step(condition):
        # One generator update: maximise the critic's score on fakes.
        z = tf.random.normal(shape=[batchsize, Z_DIM])
        with tf.GradientTape() as gen_tape:
            synthetic = generator(z, condition, True)
            fake_output = discriminator(synthetic, condition)
            gen_loss = -tf.reduce_mean(fake_output) + tf.reduce_sum(generator.losses)
        gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)
        generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))
    @tf.function
    def train_step(batch):
        # One critic step followed by one generator step on the same batch.
        word, condition = batch
        # Convert the 36 code indices into one multi-hot vector of length num_code.
        word = tf.reduce_sum(tf.one_hot(word, depth=num_code, dtype=tf.float32), axis=-2)
        disc_loss, w_distance, reg_loss = d_step(word, condition)
        g_step(condition)
        return disc_loss, w_distance, reg_loss
    print('training start')
    for epoch in range(2000):
        start_time = time.time()
        total_loss = 0.0
        total_w = 0.0
        total_reg = 0.0
        step = 0.0
        for args in parsed_dataset_train:
            loss, w, reg = train_step(args)
            total_loss += loss
            total_w += w
            total_reg += reg
            step += 1
        duration_epoch = time.time() - start_time
        # Losses are reported negated (so larger printed values are better).
        format_str = 'epoch: %d, loss = %f, w = %f, reg = %f (%.2f)'
        print(format_str % (epoch, -total_loss / step, -total_w / step, total_reg / step, duration_epoch))
        if epoch % 50 == 49:
            checkpoint.save(file_prefix=checkpoint_prefix)
if __name__ == '__main__':
train()
| [
"noreply@github.com"
] | noreply@github.com |
a799c0a8fffef5b1328d74921d60a0fb43b2e3fc | 8de4c1a7ab9d09cc9a67f72fe5c492f3c5173ee7 | /nfldb/te_stats.py | 8118ce8341224fa78907dd7e86b39ae5d3af11d9 | [] | no_license | a-wang24/nfldb_ml | a200ae566fc8d1f7790e89b83a3a3d85476f84a2 | f088f78533fa98191d45c7bff01ded1c25fb030d | refs/heads/master | 2022-01-19T20:52:57.345881 | 2017-06-06T02:18:17 | 2017-06-06T02:18:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,424 | py | # -*- coding: utf-8 -*-
"""
Created on Sat May 13 22:13:15 2017
@author: Alan
Write TE stats to .csv
"""
import nfldb
import csv
import individual_stats as indst
import os
# For every (season, sort key, PPR rule) combination, query nfldb for the top
# tight ends and write one CSV of their aggregate stats + fantasy points.
years = [2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016]
position = 'TE'
limit = 60
sort_by = ['receiving_yds', 'receiving_tar', 'receiving_tds', 'receiving_yac_yds']
ppr_rules = [0, 0.25, 0.5, 1]
for year in years:
    for sort in sort_by:
        for ppr in ppr_rules:
            data = []
            db = nfldb.connect()
            q = nfldb.Query(db)
            # Output file name encodes every loop parameter, e.g.
            # output/te/top60_TE_2009_0.25ppr_byreceiving_yds.csv
            basepath = os.path.dirname(__file__)
            filepath = os.path.abspath(os.path.join(basepath, "..", "output/te/top"+
                                                    str(limit)+'_'+str(position)+'_'+str(year)+
                                                    '_'+str(ppr)+'ppr_by'+
                                                    str(sort)+'.csv'))
            # NOTE(review): 'wb' + csv.writer only works on Python 2; on
            # Python 3 this raises TypeError (use 'w', newline='' there).
            with open(filepath, 'wb') as csvfile:
                c = csv.writer(csvfile)
                c.writerow(['Player', 'Rec Yds', 'Rec Tar', 'Rec Tds', 'Rec YAC',
                            'Rush Yds', 'Rush Att', 'Rush Tds', 'Pass Yds', 'Pass Tds',
                            'Pass Att', 'Fumbles', 'Interceptions',
                            'Total Fantasy Pts', 'Team'])
                q.game(season_year = year, season_type = 'Regular')
                q.player(position = position)
                # pp is play_player class
                # sort is default descending order
                for pp in q.sort(sort).limit(limit).as_aggregate():
                    tfp = indst.calc_fp(pp, ppr)
                    data.append([pp.player, pp.receiving_yds, pp.receiving_tar, pp.receiving_tds,
                                 pp.receiving_yac_yds, pp.rushing_yds, pp.rushing_att, pp.rushing_tds,
                                 pp.passing_yds, pp. passing_tds, pp.passing_att,
                                 pp.fumbles_tot, pp.passing_int, tfp])
                #q.sort(sort).limit(limit)
                # Second pass: look up each player's team and append it, then
                # write the completed row.
                for entry in data:
                    pp_list = indst.player_team(year, position, sort, limit, entry[0].full_name)
                    if len(pp_list) == 0:
                        team = ''
                    else:
                        team = pp_list[0].team
                    entry.append(team)
                    c.writerow(entry)
                csvfile.close()
| [
"12wanga@gmail.com"
] | 12wanga@gmail.com |
298572da371a3f2684d72a4d215ccacc3301fc9d | 77176b06a717c65e311f4ec70722d5068851234b | /Windows/prepareDatabase/data_webscrapper.py | 52cf96b3c4a0a273eba703a8577768bfeac70728 | [] | no_license | miloszg/medBase | 3d4484192d1f34c500d175182617f7d90debf5ae | 6f804b034b59068b26a59a1652d468eafdb54f3a | refs/heads/master | 2023-06-09T11:42:40.599929 | 2019-12-08T23:18:09 | 2019-12-08T23:18:09 | 190,621,248 | 0 | 0 | null | 2023-05-30T19:15:06 | 2019-06-06T17:20:59 | Java | UTF-8 | Python | false | false | 2,979 | py | from bs4 import BeautifulSoup
from urllib.request import urlopen
import csv
class Drug:
    """One scraped drug record; str() renders it as a comma-joined line."""

    def __init__(self, _name, _composition, _form, _category, _speciality, _effect, _info="", _dosage=""):
        self.name = _name
        self.composition = _composition
        self.form = _form
        self.category = _category
        self.speciality = _speciality
        self.effect = _effect
        self.info = _info
        self.dosage = _dosage

    def __str__(self):
        # Same rendering as "{},{},...".format(...): each field formatted
        # with str() semantics and joined by commas.
        fields = (self.name, self.composition, self.form, self.category,
                  self.speciality, self.effect, self.info, self.dosage)
        return ",".join("{}".format(value) for value in fields)
def main():
    """Scrape every doz.pl listing page and write all drug rows to data.csv."""
    header = ["Name", "Composition", "Form", "Category", "Speciality", "Effect", "Info", "Dosage"]
    # newline='' is required when handing a file to csv.writer on Python 3
    # (matches the mode webscrapp_data_to_file already uses); the 'with'
    # block closes the file, so the old explicit close() was redundant.
    with open("data.csv", 'w', newline='') as csv_file:
        csv.writer(csv_file).writerow(header)
    # Listing pages hold 10 items each; page offsets run 2, 12, ..., 202.
    for page_offset in range(2, 207, 10):
        url_address = "https://www.doz.pl/leki/w_{}-wszystkie".format(page_offset)
        print(url_address)
        html = getHtml(url_address)
        webscrapp_data_to_file(html)
    print("Zapis zakonczony sukcesem!")
def getHtml(url_address):
    """Open the given URL and return the HTTP response object."""
    return urlopen(url_address)


def create_drug_list(drug_url_list):
    """Fetch and parse every drug-detail page in *drug_url_list*."""
    records = []
    for url in drug_url_list:
        records.append(get_drug_info(url))
    return records
def webscrapp_data_to_file(html):
    """Parse one listing page and append its drug records to data.csv."""
    drugs_url_list = get_drugs_url(html)
    drug_list = create_drug_list(drugs_url_list)
    try:
        # 'with' closes the file itself; the original's explicit close()
        # inside the block was redundant (and the local shadowed 'file').
        with open("data.csv", 'a', newline='') as csv_file:
            writer = csv.writer(csv_file)
            for drug in drug_list:
                writer.writerow(drug)
    except IOError as e:
        print("Couldn't open or write to file (%s)." % e)
def get_drugs_url(html):
    """Return up to 10 absolute drug-detail URLs found on a listing page."""
    soup = BeautifulSoup(html, 'html.parser')
    items = soup.findAll("div", {"class": "encyclopediaItem col-lg-12 col-sm-12"}, limit=10)
    urls = []
    for item in items:
        href = str(item.find("a")['href'])
        urls.append("https://www.doz.pl" + href)
    return urls
def get_drug_info(url):
    """Scrape one drug-detail page into a list:
    [name, composition, form, category, speciality, effect].

    Fields are left at their defaults when the page lacks the expected
    structure (an IndexError is printed and swallowed).
    """
    soup = BeautifulSoup(getHtml(url), 'html.parser')
    _name = ""
    _composition = ""
    _form = ""
    _category = ""
    _speciality = ""
    _effect = ""
    tr = soup.findAll("tr")
    try:
        # The second <header> holds the drug name; table rows 1-5 hold the
        # remaining attributes as <a> links.
        _name = soup.findAll('header')[1].find('h1').contents[0].strip()
        _composition = [a.contents[0] for a in tr[1].findAll('a') if len(a.contents) > 0]
        # Takes every other child of the second <td> -- presumably skipping
        # interleaved tag nodes; verify against the live markup.
        _form = [str(t).strip() for i, t in enumerate(tr[2].findAll('td')[1].contents) if i % 2 == 0]
        _category = [a.contents[0] for a in tr[3].findAll('a') if len(a.contents) > 0]
        _speciality = [a.contents[0] for a in tr[4].findAll('a') if len(a.contents) > 0]
        _effect = [a.contents[0] for a in tr[5].findAll('a') if len(a.contents) > 0]
    except IndexError as e:
        # Unexpected page layout: report and return whatever was filled so far.
        print(str(e))
    return [_name, _composition, _form, _category, _speciality, _effect]
def test():
    """Placeholder smoke-test hook (enable via the commented call below)."""
    print("Hello world")
if __name__ == '__main__':
    main()
    # test()
| [
"[pawel.kankowski@gmail.com]"
] | [pawel.kankowski@gmail.com] |
cfa28847849eeeee9efc5aceb251c642fa49d9bf | 1db51ef25d4ededb70d83acfb5da6060aac728de | /mqtt2redis.py | 2de955b3ffcc25e01f0610874a960637e0526380 | [] | no_license | wowo/pihome-api | 8268d64d83a9d11bf0443c9a183b3fb530717069 | a28f19eeef0df0bee00e5aa7c7bdb7936b669b7e | refs/heads/master | 2023-08-03T06:43:51.154674 | 2023-07-23T19:25:37 | 2023-07-23T19:25:37 | 25,784,297 | 2 | 0 | null | 2023-03-31T14:40:16 | 2014-10-26T17:57:48 | Python | UTF-8 | Python | false | false | 947 | py | #!/usr/bin/python
from datetime import datetime
import json
import paho.mqtt.subscribe as subscribe
import redis
import sys
import time
sensor_key = 'temperature' if len(sys.argv) == 3 else sys.argv[3]
print('Starting topic %s key %s, parameter: %s' % (sys.argv[1], sys.argv[2], sensor_key))
redis = redis.StrictRedis(host='localhost', port=6379, db=0)
def on_msg(client, userdata, message):
    """MQTT callback: push the decoded JSON payload onto a capped Redis list.

    The payload must contain *sensor_key*; its value is copied into 'value'
    and a 'when' timestamp is added, then the record is pushed to the head of
    the Redis list named by argv[2], trimmed to the newest 51 entries
    (indices 0-50).
    """
    print('message received %s, searching for %s' % (str(message.payload.decode('utf-8')), sensor_key))
    data = json.loads(message.payload.decode('utf-8'))
    data['value'] = data[sensor_key]
    data['when'] = str(datetime.now())
    redis.lpush(sys.argv[2], json.dumps(data))
    redis.ltrim(sys.argv[2], 0, 50)
subscribe.callback(on_msg, sys.argv[1], hostname='localhost')
# $1 - topic name
# $2 - redis key name
#echo '$0 fetching from MQTT topic $1 and saving to Redis list $2'
#mosquitto_sub -t $1 -F %I,%p | parallel redis-cli lpush $2
| [
"wojciech@sznapka.pl"
] | wojciech@sznapka.pl |
8c205a7d4003fa5e71c1b5a52726c951d55b0033 | 786027545626c24486753351d6e19093b261cd7d | /ghidra9.2.1_pyi/ghidra/graph/viewer/layout/GridLocationMap.pyi | f3f8d6ccb23e3486a6d68662f4b630f36b2434c5 | [
"MIT"
] | permissive | kohnakagawa/ghidra_scripts | 51cede1874ef2b1fed901b802316449b4bf25661 | 5afed1234a7266c0624ec445133280993077c376 | refs/heads/main | 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,437 | pyi | from typing import List
from typing import overload

import ghidra.graph.viewer.layout
import java.lang
# Auto-generated Jython type stub for Ghidra's GridLocationMap; `long` and
# `unicode` below are Jython/Python-2 builtin names, and the java.lang.Object
# methods (equals/hashCode/wait/notify/...) are inherited from the Java side.
class GridLocationMap(object):
    """
    An object that maps vertices to rows and columns and edges to their articulation points.
     This class is essentially a container that allows layout algorithms to store results, which
     can later be turned into layout positioning points.   The integer point values in this
     class are row, column grid values, starting at 0,0.

     Note: the Point2D values for the edge articulations use x,y values that are row and
     column index values, the same values as calling #row(Object) and #col(Object).

     After building the grid using this class, clients can call #rows() to get
     high-order object that represent rows.
    """

    def __init__(self): ...

    def centerRows(self) -> None:
        """
        Updates each row within the grid such that its x values are set to center the row in
         the grid.  Each row will be updated so that all its columns start at zero.  After that,
         each column will be centered in the grid.
        """
        ...

    @overload
    def col(self, __a0: object) -> int: ...

    @overload
    def col(self, __a0: object, __a1: int) -> None: ...

    def dispose(self) -> None: ...

    def equals(self, __a0: object) -> bool: ...

    def getArticulations(self, __a0: object) -> List[object]: ...

    def getClass(self) -> java.lang.Class: ...

    def hashCode(self) -> int: ...

    def notify(self) -> None: ...

    def notifyAll(self) -> None: ...

    @overload
    def row(self, __a0: object) -> int: ...

    @overload
    def row(self, __a0: object, __a1: int) -> None: ...

    def rows(self) -> List[ghidra.graph.viewer.layout.Row]:
        """
        Returns the rows in this grid, sorted by index (index can be negative)
        @return the rows in this grid
        """
        ...

    def set(self, __a0: object, __a1: int, __a2: int) -> None: ...

    def setArticulations(self, __a0: object, __a1: List[object]) -> None: ...

    def toString(self) -> unicode: ...

    def toStringGrid(self) -> unicode:
        """
        Creates a string representation of this grid
        @return a string representation of this grid
        """
        ...

    @overload
    def wait(self) -> None: ...

    @overload
    def wait(self, __a0: long) -> None: ...

    @overload
    def wait(self, __a0: long, __a1: int) -> None: ...
| [
"tsunekou1019@gmail.com"
] | tsunekou1019@gmail.com |
2d6d77393f75ae9de52060b26dbf15df2ef7702c | 1f979b6bb1d8b927e02b033077b7ac41f38c5dab | /bin/macho_find | 12d4b4713fd66f23976fd566543d287ccf74295d | [] | no_license | echo-wen/Pig-Meat-Sichuan | 829c8109386d291782a679e73853313acda60497 | cbf6166a399877e7a5bd9fa66b3de0c94d50cbab | refs/heads/master | 2022-11-29T15:40:07.354152 | 2020-08-11T02:25:00 | 2020-08-11T02:25:00 | 285,754,591 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | #!/Users/wenzicheng/PycharmProjects/pig_meat/venv/bin/python
# -*- coding: utf-8 -*-
# Console-script shim generated for macholib's `macho_find` entry point.
import re
import sys
from macholib.macho_find import main
if __name__ == '__main__':
    # Strip setuptools' '-script.pyw'/'.exe' suffix so argv[0] shows the
    # plain command name before delegating to macholib.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"wen_zicheng01@gmail.com"
] | wen_zicheng01@gmail.com | |
5c08cc84bffdaff5f05d651e56959922d36161e0 | 05ae8138b976c372d70a5b41a317b32dfaa20883 | /Core_Python/Python Functions/Default Parameter Value.py | c18f8233110b1576a28fcc137df78fd3f7c1f4d7 | [] | no_license | infamousdarius/Python-Practice | 06a16acbf3a986661428999cf9e6f612abaea33f | e11db25fd5d0e29ebb7665c28044739fcfc02589 | refs/heads/master | 2020-06-21T09:34:29.141868 | 2019-07-19T14:37:16 | 2019-07-19T14:37:16 | 197,409,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 293 | py | # The following example shows how to use a default parameter value.
#
# If we call the function without parameter, it uses the default value:
def my_function(country="Norway"):
    """Print which country we are from; defaults to Norway when omitted."""
    message = "I am from " + country
    print(message)
my_function("Sweden")
my_function("India")
my_function()
my_function("Brazil")
| [
"mrifat@tistatech.com"
] | mrifat@tistatech.com |
da382c1c7477d4ac48f29194bfba0eeb3520846a | 29c7cc5ad02157643a77c71922fb938ee381e125 | /venv/Scripts/django-admin.py | c0da0efeb1e330a4c80878ac561d84dcff9fed75 | [] | no_license | imklesley/PollsDjangoProject | 03c0f498f4b2bc2eb52ebecfd72c4a0e3742c198 | f3e42fcedfd58552bbf10a5f75c7b9f8eb85d7fe | refs/heads/main | 2023-01-24T14:51:52.237202 | 2020-11-23T04:39:58 | 2020-11-23T04:39:58 | 315,201,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 713 | py | #!C:\Users\imkle\PycharmProjects\PollDjangoCrashCourse\venv\Scripts\python.exe
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
    from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
    # On Django >= 4.0 the warning class no longer exists: fail loudly with
    # instructions to delete this legacy script.
    raise ImportError(
        'django-admin.py was deprecated in Django 3.1 and removed in Django '
        '4.0. Please manually remove this script from your virtual environment '
        'and use django-admin instead.'
    )
if __name__ == "__main__":
    # Still on Django 3.x: emit the deprecation warning, then behave exactly
    # like the django-admin command.
    warnings.warn(
        'django-admin.py is deprecated in favor of django-admin.',
        RemovedInDjango40Warning,
    )
    management.execute_from_command_line()
| [
"imklesley@gmail.com"
] | imklesley@gmail.com |
324b8d7bdf5c6beb4960c52815263ed0c2e05028 | 2b7046b71f27831a13f9039cc2d2cbe4439732e1 | /routes.py | 258b981e06f8fd8534d9a99c4716af2a6a94998e | [] | no_license | mozillalives/thaiorders | 0fe9ac1274d9bfb3a865a39c23c61239f23e23d3 | 3e78f933604fdff3b42e5360ba85165cc23ef02c | refs/heads/master | 2020-04-06T04:31:14.543847 | 2012-05-19T03:12:19 | 2012-05-19T03:12:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 948 | py | """
Using redirect route instead of simple routes since it supports strict_slash
Simple route: http://webapp-improved.appspot.com/guide/routing.html#simple-routes
RedirectRoute: http://webapp-improved.appspot.com/api/webapp2_extras/routes.html#webapp2_extras.routes.RedirectRoute
"""
from webapp2_extras.routes import RedirectRoute
from web.handlers import MainPage, OrderPage, CancelItemPage, LogoutHandler, GoogleLoginHandler
# Application route table.  RedirectRoute is used (rather than simple routes)
# because it supports strict_slash handling.
_routes = [
    RedirectRoute('/login/', GoogleLoginHandler, name='login', strict_slash=True),
    RedirectRoute('/logout/', LogoutHandler, name='logout', strict_slash=True),
    RedirectRoute('/cancel', CancelItemPage, name='cancel', strict_slash=True),
    RedirectRoute('/order/<order_key:.+>', OrderPage, name='order', strict_slash=True),
    RedirectRoute('/', MainPage, name='home', strict_slash=True)
]
def get_routes():
    """Return the module-level route list."""
    return _routes
def add_routes(app):
    """Register every route in _routes on the given application's router."""
    for r in _routes:
        app.router.add(r)
| [
"mozillalives@gmail.com"
] | mozillalives@gmail.com |
7b7b41ce739043d0a9f9a5c24d99b345beebe06e | 219ce374afe533c530eb10856e9e45c924df0ed2 | /src/applicationlayer/management/permission/views.py | 1f1496679afa993939ae184a57554b3fc2a67188 | [] | no_license | gladysforte/neko-api | dc60b95e9f2771f3d55fb58d81a9ed934d69b093 | 0168533623ccbe89bad8763e2ac88febc1a8fa05 | refs/heads/master | 2022-12-05T14:47:02.570100 | 2020-08-07T03:16:42 | 2020-08-07T03:16:42 | 285,589,420 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | from rest_framework import viewsets, permissions
from src.applicationlayer.management.permission.serializers import PermissionSerializer
from src.entities import models
# Create your views here.
class PermissionViewSet(viewsets.ModelViewSet):
    """CRUD API endpoint over all Permission records."""
    # AllowAny: this endpoint requires no authentication.
    permission_classes = [permissions.AllowAny]
    serializer_class = PermissionSerializer
    queryset = models.Permission.objects.all()
| [
"gladys@tirsolutions.com"
] | gladys@tirsolutions.com |
053b4ae6bb4f8ac87dc20403abc872c543784cd4 | 1e65ca80032b1b5a4ab3631044c3d41a9f3dd035 | /01_Jump_to_Python/Chapter03/rhombus_v1.py | 2ef012fb11668bdbb5d71475d648db0bd3ffd592 | [] | no_license | bj730612/Bigdata | cdd398c56023c67a2e56c36151e9f2bca067a40a | 9bb38e30bb3728b4a4e75bc763fa858029414d4e | refs/heads/master | 2020-03-15T09:27:23.995217 | 2018-10-02T00:07:38 | 2018-10-02T00:07:38 | 132,075,198 | 0 | 0 | null | null | null | null | UHC | Python | false | false | 473 | py | #coding: cp949
# Repeatedly read an odd number and print a centred star pyramid of
# (base + 1) // 2 rows; 0 exits, even numbers print an error message.
# (Replaces C-style (int)(...) casts and float height from true division
# with plain int()/floor division, and the manual counter with range().)
while True:
    base = int(input("홀수를 입력하세요.(0 <- 종료): "))
    if base == 0:
        break
    if base % 2 == 1:
        height = (base + 1) // 2
        for row in range(height):
            print(" " * (height - 1 - row) + "*" * (2 * row + 1))
    else:
        print("짝수를 입력하셨습니다. 다시 입력하세요")
| [
"USER@test.com"
] | USER@test.com |
2f6fdb8b9c6bb35b20a53804169c36281fef88f5 | e39244b8dd2d8662098fdeed2e7850fa18d5d063 | /main.py | cbbe5e9bbcf28b68ab9a1e702089031fe6d5a5fc | [] | no_license | hawa0011/Rock-Pappers-Scissors | e23b476e3d8d36d857e3eddaa69d94400ade4f55 | 68999aa628d97a7ab12e017be661f0a7135ab63c | refs/heads/main | 2023-01-18T16:52:00.195354 | 2020-11-10T06:55:35 | 2020-11-10T06:55:35 | 311,571,731 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,065 | py | import random
rock = '''
    _______
---'   ____)
      (_____)
      (_____)
      (____)
---.__(___)
'''
paper = '''
    _______
---'   ____)____
          ______)
          _______)
         _______)
---.__________)
'''
scissors = '''
    _______
---'   ____)____
          ______)
       __________)
      (____)
---.__(___)
'''
moves = [rock, paper, scissors]
user_choice = int(input("Welcome to RPS, choose 0 Rock ,1 Paper, 2 Scissors \n"))
if user_choice < 0 or user_choice > 2:
    # The original indexed moves[user_choice] before validating, which
    # crashed for inputs > 2 (and showed scissors for -1); reject first.
    print("Wrong choice, you lose")
else:
    computer_choice = random.randint(0, 2)
    print("You chose", user_choice, ":", moves[user_choice])
    print("Computer chose ", computer_choice, ":")
    print(moves[computer_choice])
    # (user - computer) % 3: 0 = draw, 1 = user wins, 2 = computer wins.
    # This also covers user 2 vs computer 0 (scissors vs rock), a case the
    # original if-chain silently ignored.
    outcome = (user_choice - computer_choice) % 3
    if outcome == 0:
        print("It's a Draw!")
    elif outcome == 1:
        print("You win!")
    else:
        print("Computer wins!")
| [
"noreply@github.com"
] | noreply@github.com |
23edf3fbfe1db234c24b79cd8c272514d314f1c4 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /m42wuSJtbRYC2HmRP_1.py | f66939f77888e2de21eaa38b6d7b1f8e7e9981c8 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 95 | py |
from math import log


def largest_exponential(l):
    """Return the 1-based index of the pair (base, exponent) in *l* whose
    value base**exponent is largest.

    Values are compared via exponent * log(base), which is monotone in
    base**exponent and avoids computing huge powers.  (Rewritten from a
    lambda bound to a name -- PEP 8 E731 -- and a star import narrowed to
    the one name actually used.)
    """
    return l.index(max(l, key=lambda pair: pair[1] * log(pair[0]))) + 1
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
3def27b583f63a74db5c137ef944ba9fda166b39 | c64fceda8e55aff23f7eff27417c4d494b8792ec | /mypy-stubs/typeguard.pyi | 970ffe639a8f3e91d579b0bb7fda681c037f6c64 | [
"Apache-2.0"
] | permissive | AndrewLitteken/parsl | 946efd9d22c554da409a0f8248b17718339b727c | ec6c6a2a7fbae566006781338226fc13bc0d21ca | refs/heads/master | 2020-04-20T22:24:38.307883 | 2019-07-19T21:33:06 | 2019-07-19T21:33:06 | 169,138,369 | 2 | 2 | Apache-2.0 | 2019-04-28T22:13:49 | 2019-02-04T19:48:27 | Python | UTF-8 | Python | false | false | 269 | pyi |
# this type for runtime_validation is based on the
# "Decorators that do not change the signature of the function" section of
# https://github.com/python/mypy/issues/3157
from typing import TypeVar, List
Func = TypeVar('Func')
# Identity-typed stub: declares that typeguard.typechecked returns a callable
# with the same type it was given, so mypy preserves the wrapped signature.
# (List is imported but unused in this stub.)
def typechecked(f: Func) -> Func: ...
| [
"noreply@github.com"
] | noreply@github.com |
438266feb6b85e6d9b6af66a9eb0986baff8c2c2 | d9821f540e10bd785e1f9959909958555ad2dfe2 | /SOM.py | 3eb1482082f1ca0dd7ba4b42eec89bc632347b46 | [] | no_license | tanjiawei23/EE7207 | f931425d11e4ea0267f3b7158788c487137fe626 | 97bec30d68539fb85ad12fc11f58fb7e7c3cec7f | refs/heads/main | 2023-08-14T14:45:42.707545 | 2021-09-30T19:54:37 | 2021-09-30T19:54:37 | 412,206,327 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,952 | py | import numpy as np
import pylab as pl
# 神经网络
class SOM(object):
    def __init__(self, X, output, iteration, batch_size):
        """
        :param X: N*D array -- N input samples, each D-dimensional
        :param output: (n, m) tuple -- shape of the 2-D output layer
        :param iteration: number of training iterations
        :param batch_size: number of samples drawn per iteration
        Initialises a weight matrix W of shape D*(n*m): one D-dimensional
        weight vector per output unit.
        """
        self.X = X
        self.output = output
        self.iteration = iteration
        self.batch_size = batch_size
        self.W = np.random.rand(X.shape[1], output[0] * output[1])
        print(self.W.shape)
        # ("权值矩阵" = "weight matrix")
        print("权值矩阵:", self.W)
    def GetN(self, t):
        """
        :param t: time step (the iteration count stands in for time)
        :return: integer topological radius; shrinks linearly as t grows
        """
        a = min(self.output)
        return int(a - float(a) * t / self.iteration)
    # learning rate
    def Geteta(self, t, n):
        """
        :param t: time step (the iteration count stands in for time)
        :param n: topological distance from the winner
        :return: learning rate, decaying in both t and n
        """
        return np.power(np.e, -n) / (t + 2)
    # update the weight matrix
    def updata_W(self, X, t, winner):
        N = self.GetN(t)  # neighbourhood radius for this time step
        for x, i in enumerate(winner):
            # NOTE(review): i[0] assumes each winner entry is indexable, but
            # train() passes plain ints from np.argmax(...).tolist() -- confirm.
            to_update = self.getneighbor(i[0], N)
            for j in range(N + 1):
                e = self.Geteta(t, j)  # learning rate at distance j
                for w in to_update[j]:
                    # Move each neighbour's weights towards the sample.
                    self.W[:, w] = np.add(self.W[:, w], e * (X[x, :] - self.W[:, w]))
    def getneighbor(self, index, N):
        """
        :param index: index of the winning neuron
        :param N: neighbourhood radius
        :return ans: list of sets; ans[d] holds the unit indices at Chebyshev
            distance d from the winner, for d = 0..N
        """
        a, b = self.output
        length = a * b
        def distence(index1, index2):
            # NOTE(review): divisors are mixed -- row uses // a but column
            # uses % b; for a non-square grid (a != b) these disagree.
            i1_a, i1_b = index1 // a, index1 % b  # //: floor division; %: remainder
            i2_a, i2_b = index2 // a, index2 % b
            return np.abs(i1_a - i2_a), np.abs(i1_b - i2_b)  # abs(): absolute value
        ans = [set() for i in range(N + 1)]
        for i in range(length):
            dist_a, dist_b = distence(i, index)
            if dist_a <= N and dist_b <= N: ans[max(dist_a, dist_b)].add(i)
        return ans
    def train(self):
        """
        train_Y: batch_size*(n*m) response of every output unit per sample
        winner: 1-D list with the winning-unit index for each sample
        :return: the adjusted weight matrix W
        """
        count = 0
        while self.iteration > count:
            # Draw a random batch (with replacement) and renormalise both
            # the weights and the batch before the dot-product competition.
            train_X = self.X[np.random.choice(self.X.shape[0], self.batch_size)]
            normal_W(self.W)
            normal_X(train_X)
            train_Y = train_X.dot(self.W)
            winner = np.argmax(train_Y, axis=1).tolist()
            self.updata_W(train_X, count, winner)
            count += 1
        return self.W
    def train_result(self):
        # Assign every sample to its best-matching unit using the trained W.
        normal_X(self.X)
        train_Y = self.X.dot(self.W)
        winner = np.argmax(train_Y, axis=1).tolist()
        print(winner)
        return winner
def normal_X(X):
    """Normalise every row of the 2-D array X to unit Euclidean length.

    Works in place and also returns X for convenience.
    """
    n_samples, _ = X.shape
    for idx in range(n_samples):
        squared = np.multiply(X[idx], X[idx])
        X[idx] /= np.sqrt(np.sum(squared))
    return X
def normal_W(W):
    """Normalise every column of the 2-D array W to unit Euclidean length.

    Works in place and also returns W for convenience.
    """
    for col in range(W.shape[1]):
        squared = np.multiply(W[:, col], W[:, col])
        W[:, col] /= np.sqrt(np.sum(squared))
    return W
| [
"noreply@github.com"
] | noreply@github.com |
099ec43119f2ae5e0635eb44a1dbff6c88d3ed20 | 4730749ce5f0f4f652b688c7594badc1c357f1d6 | /LV.1/핸드폰 번호 가리기.py | 4553fb1630829f20ac62717e105e0e247257c327 | [] | no_license | RobertHan96/programmers_algorithm | 4de015278d7242ee79cd33047a6975a9c9d63c92 | 776777e14e33ca99571296defd28d145d6366bef | refs/heads/master | 2022-04-26T22:46:54.363542 | 2022-04-12T13:53:46 | 2022-04-12T13:53:46 | 233,599,487 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 574 | py | # 프로그래머스 모바일은 개인정보 보호를 위해 고지서를 보낼 때 고객들의 전화번호의 일부를 가립니다.
# 전화번호가 문자열 phone_number로 주어졌을 때,
# 전화번호의 뒷 4자리를 제외한 나머지 숫자를 전부 *으로 가린 문자열을 리턴하는 함수, solution을 완성해주세요.
# s는 길이 4 이상, 20이하인 문자열입니다.
def solution(phone_number):
    """Mask all but the last four characters of phone_number with '*'."""
    keep_from = len(phone_number) - 4
    return "*" * keep_from + phone_number[keep_from:]
print(solution('027778888'))
| [
"yshan4329@gmail.com"
] | yshan4329@gmail.com |
1588a9be00f49bc54ab4f1f48fbf7fb544f9f999 | 5cb38042bdf1126609c1ad0a19edad47e3bdbc55 | /ProblemSet/ProblemSet4/getWordScore.py | ce8ff674b7494112773b10e64cce222e710e313d | [
"Giftware"
] | permissive | y0m0/MIT.6.00.1x | 0e14c05ed0ffefefce49d504de853d0db39ad02d | c10fa1615879f35f822d73d6fa209566ce66b47f | refs/heads/master | 2021-01-01T05:25:25.403150 | 2020-12-05T14:05:06 | 2020-12-05T14:05:06 | 57,986,412 | 180 | 95 | null | 2022-08-08T12:07:09 | 2016-05-03T17:03:36 | Python | UTF-8 | Python | false | false | 827 | py | def getWordScore(word, n):
"""
Returns the score for a word. Assumes the word is a valid word.
The score for a word is the sum of the points for letters in the
word, multiplied by the length of the word, PLUS 50 points if all n
letters are used on the first turn.
Letters are scored as in Scrabble; A is worth 1, B is worth 3, C is
worth 3, D is worth 2, E is worth 1, and so on (see SCRABBLE_LETTER_VALUES)
word: string (lowercase letters)
n: integer (HAND_SIZE; i.e., hand size required for additional points)
returns: int >= 0
"""
score = 0
for letters in word:
if letters in SCRABBLE_LETTER_VALUES:
score += SCRABBLE_LETTER_VALUES[letters]
if len(word) == n:
return (score * len(word)) + 50
else:
return score * len(word)
| [
"lixi.daniele@gmail.com"
] | lixi.daniele@gmail.com |
2550065b7ba0f3c632729aed298ab54bc4201d84 | 9c40a40466ab5d29bdb42365f1f8531e5756154f | /MSM_test_fitkbar.py | fdb4b98006a2a3715f3c8fb707a8270b666db60d | [] | no_license | goldstar111/MSM_Thanasarn | 996bbd1d79bc0a84c4e2b45ad0965b64daf901c5 | 6b29e7a31d5ea278dad563cf3fd5428e662aa035 | refs/heads/master | 2021-09-14T14:05:15.710998 | 2018-05-14T21:35:43 | 2018-05-14T21:35:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,010 | py | import matplotlib
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from pandas import DataFrame as df
from MSM_util import *
from lmfit import minimize, Minimizer, Parameters, Parameter, report_fit
import sys, scipy, numpy
import statsmodels
# print(scipy.__version__, numpy.__version__, sys.version_info)
# Hyper parameters
# How much period taken into calculation and prediction
m = 20
# number of days prediction
RVn = 22
# kbar
kbar = 3
vary = "k"
kbar_start = 3
kbar_max = 4
# Load the gold price series and build de-meaned daily return ratios.
xls_data = pd.read_excel('data_GVZ_GLD.xlsx')
date_GLD = xls_data.iloc[:,3]
GLD = xls_data.loc[:,'GLD']
GLD2 = GLD.shift(1)
GLD2[0] = GLD[0]  # backfill the first lagged price so the first ratio is 1
GLD_r = GLD/GLD2
GLD_d = GLD_r - np.mean(GLD_r)
plt.plot(date_GLD,GLD)
plt.title('de-mean data')
plt.plot(date_GLD,200*GLD_r)
# Reverse to oldest-first ordering for the fit.  NOTE(review): assumes the
# spreadsheet rows are newest-first -- confirm.
data = GLD_d[::-1]
startingvals = []#[1.2, 1.2, 0.5 , 1.2 ]
# Lower/upper bounds for the four MSM parameters passed to the optimiser.
LB = [1, 1, 0.001, 0.0001]
UB = [1.99, 50, 0.99999, 5]
op_methods = 'slsqp'
msm_vary_k_cal(data, vary, kbar_start, kbar_max, LB, UB, op_methods, startingvals, m, RVn)
| [
"lawofearth@gmail.com"
] | lawofearth@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.