blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b3df555d0e491ab0d8a6876dfbca0bd6868a7d90 | 8656da2c1ad850de4f8c9a310c0cbd187111e61a | /Solutions/Set-01/TestChallenges.py | 0bba34cdbe4d5df8c151edba09934e221f1fd9ca | [] | no_license | zeecnla/CryptopalsCryptoChallenges | d3ad7c72c01bb05d4e891a5c54e547645147c3a4 | 46d57dd0f8770880611dd80b218c13e1e965fef0 | refs/heads/master | 2021-09-09T20:32:32.591638 | 2018-03-19T15:22:48 | 2018-03-19T15:22:48 | 107,307,661 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,215 | py | import Challenge01
import Challenge02
import Challenge03
import Challenge04
import Challenge05
import unittest
# Will begin to review the challenges to finally complete them
class TestChallengesSet1(unittest.TestCase):
    """Regression tests for Cryptopals Set 1, challenges 1-5.

    Each test feeds the official challenge input to the corresponding
    Challenge0X module and checks the known-correct answer.
    """
    def test_challenge_1(self):
        # Challenge 1: hex -> base64 conversion.
        answer = "SSdtIGtpbGxpbmcgeW91ciBicmFpbiBsaWtlIGEgcG9pc29ub3VzIG11c2hyb29t"
        input_ = "49276d206b696c6c696e6720796f757220627261696e206c696b65206120706f69736f6e6f7573206d757368726f6f6d"
        output = Challenge01.HexToBase64(input_)
        # HexToBase64 returns bytes (decoded here to compare against str).
        solution = output.decode("utf-8")
        self.assertEqual(solution,answer)
    def test_challenge_2(self):
        # Challenge 2: fixed-length XOR of two equal-length hex strings.
        firstString = "1c0111001f010100061a024b53535009181c"
        secondString = "686974207468652062756c6c277320657965"
        answer = "746865206b696420646f6e277420706c6179"
        output = Challenge02.XOROperation(firstString, secondString)
        self.assertEqual(output,answer)
    def test_challenge_3(self):
        # Challenge 3: break single-byte XOR cipher by frequency scoring.
        encoded_hex_string = "1b37373331363f78151b7f2b783431333d78397828372d363c78373e783a393b3736"
        output = Challenge03.xorCipher(encoded_hex_string)
        answer = "Cooking MC's like a pound of bacon"
        self.assertEqual(output,answer)
    def test_challenge_4(self):
        # NOTE(review): absolute, machine-specific path -- the test only runs
        # on this author's macOS checkout; consider a path relative to __file__.
        file_path = "/Users/Cesar-Melchor/Documents/Github/CryptopalsCryptoChallenges/Solutions/Set-01/assets/strings.txt"
        #file_path = "C:\\Users\\Admin\Documents\\GitHub\\CryptopalsCryptoChallenges\\Solutions\\Set-01\\assets\\strings.txt"
        output = Challenge04.detectSingleCharacterXOR(file_path)
        answer = "Now that the party is jumping\n"
        self.assertEqual(output,answer)
    def test_challenge_5(self):
        # Challenge 5: repeating-key XOR with key 'ICE'.
        key = 'ICE'
        phrase = '''Burning 'em, if you ain't quick and nimble
I go crazy when I hear a cymbal'''
        # NOTE(review): the expected hex below contains a literal newline, so
        # RepeatingXOR's output must include it at that position -- confirm.
        answer = '''0b3637272a2b2e63622c2e69692a23693a2a3c6324202d623d63343c2a26226324272765272
a282b2f20430a652e2c652a3124333a653e2b2027630c692b20283165286326302e27282f'''
        output = Challenge05.RepeatingXOR(phrase,key)
        self.assertEqual(output,answer)
    '''
    def test_challenge_6(self):'''
# Run the whole challenge test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| [
"csrmelchor@gmail.com"
] | csrmelchor@gmail.com |
31d7b4e178e802fb9796fbd72f7a12870fc5c5d1 | 93cc63c3bb664346a63736d30c71f7ef1f37849e | /client_app/management/commands/create_client.py | 1e7b16a4382aaac3b582813724fa5c96d649058c | [] | no_license | bikashsaud/client | 84a6baf9a220f1bfe72c289620fae9ac3d84ffba | 9123a620a22775ddf8e4db3179e967d03e37c5af | refs/heads/master | 2023-01-24T01:24:24.530842 | 2020-12-03T16:57:43 | 2020-12-03T16:57:43 | 318,256,131 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 504 | py | from client_app.models import Client
from django.core.management.base import BaseCommand
from django.utils.crypto import get_random_string
from django_seed import Seed
seeder = Seed.seeder()
class Command(BaseCommand):
    """Management command that wipes the Client table and seeds 10 rows."""

    help = 'create 10 clients'

    def handle(self, *args, **kwargs):
        # Start from an empty table so repeated runs never accumulate rows.
        Client.objects.all().delete()
        num_clients = 10
        for _ in range(num_clients):
            # Random name via Django's crypto helper; fake email via django_seed.
            Client.objects.create(
                client_name=get_random_string(),
                client_email=seeder.faker.email(),
            )
| [
"saudbikash514@gmail.com"
] | saudbikash514@gmail.com |
362467cd5e32cd4dcb90e29eaca44d0b17706341 | 3b030444b2d1d9d57197ccba41387b447114b210 | /config.py | fb716512cf2763712c551c3b4015a4743de47d8e | [] | no_license | popfido/PairCNN-Ranking | ec85e45ef54f05a6b1778297cd316b2fa8a23a90 | b29bbe774888e154a8bad5dafa67ec24aba33256 | refs/heads/master | 2020-03-09T10:21:58.509310 | 2018-04-09T09:27:02 | 2018-04-09T09:27:02 | 128,735,443 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,261 | py | # coding:utf8
import warnings
class DefaultConfig(object):
    """Central path / hyper-parameter configuration for PairCNN training.

    All attributes are class-level defaults; ``parse`` (attached to this
    class below) overrides them at runtime from a keyword dict.
    """
    env = 'default'  # visdom environment name
    model = 'PairCNN'  # model to use; must match a name exported by models/__init__.py
    train_dir = './'
    train_data_root = './data/train/'  # directory holding the training set
    validate_data_root = './data/validate'  # directory holding the validation set
    test_data_root = './data/test/'  # directory holding the test set
    load_model_path = None  # path of a pre-trained model to load; None = train from scratch
    dev_ratio = 0.1  # Ratio of dev/validation data picked from training set
    batch_size = 128  # batch size
    use_gpu = False  # use GPU or not
    num_workers = 4  # how many workers for loading data
    print_freq = 20  # print info every N batch
    eval_freq = 100  # Evaluate model on dev set after this many steps (default: 100)
    checkpoint_freq = 100  # Save model after this many steps (default: 100)
    debug_file = '/tmp/debug'  # if os.path.exists(debug_file): enter ipdb
    result_file = 'result.csv'
    seed = 233  # Random seed (default: 233)
    max_epoch = 20
    lr = 0.1  # initial learning rate
    lr_decay = 0.95  # when val_loss increases, lr = lr*lr_decay
    embedding_dim = 64  # Dimensionality of character embedding (default: 64)
    filter_sizes = "2,3"  # Comma-separated filter sizes (default: '2,3')
    num_filters = 64  # Number of filters per filter size (default: 64)
    num_hidden = 100  # Number of hidden layer units (default: 100)
    dropout_keep_prob = 0.5  # Dropout keep probability (default: 0.5)
    max_len_left = 10  # max document length of left input
    max_len_right = 10  # max document length of right input
    weight_decay = 1e-4  # l2_regularization
    vocab_size = 300000  # Most number of words in vocab (default: 300000)
def parse(self, kwargs):
    """Update this config object from the ``kwargs`` dict.

    Every ``(name, value)`` pair in ``kwargs`` is written onto ``self`` via
    ``setattr``; names that are not pre-declared on the class trigger a
    warning but are still applied.  Afterwards the effective class-level
    configuration (every non-dunder attribute) is printed.

    :param kwargs: dict mapping option name -> new value
    """
    for k, v in kwargs.items():
        if not hasattr(self, k):
            # Unknown options are applied anyway, but warn the user.
            # (Fixed message typo: "has not attribut" -> "has no attribute".)
            warnings.warn("Warning: opt has no attribute %s" % k)
        setattr(self, k, v)

    print('user config:')
    for k, v in self.__class__.__dict__.items():
        if not k.startswith('__'):
            print(k, getattr(self, k))
DefaultConfig.parse = parse
opt = DefaultConfig()
# opt.parse = parse
| [
"wanghailin317@gmail.com"
] | wanghailin317@gmail.com |
ca29a865d887f29d1515775ed8d07b25e1e41db0 | c3bf5f549830623c723d053e9f61d40c6470bd5a | /scripts/filter_mutations/02_generate_csv.py | aa834a23f33513504a3817483b72d985656380c5 | [] | no_license | yeemey/dvh_mms2 | 93117172095640620939249d912464ae56ec315e | 768186e01e5212846862faf7073140a0ce050fb1 | refs/heads/master | 2020-05-26T02:17:46.627745 | 2019-08-01T00:29:14 | 2019-08-01T00:29:14 | 84,984,912 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,601 | py | import numpy as np
import pandas as pd
import tidy_breseq as tb
# Start data input
input_dir = '~/Repositories/dvh_mms2/depth_mle/'
d2mut, d2ev = tb.subset_gd_to_df(input_dir + 'd2/annotated.gd', 'D2-0', 'D2', '0', cov=True)
m1mut, m1ev = tb.subset_gd_to_df(input_dir + 'm1/annotated.gd', 'M1-0', 'M1', '0', cov=True)
d2 = tb.combine_mutations_and_evidence(d2mut, d2ev)
m1 = tb.combine_mutations_and_evidence(m1mut, m1ev)
df = pd.concat([d2, m1], ignore_index=True)
# End data input
with open('samples.csv', 'w') as samples:
samples.write('Name,Population,Generation\n')
samples_df = df[['sample', 'line', 'generation']].drop_duplicates()
for row in samples_df.index:
samples.write(samples_df.loc[row, 'sample'] + ',' + samples_df.loc[row, 'line'] + ',' + str(samples_df.loc[row, 'generation'] + '\n'))
# Keep only rows backed by read-alignment (RA) evidence.
df_RA = df[df['evidence_type'] == 'RA']
freqs_df = df_RA[['genome_id', 'position', 'REF', 'ALT', 'sample', 'polymorphism_frequency']]
# Wide table: one row per variant, one column per sample's allele frequency.
pvt_freqs = freqs_df.pivot_table(index=['genome_id', 'position', 'REF', 'ALT'], columns='sample', values='polymorphism_frequency')
# Hand-rolled CSV writer so the header reads '#CHROM,POS,REF,ALT,<samples>'.
# NOTE(review): missing values are written as the literal string 'nan'
# (str(NaN)), unlike DataFrame.to_csv which would write empty fields.
with open('freqs.csv', 'w') as freqs:
    header = '#CHROM,POS,REF,ALT,'
    count = 0
    while count < len(pvt_freqs.columns) - 1:
        header += pvt_freqs.columns[count] + ','
        count += 1
    # Last column gets the newline instead of a trailing comma.
    header += pvt_freqs.columns[count] + '\n'
    freqs.write(header)
    for row in pvt_freqs.index:
        # row is the (genome_id, position, REF, ALT) MultiIndex tuple.
        freqs.write(row[0] + ',' + row[1] + ',' + row[2] + ',' + row[3] + ',')
        sample_count = 0
        while sample_count < len(pvt_freqs.columns) - 1:
            freqs.write(str(pvt_freqs.loc[row, pvt_freqs.columns[sample_count]]) + ',')
            sample_count += 1
        freqs.write(str(pvt_freqs.loc[row, pvt_freqs.columns[sample_count]]) + '\n')
# Same layout as freqs.csv, but with read coverage per variant per sample.
cov_df = df_RA[['genome_id', 'position', 'REF', 'ALT', 'sample', 'ra_cov']]
# Missing coverage is filled with 0 (unlike the frequency table above).
pvt_cov = cov_df.pivot_table(index=['genome_id', 'position', 'REF', 'ALT'], columns='sample', values='ra_cov').fillna(0)
with open('coverage.csv', 'w') as cov:
    header = '#CHROM,POS,REF,ALT,'
    count = 0
    while count < len(pvt_cov.columns) - 1:
        header += pvt_cov.columns[count] + ','
        count += 1
    # Last column gets the newline instead of a trailing comma.
    header += pvt_cov.columns[count] + '\n'
    cov.write(header)
    for row in pvt_cov.index:
        # row is the (genome_id, position, REF, ALT) MultiIndex tuple.
        cov.write(row[0] + ',' + row[1] + ',' + row[2] + ',' + row[3] + ',')
        sample_count = 0
        while sample_count < len(pvt_cov.columns) - 1:
            cov.write(str(pvt_cov.loc[row, pvt_cov.columns[sample_count]]) + ',')
            sample_count += 1
        cov.write(str(pvt_cov.loc[row, pvt_cov.columns[sample_count]]) + '\n')
| [
"yeemey@users.noreply.github.com"
] | yeemey@users.noreply.github.com |
033e102e9ec3869d874a2d7ed46fda3ca7595e17 | b2d9974c711c7271a67759c205dbf774103ac870 | /adminregistrationfinal/settings.py | 53b10cc203204c1d2ac3948f5f756550851f4a78 | [] | no_license | Vidhyasagarudayakumar/adminregistrationfinal | 9a10b29450d5dedb474d0d2de771eee21ad9b882 | 5b2f9f67f4155106d94c231f692547fd47d04da9 | refs/heads/master | 2020-05-23T09:10:24.377716 | 2017-01-31T16:49:25 | 2017-01-31T16:49:25 | 80,444,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,367 | py | """
Django settings for adminregistrationfinal project.
Generated by 'django-admin startproject' using Django 1.10.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'm2zj8u(z&x^o&t9m!(*$ms8gg%f%%s#c6yi+1alklwlc1slf3j'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'adminregistration',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'adminregistrationfinal.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'adminregistrationfinal.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
LOGIN_URL ='/login/'
# NOTE(review): live API key committed to source control -- revoke it and
# read it from the environment (e.g. os.environ) instead.
SPARKPOST_API_KEY = '5b5d709328e1f6ef44e60971121a75ae5e7130ce'
EMAIL_BACKEND = 'sparkpost.django.email_backend.SparkPostEmailBackend'
"sagar.vidhya816@gmail.com"
] | sagar.vidhya816@gmail.com |
53e59ee464307278ddc231f1e727417ac63c6650 | d3bf6f4f6c5ee567057a8b8b97f72ace2e9159e0 | /sample/migrations/0010_auto_20210929_1728.py | ca1bacd70e58b1c0a3aebb7a755fa63c4c9e33ee | [] | no_license | shelu-edi/idesiignred | 82a56f7a9bcaf8f116ae60f2b7af6f043941d22c | 5fe37605a73d97005462c3d81a186a8bac504b1a | refs/heads/main | 2023-08-29T12:42:10.669403 | 2021-10-18T06:06:30 | 2021-10-18T06:06:30 | 406,770,818 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,913 | py | # Generated by Django 3.1.2 on 2021-09-29 11:58
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('sample', '0009_auto_20210929_1723'),
]
operations = [
migrations.RenameField(
model_name='boyspant',
old_name='pain_cost',
new_name='paint_cost',
),
migrations.RenameField(
model_name='boysshirt',
old_name='pain_cost',
new_name='paint_cost',
),
migrations.RenameField(
model_name='boysshort',
old_name='pain_cost',
new_name='paint_cost',
),
migrations.RenameField(
model_name='boystshirt',
old_name='pain_cost',
new_name='paint_cost',
),
migrations.RenameField(
model_name='childrensfrock',
old_name='pain_cost',
new_name='paint_cost',
),
migrations.RenameField(
model_name='childrenspant',
old_name='pain_cost',
new_name='paint_cost',
),
migrations.RenameField(
model_name='girlsfrock',
old_name='pain_cost',
new_name='paint_cost',
),
migrations.RenameField(
model_name='girlspant',
old_name='pain_cost',
new_name='paint_cost',
),
migrations.RenameField(
model_name='girlsshort',
old_name='pain_cost',
new_name='paint_cost',
),
migrations.RenameField(
model_name='girlstshirt',
old_name='pain_cost',
new_name='paint_cost',
),
migrations.RenameField(
model_name='infantsfrock',
old_name='pain_cost',
new_name='paint_cost',
),
migrations.RenameField(
model_name='infantspant',
old_name='pain_cost',
new_name='paint_cost',
),
migrations.RenameField(
model_name='kaftan',
old_name='pain_cost',
new_name='paint_cost',
),
migrations.RenameField(
model_name='ladiesskirt',
old_name='pain_cost',
new_name='paint_cost',
),
migrations.RenameField(
model_name='ladiestshirt',
old_name='pain_cost',
new_name='paint_cost',
),
migrations.RenameField(
model_name='maternityfrock',
old_name='pain_cost',
new_name='paint_cost',
),
migrations.RenameField(
model_name='nightwear',
old_name='pain_cost',
new_name='paint_cost',
),
migrations.RenameField(
model_name='teenfrock',
old_name='pain_cost',
new_name='paint_cost',
),
]
| [
"61077989+shelu-edi@users.noreply.github.com"
] | 61077989+shelu-edi@users.noreply.github.com |
1828255c8961c73638bab006e6d09f999240db93 | 4445c7d94554d548aedd8f7220f1bf7007594104 | /helper.py | 07c1f44fdf3a76da8a57deb2fdf933829713f9c2 | [] | no_license | Justin19960919/Chess-Game | ebf668a7fd9bcbd4a1ee4b9bbd5d932de44ab3d1 | 333c66363910d05c91d2de242359ebf645edd268 | refs/heads/main | 2023-03-06T03:58:19.966962 | 2021-02-20T05:24:52 | 2021-02-20T05:24:52 | 340,567,503 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 778 | py | from constants import SIZE_OF_SQUARE
def get_coordinates_given_index(x_index, y_index):
    '''
    Method -- get_coordinates_given_index
        Given x_index and y_index of the positions array in
        gamestate, return the bottom-left corner screen coordinates
        of the corresponding board square.
    Parameters:
        x_index -- row index of the piece in the positions array
        y_index -- column index of the piece in the positions array
    '''
    # The abs()-then-flip-sign logic collapses algebraically: columns left
    # of index 4 map to negative x and rows below index 3 map to negative y,
    # which is exactly a signed offset from the board centre at index (3, 4).
    x_coordinate = (y_index - 4) * SIZE_OF_SQUARE
    y_coordinate = (3 - x_index) * SIZE_OF_SQUARE
    return [x_coordinate, y_coordinate]
"jclee1996@gmail.com"
] | jclee1996@gmail.com |
e4e396bdb9c9ff1453ea79cb8ca39725235f75db | 8e1f493ce9fc34b42637bc7d69560aab20c384a3 | /simple_filter/scripts/simple_kalman.py | 7f10fadc9c8eb8b01238b66e7502fb534f4f3abd | [] | no_license | AmyPhung/comprobo20 | 6f980a82174b3938527fb5939cdd539420aaff42 | 2eff4918275542d2d28828df97c8100d2391cfb0 | refs/heads/master | 2023-04-28T21:49:52.085491 | 2021-05-17T22:27:14 | 2021-05-17T22:27:14 | 290,074,022 | 0 | 0 | null | 2020-08-25T00:47:21 | 2020-08-25T00:47:21 | null | UTF-8 | Python | false | false | 4,471 | py | #!/usr/bin/env python3
"""
This script implements a Kalman filter for the system:
x_0 ~ N(0, sigma_sq)
x_t = x_{t-1} + w_t, w_t ~ N(0, sigma_m_sq)
z_t = x_t + v_t, v_t ~ N(0, sigma_z_sq)
"""
import matplotlib.pyplot as plt
import rospy
from numpy import arange
from numpy.random import randn
from math import e, sqrt, pi
from dynamic_reconfigure.server import Server
from simple_filter.cfg import SimpleKalmanConfig
class SimpleWorld(object):
    """A scalar random-walk system observed through Gaussian noise.

    Dynamics:
        x_0 ~ N(mu_0, sigma_0)
        x_t = x_{t-1} + w_t,  w_t ~ N(0, sigma_m_sq)   (movement noise)
        z_t = x_t + v_t,      v_t ~ N(0, sigma_z_sq)   (measurement noise)
    """
    def __init__(self, mu_0, sigma_0, sigma_m_sq, sigma_z_sq):
        """Sample the initial true state from N(mu_0, sigma_0) and store
        the movement / measurement noise variances."""
        self.x_true = mu_0 + sqrt(sigma_0)*randn()
        self.sigma_m_sq = sigma_m_sq
        self.sigma_z_sq = sigma_z_sq
    def get_z_t(self):
        """Return a noisy observation of the current true state
        (truth plus N(0, sigma_z_sq) measurement noise)."""
        measurement_noise = sqrt(self.sigma_z_sq)*randn()
        return self.x_true + measurement_noise
    def get_x_t(self):
        """Advance the true state one step along the random walk
        (add N(0, sigma_m_sq) movement noise) and return it."""
        step = sqrt(self.sigma_m_sq)*randn()
        self.x_true += step
        return self.x_true
class SimpleKalmanFilter(object):
    """ A Kalman filter node that estimates a single state x_t using noisy position measurements """
    def __init__(self):
        """ Sets up the world model and loads initial parameters """
        rospy.init_node('simple_kalman')
        # Interactive plotting so the figure refreshes without blocking the loop.
        plt.ion()
        # initial beliefs
        self.mu = 0
        self.sigma_sq = 1
        # motor noise
        sigma_m_sq = rospy.get_param('~sigma_m_sq', 0.01)
        # observation noise
        sigma_z_sq = rospy.get_param('~sigma_z_sq', .1)
        # time to pause between plots
        self.pause_time = rospy.get_param('~pause_time', 0.5)
        # Plot handles; created lazily on the first plot_pdf call.
        self.graphs = None
        self.world = SimpleWorld(self.mu, self.sigma_sq, sigma_m_sq, sigma_z_sq)
        # dynamic_reconfigure server lets noise/pause params change at runtime.
        srv = Server(SimpleKalmanConfig, self.config_callback)
    def config_callback(self, config, level):
        """ Get the pause_time, movement noise, and measurement noise """
        self.pause_time = config['pause_time']
        self.world.sigma_m_sq = config['sigma_m_sq']
        self.world.sigma_z_sq = config['sigma_z_sq']
        return config
    def run(self):
        # Main loop: observe, update the belief, plot, then let the world move.
        while not rospy.is_shutdown():
            # Graph new observation from the system
            z_t = self.world.get_z_t()
            self.graphs = self.plot_pdf(z_t)
            # Do Kalman updates
            # Gain = prior variance (after adding motion noise) over total
            # variance including measurement noise; then the standard
            # mean/variance update for a 1-D Kalman filter.
            K_t = (self.sigma_sq + self.world.sigma_m_sq)/(self.sigma_sq + self.world.sigma_m_sq + self.world.sigma_z_sq)
            self.mu = self.mu + K_t*(z_t - self.mu)
            self.sigma_sq = (1-K_t)*(self.sigma_sq+self.world.sigma_m_sq)
            plt.pause(self.pause_time)
            self.graphs = self.plot_pdf(z_t)
            # sample next state
            self.world.get_x_t()
            plt.pause(self.pause_time)
    def plot_pdf(self, z):
        """ Plot the Gaussian PDF with the specified mean (mu) and variance (sigma_sq)
            x_true is the true system state which will be plotted in blue
            z is the current observation which will be plotted in red """
        # x-axis range always covers the belief support, the truth, and the observation.
        xs = arange(min(-5,z-2,self.world.x_true-2), max(5,z+2,self.world.x_true+2), .005)
        # Gaussian density N(mu, sigma_sq) evaluated over xs.
        p_of_x = [1./sqrt(2*pi*self.sigma_sq)*e**(-(x - self.mu)**2/(2*self.sigma_sq)) for x in xs]
        plt.xlim([min(xs), max(xs)])
        if self.graphs:
            # Reuse the existing artists instead of re-creating the figure each frame.
            self.graphs[0].set_xdata(xs)
            self.graphs[0].set_ydata(p_of_x)
            self.graphs[1].set_xdata(self.world.x_true)
            self.graphs[2].set_xdata(z)
        else:
            self.graphs = []
            self.graphs.append(plt.plot(xs, p_of_x)[0])
            self.graphs.append(plt.plot(self.world.x_true, 0,'b.')[0])
            self.graphs.append(plt.plot(z, 0,'r.')[0])
            self.graphs[1].set_markersize(20)
            self.graphs[2].set_markersize(20)
        plt.ylim([0, 5])
        plt.legend(('probability density','true position','measured position'))
        plt.show(block=False)
        return self.graphs
return self.graphs
# Entry point: construct the filter node and spin its observe/update loop.
if __name__ == '__main__':
    node = SimpleKalmanFilter()
    node.run()
| [
"paullundyruvolo@gmail.com"
] | paullundyruvolo@gmail.com |
e39f77286b08e140ace992fada902db7cebe9a2d | 38ba110ce37dbb930c76f2e33afcfbe416f26ee1 | /analysis/pyFinder/entryChecker.py | 028cf171b9f566596fa62c044e1ac227c9a3b508 | [
"Apache-2.0",
"BSD-2-Clause"
] | permissive | gitter-badger/DockerFinder | 295d17aebd3820ac745ed10b9cd8d60a7a34498e | a8d0a99969a101543275192272140aa5904220e2 | refs/heads/master | 2020-05-23T10:17:23.313005 | 2017-01-30T10:33:33 | 2017-01-30T10:33:33 | 80,413,188 | 0 | 0 | null | 2017-01-30T10:50:46 | 2017-01-30T10:50:46 | null | UTF-8 | Python | false | false | 2,334 | py | from pyfinder import Checker
from docopt import docopt
import time
from os import path
import logging.config
__doc__= """Checker.
Usage:
entryChecker.py run [--interval=<10>] [--path-logging=</data/crawler/log/stats.log>] [--key=<images.scan>] [--amqp-url=<amqp://guest:guest@rabbitmq:5672>] [--ex=<dofinder>] [--queue=<images>] [--images-url=<http://images_server:3000/api/images/>][--hub-url=<https://hub.docker.com/>]
entryChecker.py verify
entryChecker.py (-h | --help)
entryChecker.py --version
Options:
-h --help Show this screen.
--amqp-url=AMQP-URL url of the rabbitMQ server [default: amqp://guest:guest@rabbitmq:5672]
--path-logging=PATH-LOGGING the path for storing [default: /data/crawler/log/stats.log]
--interval=interval interval time in seconds between two consecutnve cheks [default:10]
--ex=EXCHANGE The exchange name of the rabbitMQ [default: dofinder]
--queue==QUEUE Queue name of the rabbitMQ server [default: images]
--key==KEY Routing key used by the rabbitMQ server [default: images.scan]
--images-url=IMAGES_URL The url of the images service [default: http://images_server:3000/api/images/]
--hub-url=HUB-URL The url of the DockerHub [default: https://hub.docker.com/]
--version Show version.
"""
# interactive mode for scanner
#docker run -it --net=core-net --entrypoint=/bin/sh dofinder/scanner:latest
if __name__ == '__main__':
    # Parse CLI options against the docopt usage string in __doc__.
    args = docopt(__doc__, version='Scanner 0.0.1')
    # Logging config ships alongside the pyfinder package.
    log_file_path = path.dirname(path.abspath(__file__))+ '/pyfinder/resources/logging.conf'
    logging.config.fileConfig(log_file_path)
    logger = logging.getLogger()
    logger.info("Logging conf: "+ log_file_path)
    #print(args)
    # Wire the checker to RabbitMQ, the images service, and Docker Hub.
    checker = Checker( amqp_url=args['--amqp-url'],
                        exchange=args['--ex'],
                        queue=args['--queue'],
                        route_key=args['--key'],
                        images_url=args['--images-url'],
                        hub_url=args['--hub-url'],
                        path_file_logging=args['--path-logging']
                        )
    # 'run' polls forever at the given interval; 'verify' is a one-shot pass.
    if args['run']:
        checker.run(interval_next_check=int(args['--interval']))
    if args['verify']:
        checker.verify_images()
| [
"davideneri18@gmail.com"
] | davideneri18@gmail.com |
2c797dce206728933f0809dc69e3834e9b33077d | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/span/ldestination.py | 9a6afadcebc410e9ab61d574a95437569b884b82 | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,545 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class LDestination(Mo):
"""
The local SPAN destination, which is where network traffic is sent for analysis by a network analyzer. When you create a traffic monitoring session, you must select an local SPAN source and destination. The type of session (Tenant, Access, or Fabric) determines the allowed types of local SPAN sources and destinations. The destination can be either a port or endpoint group. If the destination is a port, it should not be one that has been configured for other purposes.
"""
meta = ClassMeta("cobra.model.span.LDestination")
meta.moClassName = "spanLDestination"
meta.rnFormat = "ldst"
meta.category = MoCategory.REGULAR
meta.label = "Local Span Destination"
meta.writeAccessMask = 0x20001
meta.readAccessMask = 0x20001
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.childClasses.add("cobra.model.fault.Inst")
meta.childClasses.add("cobra.model.fault.Counts")
meta.childClasses.add("cobra.model.health.Inst")
meta.childClasses.add("cobra.model.fault.Delegate")
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Counts", "fltCnts"))
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Inst", "fault-"))
meta.childNamesAndRnPrefix.append(("cobra.model.health.Inst", "health"))
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))
meta.parentClasses.add("cobra.model.span.Session")
meta.superClasses.add("cobra.model.span.Destination")
meta.superClasses.add("cobra.model.pol.Obj")
meta.superClasses.add("cobra.model.pol.Comp")
meta.superClasses.add("cobra.model.naming.NamedObject")
meta.rnPrefixes = [
('ldst', False),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "descr", "descr", 5582, PropCategory.REGULAR)
prop.label = "Description"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("descr", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "monPolDn", "monPolDn", 14403, PropCategory.REGULAR)
prop.label = "Monitoring policy attached to this observable object"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("monPolDn", prop)
prop = PropMeta("str", "mtu", "mtu", 2039, PropCategory.REGULAR)
prop.label = "MTU"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(64, 9216)]
prop.defaultValue = 1518
prop.defaultValueStr = "1518"
meta.props.add("mtu", prop)
prop = PropMeta("str", "name", "name", 4991, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("name", prop)
prop = PropMeta("str", "nameAlias", "nameAlias", 28417, PropCategory.REGULAR)
prop.label = "Name alias"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 63)]
prop.regex = ['[a-zA-Z0-9_.-]+']
meta.props.add("nameAlias", prop)
prop = PropMeta("str", "operSt", "operSt", 2040, PropCategory.REGULAR)
prop.label = "Operational State"
prop.isOper = True
prop.defaultValue = 2
prop.defaultValueStr = "down"
prop._addConstant("down", "down", 2)
prop._addConstant("failed", "failed", 3)
prop._addConstant("partial", "partial", 4)
prop._addConstant("unspecified", "unspecified", 0)
prop._addConstant("up", "up", 1)
meta.props.add("operSt", prop)
prop = PropMeta("str", "operStQual", "operStQual", 2041, PropCategory.REGULAR)
prop.label = "Operational State Qualifier"
prop.isOper = True
prop.defaultValue = 3
prop.defaultValueStr = "no-oper-src-dst"
prop._addConstant("Dst-PC-Member-Not-Supported-err", "a-pc-member-port-cannot-be-a-local-span-destination", 19)
prop._addConstant("active", "the-session-is-up", 1)
prop._addConstant("dummy-src-err", "span-src-is-partially/fully-impacted,-not-programmed-due-to-hw-res-exhaustion", 20)
prop._addConstant("error", "generic-error", 4)
prop._addConstant("hw-err", "hardware-error", 15)
prop._addConstant("invalid-dst-mode", "dst-in-wrong-mode", 9)
prop._addConstant("invalid-ip", "no-valid-ip-address", 12)
prop._addConstant("invalid-src-mode", "src-in-wrong-mode", 10)
prop._addConstant("invalid-vrf", "no-valid-vrf", 11)
prop._addConstant("no-dst", "no-dest-configured", 6)
prop._addConstant("no-eg-intf", "egress-interface-not-resolved", 14)
prop._addConstant("no-hw-res", "no-hardware-resource", 2)
prop._addConstant("no-oper-src-dst", "no-operational-src/dst", 3)
prop._addConstant("no-route", "no-route-to-destination-ip-address", 13)
prop._addConstant("no-src", "no-sources-configured", 5)
prop._addConstant("no-src-dst", "no-src/dst-configured", 7)
prop._addConstant("not-supported-err", "configuration-not-supported-on-this-tor", 17)
prop._addConstant("pc-with-lacp-err", "pc-destination-with-lacp-not-supported", 18)
prop._addConstant("shut", "session-admin-shut", 8)
prop._addConstant("ver-error", "erspan-version-not-supported", 16)
meta.props.add("operStQual", prop)
prop = PropMeta("str", "port", "port", 2051, PropCategory.REGULAR)
prop.label = "Port"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("port", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "spanCfgFailedBmp", "spanCfgFailedBmp", 14942, PropCategory.REGULAR)
prop.label = "None"
prop.isOper = True
prop._addConstant("span:LDestinationdescr_failed_flag", None, 4)
prop._addConstant("span:LDestinationlcOwn_failed_flag", None, -9223372036854775808)
prop._addConstant("span:LDestinationmodTs_failed_flag", None, 2305843009213693952)
prop._addConstant("span:LDestinationmonPolDn_failed_flag", None, 64)
prop._addConstant("span:LDestinationmtu_failed_flag", None, 8)
prop._addConstant("span:LDestinationnameAlias_failed_flag", None, 2)
prop._addConstant("span:LDestinationname_failed_flag", None, 1)
prop._addConstant("span:LDestinationport_failed_flag", None, 128)
meta.props.add("spanCfgFailedBmp", prop)
prop = PropMeta("str", "spanCfgFailedTs", "spanCfgFailedTs", 14944, PropCategory.REGULAR)
prop.label = "None"
prop.isOper = True
meta.props.add("spanCfgFailedTs", prop)
prop = PropMeta("str", "spanCfgState", "spanCfgState", 14943, PropCategory.REGULAR)
prop.label = "None"
prop.isOper = True
meta.props.add("spanCfgState", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
    def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
        # This managed object has no naming properties, hence the empty
        # namingVals list passed through to the Mo base constructor.
        namingVals = []
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"bkhoward@live.com"
] | bkhoward@live.com |
a1a41d18acaad2da2a50f52d33f420d7b5794950 | a54c9239370e279def187ac604a8c20523e61167 | /PA1/Knn/knn.py | 2686bed73242c5fe7ede4ced42396c2e772983dc | [] | no_license | ZhengyiWang/CSCI567 | 14edc586c22f17a59940da4a3b9d6736f9bb730e | 210fb823c3b2b286a8bd5c34e163c2bbd25bb77f | refs/heads/master | 2020-12-10T19:10:41.611374 | 2020-01-13T20:11:27 | 2020-01-13T20:11:27 | 233,682,599 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,987 | py | import numpy as np
from collections import Counter
class KNN:
    """k-nearest-neighbours classifier with a pluggable distance metric."""

    def __init__(self, k, distance_function):
        """Remember the neighbourhood size and the metric used to compare points.

        :param k: int, number of neighbours consulted per prediction
        :param distance_function: callable(point_a, point_b) -> float
        """
        self.k = k
        self.distance_function = distance_function

    def train(self, features, labels):
        """Memorise the training set; KNN has no fitting step beyond storage.

        :param features: List[List[float]] training points
        :param labels: List[int] class label for each training point
        """
        self.features = features
        self.labels = labels

    def get_k_neighbors(self, point):
        """Return the labels of the k training points closest to *point*.

        :param point: List[float]
        :return: List[int] labels ordered from nearest to farthest
        """
        tagged = [(label, self.distance_function(point, feature))
                  for label, feature in zip(self.labels, self.features)]
        # stable sort on distance keeps the original tie order, matching
        # the behaviour callers observe for equidistant neighbours
        tagged.sort(key=lambda pair: pair[1])
        return [label for label, _ in tagged[:self.k]]

    def predict(self, features):
        """Predict one label per query point by majority vote of its k neighbours.

        :param features: List[List[float]] query points
        :return: List[int] predicted labels
        """
        return [Counter(self.get_k_neighbors(point)).most_common(1)[0][0]
                for point in features]
# Smoke test when run as a script: just report the installed numpy version.
if __name__ == '__main__':
    print(np.__version__)
| [
"wbswzy@163.com"
] | wbswzy@163.com |
6b7db1315ed670a06db67503fedbfdaff88c9de0 | cf3e9b05f2d1751c2358b631f2c06c898dabc9a8 | /storyboard/plugin/scheduler/base.py | 8f227d22a7336228f4a157149b887a71c7e3ddde | [
"Apache-2.0"
] | permissive | Sitcode-Zoograf/storyboard | 130d5151aba7db3b29d257d6c140ad0a63bc914a | 5833f87e20722c524a1e4a0b8e1fb82206fb4e5c | refs/heads/master | 2020-04-11T22:46:27.045263 | 2018-12-04T15:51:58 | 2018-12-04T15:52:17 | 162,147,547 | 0 | 0 | Apache-2.0 | 2018-12-17T15:01:50 | 2018-12-17T15:01:49 | null | UTF-8 | Python | false | false | 1,707 | py | # Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing permissions and
# limitations under the License.
import abc
import six
from oslo_log import log
import storyboard.plugin.base as plugin_base
LOG = log.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class SchedulerPluginBase(plugin_base.PluginBase):
"""Base class for a plugin that executes business logic on a schedule.
All plugins are loaded into the scheduler in such a way that long-running
plugins will not cause multiple 'overlapping' executions.
"""
@abc.abstractmethod
def run(self):
"""Execute a periodic task. It is guaranteed that no more than one of
these will be run on any one storyboard instance. If you are running
multiple instances, that is not the case.
"""
@abc.abstractmethod
def trigger(self):
"""The plugin's scheduler trigger. Must implement BaseTrigger from
the apscheduler package.
:return: A trigger that describes the interval under which this
plugin should execute.
"""
def get_name(self):
"""A simple name for this plugin."""
return self.__module__ + ":" + self.__class__.__name__
| [
"krotscheck@gmail.com"
] | krotscheck@gmail.com |
f4c3288cf1c1417cd9ed9515fb2741abe00f3bb9 | 07b4dd9a88f3404c4851ea7cbb57c67035bc9a54 | /eric.py | b4e8a8f25539f10b194515115cc8fd428448ebe5 | [] | no_license | surajgholap/python-Misc | 9c9d02c42bb37b7378d7336343f8bef7cd802edf | 4a8ce4bfa5a959692d98663b7b5c0b67a165835f | refs/heads/master | 2021-06-17T19:19:25.021038 | 2021-01-27T20:54:03 | 2021-01-27T20:54:03 | 142,781,295 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,012 | py | from collections import Counter
import requests
def top_five(corpus):
    """Print and return the five most common whitespace-separated words.

    The previous version built a per-character frequency dict (iterating the
    string character by character, via a bare ``except``) and then discarded
    it; the intended word-frequency logic existed only as commented-out code.
    This restores that intent with ``collections.Counter``.

    :param corpus: the text to analyse
    :return: list of up to five ``(word, count)`` pairs, most common first
    """
    counter = Counter(corpus.split())
    most_fav = counter.most_common(5)
    for pair in most_fav:
        print(pair)
    return most_fav
def clean_func(corpus, stop):
    """Lower-case the corpus, keep purely alphabetic non-stopwords, and
    hand the cleaned text to top_five for frequency reporting.

    :param corpus: raw text, tokens separated by single spaces
    :param stop: collection of stopwords to discard
    """
    kept = [word
            for word in (token.lower() for token in corpus.split(" "))
            if word.isalpha() and word not in stop]
    top_five(" ".join(kept))
# Download the complete works of Shakespeare from MIT OCW.
response = requests.get(
    "https://ocw.mit.edu/ans7870/6/6.006/s08/lecturenotes/files/t8.shakespeare.txt")
# Download a list of English stopwords (NLTK's list, hosted as a GitHub gist).
stop_words = requests.get(
    "https://gist.githubusercontent.com/sebleier/554280/raw/7e0e4a1ce04c2bb7bd41089c9821dbcf6d0c786c/NLTK's%2520list%2520of%2520english%2520stopwords")
stop_list = stop_words.text.splitlines()  # one stopword per line
# print(stop_list)
content = response.text.splitlines()
content = " ".join(content[245:])  # drops the first 245 lines — presumably header/license text; confirm
# print(content)
clean_func(content, stop_list)
| [
"surajgholap27@gmail.com"
] | surajgholap27@gmail.com |
948ea6a3930199dc069fd25e26ed627e48dd2685 | a4228f707e918dc7d4d99850428db574848480e7 | /findECGRmutationsindimers/test_findpointmutationsindimers.py | 427882b300cebd2c44b1110454211a7aff5968e7 | [] | no_license | lvn3668/naivevariantcaller_ECGR_variantdetection | 09461bb6e17d3b50ab51659ba9c070b501a79fbd | 2dbbe193b3e6f928ffc18bd00025b2b22471b96f | refs/heads/main | 2023-08-28T05:04:57.412295 | 2021-10-19T15:37:19 | 2021-10-19T15:37:19 | 391,140,353 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 137 | py | from unittest import TestCase
class Test(TestCase):
    # Auto-generated placeholder: deliberately fails until the real test
    # for findpointmutationsindownstreamdimers is written.
    def test_findpointmutationsindownstreamdimers(self):
        self.fail()
| [
"lalitha.viswanathan79@gmail.com"
] | lalitha.viswanathan79@gmail.com |
afe31b960321c7edb54f5abc62e659f06f9bc691 | e18ad3935053a9447f5147fbbeac56d85d4b5b91 | /2019/12/12.2.py | 2078a7a9d98d6cfd1e9425916d12247a6e09d337 | [] | no_license | bo0tzz/AdventOfCode | edb2aaa6fac4e6646c86fe9c9d9de9a809508690 | 4a5a01df12ce67d464841cb7462d238753225de1 | refs/heads/master | 2023-01-13T23:18:51.454093 | 2022-12-22T11:56:38 | 2022-12-22T11:56:38 | 225,330,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,820 | py | import re
from math import gcd
# Parses one moon-position line of the form "<x=-1, y=7, z=3>".
regex = re.compile(r"<x=(?P<x>-?\d+), y=(?P<y>-?\d+), z=(?P<z>-?\d+)>")
class Moon:
    """A body with integer position (x, y, z) and velocity (dx, dy, dz)."""

    # (position attribute, matching velocity attribute) for each axis
    _AXES = (("x", "dx"), ("y", "dy"), ("z", "dz"))

    def __init__(self, x=0, y=0, z=0):
        self.x = x
        self.y = y
        self.z = z
        self.dx = 0
        self.dy = 0
        self.dz = 0

    def apply_grav(self, to):
        """Nudge this moon's velocity one unit towards *to* on every axis."""
        for pos, vel in self._AXES:
            here = getattr(self, pos)
            there = getattr(to, pos)
            if here != there:
                setattr(self, vel, getattr(self, vel) + (1 if here < there else -1))

    def step(self):
        """Advance the position by the current velocity."""
        for pos, vel in self._AXES:
            setattr(self, pos, getattr(self, pos) + getattr(self, vel))

    def __str__(self):
        return "<x={}, y={}, z={}>".format(self.x, self.y, self.z)

    def __repr__(self):
        return str(self)
def compare(i, j):
    """Three-way comparison: 1 when i < j, -1 when i > j, 0 when equal."""
    return (i < j) - (i > j)
# The three axes evolve independently, so simulate x, y and z separately,
# find each axis's cycle length, then combine them with an LCM.
with open('in', 'r') as infile:
    parsed = [regex.match(l) for l in infile.readlines()]
    moons = [(int(match.group('x')), int(match.group('y')), int(match.group('z'))) for match in parsed]
steps = []
for d in range(3):
    values = [t[d] for t in moons]  # positions on this axis
    velos = [0 for _ in values]  # all moons start at rest
    step = 0
    seen = []  # NOTE(review): unused leftover from an earlier approach
    initial_state = (values.copy(), velos.copy())
    while True:
        # apply pairwise gravity on this axis
        for i, v in enumerate(values):
            for j, w in enumerate(values):
                if i != j:
                    velos[i] += compare(v, w)
        # then apply velocity to positions
        for i, v in enumerate(velos):
            values[i] += v
        step += 1
        # stop when this axis returns to its initial (position, velocity) state
        if initial_state == (values, velos):
            break
    steps.append(step)
# total period is the least common multiple of the per-axis cycle lengths
lcm = steps[0]
for i in steps[1:]:
    lcm = lcm * i // gcd(lcm, i)
print(lcm)
| [
"boet@pescheck.nl"
] | boet@pescheck.nl |
b44857c46d895f4857faeac4e3deb4dff8e60872 | c7a1406b2230acaf412542124ef744c83171fa9a | /perdiem/campaign/apps.py | d14c5e8fe339d634cbe6cded1f0c73ad9cefa0c4 | [] | no_license | GilbertRoy/perdiem-django | de2f1351088597fb2b5e739388f28ff346e5e824 | 3d1f00b21a28f71cb89e49986d07b893e5abe1d9 | refs/heads/master | 2020-03-14T03:30:21.445845 | 2018-04-21T21:44:20 | 2018-04-21T21:44:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 146 | py | from django.apps import AppConfig
class CampaignConfig(AppConfig):
name = 'campaign'
def ready(self):
import campaign.signals
| [
"lucas.revolutiontech@gmail.com"
] | lucas.revolutiontech@gmail.com |
def greet_user(username):
    """Print a title-cased greeting; *username* may be any value (coerced to str)."""
    print(f"Hello, {str(username).title()}!")


greet_user(12)
| [
"noreply@github.com"
] | noreply@github.com |
7a03a57714a7e1e7c4be0d714266f119ffbe2667 | 5abc60a40ed26b7d1553c8777d0ebf5fae14f6bf | /chapter3.py | ddb5f017208e4cd531f6c2775aab53bce51f94f7 | [] | no_license | pankaj-raturi/python-practice | d72637b867c8c0cc118da7dd36dff9ed1a8693f2 | 968185f8dad51ebfbb1f9110dfe43964fd90c795 | refs/heads/master | 2022-11-21T19:56:24.253159 | 2020-07-26T17:55:04 | 2020-07-26T17:55:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,086 | py | #!/usr/bin/env python3
# Tutorial script: string operations, formatting, and standard input.
separatorLength = 40
lname = 'Raturi'
name = 'Pankaj ' + lname
# Get length of the string
length = len(name)  # NOTE(review): computed but never used below
# String Function
lower = name.lower()
print (lower)
# * is repetition operator for string
print ( '-' * separatorLength)
# Integer Object
age = 30
# Convert Integers to string objects
# Required to be concatenated with String Objects
print(name + ' Age: ' + str(age))
print ( '-' * separatorLength)
# ---------------------------------------------------------
# Formatting String (positional indices may appear out of order)
formatted = 'I am {1}\nMy Age is {0} years'. format(age,name)
print (formatted)
print ( '-' * separatorLength)
# -----------------------------------------------------------
# Format Specification (specifying the width of the objects for printing)
print('{0:10} | {1:>10}'. format('Fruits', 'Quantity'))
print('{0:10} | {1:>10.2f}'. format('Orange', 10))
print ( '-' * separatorLength)
# -----------------------------------------------------------
# Accept standard input (blocks until the user answers)
person = input('Please identify yourself: ')
print('Hi {}. How are you?'.format(person)) | [
"panakj@tripatinfoways.com"
] | panakj@tripatinfoways.com |
4ecf20d8722cab3c69e0ac1735765e8be64ebaaf | 2657a8cc69af7f4e805e62af30d204df49bd0bb4 | /domain/controller/icontroller.py | a7f60202a32374a97819aa18eef1d4fd13880efc | [
"Unlicense"
] | permissive | gajoc/indexing-api | 905cf6fa5b2fa68f15d100dcefa23e8b465ff078 | ce1663b3c6ff30d60557a4da521e48388b866260 | refs/heads/master | 2023-03-27T03:30:49.530020 | 2021-04-02T14:18:01 | 2021-04-02T14:18:01 | 226,409,823 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | py | from abc import ABC, abstractmethod
from utils.constants import UserAction
class IController(ABC):
    """Abstract controller: the user-interaction boundary of the application."""
    @abstractmethod
    def wait_for_user_action(self) -> UserAction:
        """Return the next UserAction chosen by the user (presumably blocking
        until input arrives — confirm against concrete implementations)."""
        pass
    @abstractmethod
    def before_exit(self):
        """Cleanup hook invoked once before the program terminates."""
        pass
| [
"pawel.maciejski@gmail.com"
] | pawel.maciejski@gmail.com |
e871199a0c8a30d8eeef55c5dcd37eb21f00c5ad | 404378e736e3b9c8b22cedda872af2da7562b58d | /Class Project/ClassProject_SenWang 2/FiPy-3.1.3/fipy/variables/distanceVariable.py | 99aec464772849bedb2044fc2604c1479e4df37f | [
"LicenseRef-scancode-public-domain"
] | permissive | wangsen992/bayesian-surrogate-modeling-coursework | d6d13b8fb457bc685d9fe51ef30c14c9cd3539b9 | 7abe9e6c5761b27ac99960fb2e4b98f4dda746eb | refs/heads/master | 2020-09-09T23:15:20.088030 | 2019-11-14T02:29:07 | 2019-11-14T02:29:07 | 221,593,138 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 22,920 | py | #!/usr/bin/env python
## -*-Pyth-*-
# ###################################################################
# FiPy - Python-based finite volume PDE solver
#
# FILE: "distanceVariable.py"
#
# Author: Jonathan Guyer <guyer@nist.gov>
# Author: Daniel Wheeler <daniel.wheeler@nist.gov>
# Author: James Warren <jwarren@nist.gov>
# mail: NIST
# www: http://www.ctcms.nist.gov/fipy/
#
# ========================================================================
# This software was developed at the National Institute of Standards
# and Technology by employees of the Federal Government in the course
# of their official duties. Pursuant to title 17 Section 105 of the
# United States Code this software is not subject to copyright
# protection and is in the public domain. FiPy is an experimental
# system. NIST assumes no responsibility whatsoever for its use by
# other parties, and makes no guarantees, expressed or implied, about
# its quality, reliability, or any other characteristic. We would
# appreciate acknowledgement if the software is used.
#
# This software can be redistributed and/or modified freely
# provided that any derivative works bear some notice that they are
# derived from it, and any modified versions bear some notice that
# they have been modified.
# ========================================================================
#
# ###################################################################
##
__docformat__ = 'restructuredtext'
from fipy.tools import numerix
from fipy.tools.numerix import MA
from fipy.tools.decorators import getsetDeprecated
from fipy.variables.cellVariable import CellVariable
from fipy.tests.doctestPlus import register_skipper
import sys
import os
def module_exists(module_name):
    """Return True if *module_name* can be imported, False otherwise.

    Note: this actually imports the module (with any side effects that
    implies), exactly like the behaviour callers already rely on.
    """
    try:
        __import__(module_name)
        return True
    except ImportError:
        return False
def _checkForLSMLIB():
    # True when the optional `pylsmlib` level-set backend is importable.
    return module_exists('pylsmlib')
def _checkForSKFMM():
    # True when the optional `skfmm` (Scikit-fmm) backend is importable.
    return module_exists('skfmm')
def _parseLSMSolver():
    """Choose the level-set solver backend: 'lsmlib', 'skfmm', or None.

    Precedence: an explicit command-line flag (--lsmlib / --skfmm) wins,
    then the FIPY_LSM environment variable, then whichever backend is
    importable (lsmlib preferred). A command-line choice that is not
    actually installed yields None.
    """
    flags = [arg.lower() for arg in sys.argv[1:]]
    if '--lsmlib' in flags:
        return "lsmlib" if _checkForLSMLIB() else None
    if '--skfmm' in flags:
        return "skfmm" if _checkForSKFMM() else None
    if 'FIPY_LSM' in os.environ:
        return os.environ['FIPY_LSM'].lower()
    if _checkForLSMLIB():
        return 'lsmlib'
    if _checkForSKFMM():
        return 'skfmm'
    return None
# Backend chosen once at import time: 'lsmlib', 'skfmm', or None.
LSM_SOLVER = _parseLSMSolver()
# Register doctest skippers so level-set doctests only run when a suitable
# backend is available (+LSM: any backend; +LSMLIB / +SKFMM: that specific one).
register_skipper(flag="LSM",
                 test=lambda : LSM_SOLVER is not None,
                 why="neither `lsmlib` nor `skfmm` can be found on the $PATH")
register_skipper(flag="LSMLIB",
                 test=lambda : LSM_SOLVER == 'lsmlib',
                 why="`lsmlib` must be used to run some tests")
register_skipper(flag="SKFMM",
                 test=lambda : LSM_SOLVER == 'skfmm',
                 why="`skfmm` must be used to run some tests")
__all__ = ["DistanceVariable"]
class DistanceVariable(CellVariable):
    r"""
    A `DistanceVariable` object calculates :math:`\phi` so it satisfies,
    .. math::
       \abs{\nabla \phi} = 1
    using the fast marching method with an initial condition defined
    by the zero level set. The solution can either be first or second
    order.
    Here we will define a few test cases. Firstly a 1D test case
    >>> from fipy.meshes import Grid1D
    >>> from fipy.tools import serialComm
    >>> mesh = Grid1D(dx = .5, nx = 8, communicator=serialComm)
    >>> from distanceVariable import DistanceVariable
    >>> var = DistanceVariable(mesh = mesh, value = (-1., -1., -1., -1., 1., 1., 1., 1.))
    >>> var.calcDistanceFunction() #doctest: +LSM
    >>> answer = (-1.75, -1.25, -.75, -0.25, 0.25, 0.75, 1.25, 1.75)
    >>> print var.allclose(answer) #doctest: +LSM
    1
    A 1D test case with very small dimensions.
    >>> dx = 1e-10
    >>> mesh = Grid1D(dx = dx, nx = 8, communicator=serialComm)
    >>> var = DistanceVariable(mesh = mesh, value = (-1., -1., -1., -1., 1., 1., 1., 1.))
    >>> var.calcDistanceFunction() #doctest: +LSM
    >>> answer = numerix.arange(8) * dx - 3.5 * dx
    >>> print var.allclose(answer) #doctest: +LSM
    1
    A 2D test case to test `_calcTrialValue` for a pathological case.
    >>> dx = 1.
    >>> dy = 2.
    >>> from fipy.meshes import Grid2D
    >>> mesh = Grid2D(dx = dx, dy = dy, nx = 2, ny = 3)
    >>> var = DistanceVariable(mesh = mesh, value = (-1., 1., 1., 1., -1., 1.))
    >>> var.calcDistanceFunction() #doctest: +LSM
    >>> vbl = -dx * dy / numerix.sqrt(dx**2 + dy**2) / 2.
    >>> vbr = dx / 2
    >>> vml = dy / 2.
    >>> crossProd = dx * dy
    >>> dsq = dx**2 + dy**2
    >>> top = vbr * dx**2 + vml * dy**2
    >>> sqrt = crossProd**2 *(dsq - (vbr - vml)**2)
    >>> sqrt = numerix.sqrt(max(sqrt, 0))
    >>> vmr = (top + sqrt) / dsq
    >>> answer = (vbl, vbr, vml, vmr, vbl, vbr)
    >>> print var.allclose(answer) #doctest: +LSM
    1
    The `extendVariable` method solves the following equation for a given
    extensionVariable.
    .. math::
       \nabla u \cdot \nabla \phi = 0
    using the fast marching method with an initial condition defined at
    the zero level set.
    >>> from fipy.variables.cellVariable import CellVariable
    >>> mesh = Grid2D(dx = 1., dy = 1., nx = 2, ny = 2, communicator=serialComm)
    >>> var = DistanceVariable(mesh = mesh, value = (-1., 1., 1., 1.))
    >>> var.calcDistanceFunction() #doctest: +LSM
    >>> extensionVar = CellVariable(mesh = mesh, value = (-1, .5, 2, -1))
    >>> tmp = 1 / numerix.sqrt(2)
    >>> print var.allclose((-tmp / 2, 0.5, 0.5, 0.5 + tmp)) #doctest: +LSM
    1
    >>> var.extendVariable(extensionVar, order=1) #doctest: +LSM
    >>> print extensionVar.allclose((1.25, .5, 2, 1.25)) #doctest: +LSM
    1
    >>> mesh = Grid2D(dx = 1., dy = 1., nx = 3, ny = 3, communicator=serialComm)
    >>> var = DistanceVariable(mesh = mesh, value = (-1., 1., 1.,
    ...                                               1., 1., 1.,
    ...                                               1., 1., 1.))
    >>> var.calcDistanceFunction(order=1) #doctest: +LSM
    >>> extensionVar = CellVariable(mesh = mesh, value = (-1., .5, -1.,
    ...                                                    2., -1., -1.,
    ...                                                   -1., -1., -1.))
    >>> v1 = 0.5 + tmp
    >>> v2 = 1.5
    >>> tmp1 = (v1 + v2) / 2 + numerix.sqrt(2. - (v1 - v2)**2) / 2
    >>> tmp2 = tmp1 + 1 / numerix.sqrt(2)
    >>> print var.allclose((-tmp / 2, 0.5, 1.5, 0.5, 0.5 + tmp,
    ...                      tmp1, 1.5, tmp1, tmp2)) #doctest: +LSM
    1
    >>> answer = (1.25, .5, .5, 2, 1.25, 0.9544, 2, 1.5456, 1.25)
    >>> var.extendVariable(extensionVar, order=1) #doctest: +LSM
    >>> print extensionVar.allclose(answer, rtol = 1e-4) #doctest: +LSM
    1
    Test case for a bug that occurs when initializing the distance
    variable at the interface. Currently it is assumed that adjacent cells
    that are opposite sign neighbors have perpendicular normal vectors. In
    fact the two closest cells could have opposite normals.
    >>> mesh = Grid1D(dx = 1., nx = 3)
    >>> var = DistanceVariable(mesh = mesh, value = (-1., 1., -1.))
    >>> var.calcDistanceFunction() #doctest: +LSM
    >>> print var.allclose((-0.5, 0.5, -0.5)) #doctest: +LSM
    1
    Testing second order. This example failed with Scikit-fmm.
    >>> mesh = Grid2D(dx = 1., dy = 1., nx = 4, ny = 4, communicator=serialComm)
    >>> var = DistanceVariable(mesh = mesh, value = (-1., -1., 1., 1.,
    ...                                              -1., -1., 1., 1.,
    ...                                              1., 1., 1., 1.,
    ...                                              1, 1, 1, 1))
    >>> var.calcDistanceFunction(order=2) #doctest: +LSM
    >>> answer = [-1.30473785, -0.5, 0.5, 1.49923009,
    ...           -0.5, -0.35355339, 0.5, 1.45118446,
    ...           0.5, 0.5, 0.97140452, 1.76215286,
    ...           1.49923009, 1.45118446, 1.76215286, 2.33721352]
    >>> print numerix.allclose(var, answer, rtol=1e-9) #doctest: +LSM
    True
    ** A test for a bug in both LSMLIB and Scikit-fmm **
    The following test gives different result depending on whether
    LSMLIB or Scikit-fmm is used. There is a deeper problem that is
    related to this issue. When a value becomes "known" after
    previously being a "trial" value it updates its neighbors'
    values. In a second order scheme the neighbors one step away also
    need to be updated (if the in between cell is "known" and the far
    cell is a "trial" cell), but are not in either package. By luck
    (due to trial values having the same value), the values calculated
    in Scikit-fmm for the following example are correct although an
    example that didn't work for Scikit-fmm could also be constructed.
    >>> mesh = Grid2D(dx = 1., dy = 1., nx = 4, ny = 4, communicator=serialComm)
    >>> var = DistanceVariable(mesh = mesh, value = (-1., -1., -1., -1.,
    ...                                              1., 1., -1., -1.,
    ...                                              1., 1., -1., -1.,
    ...                                              1., 1., -1., -1.))
    >>> var.calcDistanceFunction(order=2) #doctest: +LSM
    >>> var.calcDistanceFunction(order=2) #doctest: +LSM
    >>> answer = [-0.5, -0.58578644, -1.08578644, -1.85136395,
    ...           0.5, 0.29289322, -0.58578644, -1.54389939,
    ...           1.30473785, 0.5, -0.5, -1.5,
    ...           1.49547948, 0.5, -0.5, -1.5]
    The 3rd and 7th element are different for LSMLIB. This is because
    the 15th element is not "known" when the "trial" value for the 7th
    element is calculated. Scikit-fmm calculates the values in a
    slightly different order so gets a seemingly better answer, but
    this is just chance.
    >>> print numerix.allclose(var, answer, rtol=1e-9) #doctest: +SKFMM
    True
    """
    def __init__(self, mesh, name = '', value = 0., unit = None, hasOld = 0):
        """
        Creates a `distanceVariable` object.
        :Parameters:
          - `mesh`: The mesh that defines the geometry of this variable.
          - `name`: The name of the variable.
          - `value`: The initial value.
          - `unit`: the physical units of the variable
          - `hasOld`: Whether the variable maintains an old value.
        """
        CellVariable.__init__(self, mesh, name = name, value = value, unit = unit, hasOld = hasOld)
        # mark the cached value stale so it is recomputed on first access
        self._markStale()
    def _calcValue(self):
        # the distance field is produced externally (calcDistanceFunction);
        # evaluation simply hands back the stored array
        return self._value
    def extendVariable(self, extensionVariable, order=2):
        """
        Calculates the extension of `extensionVariable` from the zero
        level set.
        :Parameters:
          - `extensionVariable`: The variable to extend from the zero
            level set.
        """
        dx, shape = self.getLSMshape()
        extensionValue = numerix.reshape(extensionVariable, shape)
        phi = numerix.reshape(self._value, shape)
        # dispatch to whichever level-set backend was selected at import time
        if LSM_SOLVER == 'lsmlib':
            from pylsmlib import computeExtensionFields as extension_velocities
        elif LSM_SOLVER == 'skfmm':
            from skfmm import extension_velocities
        else:
            raise Exception("Neither `lsmlib` nor `skfmm` can be found on the $PATH")
        tmp, extensionValue = extension_velocities(phi, extensionValue, ext_mask=phi < 0., dx=dx, order=order)
        extensionVariable[:] = extensionValue.flatten()
    def getLSMshape(self):
        """Return the ``(dx, shape)`` grid description passed to the FMM
        backends; note the (dy, dx) / (ny, nx) row-major ordering in 2D."""
        mesh = self.mesh
        if hasattr(mesh, 'nz'):
            raise Exception("3D meshes not yet implemented")
        elif hasattr(mesh, 'ny'):
            dx = (mesh.dy, mesh.dx)
            shape = (mesh.ny, mesh.nx)
        elif hasattr(mesh, 'nx'):
            dx = (mesh.dx,)
            shape = mesh.shape
        else:
            raise Exception("Non grid meshes can not be used for solving the FMM.")
        return dx, shape
    def calcDistanceFunction(self, order=2):
        """
        Calculates the `distanceVariable` as a distance function.
        :Parameters:
          - `order`: The order of accuracy for the distance function
            calculation, either 1 or 2.
        """
        dx, shape = self.getLSMshape()
        # dispatch to whichever level-set backend was selected at import time
        if LSM_SOLVER == 'lsmlib':
            from pylsmlib import distance
        elif LSM_SOLVER == 'skfmm':
            from skfmm import distance
        else:
            raise Exception("Neither `lsmlib` nor `skfmm` can be found on the $PATH")
        self._value = distance(numerix.reshape(self._value, shape), dx=dx, order=order).flatten()
        self._markFresh()
    @getsetDeprecated
    def getCellInterfaceAreas(self):
        # deprecated accessor kept for backwards compatibility; use the property
        return self.cellInterfaceAreas
    @property
    def cellInterfaceAreas(self):
        """
        Returns the length of the interface that crosses the cell
        A simple 1D test:
        >>> from fipy.meshes import Grid1D
        >>> mesh = Grid1D(dx = 1., nx = 4)
        >>> distanceVariable = DistanceVariable(mesh = mesh,
        ...                                     value = (-1.5, -0.5, 0.5, 1.5))
        >>> answer = CellVariable(mesh=mesh, value=(0, 0., 1., 0))
        >>> print numerix.allclose(distanceVariable.cellInterfaceAreas,
        ...                        answer)
        True
        A 2D test case:
        >>> from fipy.meshes import Grid2D
        >>> from fipy.variables.cellVariable import CellVariable
        >>> mesh = Grid2D(dx = 1., dy = 1., nx = 3, ny = 3)
        >>> distanceVariable = DistanceVariable(mesh = mesh,
        ...                                     value = (1.5, 0.5, 1.5,
        ...                                              0.5,-0.5, 0.5,
        ...                                              1.5, 0.5, 1.5))
        >>> answer = CellVariable(mesh=mesh,
        ...                       value=(0, 1, 0, 1, 0, 1, 0, 1, 0))
        >>> print numerix.allclose(distanceVariable.cellInterfaceAreas, answer)
        True
        Another 2D test case:
        >>> mesh = Grid2D(dx = .5, dy = .5, nx = 2, ny = 2)
        >>> from fipy.variables.cellVariable import CellVariable
        >>> distanceVariable = DistanceVariable(mesh = mesh,
        ...                                     value = (-0.5, 0.5, 0.5, 1.5))
        >>> answer = CellVariable(mesh=mesh,
        ...                       value=(0, numerix.sqrt(2) / 4, numerix.sqrt(2) / 4, 0))
        >>> print numerix.allclose(distanceVariable.cellInterfaceAreas,
        ...                        answer)
        True
        Test to check that the circumference of a circle is, in fact,
        :math:`2\pi r`.
        >>> mesh = Grid2D(dx = 0.05, dy = 0.05, nx = 20, ny = 20)
        >>> r = 0.25
        >>> x, y = mesh.cellCenters
        >>> rad = numerix.sqrt((x - .5)**2 + (y - .5)**2) - r
        >>> distanceVariable = DistanceVariable(mesh = mesh, value = rad)
        >>> print numerix.allclose(distanceVariable.cellInterfaceAreas.sum(), 1.57984690073)
        1
        """
        from fipy.variables.interfaceAreaVariable import _InterfaceAreaVariable
        return _InterfaceAreaVariable(self)
    @getsetDeprecated
    def _getCellInterfaceNormals(self):
        return self._cellInterfaceNormals
    @property
    def _cellInterfaceNormals(self):
        """
        Returns the interface normals over the cells.
        >>> from fipy.meshes import Grid2D
        >>> from fipy.variables.cellVariable import CellVariable
        >>> mesh = Grid2D(dx = .5, dy = .5, nx = 2, ny = 2)
        >>> distanceVariable = DistanceVariable(mesh = mesh,
        ...                                     value = (-0.5, 0.5, 0.5, 1.5))
        >>> v = 1 / numerix.sqrt(2)
        >>> answer = CellVariable(mesh=mesh,
        ...                       value=(((0, 0, v, 0),
        ...                               (0, 0, 0, 0),
        ...                               (0, 0, 0, 0),
        ...                               (0, v, 0, 0)),
        ...                              ((0, 0, v, 0),
        ...                               (0, 0, 0, 0),
        ...                               (0, 0, 0, 0),
        ...                               (0, v, 0, 0))))
        >>> print numerix.allclose(distanceVariable._cellInterfaceNormals, answer)
        True
        """
        dim = self.mesh.dim
        valueOverFaces = numerix.repeat(self._cellValueOverFaces[numerix.newaxis, ...], dim, axis=0)
        cellFaceIDs = self.mesh.cellFaceIDs
        if cellFaceIDs.shape[-1] > 0:
            interfaceNormals = self._interfaceNormals[...,cellFaceIDs]
        else:
            interfaceNormals = 0
        # zero out normals for faces whose cell value is negative
        return MA.where(valueOverFaces < 0, 0, interfaceNormals)
    @getsetDeprecated
    def _getInterfaceNormals(self):
        return self._interfaceNormals
    @property
    def _interfaceNormals(self):
        """
        Returns the normals on the boundary faces only, the other are set to zero.
        >>> from fipy.meshes import Grid2D
        >>> from fipy.variables.faceVariable import FaceVariable
        >>> mesh = Grid2D(dx = .5, dy = .5, nx = 2, ny = 2)
        >>> distanceVariable = DistanceVariable(mesh = mesh,
        ...                                     value = (-0.5, 0.5, 0.5, 1.5))
        >>> v = 1 / numerix.sqrt(2)
        >>> answer = FaceVariable(mesh=mesh,
        ...                       value=((0, 0, v, 0, 0, 0, 0, v, 0, 0, 0, 0),
        ...                              (0, 0, v, 0, 0, 0, 0, v, 0, 0, 0, 0)))
        >>> print numerix.allclose(distanceVariable._interfaceNormals, answer)
        True
        """
        M = self.mesh.dim
        interfaceFlag = numerix.repeat(self._interfaceFlag[numerix.newaxis, ...], M, axis=0)
        return numerix.where(interfaceFlag, self._levelSetNormals, 0)
    @getsetDeprecated
    def _getInterfaceFlag(self):
        return self._interfaceFlag
    @property
    def _interfaceFlag(self):
        """
        Returns 1 for faces on boundary and 0 otherwise.
        >>> from fipy.meshes import Grid2D
        >>> from fipy.variables.faceVariable import FaceVariable
        >>> mesh = Grid2D(dx = .5, dy = .5, nx = 2, ny = 2)
        >>> distanceVariable = DistanceVariable(mesh = mesh,
        ...                                     value = (-0.5, 0.5, 0.5, 1.5))
        >>> answer = FaceVariable(mesh=mesh,
        ...                       value=(0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0))
        >>> print numerix.allclose(distanceVariable._interfaceFlag, answer)
        True
        """
        adjacentCellIDs = self.mesh._adjacentCellIDs
        val0 = numerix.take(numerix.array(self._value), adjacentCellIDs[0])
        val1 = numerix.take(numerix.array(self._value), adjacentCellIDs[1])
        # a face lies on the interface when phi changes sign across it
        return numerix.where(val1 * val0 < 0, 1, 0)
    @getsetDeprecated
    def _getCellInterfaceFlag(self):
        return self._cellInterfaceFlag
    @property
    def _cellInterfaceFlag(self):
        """
        Returns 1 for those cells on the interface:
        >>> from fipy.meshes import Grid2D
        >>> from fipy.variables.cellVariable import CellVariable
        >>> mesh = Grid2D(dx = .5, dy = .5, nx = 2, ny = 2)
        >>> distanceVariable = DistanceVariable(mesh = mesh,
        ...                                     value = (-0.5, 0.5, 0.5, 1.5))
        >>> answer = CellVariable(mesh=mesh, value=(0, 1, 1, 0))
        >>> print numerix.allclose(distanceVariable._cellInterfaceFlag, answer)
        True
        """
        from fipy.variables.interfaceFlagVariable import _InterfaceFlagVariable
        return _InterfaceFlagVariable(self)
    @getsetDeprecated
    def _getCellValueOverFaces(self):
        return self._cellValueOverFaces
    @property
    def _cellValueOverFaces(self):
        """
        Returns the cells values at the faces.
        >>> from fipy.meshes import Grid2D
        >>> from fipy.variables.cellVariable import CellVariable
        >>> mesh = Grid2D(dx = .5, dy = .5, nx = 2, ny = 2)
        >>> distanceVariable = DistanceVariable(mesh = mesh,
        ...                                     value = (-0.5, 0.5, 0.5, 1.5))
        >>> answer = CellVariable(mesh=mesh,
        ...                       value=((-.5, .5, .5, 1.5),
        ...                              (-.5, .5, .5, 1.5),
        ...                              (-.5, .5, .5, 1.5),
        ...                              (-.5, .5, .5, 1.5)))
        >>> print numerix.allclose(distanceVariable._cellValueOverFaces, answer)
        True
        """
        M = self.mesh._maxFacesPerCell
        N = self.mesh.numberOfCells
        return numerix.reshape(numerix.repeat(numerix.array(self._value)[numerix.newaxis, ...], M, axis=0), (M, N))
    @getsetDeprecated
    def _getLevelSetNormals(self):
        return self._levelSetNormals
    @property
    def _levelSetNormals(self):
        """
        Return the face level set normals.
        >>> from fipy.meshes import Grid2D
        >>> from fipy.variables.faceVariable import FaceVariable
        >>> mesh = Grid2D(dx = .5, dy = .5, nx = 2, ny = 2)
        >>> distanceVariable = DistanceVariable(mesh = mesh,
        ...                                     value = (-0.5, 0.5, 0.5, 1.5))
        >>> v = 1 / numerix.sqrt(2)
        >>> answer = FaceVariable(mesh=mesh,
        ...                       value=((0, 0, v, v, 0, 0, 0, v, 0, 0, v, 0),
        ...                              (0, 0, v, v, 0, 0, 0, v, 0, 0, v, 0)))
        >>> print numerix.allclose(distanceVariable._levelSetNormals, answer)
        True
        """
        faceGrad = self.grad.arithmeticFaceValue
        faceGradMag = numerix.array(faceGrad.mag)
        # clamp the magnitude away from zero to avoid division by zero below
        faceGradMag = numerix.where(faceGradMag > 1e-10,
                                    faceGradMag,
                                    1e-10)
        faceGrad = numerix.array(faceGrad)
        ## set faceGrad zero on exteriorFaces
        exteriorFaces = self.mesh.exteriorFaces
        if len(exteriorFaces.value) > 0:
            faceGrad[..., exteriorFaces.value] = 0.
        return faceGrad / faceGradMag
def _test():
    # Run this module's doctests through fipy's extended doctest runner
    # (which honours the +LSM / +LSMLIB / +SKFMM skip flags).
    import fipy.tests.doctestPlus
    return fipy.tests.doctestPlus.testmod()
if __name__ == "__main__":
    _test()
| [
"wangsen992@gmail.com"
] | wangsen992@gmail.com |
10c09cc8d92c76de700e05fe1be530bbb786fb3b | 9b5b97b6a5278b6fa5998deb2abdaa9254772982 | /dcc/__init__.py | c607c1950e0b91e9b6d3ad5fd4b31578e580c03c | [] | no_license | majkee15/Dynamic-Credit-Collections | add4b8208e0bfa77f986e8b9d36b21c85cd9fbd8 | aa3af53f09da34c60578750bd0a2702a5d60766d | refs/heads/first_feature | 2023-07-31T18:45:02.758024 | 2020-03-20T10:55:51 | 2020-03-20T10:55:51 | 248,341,936 | 1 | 0 | null | 2023-07-06T21:32:35 | 2020-03-18T21:00:44 | Python | UTF-8 | Python | false | false | 177 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__version__ = "0.0.1"
__author__ = "Michael Mark"
from dcc.aav import AAV
from dcc.oav import OAV
from dcc.oav import Parameters
| [
"92mark.michael@gmail.com"
] | 92mark.michael@gmail.com |
9d93162fa5e6cbd885e8b781ec062cc14951d04e | 595c2c372a4dad5429eca7e1383fe5eeae53b8fa | /src/restoran/migrations/0002_auto_20151203_1858.py | e5c6daeb980ed412882a0ea688b82a71f56db6f9 | [] | no_license | micic1marko/ISAProjekat | 89496a3ed316488fb1b64b2831f4e775dd3620fd | 9da1a90900f993f7ce45170ec6ca5ade97532ce4 | refs/heads/master | 2021-01-10T06:37:56.806232 | 2016-01-20T17:12:29 | 2016-01-20T17:12:29 | 50,045,739 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 993 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-03 17:58
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration (Django 1.9): adds explicit ``related_name``s
    # to the ForeignKey/M2M fields on Pozivnica and Rezervacija so their
    # reverse accessors on Korisnik no longer clash with one another.

    # Must be applied on top of the initial ``restoran`` migration.
    dependencies = [
        ('restoran', '0001_initial'),
    ]

    operations = [
        # Invitations received by a user: korisnik.pozvani
        migrations.AlterField(
            model_name='pozivnica',
            name='korisnik',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='pozvani', to='restoran.Korisnik'),
        ),
        # Reservations owned by a user: korisnik.korisnik
        migrations.AlterField(
            model_name='rezervacija',
            name='korisnik',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='korisnik', to='restoran.Korisnik'),
        ),
        # Guests invited to a reservation: korisnik.zvanice
        migrations.AlterField(
            model_name='rezervacija',
            name='zvanice',
            field=models.ManyToManyField(related_name='zvanice', to='restoran.Korisnik'),
        ),
    ]
| [
"chakmi1@gmail.com"
] | chakmi1@gmail.com |
c9bb6bbe1d55ee3cb293d76cf55d2a8caf940bc9 | 5ae710faf55925b64648ff45c804f9b92713a3ce | /common/basics_setting.py | cdc69f9ab77a7f41fab96b7fd28696f1007106f2 | [] | no_license | baobaobaozhu135/webupdate | b731a7376cf6f934f2131033a06fcd2e818e821b | 58e473f0c1d39374c2ca5e831263cb3f8f0a47f0 | refs/heads/master | 2020-04-17T03:20:31.726507 | 2019-01-17T07:07:36 | 2019-01-17T07:07:36 | 166,177,597 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24 | py | # _*_encoding:utf-8_*_
| [
"jiayunxiaoju@sina.com"
] | jiayunxiaoju@sina.com |
899a70babc2e3061c78a33d1cedabf110f87a464 | 70e4234519613fa6d680691ebe99f5174df660d3 | /Fabio02_P01/F2_P1_Q24_EQUACAO2GRAU.py | 8dc2c2e5b201a9ebcce6b09c4d47f6e60f869cba | [] | no_license | erickcarvalho1/ifpi-ads-algoritmos2020 | 061a39454f487adb9be82879b9270379486498ba | 26258c1457fe53a086e206c129e5bcd93edf1113 | refs/heads/master | 2021-04-15T02:02:16.074706 | 2020-03-22T22:48:16 | 2020-03-22T22:48:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | #Leia os coeficientes (A, B e C) de uma equações de 2° grau e escreva suas raízes.
#coeficiente A deve ser diferente de 0 (zero).
a = int(input('Digite o coeficiente A '))
b = int(input('Digite o coeficiente B '))
c = int(input('Digite o coeficiente C '))
delta = (b**2) - 4 * a * c
raiz_1 = (-b + delta**1/2) / 2*a
raiz_2 = (-b - delta**1/2) / 2*a
print(raiz_1 + raiz_2) | [
"noreply@github.com"
] | noreply@github.com |
91e49ed8baadda13eddc5efb65f249231f7cd04f | 73c85588aa13c39dfaf698da0a38ae7f43f5ad33 | /blog/models.py | a222cf1baeb757c2cec7d431d8b34061f4d1d4bd | [] | no_license | hlorofilka/YoutubeTutorial | dfa23d6757533381b64a7c9683ac3b39de7dc1f2 | 93abf1a3f140f0a90ab602be40464e1b23802f44 | refs/heads/master | 2022-05-12T08:14:16.161641 | 2019-07-30T20:59:25 | 2019-07-30T20:59:25 | 195,212,613 | 0 | 0 | null | 2022-04-22T22:05:50 | 2019-07-04T09:34:36 | Python | UTF-8 | Python | false | false | 519 | py | from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from django.urls import reverse
class Post(models.Model):
title = models.CharField(max_length =100)
content = models.TextField()
date_posted = models.DateTimeField(default = timezone.now)
author = models.ForeignKey(User, on_delete = models.CASCADE)
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('post-detail', kwargs = {'pk': self.pk})
| [
"hlorofilk@mail.ru"
] | hlorofilk@mail.ru |
d9c99610497792b9b638da1136aec4848607694b | d9a8510e2adb2bae13114d3367f6a2e29ff1c1fb | /FeatureSelection.py | 5783fcda2dace225596a0db1080083a602788c00 | [] | no_license | shayantanikar/Team_Xplore_fake_news_classifier | da16789fdcb2112f12593e871155b57e38c26b3c | 0d7f246ed634a24476ed4f72578ffba195fb8d9d | refs/heads/main | 2023-04-05T04:25:32.469316 | 2021-04-11T07:03:28 | 2021-04-11T07:03:28 | 356,785,499 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,091 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 9 14:13:38 2021
@author: Shayantani Kar
Note: before we can train an algorithm to classify fake news labels, we need to extract features from it. It means reducing the mass
of unstructured data into some uniform set of attributes that an algorithm can understand. For fake news detection, it could be
word counts (bag of words).
"""
import DataPrep
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline
import nltk
import nltk.corpus
from nltk.tokenize import word_tokenize
#we will start with simple bag of words technique
#creating feature vector - document term matrix
# Fit a plain bag-of-words model over the training statements
# (rows = documents, columns = vocabulary terms).
countV = CountVectorizer()
train_count = countV.fit_transform(DataPrep.train_news['Statement'].values)
print(countV)
print(train_count)
#print training doc term matrix
#we have matrix of size of (10240, 12196) by calling below
def get_countVectorizer_stats():
    """Print debugging stats for the module-level bag-of-words matrix."""
    #vocab size
    # NOTE(review): the expression's value is discarded -- presumably this
    # was meant to be printed; confirm intent.
    train_count.shape

    #check vocabulary using below command
    print(countV.vocabulary_)

    #get feature names
    print(countV.get_feature_names()[:25])
#create tf-df frequency features
#tf-idf
# Re-weight the raw term counts by inverse document frequency.
tfidfV = TfidfTransformer()
train_tfidf = tfidfV.fit_transform(train_count)

def get_tfidf_stats():
    """Print debugging stats for the module-level tf-idf matrix."""
    # NOTE(review): value discarded -- presumably meant to be printed.
    train_tfidf.shape

    #get train data feature names
    print(train_tfidf.A[:10])
#bag of words - with n-grams
#countV_ngram = CountVectorizer(ngram_range=(1,3),stop_words='english')
#tfidf_ngram = TfidfTransformer(use_idf=True,smooth_idf=True)
# 1- to 4-gram tf-idf vectoriser for downstream classifiers.
tfidf_ngram = TfidfVectorizer(stop_words='english',ngram_range=(1,4),use_idf=True,smooth_idf=True)

#POS Tagging
# Penn-Treebank tagged sentences shipped with NLTK.
tagged_sentences = nltk.corpus.treebank.tagged_sents()
# NOTE(review): ``cutoff`` is never used below -- a 75/25 train/test split
# was presumably intended; confirm.
cutoff = int(.75 * len(tagged_sentences))
training_sentences = DataPrep.train_news['Statement']

print(training_sentences)
#training POS tagger based on words
def features(sentence, index):
    """Build a surface-feature dict for POS-tagging one word.

    sentence: [w1, w2, ...]; index: position of the target word.
    """
    word = sentence[index]
    last = len(sentence) - 1
    return {
        'word': word,
        'is_first': index == 0,
        'is_last': index == last,
        'is_capitalized': word[0].upper() == word[0],
        'is_all_caps': word.upper() == word,
        'is_all_lower': word.lower() == word,
        'prefix-1': word[0],
        'prefix-2': word[:2],
        'prefix-3': word[:3],
        'suffix-1': word[-1],
        'suffix-2': word[-2:],
        'suffix-3': word[-3:],
        # Neighbouring words, or '' at either sentence boundary.
        'prev_word': sentence[index - 1] if index else '',
        'next_word': sentence[index + 1] if index != last else '',
        'has_hyphen': '-' in word,
        'is_numeric': word.isdigit(),
        'capitals_inside': word[1:].lower() != word[1:],
    }
#helper function to strip tags from tagged corpus
def untag(tagged_sentence):
    """Strip the POS tags from a tagged sentence, keeping only the words."""
    words = []
    for word, _tag in tagged_sentence:
        words.append(word)
    return words
#Using Word2Vec
# Load pre-trained GloVe vectors into a {token: vector} dict.
# NOTE(review): the file is opened in binary mode, so the keys are ``bytes``;
# also ``np.array(map(...))`` wraps the map object itself on Python 3
# (a 0-d object array) -- wrap with ``list(...)`` if this path is exercised.
with open("glove.6B.50d.txt", "rb") as lines:
    w2v = {line.split()[0]: np.array(map(float, line.split()[1:]))
           for line in lines}

#model = gensim.models.Word2Vec(X, size=100) # x be tokenized text
#w2v = dict(zip(model.wv.index2word, model.wv.syn0))
class MeanEmbeddingVectorizer(object):
    """Sklearn-style transformer mapping each token list to the mean of
    its words' embedding vectors (a zero vector when no token is known)."""

    def __init__(self, word2vec):
        self.word2vec = word2vec
        # if a text is empty we should return a vector of zeros
        # with the same dimensionality as all the other vectors.
        # Bug fix: ``dict.itervalues().next()`` is Python 2 only and raises
        # AttributeError on Python 3; use the iterator protocol instead.
        self.dim = len(next(iter(word2vec.values())))

    def fit(self, X, y):
        # Stateless transformer: nothing to learn.
        return self

    def transform(self, X):
        # Average the vectors of the in-vocabulary words of each document;
        # fall back to a zero vector when none of the words are known.
        return np.array([
            np.mean([self.word2vec[w] for w in words if w in self.word2vec]
                    or [np.zeros(self.dim)], axis=0)
            for words in X
        ])
"""
class TfidfEmbeddingVectorizer(object):
def __init__(self, word2vec):
self.word2vec = word2vec
self.word2weight = None
self.dim = len(word2vec.itervalues().next())
def fit(self, X, y):
tfidf = TfidfVectorizer(analyzer=lambda x: x)
tfidf.fit(X)
# if a word was never seen - it must be at least as infrequent
# as any of the known words - so the default idf is the max of
# known idf's
max_idf = max(tfidf.idf_)
self.word2weight = defaultdict(
lambda: max_idf,
[(w, tfidf.idf_[i]) for w, i in tfidf.vocabulary_.items()])
return self
def transform(self, X):
return np.array([
np.mean([self.word2vec[w] * self.word2weight[w]
for w in words if w in self.word2vec] or
[np.zeros(self.dim)], axis=0)
for words in X
])
"""
| [
"noreply@github.com"
] | noreply@github.com |
ce72b7bb239177efb435d6cc7d06c93e1377518a | 3fa8eead6e001c4d5a6dc5b1fd4c7b01d7693292 | /ros _navigation_in_5_days/src/initialize_particles/scripts/init_particles_caller.py | e82c4f46632c1080b34ac406afdbf5a7b7ed4ca5 | [] | no_license | MarzanShuvo/Ros_from_the_construct | 09261902841cdd832672658947790ec5fbba4cd3 | 4798234284d9d0bab3751e9d8ac2df95ae34a5bf | refs/heads/master | 2023-08-24T17:28:09.182113 | 2021-10-23T07:57:02 | 2021-10-23T07:57:02 | 339,105,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 329 | py | #! /usr/bin/env python
import rospy
from std_srvs.srv import Empty, EmptyRequest
import sys
# Initialise a throwaway node, then call AMCL's /global_localization
# service to scatter its particle cloud uniformly over the map.
rospy.init_node('service_client')
# Block until the service is advertised.
rospy.wait_for_service('/global_localization')
disperse_particles_service = rospy.ServiceProxy('/global_localization', Empty)
msg = EmptyRequest()  # the service takes no arguments
result = disperse_particles_service(msg)
print(result)
"marzanalam3@gmail.com"
] | marzanalam3@gmail.com |
3b431301daa97683f32e90c2dbabcb8bda107f42 | 6babbfa6c4225d358236a0a285675311d132be54 | /Flask/MyApp/app/__init__.py | 88b19c8af31e8bdec441221d858887417f97548d | [] | no_license | NurFaizin/SDP | 6bc9610511b2d3ce17de76eddc5c7a3d2bb2b271 | 79fcb32fde22e29bde4790e2ae556b17ef4167ad | refs/heads/master | 2021-01-19T10:38:47.077038 | 2015-06-27T02:57:32 | 2015-06-27T02:57:32 | 33,845,719 | 0 | 1 | null | 2015-04-14T20:51:42 | 2015-04-13T03:25:09 | Shell | UTF-8 | Python | false | false | 1,460 | py | import os
import sys
from flask import Flask, render_template
from flask.ext.sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config.from_object('config')
db = SQLAlchemy(app)
########################
# Configure Secret Key #
########################
def install_secret_key(app, filename='secret_key'):
    """Configure the SECRET_KEY from a file
    in the instance directory.

    If the file does not exist, print instructions
    to create it from a shell with a random key,
    then exit.
    """
    filename = os.path.join(app.instance_path, filename)
    try:
        # ``with`` ensures the key file is closed (the original leaked the
        # open file object).
        with open(filename, 'rb') as key_file:
            app.config['SECRET_KEY'] = key_file.read()
    except IOError:
        print('Error: No secret key. Create it with:')
        full_path = os.path.dirname(filename)
        if not os.path.isdir(full_path):
            # Bug fix: the format strings had lost their ``{filename}``
            # placeholders, so the printed commands omitted the paths.
            print('mkdir -p {filename}'.format(filename=full_path))
        print('head -c 24 /dev/urandom > {filename}'.format(filename=filename))
        sys.exit(1)
if not app.config['DEBUG']:
install_secret_key(app)
@app.errorhandler(404)
def not_found(error):
    """Render the custom 404 page for unknown URLs."""
    return render_template('404.html'), 404
from app.users.controllers import mod as usersModule
app.register_blueprint(usersModule)
# Later on you'll import the other blueprints the same way:
#from app.comments.controllers import mod as commentsModule
#from app.posts.controllers import mod as postsModule
#app.register_blueprint(commentsModule)
#app.register_blueprint(postsModule) | [
"nur.faizin91@gmail.com"
] | nur.faizin91@gmail.com |
52dc39a488a2ce7850783f9f271c8550f4b50866 | fa83f4972b02537046174b7f7b9baa43e8a6bb71 | /household/asgi.py | fb139b8d0598b1d0f738df36d91ee1c4c8c33bcf | [] | no_license | arsen-movsesyan/household-api | fe0e1ab7b00ddec70737a625b0b099d59362759c | 3d74635465c2f44b077b18ae8c20d3a4d9c7216c | refs/heads/master | 2023-04-24T20:02:27.356126 | 2021-05-11T16:34:10 | 2021-05-11T16:34:10 | 363,224,081 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | """
ASGI config for household project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'household.settings')
application = get_asgi_application()
| [
"arsen.movsesyan@gmail.com"
] | arsen.movsesyan@gmail.com |
9d69ab143b4f3cc545752c84e558324779dd61ee | 2a140fcf596163995b7d3099b9aaf4a73cb1b292 | /Lab2_DatabaseCreation/CARS-build-cars-data.py | 4aa216c4053c1083fc47dd10828ee725e8225719 | [] | no_license | mmurdock95/csc365-Database-Systems | e528dcebefab947f4deb96edf25b8c5b9527a19d | ab4cf7a239f0dac829bd388cb4cddbf62861ef73 | refs/heads/master | 2021-05-04T21:06:45.303722 | 2018-02-01T20:18:59 | 2018-02-01T20:18:59 | 119,885,404 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 806 | py | #Michael Murdock
#rmurdock@calpoly.edu
#CSC365 EBuckalew
#Lab 2
import csv
#read in the csv file and seperate the columns
fileName = "cars-data"
file = open(fileName + ".csv", 'r')
fileOut = open(fileName + ".txt", 'w')
f1 = "id"
f2 = "mpg"
f3 = "cylinders"
f4 = "eDispl"
f5 = "horsepower"
f6 = "weight"
f7 = "accelerate"
f8 = "year"
reader = csv.DictReader(file, skipinitialspace=True, fieldnames = (f1, f2, f3, f4, f5, f6, f7, f8))
i = 0
for row in reader:
if i != 0:
fileOut.write("INSERT INTO " + "carsData" + "(" + f2 + "," + f3 + "," + f4 + "," + f5 + "," + f6 +
"," + f7 + "," + f8 + ") VALUES ("
+ row[f2] + "," + row[f3] + "," + row[f4] + "," + row[f5] + "," + row[f6] + "," + row[f7] +
"," + row[f8] + ");" + '\n')
i += 1
fileOut.close()
file.close() | [
"mmurdock95@gmail.com"
] | mmurdock95@gmail.com |
0962bef45d9ca756726838812c5b8a2ba32a87c2 | 1e1ae5560e41ba63c30f6763a80f71fb4360c091 | /4_실시간_환경이슈_분석_웹App/ME_KF_RTA.py | 1838bd4726802159746e1f68a5a066a35bd25264 | [] | no_license | keibigdata/dyjin_2019 | 268d75a19bd8265ebed708d0b1ca9b89d6e85d8d | d295bece5717541d5e64cffd88dd9278c6efb3cb | refs/heads/master | 2020-11-29T19:08:51.076019 | 2019-12-26T05:08:41 | 2019-12-26T05:08:41 | 164,393,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,302 | py |
# coding: utf-8
# ## 네이버 환경뉴스 실시간 분석
# ### 라이브러리 import
# In[96]:
import re
import gensim
import mglearn
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
from konlpy.utils import pprint
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.decomposition import LatentDirichletAllocation
from ckonlpy.tag import Twitter
import pyLDAvis.gensim
import pyLDAvis.sklearn
import collections
from gensim.models.word2vec import Word2Vec
import copy
import soynlp
from soynlp.utils import DoublespaceLineCorpus
from soynlp.noun import LRNounExtractor_v2
import networkx as nx
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt2
import matplotlib
import datetime
from textrankr import TextRank
pd.options.display.float_format = '{:.3f}'.format
plt.rc('font', family='NanumBarunGothicOTF')
# ## 인덱스 파일 로드
# In[92]:
dir_path = '/media/Data/Naver_News/result/'
idx_result_df = pd.read_csv(dir_path + 'indexing.txt',encoding="UTF8",header=None)
idx_result_df.columns = ['date','title','press','content_url_list','file_list']
file_list = idx_result_df['file_list']
file_list = file_list.replace("./result",dir_path,regex=True)
idx_result_df['file_list'] = file_list
idx_result_df = idx_result_df.sort_values('date')
idx_result_df = idx_result_df.drop_duplicates(subset=['date','title'],keep="last")
idx_result_df = idx_result_df.reset_index(drop=True)
# ## 형태소 분석기 지정
# In[97]:
# Add Words
# Stopwords
s_words = pd.read_csv("/media/Realtime_Analysis/stop_words.txt",header=None)
s_words = s_words[0].tolist()
r_words = pd.read_csv("/media/Realtime_Analysis/region_words.txt",header=None)
r_words = r_words[0].tolist()
s_words = list(set(s_words + r_words))
e_words = pd.read_csv("/media/Realtime_Analysis/e_words.txt",header=None)
e_words = e_words[0].tolist()
twitter = Twitter()
twitter.add_dictionary(e_words,'Noun')
# ## 실시간 키워드 빈도수 분석
# In[ ]:
import datetime
# 일주일 단위로 계산
periods = [7,14,30]
tr_cnt = 10
dd = 3
num = 1000
for period in periods:
for d in range(1,dd):
print(d)
now = datetime.datetime.now().date() - datetime.timedelta(d)
start_date = str(now - datetime.timedelta(period-1))
path = "/media/Data/Realtime_Analysis/Keyword_Frequency/"
# 날짜 필터링
dt_index = pd.date_range(start=start_date, end = now)
dt_list = dt_index.strftime("%Y-%m-%d").tolist()
idx = list(map(lambda x: x in dt_list, idx_result_df['date'].tolist()))
filtered_idx_result_df = idx_result_df.iloc[idx]
filtered_idx_result_df = filtered_idx_result_df.reset_index(drop=True)
# 옵션 설정
corpus = []
is_tfidf = True
fn = str(now) + "_" + str(period) + "_" + "NN" + "_"
file_list = filtered_idx_result_df['file_list'].tolist()
for i in range(len(file_list)):
#print(i)
f = open(file_list[i], 'r',encoding='utf-8',errors="ignore")
doc = f.read()
corpus.append(" ".join(twitter.nouns(doc)))
# Stop words
stop_words = s_words
vect = CountVectorizer(stop_words = stop_words,ngram_range=(1, 2),min_df=0.001,max_df=0.5)
X = vect.fit_transform(corpus)
X = TfidfTransformer().fit_transform(X)
fn = fn + "norm_TF.txt"
count = X.toarray().sum(axis=0)
idx = np.argsort(-count)
count = count[idx]
feature_name = np.array(vect.get_feature_names())[idx]
keyword_list = list(zip(feature_name[:num], count[:num]))
result = pd.DataFrame(keyword_list)
result.to_csv(path + fn,header=None)
# 키워드 빈도수 분석 (TF)
fn = str(now) + "_" + str(period) + "_" + "NN" + "_"
fn = fn + "TF.txt"
vect = CountVectorizer(stop_words = stop_words,ngram_range=(1, 2),min_df=0.001,max_df=0.5)
X = vect.fit_transform(corpus)
count = X.toarray().sum(axis=0)
idx = np.argsort(-count)
count = count[idx]
feature_name = np.array(vect.get_feature_names())[idx]
keyword_list = list(zip(feature_name[:num], count[:num]))
result = pd.DataFrame(keyword_list)
result.to_csv(path + fn,header=None)
# 문서요약
path = "/media/Data/Realtime_Analysis/Doc_Summary/"
t_list = []
m_list = []
s_list = list(filtered_idx_result_df['title'])
for s in s_list:
t_list.append(s)
t_list = list(set(t_list))
p_sentences = ". ".join(t_list)
textrank = TextRank(p_sentences)
t = textrank.summarize(count=tr_cnt)
t = t.split("\n")
t = pd.DataFrame(t)
t.columns=['Sentences']
fn = str(now) + "_" + str(period) + "_" + "NN_" + str(tr_cnt) + "_TR_Summary" + ".txt"
t.to_csv(path + fn,header=None)
# 인덱스 저장
path = "/media/Data/Realtime_Analysis/Indexing/"
fn = str(now) + "_" + str(period) + "_" + "NN_" + "idx" + ".txt"
filtered_idx_result_df.to_csv(path + fn,header=None)
| [
"noreply@github.com"
] | noreply@github.com |
e54ec4f0ccafb701f112bd15334220e6981686a3 | 519e0431a0d59db5cac646e5f21dd6c207e8c33c | /1P/meuprojeto/Atividades 1VA/Nova Aula/Lista de Exercicios 1/Testando Atividades/Teste_aula1.py | 80c32c5687ecf1076dfed74e6d085261422ac707 | [] | no_license | vinny0965/phyton | 1c79cc394b72202e53117502c3c58054a68541b5 | 5ba3fe7875ba30d052e357426c8869d3d9a6dbf5 | refs/heads/master | 2020-07-28T08:54:50.828084 | 2019-10-22T23:29:45 | 2019-10-22T23:29:45 | 209,371,958 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 780 | py | """nome=input("Qual Seu Nome")
print("Olá",nome,"Seja Bem Vindo")
"""
"""dia=input("Qual dia de Seu Nascimento?")
mês=input("Qual mês de Seu Nascimento?")
ano=input("Qual Ano do Seu Nascimento?")
print("Voce Nasceu em", dia , "de", mês, "de", ano)
"""
"""n=int(input("Um numero"))
s=n + 1
p=n-1
print(s,p)
"""
"""n1=int(input("Um Numero:"))
n2= n1*2
n3= n1*3
n4=n1**(1/2)
print(n2,n3,n4)
"""
"""n1=int(input("Um numero"))
n2=int(input("Outro Numro"))
n3= n1+n2
print(n3/2)
"""
"""n1=int(input("Digite um Numero:"))
n2= n1 * 0.05
print(n1 - n2)
"""
"""n1=int(input("Qual Seu Salário:"))
n2= n1 * 0.15
print("Seu Saário é:",n1)
print("Seu Novo Salário é:",n1+n2)
"""
"""import math
num=int(input("Digite Um Numero"))
raiz=math.sqrt(num)
print("A raiz é igual a",raiz)
"""
| [
"carvalho.vinicius163@gmail.com"
] | carvalho.vinicius163@gmail.com |
db5c50f07d52f16dd6cdd4c8ce821122231ba200 | 015011f99ac6315906871cd7f7f170550c16591c | /figpay-backend/figpay_backend/urls.py | e3ac9410573a3480aea00d873505673a12b0dab7 | [] | no_license | bochackathon-fintech/Figpay | dd6be1dc68e8a563bde060d6ac8df1d35028d2dc | 001e40781d9514953b29c81c051d577071c8438c | refs/heads/master | 2022-12-10T18:59:56.114010 | 2017-06-11T13:25:41 | 2017-06-11T13:25:41 | 93,884,731 | 0 | 2 | null | 2022-12-07T01:14:19 | 2017-06-09T18:07:08 | CSS | UTF-8 | Python | false | false | 2,376 | py | """figpay_backend URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from rest_framework.authtoken import views
from django.conf import settings
from django.conf.urls.static import static
# from drf_auto_endpoint.router import router
from rest_framework.routers import DefaultRouter
from facing.views import UploadList
from payments.endpoints import ConsumerPaymentEndpoint, VendorPaymentEndpoint
from payments.views import ConsumerPaymentViewset, VendorPaymentViewset
from signup.views import signup, SignupSuccess
from facing.views import chatbot_connect
router = DefaultRouter()
# router.register(endpoint=ConsumerPaymentEndpoint, url='payments/consumers')
# router.register(endpoint=VendorPaymentEndpoint, url='payments/vendors')
router.register("consumer/payments", ConsumerPaymentViewset, base_name='consumer_payments')
router.register("vendor/payments", VendorPaymentViewset, base_name='vendor_payments')
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^api/token-auth/', views.obtain_auth_token),
url(r'^api/', include(router.urls)),
url(r'^api/recognize', UploadList.as_view()),
url(r'^chatbot/connect', chatbot_connect),
url(r'^signup/success$', SignupSuccess.as_view(), name='signup-success'),
url(r'^$', signup, name='signup'),
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL,
document_root=settings.STATIC_ROOT) + static(settings.MEDIA_URL_LOCAL,
document_root=settings.MEDIA_ROOT)
admin.site.site_header = 'FigPay Backend'
import kairos_face
kairos_face.settings.app_id = settings.KAIROS_APP_ID
kairos_face.settings.app_key = settings.KAIROS_APP_KEY
| [
"kyriakos@pixelactions.com"
] | kyriakos@pixelactions.com |
6766ef7f01a731433633f6e43353c5a7ea766ba7 | 09795761625073e46aeffe8b7344136c2c4486ca | /lop.py | 98d62653a844fc46e4cdd03223a0a6668bf218dc | [] | no_license | lolwentx/klllol | b7665eee3ba22ecaa757918f95b15ae58051b7df | ccd47a16b7d5bb4a6516fd04d2ffe065622a9396 | refs/heads/main | 2023-06-21T17:06:16.231733 | 2021-08-05T11:30:05 | 2021-08-05T11:30:05 | 393,016,203 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,599 | py | name = input('привет, как вас зовут? ')
import sys
print ('ясно ' + name)
print ('очень приятно')
вуфлч = input('как дела? ' + name )
n = 'ясно' if name != ("отлично") else 'ооо у меня тоже!'
print (n)
import time
time.sleep(3)
print ('хы')
import time
time.sleep(2)
print ('нууууууууу')
print("(1)Общение")
print("(2)Выход")
print("Введите цифры(1,2,):")
p = str(input());
if p == "1" :
print("Окей, начали:")
elif p == "2" :
input("Нажмите Enter")
else:
print ("Выход...")
print("Выбирайте ответы так либо 1 либо 2")
import time
time.sleep(3)
print("а что бы ты сьел?")
print (' 5-спагетти, 7-роллы, 6-бургер ')
g = str(input());
if g == "5" :
print("oooo я бы тоже хотел о мне пора")
sys.exit
elif g == '2' :
print ('ok')
elif g == '1' :
print ('ok')
elif g == "7" :
input("мммммммммммм ага ага но мне пора")
sys.exit
elif g == '6' :
input('мне пора')
sys.exit
else:
print ('такого нету в списке мне пора')
sys.exit
import time
time.sleep(2)
print (' ты можешь рассказать мне какую-нибудь историю?')
import time
time.sleep(2)
print ('желательно смешную.....')
story = input('рассказывай: ')
a = len(story)
print(a)
import time
time.sleep(3)
print ('ты мне столько бесполезных букв написал(а)')
import time
time.sleep(2)
print ('мдааа ужж')
print ('11010000 10111110 11010001 10000010 11010000 10110010 11010000 10110101 11010001 10000010 111010 1000111 1000111 110101')
print ('пора завязывать с этим пока ' +name)
l = str(input());
if l == "GGG5" :
print ('хм')
else:
sys.exit
print ('ладно не пора')
import time
time.sleep(1)
print ('сыграем?')
print ('правила простые отгадай число')
import random
num = random.randint(1,10)
guess = int(input('Вводи от 1 до 10 '))
if guess == num :
print ('ладно повезло')
print ('тебе повезло......')
elif guess > 10:
print('мне пора')
sys.exit
elif guess < 1:
print('мне пора')
sys.exit
else:
print('не правильно мне пора было загадано ', num)
| [
"noreply@github.com"
] | noreply@github.com |
0f82bdd29163e123703bec8c3b5a6232a6cedbb0 | 686a2f942e466a9bf7b1c7c03d29dbb303643a20 | /reference/tf2.py | 9e032ae13270e69d371259be10faba7e9742858e | [] | no_license | shaxinlei/TrafficSignRecognition | c7c3679181c7f12d53ed470c09805b5b59462525 | c335dc413702cd007c1eed60296b0cacc0ff27e2 | refs/heads/master | 2021-10-09T03:16:45.589457 | 2018-12-20T14:07:20 | 2018-12-20T14:07:20 | 88,011,397 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 365 | py | import tensorflow as tf
matrix1 = tf.constant([[3,3]])
matrix2 = tf.constant([[2],
[2]])
product = tf.matmul(matrix1,matrix2) #matrix multiply np.dot(m1,m2)
#method1
# sess = tf.Session()
# result = sess.run(product)
# print(result)
# sess.close()
#method2
with tf.Session() as sess:
result2 = sess.run(product)
print(result2)
| [
"shaxinlei@gmail.com"
] | shaxinlei@gmail.com |
0cd829b2989f164150f7deb6916ef2a5ae4c33ba | ec20b8f649fdc54199dc3205461e847f6e07b753 | /Exercise2_5.py | 7dd2c392e66f7f397e235524d91bd220d610af52 | [] | no_license | nibsdey/PythonToy1 | 69e4b1a306e6949d62e168a898c652eec7126913 | 1a42c823c0e10cda78740dcf512446871f7cd144 | refs/heads/master | 2021-01-23T16:04:48.255533 | 2017-06-10T22:38:45 | 2017-06-10T22:38:45 | 93,281,446 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 108 | py | celsius=input("Enter temperature in celcius: ")
print("Temperature in Farenhite: ")
print(float(celsius)*32) | [
"nibs.dey@sas.com"
] | nibs.dey@sas.com |
77d284e2b345dc9df82af95355126cbf386ca2fd | a74a0317d8b8e1cf5135cbd0821617f70c8879ca | /old/python_resume/file.py | a0b117dbafe07691e133591f387574ae6e1beeb9 | [] | no_license | chuck1/python-resume | cbd3c0eb2fe3d0894b3809a2ac1526d171d6afc2 | 5b83fa831525faba17f72173cfff9c2155bd21fc | refs/heads/master | 2021-01-10T19:14:08.036676 | 2017-01-04T01:23:03 | 2017-01-04T01:23:03 | 42,127,786 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,042 | py | import os
import json
class Manager(object):
def __init__(self, root):
self.root = root
def get_path(self, filename):
return os.path.join(self.root, filename)
def read_text(self, filename):
path = self.get_path(filename)
with open(path, 'r') as f:
text = f.read()
return text
def write_text(self, filename, text):
path = self.get_path(filename)
#try:
#os.makedirs(os.path.dirname(path))
#except:
# pass
#fd = os.open(path, os.O_WRONLY, 0666)
#os.fchmod(fd,0666)
#os.close(fd)
with open(path, 'w') as f:
f.write(text)
def read_json(self, filename):
try:
text = self.read_text(filename)
except:
text = "{}"
j = json.loads(text)
return j
def write_json(self, filename, j):
text = json.dumps(j)
self.write_text(filename, text)
| [
"charlesrymal@gmail.com"
] | charlesrymal@gmail.com |
74989ae2cd432e488d25d367702718af2d75802f | 897554d1ffe398f701df82cdf390bf185f97bf20 | /django-stubs/core/management/commands/squashmigrations.pyi | 27a81d61e9c264f3a40349a263d3523ab8ee5bb0 | [
"MIT"
] | permissive | maximmasiutin/django-stubs | c0a7bac533b03c81704755c5e4a8eff9a178010d | 55366fbf087522f860aa242c200f87b36c6c24a7 | refs/heads/master | 2023-04-15T09:47:51.980794 | 2021-04-20T12:18:22 | 2021-04-20T16:07:45 | 359,802,871 | 1 | 0 | MIT | 2021-04-20T12:12:03 | 2021-04-20T12:12:03 | null | UTF-8 | Python | false | false | 940 | pyi | from django.apps import apps as apps
from django.conf import settings as settings
from django.core.management.base import BaseCommand as BaseCommand, CommandError as CommandError
from django.db import DEFAULT_DB_ALIAS as DEFAULT_DB_ALIAS, connections as connections, migrations as migrations
from django.db.migrations.migration import Migration
from django.db.migrations.loader import AmbiguityError as AmbiguityError, MigrationLoader as MigrationLoader
from django.db.migrations.migration import SwappableTuple as SwappableTuple
from django.db.migrations.optimizer import MigrationOptimizer as MigrationOptimizer
from django.db.migrations.writer import MigrationWriter as MigrationWriter
from django.utils.version import get_docs_version as get_docs_version
class Command(BaseCommand):
    # Type stub for ``manage.py squashmigrations``.
    verbosity: int = ...
    interactive: bool = ...
    # Resolves a (possibly ambiguous) migration name within an app.
    def find_migration(self, loader: MigrationLoader, app_label: str, name: str) -> Migration: ...
| [
"noreply@github.com"
] | noreply@github.com |
b0869b0623a35197e22092fc0de9bf69224da267 | 52ef9a347e9fd725ee9a99a389bfde83e057e5c8 | /2020/d06.py | 86a77b213b0464465aa85563ec2bd554dc9e162f | [] | no_license | slapersonne/adventOfCode | 0fd2ba8e7a2567f648e7fae77074913bfe6c1436 | 963b5916db42b2b99a3972fb2b56163e5820444d | refs/heads/master | 2023-02-02T22:21:50.334972 | 2020-12-21T19:43:24 | 2020-12-21T19:43:24 | 317,586,813 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 734 | py | import functools
def parse_forms(lines, join_operation):
split_indexes = [i for i, val in enumerate(lines) if val == ""]
total_count = sum([
len(grouped_form) for grouped_form in
(functools.reduce(join_operation, form) for form in
(map(lambda str_form: {char for char in str_form}, str_forms_group) for str_forms_group in
(lines[i+1:j] for i, j in zip([-1] + split_indexes, split_indexes + [len(lines)]))))
])
return total_count
def run_06():
    """Solve Advent of Code 2020 day 6 from ``inputs/d06.txt``.

    Part 1: questions anyone in the group answered (set union).
    Part 2: questions everyone in the group answered (set intersection).
    """
    with open("inputs/d06.txt") as f:
        lines = [line.strip() for line in f.readlines()]
    count_1 = parse_forms(lines, set.union)
    print(count_1)
    count_2 = parse_forms(lines, set.intersection)
    print(count_2)
    return
| [
"simon.lapersonne@gmail.com"
] | simon.lapersonne@gmail.com |
af7e81d7814d9de33c0ee5c697544569178afbd6 | 58b7edf062ad3c568dc9710dbb0a5ca4c4146278 | /pct/utils/data_reader_test.py | f05aecc6198748864c11a02c4e4cb49b41c19517 | [
"MIT"
] | permissive | murphyyhuang/mini_transformer | f00b4b65a843f3f035a96a50d2ac5dee7a7b0208 | dc6fc270c573e613bc2f5cad62534c6416e7dbd5 | refs/heads/master | 2022-02-23T15:37:11.686110 | 2019-10-24T02:36:46 | 2019-10-24T02:36:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,003 | py | # coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from pct.utils import data_reader
from pct.utils import hparam
import tensorflow as tf
tf.enable_eager_execution()
def create_generator_test():
  """Smoke-test TextDataGenerator on the 'train' split.

  Builds hparams from the test YAML, iterates the generator to exhaustion,
  printing input/target batch shapes each round, then prints the total number
  of sentences seen.
  """
  yaml_dir = os.path.join(os.path.dirname(__file__), '../test_data/hparams.yml')
  hparams = hparam.HParams()
  hparams.from_yaml(yaml_dir)
  data_generator = data_reader.TextDataGenerator('train', hparams)
  index = 1
  sentence_counter = []  # batch sizes (first dim of inputs), summed at the end
  while True:
    try:
      result = data_generator.get_next()
      print('* Round: {}'.format(index))
      print('* Input shape: {}'.format(result[0]['inputs'].shape))
      print('* Target shape: {}'.format(result[0]['targets'].shape))
      sentence_counter.append(result[0]['inputs'].shape[0])
      index += 1
    except Exception:
      # Stop when the generator is exhausted. The original bare `except:`
      # also swallowed KeyboardInterrupt/SystemExit, making the loop
      # uninterruptible; catching Exception keeps the intended
      # "stop at end of data" behavior without trapping BaseException.
      break
  print('* The total number of sentences: {}'.format(sum(sentence_counter)))
if __name__ == '__main__':
  # Run the data-generator smoke test when executed directly.
  create_generator_test()
| [
"huangyhq@gmail.com"
] | huangyhq@gmail.com |
98fddd7d3b5f78ec7b9a0b3a6e52fca40a3f5363 | 114af993838bc6ef4c8e876a298aa7138493e0d5 | /extra-agencia/ticket/wizard/__init__.py | 6bfc59a69455a48e7b981b183c4065fb383562bd | [] | no_license | xtqgroup/agencias_comercio | f27b0a4d265de1256857680ac3f1d50200d3ba98 | 364ea6ba14ca962c50697e249e781883116eb97d | refs/heads/master | 2020-03-24T08:17:58.651913 | 2018-07-27T21:36:24 | 2018-07-27T21:36:24 | 142,591,377 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 55 | py | # -*- coding: utf-8 -*-
from . import pos_down_ticket
| [
"omarrodrigo.rp@gmail.com"
] | omarrodrigo.rp@gmail.com |
a50e4c86ed9764db44777c7fcb47ec51f6780d04 | 6b2dcf691bc7f019d86270ec0588f5232fc3e2b0 | /inflearn_practice/section7/최대점수 구하기.py | e8be936e34aa64f6cf45a818cae04129b1c64022 | [] | no_license | limgeonho/Algorithm | 02c55fbf5b09b718dbc2aee83a887143d121ddaf | 3d4d1ccd6ee3c52dc36ac3cf5f681690fcfdb6ab | refs/heads/master | 2023-06-01T21:05:00.100998 | 2021-06-21T15:04:26 | 2021-06-21T15:04:26 | 371,552,176 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 494 | py | #최대점수 구하기
# For each problem, branch on solving it or skipping it (binary decision DFS).
def DFS(L, time, sum):
    """Explore solve/skip choices starting at problem index L.

    `time` is the time already spent and `sum` the score accumulated so far.
    Reads the module globals n (problem count), m (time limit), v (scores)
    and t (solve times), and records the best achievable score in the global
    `res`.
    """
    global res
    # Prune: once over the time limit this branch can never become valid.
    if time > m:
        return
    if L < n:
        DFS(L + 1, time + t[L], sum + v[L])  # solve problem L
        DFS(L + 1, time, sum)                # skip problem L
    else:
        # Every problem decided within the time limit: keep the best score.
        res = max(res, sum)
# Read n (number of problems) and m (time limit) from stdin.
n, m = map(int, input().split())
# v[i] = score of problem i, t[i] = time needed to solve problem i.
v= list()
t = list()
for _ in range(n):
    a, b = map(int, input().split())
    v.append(a)
    t.append(b)
# Sentinel "minus infinity" best score, updated by DFS.
res = -2147000000
DFS(0, 0, 0)
print(res)
"ghlim909@gmail.com"
] | ghlim909@gmail.com |
2c602f15eb747e68cdcf7c1763bf993220d7650d | c9ed63e95644dadf6d5221f8df7c59e6fdf98a6f | /api/serializers.py | 2edf8bb4094c46611283daa82bfbefb336fbe88b | [] | no_license | takasaki376/drf-car-api | 952006f5494baaa22188b3ba5a79ac8291542369 | c359b3a751dd8b2fe4e277add2ab6a517b325108 | refs/heads/master | 2023-06-08T20:57:42.650304 | 2021-07-03T14:26:18 | 2021-07-03T14:26:18 | 382,635,030 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,183 | py | from rest_framework import serializers
from .models import Segment, Brand, Vehicle
from django.contrib.auth.models import User
class UserSerializer(serializers.ModelSerializer):
    """Serializer for Django auth users.

    The password is write-only (never echoed back), required, and at least
    five characters; creation goes through `create_user` so it is hashed.
    """

    class Meta:
        model = User
        fields = ['id', 'username', 'password']
        extra_kwargs = {
            'password': {
                'write_only': True,
                'required': True,
                'min_length': 5,
            },
        }

    def create(self, validated_data):
        """Create the user via create_user so the password gets hashed."""
        new_user = User.objects.create_user(**validated_data)
        return new_user
class SegmentSerializer(serializers.ModelSerializer):
    """Plain serializer exposing a Segment's id and name."""
    class Meta:
        model = Segment
        fields = ['id', 'segment_name']
class BrandSerializer(serializers.ModelSerializer):
    """Plain serializer exposing a Brand's id and name."""
    class Meta:
        model = Brand
        fields = ['id', 'brand_name']
class VehicleSerializer(serializers.ModelSerializer):
    """Vehicle serializer with denormalized, read-only segment/brand names."""

    # ReadOnlyField is always read-only by definition, so the explicit
    # read_only=True the original passed was redundant and has been dropped
    # (behavior is unchanged).
    segment_name = serializers.ReadOnlyField(source='segment.segment_name')
    brand_name = serializers.ReadOnlyField(source='brand.brand_name')

    class Meta:
        model = Vehicle
        fields = ['id', 'vehicle_name', 'release_year', 'price', 'segment',
                  'brand', 'segment_name', 'brand_name']
        # NOTE(review): 'user' is not listed in `fields`, so this entry is
        # currently inert — confirm whether a user field was meant to be
        # serialized here.
        extra_kwargs = {'user': {'read_only': True}}
"75880059+takasaki376@users.noreply.github.com"
] | 75880059+takasaki376@users.noreply.github.com |
8d5b8f5a59be2454689b9a71c4726da252ece4dd | 985139b2e13a524a5eee9fe98b1b3561f4e842ad | /bitcoin_checker.py | 4a692f625a08d9f8840179dc11c9590cc682acad | [] | no_license | intelpol/Bitcoin_checker | 8e129dcfa216977c27bb9c9b19e0a6b85405471c | 9d7319314e35c3b10de43efd0d291d9e41b06af3 | refs/heads/master | 2021-06-14T19:39:17.121206 | 2017-03-01T20:46:52 | 2017-03-01T20:46:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,972 | py | from block_io import BlockIo
import os
import datetime
import time
import csv
import os.path
import sys
print "********* Bitcoin wallet checker started *********\n\n"
# check to see if config and results .csv files exist. If they don't create them. Also check for api_keys.csv
api_keys = [] # list to hold API key and secret pin
if os.path.isfile("api_keys.csv") == True:
print "[*] api_keys.csv identified as being in path"
with open('api_keys.csv', 'r') as userFile:
userFileReader = csv.reader(userFile)
for row in userFileReader:
api_keys.append(row)
else:
print "[*] api_keys.csv not identified as being in path. Creating new file now..."
try:
with open('api_keys.csv', 'w') as newFile:
newFileWriter = csv.writer(newFile)
newFileWriter.writerow([''])
print "[*] api_keys.csv file creation success. Go and get an API key from block.io. Add the Bitcoin key to A1, and your secret pin to A2 in api_keys.csv"
exit()
except:
if os.path.isfile("api_keys.csv") == True:
pass
else:
print "[*] Error in creating api_keys.csv. Quitting..."
exit()
config_ids = [] # list to hold wallet_ids to search against
# Bootstrap config.csv: load wallet ids (skipping the header row) if the file
# exists; otherwise create a template with just the header and stop.
if os.path.isfile("config.csv") == True:
    print "[*] config.csv identified as being in path"
    print "[*] Grabbing config wallet_ids from config.csv"
    with open('config.csv', 'r') as userFile:
        userFileReader = csv.reader(userFile)
        for row in userFileReader:
            config_ids.append(row[0])
    # Drop the "Wallet ID" header row.
    config_ids.pop(0)
    print "[*] config_ids collected from config.csv:", len(config_ids)
    if len(config_ids) == 0:
        # NOTE(review): "founds" is a typo in this user-facing message.
        print "[*] Zero bitcoin wallets founds in config.csv. Quitting..."
        exit()
    pass
else:
    print "[*] config.csv not identified as being in path. Creating new file now..."
    try:
        with open('config.csv', 'w') as newFile:
            newFileWriter = csv.writer(newFile)
            newFileWriter.writerow(["Wallet ID"])
            print "[*] config.csv file creation success"
            print "[*] Go to config.csv and add new rows with bitcoin wallet IDs. Then rerun this script"
            exit()
    except:
        # NOTE(review): bare except hides the real failure reason.
        if os.path.isfile("config.csv") == True:
            pass
        else:
            print "[*] Error in creating config.csv. Quitting..."
            exit()
tx_ids = []
# Bootstrap bitcoin.csv (the results file): remember already-recorded TX ids
# (column 4) so reruns do not duplicate rows; create the file with its header
# row if it does not exist yet.
if os.path.isfile("bitcoin.csv") == True:
    print "[*] bitcoin.csv identified as being in path"
    with open('bitcoin.csv', 'r') as userFile:
        userFileReader = csv.reader(userFile)
        for row in userFileReader:
            tx_ids.append(row[4])
    # len-1 discounts the header cell "TX_ID" that was read in as well.
    print "[*] TX_IDs already in bitcoin.csv:", len(tx_ids)-1
else:
    print "[*] bitcoin.csv not identified as being in path. Creating new file now..."
    try:
        with open('bitcoin.csv', 'w') as newFile:
            newFileWriter = csv.writer(newFile)
            newFileWriter.writerow(["Sender", "Confidence", "TX_epoch", "TX_epoch_dtg", "TX_ID", "Amount", "Recipient"])
            print "[*] bitcoin.csv file creation success"
    except:
        # NOTE(review): bare except hides the real failure reason.
        if os.path.isfile("bitcoin.csv") == True:
            pass
        else:
            print "[*] Error in creating bitcoin.csv. Quitting..."
            exit()
# Set up block_io
version = 2
try:
key1 = str(api_keys[0])[2:]
key1 = key1[:-2]
key2 = str(api_keys[1])[2:]
key2 = key2[:-2]
except:
print "[*] Error with reading API keys from api_keys.csv. Check that the API_key is in A1, and the secret pin is in A2. Quitting..."
exit()
block_io = BlockIo(key1, key2, version)
# set up counters
new_items = 0
old_items = 0
# define function to use block_io API to get transactions from a wallet_id
def check_wallet(wallet_id):
global new_items
global old_items
print "\n[*] Checking:", wallet_id
result = block_io.get_transactions(type='received', addresses=wallet_id)
for item in result['data']['txs']:
sender = item['senders'][0]
confidence = item['confidence']
tx_epoch = item['time']
tx_epoch_dtg = datetime.datetime.fromtimestamp(item['time']).strftime('%Y-%m-%d %H:%M:%S.%f')
tx_id = item['txid']
for item in item['amounts_received']:
amount = item['amount']
recipient = item['recipient']
if any(tx_id in s for s in tx_ids):
print "[*] TX_ID",tx_id ,"already in bitcoin.csv"
old_items += 1
else:
print "\n[*] Item not in bitcoin.csv, adding now..."
print "[*] Sender:", sender
print "[*] Confidence:", confidence
print "[*] EPOCH time:", tx_epoch
print "[*] EPOCH time converted:", tx_epoch_dtg
print "[*] TXID:", tx_id
print "[*] Amount:", amount
print "\n[*] Recipient:", recipient
with open('bitcoin.csv', 'a') as newFile:
newFileWriter = csv.writer(newFile)
newFileWriter.writerow([sender, confidence, tx_epoch, tx_epoch_dtg, tx_id, amount, recipient])
new_items += 1
print "[*] Adding success"
else:
pass
# Check every configured wallet, pausing between calls to respect the
# block.io API rate limit, then print a run summary.
for item in config_ids:
    check_wallet(item)
    print "[*] Sleeping for 3 seconds for rate limiting"
    time.sleep(3)
print "\n[*] New items:", new_items
print "[*] Old items:", old_items
print "[*] Wallets checked:", len(config_ids)
print "\n\n********* Bitcoin wallet checker completed *********"
| [
"noreply@github.com"
] | noreply@github.com |
a28ff0048e62de940dbd8dff215196a4fb481b90 | b0d98c5fd2e1f0cd87d443229e85c548f7c40f2d | /剑指Offer-Python版本/2.替换空格.py | 96f5cb8459dd7f871dd4c715d6ca82c59ff9a34d | [] | no_license | Kiiiiii123/CodeGym | 7c7e623e43e79189f295ba79c0f4fa99bdc46dc1 | fb66d5d6aa707153e78fefd70e2aac6567a899b0 | refs/heads/master | 2020-04-06T21:58:56.531115 | 2019-11-05T09:29:15 | 2019-11-05T09:29:15 | 157,820,933 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,673 | py | '''
题目:请实现一个函数,将一个字符串中的每个空格替换成“%20”。例如,当字符串为We Are Happy.则经过替换之后的字符串为We%20Are%20Happy。
'''
'''
方法一:先计算最终需要给出的长度,然后建立两个指针p1,p2,p1指向原始字符串的末尾,p2指向替换后的字符串的末尾。同时移动p1,p2, 将p1指的内容逐个复制到p2, 当p1遇到空格时,在p2处插入 %20, p1向前移动一个位置,p2向前移动3个位置,当p1和p2位置重合时,全部替换完成。时间复杂度O(n)
运行时间:24ms
占用内存:5864k
'''
class Solution:
# s 源字符串
def replaceSpace(self, s):
# write code here
if not isinstance(s,str) or len(s)<=0 or s==None:
return ''
spaceNum=0
for i in s:
if i==' ':
spaceNum+=1
newStrLen=len(s)+2*spaceNum
newStr=newStrLen*[None]
indexOfOrigin,indexOfNew=len(s)-1,newStrLen-1
while indexOfOrigin>=0 and indexOfNew>=indexOfOrigin:
if s[indexOfOrigin]==' ':
newStr[indexOfNew-2:indexOfNew+1]=['%','2','0']
indexOfNew-=3
indexOfOrigin-=1
else:
newStr[indexOfNew]=s[indexOfOrigin]
indexOfNew-=1
indexOfOrigin-=1
return ''.join(newStr)
'''
方法二:使用Python函数接口
运行时间:30ms
占用内存:5736k
'''
class Solution:
# s 源字符串
def replaceSpace(self, s):
# write code here
return s.replace(' ','%20')
| [
"noreply@github.com"
] | noreply@github.com |
690bf90029924555962d2aa02c4d1d296434d857 | 4bf53a42b336e67ce75e220dc87f75af9911f036 | /tapiriik/urls.py | 4ced6e6a50e2b58565261db7601a7cafecb0b985 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | patricksan/tapiriik | 5cee925d256f5e2b23397487ef807b5766b710ba | 1628f8759c9e2d0562b92dd25561a347389f6cf3 | refs/heads/master | 2020-12-11T08:07:10.991800 | 2018-02-27T13:14:59 | 2018-02-27T13:14:59 | 38,956,416 | 0 | 0 | Apache-2.0 | 2018-02-27T13:15:00 | 2015-07-12T09:22:01 | Python | UTF-8 | Python | false | false | 7,197 | py | from django.conf.urls import patterns, include, url
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.views.generic import TemplateView
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
# Route table for the whole site (Django 1.x `patterns()` style). Order
# matters: the resolver takes the first matching regex.
urlpatterns = patterns('',
    url(r'^$', 'tapiriik.web.views.dashboard', name='dashboard'),

    # OAuth + simple-credential authentication flows per service.
    url(r'^auth/redirect/(?P<service>[^/]+)$', 'tapiriik.web.views.oauth.authredirect', {}, name='oauth_redirect', ),
    url(r'^auth/redirect/(?P<service>[^/]+)/(?P<level>.+)$', 'tapiriik.web.views.oauth.authredirect', {}, name='oauth_redirect', ),
    url(r'^auth/return/(?P<service>[^/]+)$', 'tapiriik.web.views.oauth.authreturn', {}, name='oauth_return', ),
    url(r'^auth/return/(?P<service>[^/]+)/(?P<level>.+)$', 'tapiriik.web.views.oauth.authreturn', {}, name='oauth_return', ),  # django's URL magic couldn't handle the equivalent regex
    url(r'^auth/login/(?P<service>.+)$', 'tapiriik.web.views.auth_login', {}, name='auth_simple', ),
    url(r'^auth/login-ajax/(?P<service>.+)$', 'tapiriik.web.views.auth_login_ajax', {}, name='auth_simple_ajax', ),
    url(r'^auth/persist-ajax/(?P<service>.+)$', 'tapiriik.web.views.auth_persist_extended_auth_ajax', {}, name='auth_persist_extended_auth_ajax', ),
    url(r'^auth/disconnect/(?P<service>.+)$', 'tapiriik.web.views.auth_disconnect', {}, name='auth_disconnect', ),
    url(r'^auth/disconnect-ajax/(?P<service>.+)$', 'tapiriik.web.views.auth_disconnect_ajax', {}, name='auth_disconnect_ajax', ),
    url(r'^auth/logout$', 'tapiriik.web.views.auth_logout', {}, name='auth_logout', ),

    # Account management and per-service configuration.
    url(r'^account/setemail$', 'tapiriik.web.views.account_setemail', {}, name='account_set_email', ),
    url(r'^account/settz$', 'tapiriik.web.views.account_settimezone', {}, name='account_set_timezone', ),
    url(r'^account/configure$', 'tapiriik.web.views.account_setconfig', {}, name='account_set_config', ),
    url(r'^account/rollback/?$', 'tapiriik.web.views.account_rollback_initiate', {}, name='account_rollback_initiate', ),
    url(r'^account/rollback/(?P<task_id>.+)$', 'tapiriik.web.views.account_rollback_status', {}, name='account_rollback_status', ),
    url(r'^rollback$', 'tapiriik.web.views.rollback_dashboard', {}, name='rollback_dashboard', ),
    url(r'^configure/save/(?P<service>.+)?$', 'tapiriik.web.views.config.config_save', {}, name='config_save', ),
    url(r'^configure/dropbox$', 'tapiriik.web.views.config.dropbox', {}, name='dropbox_config', ),
    url(r'^configure/flow/save/(?P<service>.+)?$', 'tapiriik.web.views.config.config_flow_save', {}, name='config_flow_save', ),
    url(r'^settings/?$', 'tapiriik.web.views.settings.settings', {}, name='settings_panel', ),

    url(r'^dropbox/browse-ajax/?$', 'tapiriik.web.views.dropbox.browse', {}, name='dropbox_browse_ajax', ),
    url(r'^dropbox/browse-ajax/(?P<path>.+)?$', 'tapiriik.web.views.dropbox.browse', {}, name='dropbox_browse_ajax', ),

    # Synchronization status, scheduling and service callbacks.
    url(r'^sync/status$', 'tapiriik.web.views.sync_status', {}, name='sync_status'),
    url(r'^sync/activity$', 'tapiriik.web.views.sync_recent_activity', {}, name='sync_recent_activity'),
    url(r'^sync/schedule/now$', 'tapiriik.web.views.sync_schedule_immediate', {}, name='sync_schedule_immediate'),
    url(r'^sync/errors/(?P<service>[^/]+)/clear/(?P<group>.+)$', 'tapiriik.web.views.sync_clear_errorgroup', {}, name='sync_clear_errorgroup'),
    url(r'^activities$', 'tapiriik.web.views.activities_dashboard', {}, name='activities_dashboard'),
    url(r'^activities/fetch$', 'tapiriik.web.views.activities_fetch_json', {}, name='activities_fetch_json'),
    url(r'^sync/remote_callback/trigger_partial_sync/(?P<service>.+)$', 'tapiriik.web.views.sync_trigger_partial_sync_callback', {}, name='sync_trigger_partial_sync_callback'),

    # Internal diagnostics dashboards.
    url(r'^diagnostics/$', 'tapiriik.web.views.diag_dashboard', {}, name='diagnostics_dashboard'),
    url(r'^diagnostics/queue$', 'tapiriik.web.views.diag_queue_dashboard', {}, name='diagnostics_queue_dashboard'),
    url(r'^diagnostics/errors$', 'tapiriik.web.views.diag_errors', {}, name='diagnostics_errors'),
    url(r'^diagnostics/error/(?P<error>.+)$', 'tapiriik.web.views.diag_error', {}, name='diagnostics_error'),
    url(r'^diagnostics/graphs$', 'tapiriik.web.views.diag_graphs', {}, name='diagnostics_graphs'),
    url(r'^diagnostics/user/unsu$', 'tapiriik.web.views.diag_unsu', {}, name='diagnostics_unsu'),
    url(r'^diagnostics/user/(?P<user>.+)$', 'tapiriik.web.views.diag_user', {}, name='diagnostics_user'),
    url(r'^diagnostics/payments/$', 'tapiriik.web.views.diag_payments', {}, name='diagnostics_payments'),
    url(r'^diagnostics/ip$', 'tapiriik.web.views.diag_ip', {}, name='diagnostics_ip'),
    url(r'^diagnostics/login$', 'tapiriik.web.views.diag_login', {}, name='diagnostics_login'),
    url(r'^supported-activities$', 'tapiriik.web.views.supported_activities', {}, name='supported_activities'),
    # url(r'^supported-services-poll$', 'tapiriik.web.views.supported_services_poll', {}, name='supported_services_poll'),

    # Payment claiming/return flow (PayPal IPN etc.).
    url(r'^payments/claim$', 'tapiriik.web.views.payments_claim', {}, name='payments_claim'),
    url(r'^payments/claim-ajax$', 'tapiriik.web.views.payments_claim_ajax', {}, name='payments_claim_ajax'),
    url(r'^payments/promo-claim-ajax$', 'tapiriik.web.views.payments_promo_claim_ajax', {}, name='payments_promo_claim_ajax'),
    url(r'^payments/claim-wait-ajax$', 'tapiriik.web.views.payments_claim_wait_ajax', {}, name='payments_claim_wait_ajax'),
    url(r'^payments/claim/(?P<code>[a-f0-9]+)$', 'tapiriik.web.views.payments_claim_return', {}, name='payments_claim_return'),
    url(r'^payments/return$', 'tapiriik.web.views.payments_return', {}, name='payments_return'),
    url(r'^payments/confirmed$', 'tapiriik.web.views.payments_confirmed', {}, name='payments_confirmed'),
    url(r'^payments/ipn$', 'tapiriik.web.views.payments_ipn', {}, name='payments_ipn'),
    url(r'^payments/external/(?P<provider>[^/]+)/refresh$', 'tapiriik.web.views.payments_external_refresh', {}, name='payments_external_refresh'),

    url(r'^ab/begin/(?P<key>[^/]+)$', 'tapiriik.web.views.ab_web_experiment_begin', {}, name='ab_web_experiment_begin'),

    # Static informational pages.
    url(r'^privacy$', 'tapiriik.web.views.privacy.privacy', name='privacy'),
    url(r'^garmin_connect_users$', TemplateView.as_view(template_name='static/garmin_connect_users.html'), name='garmin_connect_users'),
    url(r'^faq$', TemplateView.as_view(template_name='static/faq.html'), name='faq'),
    url(r'^credits$', TemplateView.as_view(template_name='static/credits.html'), name='credits'),
    url(r'^contact$', TemplateView.as_view(template_name='static/contact.html'), name='contact'),
    # Examples:
    # url(r'^$', 'tapiriik.views.home', name='home'),
    # url(r'^tapiriik/', include('tapiriik.foo.urls')),
    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    # url(r'^admin/', include(admin.site.urls)),
)
# Serve static assets via the staticfiles finders (development helper).
urlpatterns += staticfiles_urlpatterns()
| [
"cpf@cpfx.ca"
] | cpf@cpfx.ca |
e5524585750dc71d44126d50fdf1be0d81d5ef80 | 2e2288db8e4d6b7c34416986a3b567080b84fc68 | /Project1/bin/symilar | 47b0457ca0e6200104489b3966a32bd6bdb109f0 | [] | no_license | icseibel/Django_testing | 15b767e91c7fd887c2dc8194b921d1afa6488808 | 1a17a566e5135cf3f282f8fd025543b79e2bde91 | refs/heads/master | 2020-03-25T00:37:36.159849 | 2019-02-12T18:25:20 | 2019-02-12T18:25:20 | 143,195,552 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | #!/home/igor/Python/django/Project1/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_symilar
if __name__ == '__main__':
    # Normalize argv[0] (strip the -script.py / .exe suffix added by
    # setuptools console-script wrappers) before delegating to pylint's
    # symilar tool; exit with its return code.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(run_symilar())
| [
"igor@gaviao.DOM_QP.COM.BR"
] | igor@gaviao.DOM_QP.COM.BR | |
f8301a31535d82f006fc513834c7c3df4900a415 | ec0fb459fd5e3711eb128c741e024c393745ba8f | /Numero de ocupacion.py | 15ff92da2ad4ff444c2c71f529e8c9048cd5b226 | [] | no_license | Aitor-Fernandez/Proyecto-Termodinamica | 77689ab2d62be7fdd84b87851c7e28d8590ea254 | 1092635a8a03754c3278aa688aedf2c470d073cb | refs/heads/master | 2022-06-13T21:11:57.891262 | 2020-05-07T15:20:32 | 2020-05-07T15:20:32 | 262,085,914 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,577 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Apr 26 15:17:20 2020
@author: afern
"""
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 22 00:45:29 2020
@author: afern
"""
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.widgets import Slider
KT = 2.5  # thermal energy scale (k_B*T) used throughout, in the model's units

def p1(c, E, J, c0):
    """Mean occupation number of a two-site adsorption model.

    c/c0 is the reduced concentration (pressure); E is the per-site binding
    energy and J the pair-interaction energy, both supplied in units of KT.
    Works elementwise on numpy arrays as well as scalars.
    """
    # Convert the reduced energies to absolute values and form beta = 1/kT.
    site_energy = KT * E
    coupling = KT * J
    beta = 1 / KT
    x = c / c0
    single = np.exp(-beta * site_energy)                      # one site occupied
    double = np.exp(-beta * (2 * site_energy + coupling))     # both sites occupied
    occupancy = 2 * x * single + 2 * x ** 2 * double
    partition = 1 + 2 * x * single + x ** 2 * double
    return occupancy / partition
# Model parameters: reference concentration and reduced energies (units of KT).
c0=760
J=3
E=-5
fig,ax=plt.subplots()
plt.ylim(0,2)
plt.subplots_adjust(left=0.25,bottom=0.4)
# Concentration axis sampled densely from 0.01 to 5000.
c=np.arange(1e-2,5e3,0.005)
ax.set_ylabel(r"$<N_{un}>$", fontsize=25)
ax.set_xlabel("p(mmHg)",fontsize=25)
# NOTE(review): all the curve-plotting calls below are commented out, so no
# line artist (l1) is ever created and the legend has no entries; the slider
# callback `update` references l1 and will raise NameError until one of these
# plots is restored.
#P1=p1(c,E,-2.5,c0)
#l1,=ax.plot(c,P1,label=r"$J(K_BT)=-2.5$",linewidth=3.3)
#plt.xscale("log")
#
#P1=p1(c,E,0,c0)
#l2,=ax.plot(c,P1,label=r"$J(K_BT)=0$",color="violet")
#plt.xscale("log")
#
#P1=p1(c,E,5,c0)
#l2,=ax.plot(c,P1,label=r"$J(K_BT)=5$")
#plt.xscale("log")
ax.legend(fontsize=15)
ax.margins(0) ## Makes the function evaluate over the whole axis
plt.show()
# Slider axes for interactive E, J and C (concentration) control.
axcolor= "lightgoldenrodyellow"
axE=plt.axes([0.25,0.1,0.65,0.03],facecolor=axcolor)
axJ=plt.axes([0.25,0.15,0.65,0.03],facecolor=axcolor)
axC=plt.axes([0.25,0.2,0.65,0.03],facecolor=axcolor)
sE=Slider(axE,"E",-6,-4,valinit=-5)
sJ=Slider(axJ,"J",-10,10,valinit=-0.25)
sC=Slider(axC,"C",300,1000,valinit=760)
def update(val):
    """Slider callback: recompute the curve with the current E/J/C values.

    NOTE(review): l1 is never defined because the ax.plot(...) lines above are
    commented out — moving any slider currently raises NameError. Restore one
    of the plot lines (the one binding l1) for this to work.
    """
    J=sJ.val
    E=sE.val
    C=sC.val
    l1.set_ydata(p1(c,E,J,C))
    fig.canvas.draw_idle()
# Re-render the curve whenever any of the three sliders moves.
sJ.on_changed(update)
sE.on_changed(update)
sC.on_changed(update)
| [
"noreply@github.com"
] | noreply@github.com |
1418789cec3d7f2c86bded4e3b07d899b55fb91b | 1f6fe8847ca9741de4d48bd958a60c562ae54ef3 | /m2m-relations/articles/migrations/0011_auto_20210808_1902.py | 27e5b384dd9db2938617c8a6b04b5c557556b3fb | [] | no_license | opavelo/Django-HW-2.2 | ff897aecf11f10de410d10c4121129cc92b68359 | 9f28e92ab3f66e61cc0308b3a0418d2da7540bbf | refs/heads/master | 2023-07-06T20:07:41.627632 | 2021-08-15T14:23:53 | 2021-08-15T14:23:53 | 396,107,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 627 | py | # Generated by Django 3.2.6 on 2021-08-08 16:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('articles', '0010_auto_20210808_1840'),
]
operations = [
migrations.RenameModel(
old_name='Scopes',
new_name='Scope',
),
migrations.RemoveField(
model_name='article',
name='tag',
),
migrations.AddField(
model_name='article',
name='scopes',
field=models.ManyToManyField(through='articles.Scope', to='articles.Tag'),
),
]
| [
"opavelo@gmail.com"
] | opavelo@gmail.com |
2e3a3c24699f253c7671d55206bcd6aa7151e478 | 5522054c40e9a35b68351bfa546c2e9fffd01340 | /mobileoperators/settings.py | 9fc9e98b471b6627856ba177fb3dccefadbf3c3f | [] | no_license | thepylot/Mobile-Networks-App | 6ee36243c4861063da8b1c086fc0db882a27cb09 | 4893b810b697e399564e1fb1bb6f738b61950b76 | refs/heads/master | 2021-11-27T14:22:13.843167 | 2019-02-08T10:37:25 | 2019-02-08T10:37:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,330 | py | """
Django settings for mobileoperators project.
Generated by 'django-admin startproject' using Django 2.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control — rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'qhe1goa#897=s5hq^ci--vyby&2ty8wp_2t4dq!85u1iq%3kgb'
# SECURITY WARNING: don't run with debug turned on in production!
# NOTE(review): DEBUG=True exposes stack traces/settings; disable in production.
DEBUG = True
ALLOWED_HOSTS = [
    '208.68.36.230',
    '127.0.0.1',
]
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'mobile',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mobileoperators.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'mobileoperators.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'static')
]
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
"charliescene512@gmail.com"
] | charliescene512@gmail.com |
7d3882b1e196af38f720155251a182727eb79c77 | 42a9553bc6906f6c4d70f532477140b66183b1ef | /vLPR/convert_data.py | d9aeb6faa2122c7db2cc5836ee655ad134ad8165 | [] | no_license | jinlukang1/issue-Notebook | 5fb0f859245a1f8c78b3f1a6c6ff220d18e778ca | 807071b771d18ec15d400e55be5a303fc88403cf | refs/heads/master | 2021-06-22T01:24:09.919832 | 2020-11-26T14:14:19 | 2020-11-26T14:14:19 | 144,961,735 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 4,309 | py | import numpy as np
import glob
import cv2
import os
import json
import tqdm
# def real():
# split_list = ['train_aug']
# data_dir = r'C:\Users\Administrator\Desktop\vLPR experiment\Car_test_data'
# for split in split_list:
# im_path = os.path.join(data_dir, split, 'image')
# gt_path = os.path.join(data_dir, split, 'label')
# pos_mask_path = os.path.join(data_dir, split, 'pos_mask')
# char_path = os.path.join(data_dir, split, 'char')
# file_path = os.path.join(data_dir, 'list', split + '.txt')
# out_path = os.path.join(data_dir, split, 'npy')
# with open(file_path) as f:
# names = f.readlines()
# out_im = np.zeros([len(names), 50, 160, 3], dtype=np.uint8)
# out_gt = np.zeros([len(names), 50, 160], dtype=np.uint8)
# out_pos = np.zeros([len(names), 50, 160], dtype=np.uint8)
# out_char = np.zeros([len(names), 1, 7], dtype=np.uint8)
# for i, name in enumerate(names):
# print(i)
# name = name[:-1]
# im = cv2.imread(os.path.join(im_path, name + '.png'))
# im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
# out_im[i, :, :, :] = im
# gt = cv2.imread(os.path.join(gt_path, name + '.png'), 0)
# out_gt[i, :, :] = gt
# pos_mask = cv2.imread(os.path.join(
# pos_mask_path, name + '.png'), 0)
# out_pos[i, :, :] = pos_mask
# char = np.loadtxt(os.path.join(char_path, name + '.txt'))
# out_char[i, 0, :] = char
# np.save(os.path.join(out_path, split + '_im.npy'), out_im)
# np.save(os.path.join(out_path, split + '_gt.npy'), out_gt)
# np.save(os.path.join(out_path, split + '_pos.npy'), out_pos)
# np.save(os.path.join(out_path, split + '_char.npy'), out_char)
def npy_gen(data_type=None):
    """Pack the images listed in Imagelist.txt into .npy bundles under npy_data/.

    For each listed raw image this writes four arrays (accumulated across all
    images, saved after the loop): the RGB image resized to 160x50, the
    segmentation labels, the position mask, and the 7 license-plate characters
    parsed from the matching JSON annotation.

    data_type: prefix for the output file names. The original default of None
    crashed with TypeError at save time (None + '_im.npy') — and __main__
    calls npy_gen() with no argument. We now fall back to 'test_val'
    (presumably the intended value; it appears commented out in the original —
    TODO confirm).
    """
    if data_type is None:
        data_type = 'test_val'
    out_path = 'npy_data'
    with open("Imagelist.txt") as f:
        names = f.readlines()
    out_im = np.zeros([len(names), 50, 160, 3], dtype=np.uint8)
    out_gt = np.zeros([len(names), 50, 160], dtype=np.uint8)
    out_pos = np.zeros([len(names), 50, 160], dtype=np.uint8)
    out_char = np.zeros([len(names), 1, 7], dtype=np.uint8)
    for i, name in tqdm.tqdm(enumerate(names)):
        name = name[:-1]  # strip the trailing newline
        img = cv2.imread(name)
        img = cv2.resize(img, (160, 50), interpolation=cv2.INTER_CUBIC)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        out_im[i, :, :, :] = img
        gt = cv2.imread(name.replace('raw', 'seg_anno'))
        # Vectorized form of the original per-pixel double loop (O(H*W) Python
        # iterations): pixels whose channel-2 value is 255 become 0 on all
        # channels, every other pixel is shifted up by 1 (uint8 semantics kept).
        background = gt[:, :, 2] == 255
        gt[background] = 0
        gt[~background] += 1
        gt = cv2.resize(gt, (160, 50), interpolation=cv2.INTER_NEAREST)
        gt = cv2.cvtColor(gt, cv2.COLOR_BGR2RGB)
        out_gt[i, :, :] = gt[:, :, 2]
        pos_mask = cv2.imread(name.replace('raw', 'pos_anno'))
        # Same vectorization: only zero out background pixels here.
        pos_mask[pos_mask[:, :, 2] == 255] = 0
        pos_mask = cv2.resize(pos_mask, (160, 50), interpolation=cv2.INTER_NEAREST)
        pos_mask = cv2.cvtColor(pos_mask, cv2.COLOR_BGR2RGB)
        out_pos[i, :, :] = pos_mask[:, :, 2]
        with open(name.replace('raw', 'new_annotations').replace('png', 'json'), "r") as f:
            data_annotation = f.read()
        LP_dict = json.loads(data_annotation)
        # One label per plate character position, in the annotation's key order.
        for j, num in enumerate(LP_dict['license_plate_number'].keys()):
            out_char[i, 0, j] = LP_dict['license_plate_number'][num]
    np.save(os.path.join(out_path, data_type + '_im.npy'), out_im)
    np.save(os.path.join(out_path, data_type + '_gt.npy'), out_gt)
    np.save(os.path.join(out_path, data_type + '_pos.npy'), out_pos)
    np.save(os.path.join(out_path, data_type + '_char.npy'), out_char)
if __name__ == "__main__":
npy_gen()
| [
"jlk0617@mail.ustc.edu.cn"
] | jlk0617@mail.ustc.edu.cn |
5509325556388a74af56621122e33bf9f954c396 | 907edfeeccf9a11cbc8dbe7d104cbaa9d561baf1 | /Algorithms/Half-Toning/OrderedDither/4x4.py | a3cd87dca1aa47571fd672964e9896a1416bcd93 | [] | no_license | JGPreston/Final-year-project | c31076e006f3d20505d4e12501ebb281c822e10e | f46e932bc6fcf22f242c332dc52dd2a05da2a239 | refs/heads/master | 2023-08-17T19:40:29.165112 | 2021-10-10T16:59:06 | 2021-10-10T16:59:06 | 415,642,079 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,787 | py | import numpy as np
import os
import time
from PIL import Image
#Excel
import openpyxl
#Add path to directory
import sys
sys.path.append("../../Analysis/PSNR+SSIM/")
from PSNRSSIM import returnValues
def convert_halftoning(image):
    """Apply 4x4 ordered (Bayer) dithering to a greyscale image.

    Returns a binary PIL image whose pixels are 0 or 255. Assumes a
    single-channel (greyscale) input — TODO confirm; an RGB array would not
    broadcast against the 2-D threshold map (same as the original).

    The threshold matrix is now tiled to the input's actual dimensions
    instead of the original hard-coded 128x128 repetitions (which only worked
    for 512x512 images); behavior for 512x512 inputs is unchanged. The
    original comment also mislabeled the matrix as "2x2".
    """
    # Standard 4x4 Bayer index matrix (values 0..15).
    bayer = np.array([[0, 8, 2, 10],
                      [12, 4, 14, 6],
                      [3, 11, 1, 9],
                      [15, 7, 13, 5]])
    pixels = np.array(image, 'float64') / 256.0   # normalise 8-bit values into [0, 1)
    thresholds = bayer / 16.0                     # normalise matrix into [0, 1)
    h, w = pixels.shape[0], pixels.shape[1]
    # Tile the 4x4 matrix far enough to cover the image (ceil division),
    # then crop to the exact size.
    tiled = np.tile(thresholds, (-(-h // 4), -(-w // 4)))[:h, :w]
    # Pixel brighter than its threshold -> white, otherwise black.
    binary = np.where(pixels > tiled, 255, 0)
    return Image.fromarray(binary.astype('uint8'))
# Per-image metrics accumulated across the run (parallel lists, written to
# Excel below in the same order as fileList).
times = []
psnrValues = []
ssimValues = []
#Processes every file in the original images folder
# NOTE(review): `file` shadows the builtin of the same name.
fileList = []
for file in os.listdir("../../Images/Original/"):
    fileList.append(file[:-4]) #Remove the file extension so
fileList = sorted(fileList, key=int) #it can be sorted by int
for file in fileList: #For every file in the sorted file list
    filename = os.fsdecode(file)
    filename+=".png" #Add png file extension. Converts any file format to png
    image = Image.open("../../Images/Original/"+filename) #Open original image to halftone
    original = Image.open("../../Images/Original/"+filename) #For comparing
    print(filename)
    # Time only the halftoning itself, not the file I/O.
    start_time = time.time()
    imageConverted = convert_halftoning(image)
    finalTime = time.time() - start_time
    imageConverted.save("../../Images/Basic Halftone/Ordered/4x4/"+filename)
    psnr, ssim = returnValues(original,imageConverted) #Send original and processed image to get PSNR and SSIM values
    psnrValues.append(psnr)
    ssimValues.append(ssim)
    times.append(finalTime)
excel_document = openpyxl.load_workbook("../../../Data/Data.xlsx") #Open excel
sheet = (excel_document['Basic Halftone']) #Selects sheet
#Input values to the sheet
# NOTE(review): the Q4:Q51 / R4:R51 / S4:S51 ranges hard-code 48 rows —
# confirm this matches the number of images processed.
multiple_cells = sheet['Q4' : 'Q51']
for value, row in enumerate(multiple_cells):
    for cell in row:
        cell.value = psnrValues[value]
multiple_cells = sheet['R4' : 'R51']
for value, row in enumerate(multiple_cells):
    for cell in row:
        cell.value = ssimValues[value]
multiple_cells = sheet['S4' : 'S51']
for value, row in enumerate(multiple_cells):
    for cell in row:
        cell.value = times[value]
#End of inputting values
excel_document.save("../../../Data/Data.xlsx")
| [
"32721120+JPreston-1@users.noreply.github.com"
] | 32721120+JPreston-1@users.noreply.github.com |
158a542bb1ab20775bdbe49c9f32f2e510e248cc | 12afd6e3938c54ab240d56b5a29b9ecc480fd059 | /liveClass/asgi.py | 5a6e050fccb9d827678244dac83e8ffdf82ea033 | [] | no_license | dhruv354/django-liveclass | 6ecbc9511852fce52b810220bdd42d55fb44e2c7 | 5251e8789e521db40bb2d94cdbd2cdef8d91b01c | refs/heads/master | 2023-06-16T05:18:15.650339 | 2021-07-12T10:58:03 | 2021-07-12T10:58:03 | 377,894,170 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | """
ASGI config for liveClass project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'liveClass.settings')
application = get_asgi_application()
| [
"dhruvsin@iitk.ac.in"
] | dhruvsin@iitk.ac.in |
a2ba4afc7c10c24b24bd646ab7250dcd81777313 | 0d9dd4ac458ac954e453e6f7810ca5e1c759f82d | /list | fb6c972de61fce49941283ab222a8e272a50cc63 | [
"MIT"
] | permissive | ovkulkarni/create-repo | 9335307481686c8109baae7d88cd819dd7ca0cb6 | 0073cd761106e0c5453429204e8da56ba249eb1d | refs/heads/master | 2021-01-10T05:20:54.898788 | 2016-03-30T14:14:03 | 2016-03-30T14:15:06 | 53,800,960 | 0 | 1 | null | 2016-03-14T02:44:39 | 2016-03-13T18:35:40 | Python | UTF-8 | Python | false | false | 3,590 | #!/usr/bin/env python3
######################################################################################
# #
#The MIT License (MIT) #
# #
#Copyright (c) 2016 Omkar Kulkarni #
# #
#Permission is hereby granted, free of charge, to any person obtaining a copy #
#of this software and associated documentation files (the "Software"), to deal #
#in the Software without restriction, including without limitation the rights #
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #
#copies of the Software, and to permit persons to whom the Software is #
#furnished to do so, subject to the following conditions: #
# #
#The above copyright notice and this permission notice shall be included in all #
#copies or substantial portions of the Software. #
# #
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #
#SOFTWARE. #
# #
######################################################################################
import requests
import yaml
import sys
import json
import getpass
import os
from termcolor import colored
try:
current_dir = os.path.dirname(os.path.realpath(__file__))
with open(current_dir + '/config.yml', 'r') as f:
config = yaml.load(f.read())
password = getpass.getpass("Enter your Github Password: ")
session = requests.Session()
session.auth = (config["username"], password)
url = 'https://api.github.com/user/repos'
print('[=] Sending request to Github...')
r = session.get(url)
if r.status_code == 200:
returned = json.loads(r.text)
for item in returned:
if item["private"]:
print(colored("[PRIVATE] {} - {}".format(item["full_name"], item["html_url"]), "red"))
else:
print("{} - {}".format(item["full_name"], item["html_url"]))
else:
print("[-] Unable to access repositories. Github returned an error of {}".format(r.status_code))
print("[-] Here is the full content Github returned: {}".format(json.loads(r.text)["message"]))
except KeyboardInterrupt as e:
print("\nExiting...")
sys.exit()
except requests.ConnectionError as e:
print("\n[-] Not Connected To Internet!")
print("Exiting...")
sys.exit()
except BaseException as e:
print("\nReceived an error of {}".format(str(e)))
print("Exiting...")
sys.exit()
| [
"2019okulkarn@tjhsst.edu"
] | 2019okulkarn@tjhsst.edu | |
09ae77d9c9a9532415b5b86804f7bc5fc0eb11f9 | b5dc338a3ff623e08ec7dbd58347a4dde8f0bb39 | /base/pages/login/__init__.py | 046d9417bba3860db38522e9d14b8a49627c32d6 | [] | no_license | msapunov/Copernicus | b3709c5242ff2df6b1b44a8625cf9afe8d9439ef | bb9a7539487e77b3e6d7e0b82124da438ca0e8ab | refs/heads/mess | 2023-07-25T21:24:36.015304 | 2023-06-22T16:43:40 | 2023-06-22T16:43:40 | 37,662,179 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 97 | py | from flask import Blueprint
bp = Blueprint("login", __name__)
from base.pages.login import url
| [
"matvey.sapunov@gmail.com"
] | matvey.sapunov@gmail.com |
235d93d8501286c26e0af8f3f8b3dcf5a24a9e2a | 3ede347af20d4115c5421627b4d7a8f93b4f1658 | /eta/eta/core/module.py | 1c530d4cd300c6df6ef692ad6d39007b07dbe7bb | [
"BSD-2-Clause"
] | permissive | grohup/eecs504 | 584c2967b15875a25efdce567d86d679bba29884 | f2bab5c5c767584dd33bc9a689370db532ba899a | refs/heads/master | 2020-03-29T17:13:00.547314 | 2018-10-20T17:52:18 | 2018-10-20T17:52:18 | 150,149,799 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 20,744 | py | '''
Core module infrastructure.
See `docs/modules_dev_guide.md` for detailed information about the design and
usage of ETA modules.
Copyright 2017-2018, Voxel51, LLC
voxel51.com
Brian Moore, brian@voxel51.com
'''
# pragma pylint: disable=redefined-builtin
# pragma pylint: disable=unused-wildcard-import
# pragma pylint: disable=wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import *
from future.utils import iteritems
# pragma pylint: enable=redefined-builtin
# pragma pylint: enable=unused-wildcard-import
# pragma pylint: enable=wildcard-import
from collections import OrderedDict
from glob import glob
import os
import eta
from eta.core.config import Config, ConfigError, Configurable
from eta.core.diagram import HasBlockDiagram, BlockdiagModule
import eta.core.log as etal
import eta.core.types as etat
import eta.core.utils as etau
def load_all_metadata():
'''Loads all module metadata files.
Assumes any JSON files in the `eta.config.module_dirs` directories are
module metadata files.
Returns:
a dictionary mapping module names to ModuleMetadata instances
Raises:
ModuleMetadataError: if any of the module metadata files are invalid
'''
return {k: _load_metadata(v) for k, v in iteritems(find_all_metadata())}
def load_metadata(module_name):
'''Loads the module metadata file for the module with the given name.
Module metadata files must JSON files in one of the directories in
`eta.config.module_dirs`.
Args:
module_name: the name of the module
Returns:
the ModuleMetadata instance for the given module
Raises:
ModuleMetadataError: if the module metadata file could not be found
or was invalid
'''
return _load_metadata(find_metadata(module_name))
def _load_metadata(config):
metadata = ModuleMetadata.from_json(config)
name = os.path.splitext(os.path.basename(config))[0]
if metadata.info.name != name:
raise ModuleMetadataError(
"Name '%s' from ModuleMetadata must match module name '%s'" % (
metadata.info.name, name))
return metadata
def find_all_metadata():
'''Finds all module metadata files.
Assumes any JSON files in the `eta.config.module_dirs` directories are
module metadata files. To load these files, use `load_all_metadata()`.
Returns:
a dictionary mapping module names to (absolute paths to) module
metadata filenames
Raises:
ModuleMetadataError: if the module names are not unique
'''
d = {}
mdirs = etau.make_search_path(eta.config.module_dirs)
for mdir in mdirs:
for path in glob(os.path.join(mdir, "*.json")):
name = os.path.splitext(os.path.basename(path))[0]
if name in d:
raise ModuleMetadataError(
"Found two '%s' modules. Names must be unique." % name)
d[name] = path
return d
def find_metadata(module_name):
'''Finds the module metadata file for the module with the given name.
Module metadata files must be JSON files in one of the directories in
`eta.config.module_dirs`.
Args:
module_name: the name of the module
Returns:
the (absolute) path to the module metadata file
Raises:
ModuleMetadataError: if the module metadata file could not be found
'''
try:
return find_all_metadata()[module_name]
except KeyError:
raise ModuleMetadataError(
"Could not find module '%s'" % module_name)
def find_exe(module_metadata):
'''Finds the executable for the given ModuleMetadata instance.
Args:
module_metadata: the ModuleMetadata instance for the module
Returns:
the (absolute) path to the module executable
Raises:
ModuleMetadataError: if the module executable could not be found
'''
meta_path = find_metadata(module_metadata.info.name)
exe_path = os.path.join(
os.path.dirname(meta_path), module_metadata.info.exe)
if not os.path.isfile(exe_path):
raise ModuleMetadataError(
"Could not find module executable '%s'" % exe_path)
return exe_path
#
# @todo should pass a PipelineConfig instance here, not just the path. The need
# to use PipelineConfig here is causing a circular import with
# eta.core.module, which suggests this is a bad design...
#
def setup(module_config, pipeline_config_path=None):
'''Perform module setup.
If a pipeline config is provided, it overrides any applicable values in
the module config.
Args:
module_config: a Config instance derived from BaseModuleConfig
pipeline_config_path: an optional path to a PipelineConfig
'''
if pipeline_config_path:
# Load pipeline config
from eta.core.pipeline import PipelineConfig
pipeline_config = PipelineConfig.from_json(pipeline_config_path)
# Inherit settings from pipeline
module_config.base.eta_config.update(pipeline_config.eta_config)
module_config.base.logging_config = pipeline_config.logging_config
# Setup logging
etal.custom_setup(module_config.base.logging_config)
# Apply config settings
eta.set_config_settings(**module_config.base.eta_config)
class BaseModuleConfig(Config):
'''Base module configuration class that defines common configuration
fields that all modules must support.
All fields defined here should provide default values.
Attributes:
base: an `eta.core.module.BaseModuleConfigSettings` instance defining
module configuration parameters
'''
def __init__(self, d):
self.base = self.parse_object(
d, "base", BaseModuleConfigSettings, default=None)
if self.base is None:
self.base = BaseModuleConfigSettings.default()
class BaseModuleConfigSettings(Config):
'''Base module configuration settings that all modules must support.
All fields defined here should provide default values.
Attributes:
eta_config: a dictionary defining custom ETA config settings to apply
before running the module
logging_config: an `eta.core.log.LoggingConfig` instance defining
the logging configuration settings for the module
'''
def __init__(self, d):
self.eta_config = self.parse_dict(d, "eta_config", default={})
self.logging_config = self.parse_object(
d, "logging_config", etal.LoggingConfig,
default=etal.LoggingConfig.default())
class GenericModuleConfig(Config):
'''Generic module configuration class.
This class is used by `eta.core.builder.PipelineBuilder` to build
module configuration files.
'''
def __init__(self, d):
self.data = self.parse_array(d, "data", default=[])
self.parameters = self.parse_dict(d, "parameters", default={})
class ModuleMetadataConfig(Config):
'''Module metadata configuration class.'''
def __init__(self, d):
self.info = self.parse_object(d, "info", ModuleInfoConfig)
self.inputs = self.parse_object_array(d, "inputs", ModuleInputConfig)
self.outputs = self.parse_object_array(
d, "outputs", ModuleOutputConfig)
self.parameters = self.parse_object_array(
d, "parameters", ModuleParameterConfig)
def attributes(self):
return ["info", "inputs", "outputs", "parameters"]
class ModuleInfoConfig(Config):
'''Module info configuration class.'''
def __init__(self, d):
self.name = self.parse_string(d, "name")
self.type = self.parse_string(d, "type")
self.version = self.parse_string(d, "version")
self.description = self.parse_string(d, "description")
self.exe = self.parse_string(d, "exe")
def attributes(self):
return ["name", "type", "version", "description", "exe"]
class ModuleInputConfig(Config):
'''Module input descriptor configuration.'''
def __init__(self, d):
self.name = self.parse_string(d, "name")
self.type = self.parse_string(d, "type")
self.description = self.parse_string(d, "description")
self.required = self.parse_bool(d, "required", default=True)
def attributes(self):
return ["name", "type", "description", "required"]
class ModuleOutputConfig(Config):
'''Module output descriptor configuration.'''
def __init__(self, d):
self.name = self.parse_string(d, "name")
self.type = self.parse_string(d, "type")
self.description = self.parse_string(d, "description")
self.required = self.parse_bool(d, "required", default=True)
def attributes(self):
return ["name", "type", "description", "required"]
class ModuleParameterConfig(Config):
'''Module parameter descriptor configuration.'''
def __init__(self, d):
self.name = self.parse_string(d, "name")
self.type = self.parse_string(d, "type")
self.description = self.parse_string(d, "description")
self.required = self.parse_bool(d, "required", default=True)
if not self.required:
self.default = self.parse_raw(d, "default")
elif "default" in d:
raise ConfigError(
"Module parameter '%s' is required, so it should not have a "
"default value" % self.name)
def attributes(self):
attrs = ["name", "type", "description", "required"]
if not self.required:
attrs.append("default")
return attrs
class ModuleInfo(Configurable):
'''Module info descriptor.
Attributes:
name: the name of the module
type: the eta.core.types.Type of the module
version: the version of the module
description: a free text description of the module
exe: the executable for the module
'''
def __init__(self, config):
self.validate(config)
self.name = config.name
self.type = self._parse_type(config.type)
self.version = config.version
self.description = config.description
self.exe = config.exe
@staticmethod
def _parse_type(type_str):
type_ = etat.parse_type(type_str)
if not etat.is_module(type_):
raise ModuleMetadataError(
"'%s' is not a valid module type" % type_)
return type_
class ModuleInput(Configurable):
'''Module input descriptor.
Module inputs must be subclasses of eta.core.types.Data.
Attributes:
name: the name of the input
type: the eta.core.types.Type of the input
description: a free text description of the input
required: whether the input is required
'''
def __init__(self, config):
'''Creates a new ModuleInput instance.
Args:
config: a ModuleInputConfig instance
Raises:
'''
self.validate(config)
self.name = config.name
self.type = self._parse_type(config.type)
self.description = config.description
self.required = config.required
def is_valid_path(self, path):
'''Returns True/False indicating whether the given path is a valid
setting for this input.'''
return self.type.is_valid_path(path)
@property
def is_required(self):
'''Returns True/False if this input is required.'''
return self.required
def _parse_type(self, type_str):
type_ = etat.parse_type(type_str)
if not etat.is_data(type_):
raise ModuleMetadataError((
"Module input '%s' has type '%s' but must be a subclass "
"of Data") % (self.name, type_))
return type_
class ModuleOutput(Configurable):
'''Module output descriptor.
Module outputs must be subclasses of eta.core.types.ConcreteData.
Attributes:
name: the name of the output
type: the eta.core.types.Type of the output
description: a free text description of the output
required: whether the output is required
'''
def __init__(self, config):
'''Creates a new ModuleOutput instance.
Args:
config: a ModuleOutputConfig instance
Raises:
'''
self.validate(config)
self.name = config.name
self.type = self._parse_type(config.type)
self.description = config.description
self.required = config.required
def is_valid_path(self, path):
'''Returns True/False indicating whether the given path is a valid
setting for this output.'''
return self.type.is_valid_path(path)
@property
def is_required(self):
'''Returns True/False if this output is required.'''
return self.required
def _parse_type(self, type_str):
type_ = etat.parse_type(type_str)
if not etat.is_concrete_data(type_):
raise ModuleMetadataError((
"Module output '%s' has type '%s' but must be a subclass "
"of ConcreteData") % (self.name, type_))
return type_
class ModuleParameter(Configurable):
'''Module parameter descriptor.
Module parameters must be subclasses of eta.core.types.Builtin or
eta.core.types.ConcreteData.
Attributes:
name: the name of the parameter
type: the eta.core.types.Type of the parameter
description: a free text description of the parameter
required: whether the parameter is required
'''
def __init__(self, config):
self.validate(config)
self.name = config.name
self.type = self._parse_type(config.name, config.type)
self.description = config.description
self.required = config.required
if not self.required:
self._default = config.default
self._validate_default()
def is_valid_value(self, val):
'''Returns True/False indicating whether the given value is a valid
setting for this parameter.'''
if self.is_builtin:
return self.type.is_valid_value(val)
return self.type.is_valid_path(val)
@property
def is_required(self):
'''Returns True/False if this parameter is required.'''
return self.required
@property
def is_builtin(self):
'''Returns True/False if this parameter is a Builtin.'''
return etat.is_builtin(self.type)
@property
def is_data(self):
'''Returns True/False if this parameter is Data.'''
return etat.is_data(self.type)
@property
def default_value(self):
'''Gets the default value for this parameter.'''
if self.is_required:
raise ModuleMetadataError(
"Module parameter '%s' is required, so it has no default "
"value" % self.name)
return self._default
@staticmethod
def _parse_type(name, type_str):
type_ = etat.parse_type(type_str)
if not etat.is_builtin(type_) and not etat.is_concrete_data(type_):
raise ModuleMetadataError(
"Module parameter '%s' has type '%s' but must be a subclass "
"of Builtin or ConcreteData" % (name, type_))
return type_
def _validate_default(self):
if self._default is None:
# We always allow None, which implies that the module can function
# without this parameter being set to a valid typed value
is_valid = True
elif self.is_builtin:
is_valid = self.type.is_valid_value(self._default)
else:
is_valid = self.type.is_valid_path(self._default)
if not is_valid:
raise ModuleMetadataError(
"Default value '%s' is invalid for module parameter '%s' of "
"'%s'" % (self._default, self.name, self.type))
class ModuleMetadata(Configurable, HasBlockDiagram):
'''Class that encapsulates the architecture of a module.
A module definition is valid if all of the following are true:
- the module has at least one input and output
- all input, output, and parameter names are mutually unique
- all inputs have types that are subclasses of eta.core.types.Data
- all outputs have types that are subclasses of
eta.core.types.ConcreteData
- all parameters have types that are subclasses of
eta.core.types.Builtin or eta.core.types.ConcreteData
- any default parameters are valid values for their associated types
Attributes:
info: a ModuleInfo instance describing the module
inputs: a dictionary mapping input names to ModuleInput instances
describing the inputs
outputs: a dictionary mapping output names to ModuleOutput instances
describing the outputs
parameters: a dictionary mapping parameter names to ModuleParameter
instances describing the parameters
'''
def __init__(self, config):
'''Initializes a ModuleMetadata instance.
Args:
config: a ModuleMetadataConfig instance
Raises:
ModuleMetadataError: if the module definition was invalid
'''
self.validate(config)
self.info = None
self.inputs = OrderedDict()
self.outputs = OrderedDict()
self.parameters = OrderedDict()
self._parse_metadata(config)
def has_input(self, name):
'''Returns True/False if the module has an input `name`.'''
return name in self.inputs
def has_output(self, name):
'''Returns True/False if the module has an output `name`.'''
return name in self.outputs
def has_parameter(self, name):
'''Returns True/False if the module has a parameter `name`.'''
return name in self.parameters
def is_valid_input(self, name, path):
'''Returns True/False if `path` is a valid path for input `name`.'''
return self.get_input(name).is_valid_path(path)
def is_valid_output(self, name, path):
'''Returns True/False if `path` is a valid path for output `name`.'''
return self.get_output(name).is_valid_path(path)
def is_valid_parameter(self, name, val):
'''Returns True/False if `val` is a valid value for parameter
`name`.
'''
return self.get_parameter(name).is_valid_value(val)
def get_input(self, name):
'''Returns the ModuleInput instance for input `name`.'''
return self.inputs[name]
def get_output(self, name):
'''Returns the ModuleOutput instance for output `name`.'''
return self.outputs[name]
def get_parameter(self, name):
'''Returns the ModuleParameter instance for parameter `name`.'''
return self.parameters[name]
def to_blockdiag(self):
'''Returns a BlockdiagModule representation of this module.'''
bm = BlockdiagModule(self.info.name)
for name in self.inputs:
bm.add_input(name)
for name in self.outputs:
bm.add_output(name)
for name in self.parameters:
bm.add_parameter(name)
return bm
def _parse_metadata(self, config):
self.info = ModuleInfo(config.info)
if not config.inputs:
raise ModuleMetadataError(
"Module '%s' must have at least one input" % self.info.name)
for i in config.inputs:
self._verify_uniqueness(i.name)
self.inputs[i.name] = ModuleInput(i)
if not config.outputs:
raise ModuleMetadataError(
"Module '%s' must have at least one output" % self.info.name)
for o in config.outputs:
self._verify_uniqueness(o.name)
self.outputs[o.name] = ModuleOutput(o)
for p in config.parameters:
self._verify_uniqueness(p.name)
self.parameters[p.name] = ModuleParameter(p)
def _verify_uniqueness(self, name):
if name == self.info.name:
raise ModuleMetadataError(
"Module '%s' cannot have a field with the same name" % name)
is_duplicate = (
name in self.inputs or
name in self.outputs or
name in self.parameters
)
if is_duplicate:
raise ModuleMetadataError(
"Module '%s' cannot have duplicate field '%s'" % (
self.info.name, name))
class ModuleMetadataError(Exception):
'''Exception raised when an invalid module metadata file is encountered.'''
pass
| [
"grohup@gmail.com"
] | grohup@gmail.com |
2eb061649dea4034c7bc947f57dc0afdf9c98428 | d4d99d7df14dd8c3d3dfd283b751b7844a55825f | /user_auth/urls.py | 81b02508f29b84436236cc311c2e7b75ce6abf02 | [] | no_license | roshansk/opp-live | 88add9c4e57617eb44bcd332a17f3851b0148bf6 | 5a7b7b6933fa74c8bb2566f77734254a5370ca29 | refs/heads/master | 2023-04-25T16:33:28.265852 | 2021-05-25T11:53:09 | 2021-05-25T11:53:09 | 369,150,144 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | from django.urls import path
from .views import registerUser, loginUser
urlpatterns = [
path('register/', registerUser , name='register'),
path('login/', loginUser , name='login' ),
] | [
"roshansk16@gmail.com"
] | roshansk16@gmail.com |
c9efbd7958f58d6be21252c2a8fe2a2f432794bb | 7cc2c04f440b0e473e861f13ce56385a4d5f5b1b | /untitled/Aula 4.py | 0b9f9d88fc712840317af896695a7849597efb77 | [] | no_license | Helberte-AC/aula-python | a9bb41d711081f9510f70fe7ac43260a3e3f4878 | c490132633a524476dd617160ec3238b9026d560 | refs/heads/master | 2022-12-02T21:42:58.515557 | 2020-08-27T01:12:08 | 2020-08-27T01:12:08 | 290,635,198 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,473 | py | #print('Aula 4')
# Comando for n° primo.
# a = int(input('Entre com o n°: '))
# div = 0
# for x in range(1, a+1):
# resto = a % x
# print(x, resto)
# if resto == 0:
# div += 1
# if div == 2:
# print('n° {} é primo' .format(a))
# else:
# // print('n° {} é primo'.format(a))//
# a = int(input('Entre com o n°: '))
# for num in range(a + 1):
# div = 0
# for x in range(1, num + 1):
# resto = num % x
# #print(x, resto)
# if resto == 0:
# div += 1
# if div == 2:
# print(num)
# // comando while//
#
# nota = int(input('Entre com a nota: '))
# while nota > 10:
# nota = int(input('Nota inválida. Entre com a nota correta: '))
#// Media de aprovação //
a = int(input('1° bimestre: '))
while a > 10:
a = int(input('Você informou nota errada, 1° bimestre. '))
b = int(input('2° bimestre: '))
while b > 10:
b = int(input('Você informou nota errada, 2° bimestre. '))
c = int(input('3° bimestre: '))
while c > 10:
c = int(input('Você informou nota errada, 3° bimestre. '))
d = int(input('4° bimestre: '))
while d > 10:
d = int(input('Você informou nota errada, 4° bimestre. '))
media = (a + b + c + d)/4
print('Media: {}'. format(media))
if media >= 6:
print('Você alcançou as médias bimestrais.')
else:
print('Você perdeu média, estude mais.')
if media >= 6 or not media < 6:
print('Aprovado')
else:
print('Você está de recuperção.') | [
"hacbeto@yahoo.com.br"
] | hacbeto@yahoo.com.br |
8ab079d204faffce22220c5e5835af9b6e6961dd | 7ae5ac25692c66129a1d93e74ed2c97edba6248e | /decorator.py | 3b045bd7c54c9f0e42fd216529beeb6324174f15 | [] | no_license | sanjeevbhatia3/Python-Programming | 6a257f1905de925b69bf4fdefcd47dd91bab6e6b | db26f49d9f7bdbe4d06f578714f18699b6300824 | refs/heads/main | 2023-07-18T12:59:55.813312 | 2021-09-03T01:22:51 | 2021-09-03T01:22:51 | 369,617,871 | 0 | 0 | null | 2021-05-21T22:05:10 | 2021-05-21T18:09:07 | null | UTF-8 | Python | false | false | 360 | py | # decorator function
def car_description(input_func):
def car_details():
print(f'My car color is blue')
input_func()
print(f'Car has BOSS music system')
return car_details
# original function
@car_description
def my_car():
print(f'Car model is Tesla Model-X')
my_car()
# car_info = car_description(my_car)
# car_info()
| [
"noreply@github.com"
] | noreply@github.com |
d6d3b5c8bfd76e841ccc623bc12cd79b4b8329e9 | 28b5d1e148d34a5c1dc2800da86dd367e7ffef84 | /2019/day3puzzle2.py | f2378ac94d64f1f1edf683135ff4c934c3a1c933 | [] | no_license | harivrdhn/AdventOfCode | 36dd123600289261960c99580e2029aa2f16db27 | ee63af216a93f592313919df7e1cd419ad18366d | refs/heads/master | 2020-09-26T06:24:41.685884 | 2020-01-09T03:22:51 | 2020-01-09T03:22:51 | 226,187,522 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,077 | py | import os
def new_intersection(line1, line2):
dist = 0
if line1[1][2] != line2[1][2]:
if line1[1][2] == 2:
if line2[0][1] in range(min(line1[0][1], line1[1][1]), max(line1[0][1], line1[1][1])):
if line1[0][0] in range(min(line2[0][0], line2[1][0]), max(line2[0][0], line2[1][0])):
dist = line1[0][3] + line2[0][3] + abs(line1[0][0] - line2[0][0]) + abs(line2[0][1] - line1[0][1])
else:
if line1[0][1] in range(min(line2[0][1], line2[1][1]), max(line2[0][1], line2[1][1])):
if line2[0][0] in range(min(line1[0][0], line1[1][0]), max(line1[0][0], line1[1][0])):
dist = line1[0][3] + line2[0][3] + abs(line2[0][0] - line1[0][0]) + abs(line1[0][1] - line2[0][1])
return dist
def getPoints():
working_directory = os.path.dirname(__file__)
input_file_path = working_directory + '/day3input.txt'
with open(input_file_path) as fp:
line1 = fp.readline().rstrip('\n').split(",")
line2 = fp.readline().split(",")
return line2points(line1), line2points(line2)
def line2points(directions):
# point = x, y, orientation, steps
points = [(0,0,0,0)]
start = (0,0,0,0)
for val in directions:
step = int(val[1:])
if val[0] == "U":
start = (start[0] + step, start[1], 1, start[3] + step)
elif val[0] == "D":
start = (start[0] - step, start[1], 1, start[3] + step)
elif val[0] == "R":
start = (start[0], start[1] + step, 2, start[3] + step)
elif val[0] == "L":
start = (start[0], start[1] - step, 2, start[3] + step)
points.append(start)
return points
dist = 0
segment1, segment2 = getPoints()
for i in range(0, len(segment1) - 1):
for j in range(0, len(segment2) - 1):
dist = new_intersection((segment1[i], segment1[i+1]), (segment2[j], segment2[j+1]))
if dist != 0:
break
if dist != 0:
print(dist)
break
| [
"hapyaram@microsoft.com"
] | hapyaram@microsoft.com |
09da270e1e2c06a0e560ef30f7fadd91ddaec7e6 | 5330918e825f8d373d3907962ba28215182389c3 | /CMGTools/ZJetsTutorial/python/samples/run2012/diboson.py | b5453eedeeb69a296e71f73508c838dda097261d | [] | no_license | perrozzi/cmg-cmssw | 31103a7179222c7aa94f65e83d090a5cf2748e27 | 1f4cfd936da3a6ca78f25959a41620925c4907ca | refs/heads/CMG_PAT_V5_18_from-CMSSW_5_3_22 | 2021-01-16T23:15:58.556441 | 2017-05-11T22:43:15 | 2017-05-11T22:43:15 | 13,272,641 | 1 | 0 | null | 2017-05-11T22:43:16 | 2013-10-02T14:05:21 | C++ | UTF-8 | Python | false | false | 2,913 | py | import CMGTools.RootTools.fwlite.Config as cfg
# exclusive madgraph samples
# -- -- -- -- -- -- -- --
WWJetsTo2L2Nu = cfg.MCComponent(
name = 'WWJetsTo2L2Nu',
files = [],
xSection = 5.824, #PG from twiki: https://twiki.cern.ch/twiki/bin/viewauth/CMS/HiggsToTauTauWorkingSummer2012#MC_samples_and_cross_sections
nGenEvents = 1,
triggers = [],
effCorrFactor = 1 )
WZJetsTo2L2Q = cfg.MCComponent(
name = 'WZJetsTo2L2Q',
files = [],
xSection = 2.207, #PG from twiki: https://twiki.cern.ch/twiki/bin/viewauth/CMS/HiggsToTauTauWorkingSummer2012#MC_samples_and_cross_sections
nGenEvents = 1,
triggers = [],
effCorrFactor = 1 )
WZJetsTo3LNu = cfg.MCComponent(
name = 'WZJetsTo3LNu',
files = [],
xSection = 1.058, #PG from twiki: https://twiki.cern.ch/twiki/bin/viewauth/CMS/HiggsToTauTauWorkingSummer2012#MC_samples_and_cross_sections
nGenEvents = 1,
triggers = [],
effCorrFactor = 1 )
ZZJetsTo2L2Nu = cfg.MCComponent(
name = 'ZZJetsTo2L2Nu',
files = [],
xSection = 0.716, #PG from twiki: https://twiki.cern.ch/twiki/bin/viewauth/CMS/HiggsToTauTauWorkingSummer2012#MC_samples_and_cross_sections
nGenEvents = 1,
triggers = [],
effCorrFactor = 1 )
ZZJetsTo2L2Q = cfg.MCComponent(
name = 'ZZJetsTo2L2Q',
files = [],
xSection = 2.502, #PG from twiki: https://twiki.cern.ch/twiki/bin/viewauth/CMS/HiggsToTauTauWorkingSummer2012#MC_samples_and_cross_sections
nGenEvents = 1,
triggers = [],
effCorrFactor = 1 )
ZZJetsTo4L = cfg.MCComponent(
name = 'ZZJetsTo4L',
files = [],
xSection = 0.181, #PG from twiki: https://twiki.cern.ch/twiki/bin/viewauth/CMS/HiggsToTauTauWorkingSummer2012#MC_samples_and_cross_sections
nGenEvents = 1,
triggers = [],
effCorrFactor = 1 )
mc_diboson_xcl = [
WWJetsTo2L2Nu,
WZJetsTo2L2Q,
WZJetsTo3LNu,
ZZJetsTo2L2Nu,
ZZJetsTo2L2Q,
ZZJetsTo4L
]
# inclusive pythia samples
# -- -- -- -- -- -- -- --
WW = cfg.MCComponent(
name = 'WW',
files = [],
# xSection = 57.1097, # correction factor from Valentina
xSection = 54.838, #PG numbers from Andrew
nGenEvents = 1,
triggers = [],
effCorrFactor = 1 )
WZ = cfg.MCComponent(
name = 'WZ',
files = [],
# xSection = 32.3161,
# xSection = 32.3161 * 0.97, #PG scale factor wrt exclusive samples XS
xSection = 33.21, #PG number from Andrew
nGenEvents = 1,
triggers = [],
effCorrFactor = 1 )
ZZ = cfg.MCComponent(
name = 'ZZ',
files = [],
# xSection = 8.25561, # correction factor from Valentina
# xSection = 8.3 * 2.13, #PG scale factor wrt exclusive samples XS
xSection = 17.654, #PG number from Andrew
nGenEvents = 1,
triggers = [],
effCorrFactor = 1 )
# inclusive pythia samples
mc_diboson_inc = [
WW,
WZ,
ZZ
]
# exclusive madgraph samples
mc_diboson = mc_diboson_xcl
| [
"colin.bernet@cern.ch"
] | colin.bernet@cern.ch |
dd539c83e1d88b90fd293cba625a36db7451ac85 | add74ecbd87c711f1e10898f87ffd31bb39cc5d6 | /xcp2k/new-cp2k.py | 11ac45a17e0441860ee71dfff7ef01d83976f612 | [] | no_license | superstar54/xcp2k | 82071e29613ccf58fc14e684154bb9392d00458b | e8afae2ccb4b777ddd3731fe99f451b56d416a83 | refs/heads/master | 2021-11-11T21:17:30.292500 | 2021-11-06T06:31:20 | 2021-11-06T06:31:20 | 62,589,715 | 8 | 2 | null | null | null | null | UTF-8 | Python | false | false | 25,853 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
"""This module defines an ASE interface to CP2K.
Developed on the basis of
1) pycp2k by Singroup
https://github.com/SINGROUP/pycp2k
2) ase.calculator.cp2k by Ole Schuett
https://gitlab.com/ase/ase/blob/master/ase/calculators/cp2k.py
3) jasp by John Kitchin
https://github.com/jkitchin/jasp
Before running, two environment flay should be set:
1) $CP2K_DATA_DIR, path of the directory containing the basis set files (basis set, pseudo_potential, ...)
2) $ASE_CP2K_COMMAND, pointing to the command launching cp2k e.g. 'cp2k.sopt' or 'mpirun -n 4 cp2k.ssmp'.
For more information about cp2k, please visit:
http://www.cp2k.org
Author: Xing Wang <xing.wang@psi.ch>
"""
import sys
from subprocess import Popen, PIPE
import os
from os.path import join, isfile, split, islink
import numpy as np
import ase.io
from ase import Atoms, Atom
from ase.calculators.calculator import FileIOCalculator, all_changes, Parameters
from ase.units import Rydberg
from xcp2k.cp2k_tools import *
from xcp2k.cp2krc import *
from scipy.constants import physical_constants, c, h, hbar, e
from xcp2k.classes._CP2K_INPUT1 import _CP2K_INPUT1
from xcp2k.inputparser import CP2KInputParser
import logging
import traceback
# Module-level logger: stderr handler with file/line context in each record.
logger = logging.getLogger('CP2K')
handler = logging.StreamHandler()
formatter = logging.Formatter(
    '%(filename)s[line:%(lineno)d] %(levelname)s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
# logger.setLevel(logging.DEBUG)
class CP2K(FileIOCalculator):
    """ASE-Calculator for CP2K.
    CP2K is a program to perform atomistic and molecular simulations of solid
    state, liquid, molecular, and biological systems. It provides a general
    framework for different methods such as e.g., density functional theory
    (DFT) using a mixed Gaussian and plane waves approach (GPW) and classical
    pair and many-body potentials.
    CP2K is freely available under the GPL license.
    It is written in Fortran 2003 and can be run efficiently in parallel.
    Check http://www.cp2k.org about how to obtain and install CP2K.
    Make sure that you also have the CP2K-shell available, since it is required
    by the CP2K-calulator.
    Arguments:
    debug: bool
        Flag to enable debug mode. Default is ``False``.
    nodes: int
        Number of nodes used for the calcuation. Default is ``1``.
    env: str
        System of the Cluster.
        Default is ``SLURM``.
    """
    name = 'cp2k'
    # Quantities this calculator can deliver through self.results.
    implemented_properties = ['energy', 'energies', 'forces', 'stress', 'charges', 'frequencies']
    def __init__(self, restart=None, mode = 0, label = 'cp2k', ignore_bad_restart_file=False,
                 queue = None,
                 atoms=None, command=None,
                 debug=False, **kwargs):
        """Construct CP2K-calculator object.

        args:
            restart, ignore_bad_restart_file, atoms, command, kwargs:
                forwarded to ase's FileIOCalculator.
            mode: unused here; kept for backward compatibility.
            label: '<dir>/<prefix>'; split into working directory and
                file-name prefix below.
            queue: optional dict of SLURM directives (nodes, time, ...) used
                by set_queue() to generate a batch job file.
            debug: when True, switch the module logger to DEBUG level.
        """
        # {'nodes': None, 'ntasks-per-node': None, partition': None, 'account': None, 'time': '01:00:00'},
        FileIOCalculator.__init__(self, restart, ignore_bad_restart_file,
                                  label, atoms, **kwargs)
        # Everything after the last '/' is the prefix; the rest is the dir.
        self.prefix = label.split('/')[-1]
        self.directory = './' + label[0:-len(self.prefix)]
        self.set_queue(queue)
        if debug:
            logger.setLevel(logging.DEBUG)
        # Root of the pycp2k-style input object tree.
        self.CP2K_INPUT = _CP2K_INPUT1()
        self._debug = debug
        self.out = None
        self.inp = None
        self.symmetry = None
        self.results = {}
        self.parameters = {} # calculational parameters
        self.atoms = None
        self.positions = None
        if atoms is not None:
            atoms.calc = self
            self.atoms = atoms
            self.natoms = len(atoms)
    def set_queue(self, queue = None):
        """Build self.command; with a queue dict, write a SLURM job file
        and submit via sbatch, otherwise use $ASE_CP2K_COMMAND directly."""
        command = os.environ.get('ASE_CP2K_COMMAND')
        if queue:
            # Write the file
            if not os.path.exists(self.directory):
                os.makedirs(self.directory)
            with open('%s.job_file' % self.directory, 'w') as fh:
                fh.writelines("#!/bin/bash\n")
                fh.writelines("#SBATCH --job-name=%s \n" % self.prefix)
                fh.writelines("#SBATCH --output=%s.out\n" % self.prefix)
                fh.writelines("#SBATCH --error=%s.err\n" % self.prefix)
                fh.writelines("#SBATCH --wait\n")
                # Only directives with a value are emitted.
                for key, value in queue.items():
                    if value:
                        fh.writelines("#SBATCH --%s=%s\n" %(key, value))
                fh.writelines('''export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK \n
module load CP2K/6.1-intel-2018a \n
ulimit -s unlimited\n
export ASE_CP2K_COMMAND="mpirun cp2k.popt -i cp2k.inp -o cp2k.out"\n
export CP2K_DATA_DIR=/home/ubelix/dcb/xw20n572/apps/cp2k-7.1.0/data\n''')
                fh.writelines("%s \n" % command)
            self.command = "sbatch {0}".format('.job_file')
        else:
            self.command = command
def update(self, atoms):
if self.calculation_required(atoms, ['energy']):
if (self.atoms is None or
self.atoms.positions.shape != atoms.positions.shape):
# Completely new calculation just reusing the same
# calculator, so delete any old VASP files found.
self.clean()
self.calculate(atoms)
def write(self, label):
'Write atoms, parameters and calculated results into restart files.'
logger.debug("Writting restart to: ", label)
self.atoms.write(label + '_restart.traj')
f = open(label + '_params.ase', 'a')
for key, val in self.parameters.items():
f.write('{0} = {1} \n'.format(key, val))
f.close()
open(label + '_results.ase', 'w').write(repr(self.results))
    def read(self, label):
        'Read atoms, parameters and calculated results from restart files.'
        self.atoms = ase.io.read(label + '_restart.traj')
        #self.parameters = Parameters.read(label + '_params.ase')
        # NOTE(review): eval() executes arbitrary code from the results file;
        # only read restart files you trust. The handle is also never closed
        # explicitly (relies on refcounting).
        results_txt = open(label + '_results.ase').read()
        self.results = eval(results_txt, {'array': np.array})
    def read_inp(self, ):
        # Parse an existing cp2k.inp to recover project name and atom count.
        if self.inp is None:
            self.inp = join(self.directory, 'cp2k.inp')
        inputparser = CP2KInputParser()
        inpcalc = inputparser.parse(self, self.inp)
        # print(inpcalc.CP2K_INPUT)
        self.prefix = inpcalc.CP2K_INPUT.GLOBAL.Project_name
        # print(inpcalc.CP2K_INPUT.FORCE_EVAL_list[0].SUBSYS.COORD.Default_keyword)
        self.natoms = len(inpcalc.CP2K_INPUT.FORCE_EVAL_list[0].SUBSYS.COORD.Default_keyword)
        self.inpcalc = inpcalc
        # print(inputparser)
        # print(calc.CP2K_INPUT)
def update_atoms(self, atoms):
"""read new geometry when ."""
# Updata atoms positions and cell
if self.CP2K_INPUT.GLOBAL.Run_type.upper() == 'GEO_OPT':
xyzfile = join(self.directory, self.prefix+'-pos-1.xyz')
atoms_sorted = ase.io.read(xyzfile)
atoms.positions = atoms_sorted.positions
self.atoms = atoms
if self.CP2K_INPUT.GLOBAL.Run_type.upper() == 'CELL_OPT':
xyzfile = join(self.directory, self.prefix+'-pos-1.xyz')
atoms_sorted = ase.io.read(xyzfile)
atoms.positions = atoms_sorted.positions
atoms.cell = self.read_cell()
self.atoms = atoms
#
def read_cell(self,):
#
cell = np.zeros([3, 3])
n = len(self.outlines)
for i in range(n):
if 'CELL| Volume' in self.outlines[i]:
for j in range(3):
data = self.outlines[i + 1 + j].split()
for icell in range(3):
cell[j, icell] = float(data[4 + icell])
return cell
#
def read_results(self, out = None):
# self.read_inp()
if not out:
self.out = join(self.directory, 'cp2k.out')
# print(self.out)
with open(self.out, 'r') as f:
self.outlines = f.readlines()
self.read_info()
converged = self.read_convergence()
if not converged:
os.system('tail -20 ' + self.out)
# raise RuntimeError('CP2K did not converge!\n' +
# 'The last lines of output are printed above ' +
# 'and should give an indication why.')
self.read_energy()
self.read_geometry()
# self.read_forces()
# self.read_time()
#self.read_stress()
#
    def read_info(self):
        """Scan the output for the project name and NEB band energies."""
        energies = []
        for line in self.outlines:
            if line.rfind('GLOBAL| Project name') > -1:
                self.prefix = line.split()[-1]
            if line.rfind('NUMBER OF NEB REPLICA') > -1:
                self.nimages = int(line.split()[-1])
            if line.rfind('BAND TOTAL ENERGY [au]') > -1:
                e = float(line.split()[-1])
                energies.append(e)
        self.band_total_energies = energies
    def set_results(self, atoms):
        """Snapshot the current parameters and geometry after a run.

        NOTE(review): references ``self.params``, which is never assigned in
        this class (``self.parameters`` is) -- calling this would raise
        AttributeError; confirm the intended attribute.
        """
        #self.read(atoms)
        self.old_params = self.params.copy()
        self.atoms = atoms.copy()
        self.positions = atoms.positions # +++++++++++##????
        self.name = 'cp2k'
def read_convergence(self):
converged = False
for n, line in enumerate(self.outlines[-100:-1]):
if line.rfind('PROGRAM ENDED AT') > -1:
converged = True
if line.rfind('The number of warnings') > -1:
data = int(line.split()[9])
if data>0:
print(line)
return converged
def read_energy(self):
energies = []
free_energies = []
cone = physical_constants['Hartree energy in eV'][0]
#
for line in self.outlines:
if line.rfind('ENERGY|') > -1:
E0 = float(line.split()[8])*cone
energies.append(E0)
self.results['energy'] = E0
elif line.rfind('Total energy uncorrected') > -1:
F = float(line.split()[5])
free_energies.append(F)
self.results['free_energy'] = F
self.results['energies'] = energies
self.results['free_energies'] = free_energies
def read_forces(self):
"""Method that reads forces from the output file.
If 'all' is switched on, the forces for all ionic steps
in the output file will be returned, in other case only the
forces for the last ionic configuration are returned."""
conf = physical_constants['atomic unit of force'][0]/physical_constants['electron volt'][0]*10**(-10)
forces = np.zeros([self.natoms, 3])
for n, line in enumerate(self.outlines):
if line.rfind('# Atom Kind Element') > -1:
try :
for iatom in range(self.natoms):
data = self.outlines[n + iatom + 1].split()
for iforce in range(3):
forces[iatom, iforce] = float(data[3 + iforce])*conf
except:
print('read forces error, cp2k run may be interupt')
self.results['forces'] = forces
def read_bader_charge(self, filename = None, atoms = None):
if filename is None:
filename = 'ACF.dat'
# if 'ACF.dat' is None:
# os.system('bader *.cube')
if atoms is None:
atoms = self.atoms
natoms = len(atoms)
bader_charge = np.zeros([natoms])
with open(filename, 'r') as f:
lines = f.readlines()
for iatom in range(natoms):
data = lines[iatom + 2].split()
bader_charge[iatom] = float(data[4])
self.results['bader_charge'] = bader_charge
    def read_charges_moments(self):
        """Parse Mulliken and Hirshfeld charges (and spin moments for
        spin-polarized runs).

        Results are stored under 'charges-M'/'moments-M' (Mulliken) and
        'charges-H'/'moments-H' (Hirshfeld) as [index, element, value] rows.
        """
        self.get_number_of_spins()
        # The charge column shifts by one in the unrestricted (UKS) table.
        index = 4
        if self.spin == 2:
            index = 5
        for n, line in enumerate(self.outlines):
            if line.rfind('Mulliken Population Analysis') > -1:
                charges = []
                moments = []
                # Table rows start three lines below the header.
                for iatom in range(self.natoms):
                    data = self.outlines[n + iatom + 3].split()
                    charges.append([iatom, data[1], float(data[index])])
                    if self.spin == 2:
                        moments.append([iatom, data[1], float(data[index + 1])])
                self.results['charges-M'] = charges
                self.results['moments-M'] = moments
        #
        for n, line in enumerate(self.outlines):
            if line.rfind('Hirshfeld Charges') > -1:
                charges = []
                moments = []
                for iatom in range(self.natoms):
                    data = self.outlines[n + iatom + 3].split()
                    # NOTE(review): charge/moment column order is swapped
                    # relative to the Mulliken table -- confirm against a
                    # sample output.
                    charges.append([iatom, data[1], float(data[index + 1])])
                    if self.spin == 2:
                        moments.append([iatom, data[1], float(data[index])])
                self.results['charges-H'] = charges
                self.results['moments-H'] = moments
    def read_stress(self):
        """Read the stress tensor (GPa) into 6-component Voigt order:
        xx, yy, zz, yz, xz, xy."""
        stress = None
        for n, line in enumerate(self.outlines):
            if (line.rfind('STRESS TENSOR [GPa]') > -1):
                stress = []
                # The 3x3 tensor starts three lines below the header.
                for i in [n + 3, n + 4, n + 5]:
                    data = self.outlines[i].split()
                    stress += [float(data[1]), float(data[2]), float(data[3])]
        # rearrange in 6-component form and return
        # NOTE(review): raises TypeError when no stress block was printed
        # (stress stays None) -- confirm stress printing is always requested.
        self.results['stress'] = np.array([stress[0], stress[4], stress[8],
                                           stress[5], stress[2], stress[1]])
def read_time(self):
for n, line in enumerate(self.outlines):
if (line.rfind('TOTAL TIME') > -1):
time = float(self.outlines[n + 2].split()[6])
self.results['time'] = time
#
def read_frequency(self):
frequencies = []
#
# print(self.out)
for line in self.outlines:
if line.rfind('VIB|Frequency') > -1:
for f in line.split()[2:]:
frequencies.append(float(f))
self.results['frequencies'] = frequencies
def clean(self):
"""Method which cleans up after a calculation.
The default files generated by cp2k will be deleted IF this
method is called.
"""
files = ['cp2k.out']
for f in files:
try:
os.remove(f)
except OSError:
pass
    def calculation_required(self, atoms, quantities):
        """Return True when the cached results cannot be reused for *atoms*.

        NOTE(review): compares against ``self.old_directory``,
        ``self.params``/``self.old_params`` and ``self.converged``, none of
        which are initialized in ``__init__`` -- confirm they are set
        elsewhere before relying on this.
        """
        if (self.positions is None or
            (self.atoms != atoms) or
            (self.directory != self.old_directory) or
            (self.params != self.old_params) or not self.converged):
            return True
        return False
    def set_atoms(self, atoms):
        # Invalidate the convergence flag when a different structure is set.
        if (atoms != self.atoms):
            self.converged = None
        self.atoms = atoms.copy()
    def get_atoms(self):
        # Return a copy of the atoms with this calculator attached.
        # NOTE(review): Atoms.set_calculator is deprecated in recent ASE in
        # favour of ``atoms.calc = self`` -- confirm target ASE version.
        atoms = self.atoms.copy()
        atoms.set_calculator(self)
        return atoms
def read_version(self):
version = None
for line in self.outlines:
if line.find('CP2K| version string') != -1: # find the first occurence
version = "CP@K version " + line.split[-1]
break
return version
    def get_time(self):
        """Return the total wall time parsed by read_time()."""
        return self.results['time']
    def get_forces(self, atoms):
        """Run/refresh the calculation if needed and return the forces."""
        self.update(atoms)
        return self.results['forces']
    def get_charges(self, atoms):
        """Run/refresh the calculation if needed and return atomic charges.

        NOTE(review): read_charges_moments() stores 'charges-M'/'charges-H',
        not 'charges' -- confirm where results['charges'] is populated.
        """
        self.update(atoms)
        return self.results['charges']
def get_stress(self, atoms):
self.update(atoms)
if self.stress is None:
raise NotImplementedError
return self.stress
    def create_cell(self, CELL, atoms):
        """Creates the cell for a SUBSYS from an ASE Atoms object.
        Creates the cell unit vectors and replicates the periodic boundary
        conditions. Notice that this doesn't affect the PBCs used for
        electrostatics! (use create_poisson())
        args:
            CELL: the SUBSYS.CELL section for which the cell is created.
            atoms: ASE Atoms
                The ASE Atoms object from which the cell is extracted.
        """
        cell = atoms.get_cell()
        # Cell row vectors A, B, C.
        A = cell[0, :]
        B = cell[1, :]
        C = cell[2, :]
        CELL.A = A.tolist()
        CELL.B = B.tolist()
        CELL.C = C.tolist()
        pbc = atoms.get_pbc()
        periodicity = []
        if pbc[0]:
            periodicity.append("X")
        if pbc[1]:
            periodicity.append("Y")
        if pbc[2]:
            periodicity.append("Z")
        if len(periodicity) == 0:
            CELL.Periodic = "NONE"
        else:
            CELL.Periodic = "".join(periodicity)
        # Forward an optional symmetry tag if the Atoms object carries one.
        if hasattr(atoms, 'symmetry'):
            CELL.Symmetry = atoms.symmetry
def create_coord(self, COORD, atoms, molnames=None, symbol = 'True'):
"""Creates the atomic coordinates for a SUBSYS from an ASE Atoms object.
args:
subsys: pycp2k.parsedclasses._subsys1
The SUBSYS for which the coordinates are created.
atoms: ASE Atoms
Atoms from which the coordinates are extracted.
molnames: list of strings
The MOLNAME for each atom in correct order
"""
atom_list = []
for i_atom, atom in enumerate(atoms):
if symbol:
if hasattr(atoms, 'kinds'):
new_atom = [atoms.kinds[i_atom], atom.position[0], atom.position[1], atom.position[2]]
else:
new_atom = [atom.symbol, atom.position[0], atom.position[1], atom.position[2]]
else:
new_atom = [atom.position[0], atom.position[1], atom.position[2]]
if molnames is not None:
new_atom.append(molnames[i_atom])
atom_list.append(new_atom)
COORD.Default_keyword = atom_list
    def create_constraint(self, constraint, atoms, molnames=None):
        """Translate ASE FixAtoms/FixScaled constraints into CP2K
        FIXED_ATOMS subsections.

        args:
            constraint: the MOTION.CONSTRAINT section to fill.
            atoms: ASE Atoms whose ``constraints`` list is converted.
            molnames: unused; kept for signature compatibility.
        """
        #write constraint
        from ase.constraints import FixAtoms, FixScaled
        self.natoms = len(atoms)
        # sflags[i] holds the per-axis fix mask from FixScaled constraints;
        # sflags_all collects indices fully frozen by FixAtoms.
        sflags = np.zeros((self.natoms, 3), dtype=bool)
        sflags_all = []
        if self.atoms.constraints:
            for constr in self.atoms.constraints:
                if isinstance(constr, FixScaled):
                    sflags[constr.a] = constr.mask
                elif isinstance(constr, FixAtoms):
                    sflags_all = sflags_all + constr.index.tolist()
        # this is the same like "kind" module
        for iatom, atom in enumerate(self.atoms):
            # e.g. 'XZ' when the x and z components are frozen.
            fixed = ''.join([a for a, b in zip('XYZ', sflags[iatom]) if b])
            if len(fixed) != 0:
                fixed_atoms = constraint.FIXED_ATOMS_add()
                fixed_atoms.Components_to_fix = fixed
                fixed_atoms.List = iatom + 1
        # CP2K atom indices are 1-based.
        fixed_lists = ''.join(' ' + str(x + 1) for x in sflags_all)
        #print(sflags_all)
        if len(sflags_all) != 0:
            fixed_atoms = constraint.FIXED_ATOMS_add()
            fixed_atoms.List = fixed_lists
def create_poisson(self, poisson, atoms):
"""Creates the periodicity for a POISSON section and tries to guess a
good solver.
args:
poisson: pycp2k.parsedclasses._poisson1
The poisson section from DFT or MM for which the periodicity is
created.
atoms: ASE Atoms
The atoms from which the periodicity is extracted.
"""
# Define periodicity
pbc = atoms.get_pbc()
if sum(pbc) == 0:
poisson.Periodic = "NONE"
else:
poisson.Periodic = pbc[0]*"X" + pbc[1]*"Y" + pbc[2]*"Z"
    def write_input(self, atoms, properties=None, system_changes=None):
        """Creates an input file for CP2K executable from the object tree
        defined in CP2K_INPUT.
        """
        #self.old_input = self.new_input
        #print("write_input_file")
        self.pre_write_input_file()
        SUBSYS = self.CP2K_INPUT.FORCE_EVAL_list[0].SUBSYS
        CONSTRAINT = self.CP2K_INPUT.MOTION.CONSTRAINT
        # write atoms: cell vectors, coordinates and fixed-atom constraints
        self.create_cell(SUBSYS.CELL, self.atoms)
        self.create_coord(SUBSYS.COORD, self.atoms)
        self.create_constraint(CONSTRAINT, self.atoms)
        # write Kind
        #kinds = dict([(s.Section_parameters, s) for s in SUBSYS.KIND_list])
        #print(kinds)
        #for elem in set(self.atoms.get_chemical_symbols()):
        #    if elem not in kinds.keys():
        #        KIND = SUBSYS.KIND_add(elem) # Section_parameters can be provided as argument.
        #        KIND.Basis_set = "DZVP-MOLOPT-SR-GTH"
        #        KIND.Potential = "GTH-PBE"
        input_contents = self.CP2K_INPUT._print_input(-1)
        # Write the file
        if len(self.prefix) > 0 and not os.path.exists(self.directory):
            os.makedirs(self.directory) # cp2k expects dirs to exist
        with open(join(self.directory, 'cp2k.inp'), 'w') as input_file:
            input_file.write(input_contents)
    def pre_write_input_file(self):
        """Fill mandatory defaults (project name, run type, method) into the
        input tree before it is serialized by write_input().
        """
        #self.old_input = self.new_input
        #print("write_input_file")
        GLOBAL = self.CP2K_INPUT.GLOBAL
        FORCE_EVAL = self.CP2K_INPUT.FORCE_EVAL_list[0]
        DFT = FORCE_EVAL.DFT
        SCF = DFT.SCF
        # project name
        GLOBAL.Project_name = self.prefix
        if GLOBAL.Run_type is None:
            # NOTE(review): falls back to self.params, which is never set in
            # this class (self.parameters is) -- confirm intended attribute.
            GLOBAL.Run_type = self.params['global']['RUN_TYPE']
        #
        if not FORCE_EVAL.Method:
            FORCE_EVAL.Method = "Quickstep"
        # xc functional
        #if self.params['xc']['XC'] is not None:
        #    DFT.XC.XC_FUNCTIONAL.Section_parameters = self.params['xc']['XC']
        # forces
        #calc_forces = ['ENERGY_FORCE', 'GEO_OPT', 'CELL_OPT', 'MD']
        #if GLOBAL.Run_type.upper() in calc_forces:
        #    self.CP2K_INPUT.FORCE_EVAL_list[0].PRINT.FORCES.Section_parameters = "ON"
        # ***todo
        #self.CP2K_INPUT.FORCE_EVAL_list[0].PRINT.FORCES.Filename = "forces"
        # basis_set
        #if not DFT.Basis_set_file_name:
        #    DFT.Basis_set_file_name = "BASIS_MOLOPT"
        #if not DFT.Potential_file_name:
        #    DFT.Potential_file_name = "POTENTIAL"
def get_atomic_kinds(self):
"""Returns number of atomic kind.
['O', 'C']
"""
kinds = {}
# print(self.out)
nk = 0
for line in self.outlines:
if line.rfind('Atomic kind:') > -1:
nk += 1
kind = line.split()[3]
na = int(line.split()[-1])
flag=True
for k, e in kinds.items():
# print(k, e)
if e[0]==kind:
flag=False
kinds[k][1] = na
if flag:
kinds[nk] = [kind, na]
print(kinds)
self.kinds = kinds
    def get_fermi_level(self):
        """Parse the Fermi energy into ``self.Ef`` (in eV).

        Handles both the eV-labelled and the Hartree-labelled variants of
        the 'Fermi Energy' line; the last occurrence wins.
        NOTE(review): raises UnboundLocalError when no Fermi line exists,
        i.e. for runs without smearing -- confirm intended usage.
        """
        energies = []
        free_energies = []
        cone = physical_constants['Hartree energy in eV'][0]
        #
        # print(self.out)
        for line in self.outlines:
            if line.rfind('Fermi Energy') > -1 and line.rfind('eV') > -1:
                Ef = float(line.split()[-1])
            if line.rfind('Fermi Energy') > -1 and line.rfind('eV') == -1:
                # Value printed in Hartree; convert to eV.
                Ef = float(line.split()[-1])*cone
        self.Ef = Ef
    #
def read_bandgap(self,):
"""Return the Fermi level."""
#
# print(self.out)
bandgap = 10000000
for line in self.outlines:
if line.rfind('HOMO - LUMO gap') > -1:
tempe = float(line.split()[-1])
if tempe < bandgap:
bandgap = tempe
self.bandgap = bandgap
def get_number_of_spins(self):
"""Returns number of spins.
1 if not spin-polarized
2 if spin-polarized
"""
# print(self.out)
for line in self.outlines:
if line.rfind('DFT| Spin') > -1:
method = line.split()[-1]
break
if method=='UKS':
spin=2
else:
spin=1
self.spin = spin
    def read_geometry(self, prefix = None):
        """Read the final geometry, preferring <prefix>.in over the
        trajectory <prefix>-pos-1.xyz; stores it in results['geometry']."""
        atoms = None
        if prefix:
            self.prefix = prefix
        # print(self.prefix)
        filename = self.directory + '/{0}.in'.format(self.prefix)
        filename1 = self.directory + '/{0}-pos-1.xyz'.format(self.prefix)
        # print(filename)
        if os.path.isfile(filename):
            atoms = ase.io.read(filename)
            atoms.wrap()
        elif os.path.isfile(filename1):
            # The xyz trajectory carries no cell; recover it from the log.
            atoms = ase.io.read(filename1)
            atoms.wrap()
            atoms.cell = self.read_cell()
        self.results['geometry'] = atoms
return atoms | [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
af3cf75f0f2968cd7056e7186e2f4bea356f2051 | 52719672d2bfb067014e25dc7e2dc8476c560399 | /learning_log/users/views.py | 469fd1b0d041f59dc1f13fa0fbbcbb0b706aedc3 | [
"Apache-2.0"
] | permissive | CowryGolden/django-learning | fd810ac25a074a043d3d2c399b499a6fff999f59 | 7d3e2af259b0aafd9c5607db3b6ff06b02dd97e9 | refs/heads/master | 2020-04-07T22:51:49.854688 | 2018-11-27T06:28:11 | 2018-11-27T06:28:11 | 158,788,109 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,120 | py | from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.contrib.auth import login, logout, authenticate
from django.contrib.auth.forms import UserCreationForm
# Create your views here.
def logout_view(request):
    """Log the current user out, then redirect to the learning_logs index."""
    logout(request)
    index_url = reverse('learning_logs:index')
    return HttpResponseRedirect(index_url)
def register(request):
    """Register a new user; on success log them in and go to the index."""
    if request.method == 'POST':
        # Process the completed registration form.
        form = UserCreationForm(data=request.POST)
        if form.is_valid():
            new_user = form.save()
            # Authenticate with the just-chosen credentials, sign the user
            # in, and redirect to the home page.
            authenticated_user = authenticate(
                username=new_user.username,
                password=request.POST['password1'])
            login(request, authenticated_user)
            return HttpResponseRedirect(reverse('learning_logs:index'))
    else:
        # GET: show a blank registration form.
        form = UserCreationForm()
    context = {'form': form}
    return render(request, 'users/register.html', context)
| [
"zhoujincheng777@qq.com"
] | zhoujincheng777@qq.com |
ede3e83aba291ec2e06da80e4ed453ad83cac121 | 0597e1ecca428e94671483ee835ce97a36d61b00 | /main.py | d14230e63dbbac020cb1acbcf27c6c871bf9dc5e | [] | no_license | CharlieWelly/aggregate_data | b1b0d7ade0ed002bbca77ff231cbbdf99d924cd2 | 6f68e3f065d193c37b32954945e21211764a083a | refs/heads/main | 2023-08-21T00:31:12.828904 | 2021-11-01T06:33:35 | 2021-11-01T06:33:35 | 422,065,657 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 663 | py | import sys
import v04
import time
# Maps the command-line mode flag to the group name understood by v04.setUp.
MODES = {
    "-n1": "NHOM_1",
    "-n2": "NHOM_2",
    "-n3": "NHOM_3",
    "-n4": "NHOM_4",
}
def main():
    """Aggregate data from a folder or a single file.

    Usage: main.py (-folder|-file) (-n1|-n2|-n3|-n4) <path>

    Writes two outputs named after the selected group -- a developer CSV
    and a user-friendly Excel sheet -- then prints the elapsed wall time.
    """
    start = time.perf_counter()
    args = sys.argv[1:]
    if len(args) < 3:
        raise SystemExit("usage: main.py (-folder|-file) (-n1|-n2|-n3|-n4) <name>")
    source, mode, name = args[0], args[1], args[2]
    if mode not in MODES:
        raise SystemExit("unknown mode %r, expected one of %s" % (mode, sorted(MODES)))
    v04.setUp(MODES[mode])
    if source == "-folder":
        df = v04.loop_thruFolder(name)
    elif source == "-file":
        df = v04.loop_thruFile(name)
    else:
        # Previously an unknown flag crashed later with NameError on df.
        raise SystemExit("unknown source flag %r, expected -folder or -file" % source)
    df1 = v04.toUserFriendly(df)
    df.to_csv("./%s_developer_aggregated.csv" % MODES[mode])
    df1.to_excel("./%s_userfriendly_aggregated.xlsx" % MODES[mode])
    print("time elapse: %s" % (time.perf_counter() - start))


if __name__ == "__main__":
    main()
| [
"VNTCC@ptr.com"
] | VNTCC@ptr.com |
f14113651e9161aa124f82bfeea2f92d8cf29fda | a2ff11c460c135da3338ae2dc082fc6e972b4b20 | /bin/easy_install | aca03ec14af26b5d4818e1e15d00e43e172ef71e | [] | no_license | AnshuOnGit/SkLearnDecisionTree | 7c0cd411a847f830671a494d02839d37d210d282 | 28bdd91361f6d2fcaf85cf7e44840cab9464a1d5 | refs/heads/master | 2021-01-23T02:54:23.923941 | 2017-03-30T06:40:11 | 2017-03-30T06:40:11 | 86,032,095 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 270 | #!/home/anshu/smart_india_hackathon/dev_env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"anshu.kumar726@gmail.com"
] | anshu.kumar726@gmail.com | |
15eb1723482f5ae0d64e94905c26e5e5a78c0c5e | 52501f15893c8742ee15014a88789a35de5ccbdd | /src/profiles/api/serializers.py | 51c1f60e3a353dd76a11c958ceba4fcb95c3bc48 | [] | no_license | milosb793/django-gallery-api | f9b8aeb787f32a5a5fe2164fb78a7318e6fc30bc | 9b49cdfdcfbbc911cec23ed30ded30f6c4042522 | refs/heads/master | 2021-05-06T02:05:55.428397 | 2017-10-17T13:09:47 | 2017-10-17T13:09:47 | 114,498,504 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,970 | py | from django.conf import settings
from django.contrib.auth.models import User
from django.db import models
from django.forms import PasswordInput
from rest_framework import serializers
from rest_framework.fields import SerializerMethodField
from rest_framework.serializers import ModelSerializer
from src.profiles.models import Profile
from src.gallery.helpers import log
class UserSerializer(serializers.ModelSerializer):
    """Serializer converting a ``User`` model instance to/from JSON."""
    class Meta:
        # Which model this serializer is bound to.
        model = User
        # Fields that are (de)serialized.
        fields = [
            'first_name',
            'last_name',
            'username',
            'email',
            'password',
            'is_active',
        ]
        # The password may only be written, never read back.
        extra_kwargs = {"password": {"write_only": True}}
        read_only_fields = [
            'id', 'timestamp', 'updated', 'email'
        ]
    def create(self, validated_data):
        """Create a new ``User`` from validated POST data.

        The password is hashed with ``set_password`` rather than stored as
        plain text.
        :return: User
        """
        user = User.objects.create(
            first_name=validated_data['first_name'],
            last_name=validated_data['last_name'],
            username=validated_data['username'],
            email=validated_data['email'],
            is_active=validated_data['is_active'],
        )
        user.set_password(validated_data['password'])
        user.save()
        return user
    def update(self, instance, validated_data):
        """Update the existing ``User`` with validated PUT data.

        The previous implementation constructed ``User(**validated_data)``
        and saved it, which inserted a brand-new row (with an unhashed
        password) instead of updating *instance*; the fields are now copied
        onto the given instance and the password is hashed.
        :return: User
        """
        for field, value in validated_data.items():
            if field == 'password':
                # Hash instead of assigning the raw value.
                instance.set_password(value)
            else:
                setattr(instance, field, value)
        instance.save()
        return instance
class ProfileSerializer(ModelSerializer):
    """Serializer exposing the basic fields of a Profile instance."""
    class Meta:
        model = Profile
        fields = [
            'id',
            'user',
            'profile_picture',
            # 'timestamp',
            # 'updated'
        ]
    # Represent the profile_picture attribute as its URL only.
    # NOTE(review): DRF only calls get_<field> methods for fields declared
    # as SerializerMethodField, which profile_picture is not -- confirm
    # whether this is dead code.
    def get_profile_picture(self, obj):
        return obj.profile_picture.url
    def update(self, instance, validated_data):
        """Apply validated PUT data onto the existing Profile and its
        related User; a missing 'password' leaves the password unchanged.
        :return: Profile
        """
        user = instance.user
        user.first_name = validated_data.get("first_name", instance.user.first_name)
        user.last_name = validated_data.get("last_name", instance.user.last_name)
        user.username = validated_data.get("username", instance.user.username)
        user.email = validated_data.get("email", instance.user.email)
        user.is_active = validated_data.get("is_active", instance.user.is_active)
        password = validated_data.get("password", None)
        if password is not None:
            user.set_password(password)
        user.save()
        instance.user = user
        instance.profile_picture = validated_data.get("profile_picture", instance.profile_picture)
        instance.save()
        return instance
class CreateProfileSerializer(ModelSerializer):
    """Deserializes a JSON payload into a new Profile with a nested User."""
    user = UserSerializer()
    class Meta:
        model = Profile
        fields = [
            'user',
            'profile_picture',
        ]
    def create(self, validated_data):
        """Create the nested User (hashing the password) and its Profile.

        NOTE(review): ``User.objects.create(**validated_data['user'])``
        briefly persists the raw password before ``set_password`` overwrites
        it -- confirm whether ``create_user()`` was intended.
        :return: Profile
        """
        usr_data = validated_data['user']
        user = User.objects.create(**validated_data['user'])
        user.set_password(usr_data['password'])
        user.save()
        profile = Profile.objects.create(
            user=user,
            profile_picture=validated_data['profile_picture']
        )
        profile.save()
        return profile
class ProfileUpdateSerializer(ModelSerializer):
    """Serializer used when updating a Profile; returns all existing fields
    and allows partial update of the nested user."""
    # Allow partial updates of the nested user object.
    user = UserSerializer(partial=True)
    class Meta:
        model = Profile
        fields = [
            'id',
            "user",
            "profile_picture"
        ]
        read_only_fields = [
            'id', 'timestamp', 'updated', 'email'
        ]
"milosb793@gmail.com"
] | milosb793@gmail.com |
2441cabae99ae34d4d6dd1b980b760e07462a3ee | bee9a140f51f85c612f4e869747aae3d155188c5 | /src/main/python/systemds/operator/algorithm/builtin/l2svm.py | cd7db9e4dc52e3b06e2141612f7bb57105b73816 | [
"Apache-2.0"
] | permissive | clarapueyoballarin/systemds | cb64a494afd14da142269c788c76edb236d8b755 | a68a71ddb089ebdd52e8f316a03bda281f4532ba | refs/heads/master | 2023-05-22T16:39:04.409220 | 2021-06-17T16:14:15 | 2021-06-17T16:14:15 | 341,305,828 | 0 | 1 | Apache-2.0 | 2021-02-24T22:51:31 | 2021-02-22T18:59:49 | Java | UTF-8 | Python | false | false | 2,344 | py | # -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
# Autogenerated By : src/main/python/generator/generator.py
# Autogenerated From : scripts/builtin/l2svm.dml
from typing import Dict, Iterable
from systemds.operator import OperationNode, Matrix
from systemds.script_building.dag import OutputType
from systemds.utils.consts import VALID_INPUT_TYPES
def l2svm(X: OperationNode, Y: OperationNode, **kwargs: Dict[str, VALID_INPUT_TYPES]) -> Matrix:
    """Build the DML 'l2svm' call node for an L2-regularized SVM.

    :param X: matrix X of feature vectors
    :param Y: matrix Y of class labels (must be a single column)
    :param intercept: when TRUE a constant bias column is added to X
    :param epsilon: early-termination tolerance on the relative reduction
        of the objective function value
    :param lambda: L2 regularization parameter
    :param maxIterations: maximum number of conjugate gradient iterations
    :param maxii: -
    :param verbose: set to true to print loss updates
    :param columnId: optional column id added to the print statement,
        useful when L2SVM is driven from MSVM
    :return: 'OperationNode' containing the model matrix
    """
    for operand in (X, Y):
        operand._check_matrix_op()
    named_inputs = {'X': X, 'Y': Y, **kwargs}
    return Matrix(X.sds_context, 'l2svm', named_input_nodes=named_inputs)
| [
"baunsgaard@tugraz.at"
] | baunsgaard@tugraz.at |
05a8c695d31a36031017aac34163e8814b4e9df0 | ed3ad9fa628f87bba681bafe7c981890fa7355f6 | /Homework3/python-challenge/PyBank/main.py/Bank_script.py | 20f7dbe148015e01a9871d6d851dd8345d4a4abd | [] | no_license | mneustadts/python-challenge | 60c562d2db5e82a29ef287b5145e1675ddf049ba | c9728de00367dd09259665221fb107bf7934c05b | refs/heads/master | 2022-11-13T02:54:12.763721 | 2020-07-10T02:58:17 | 2020-07-10T02:58:17 | 277,185,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,026 | py | import os
import csv
csvpath = os.path.join('..','Resources','budget_data.csv')
with open(csvpath) as csvfile:
csvreader = csv.reader(csvfile, delimiter=',')
print(csvreader)
csv_header = next(csvreader)
month = []
pnl = []
pnl_change = []
monthly_change = []
print(f"Header: {csv_header}")
for row in csvreader:
month.append(row[0])
pnl.append(row[1])
pnl_int = map(int,pnl)
total_pnl = (sum(pnl_int))
i = 0
for i in range(len(pnl) - 1):
profit_loss = int(pnl[i+1]) - int(pnl[i])
pnl_change.append(profit_loss)
Total = sum(pnl_change)
monthly_change = Total / len(pnl_change)
# Months of greatest change
greatest_increase = max(pnl_change)
m = pnl_change.index(greatest_increase)
month_increase = month[m+1]
greatest_decrease = min(pnl_change)
n = pnl_change.index(greatest_decrease)
month_decrease = month[n+1]
print(f'Financial Analysis'+'\n')
print(f'----------------------------'+'\n')
print(f"Total number of months: ${str(len(month))}")
print(f"Total: ${str(total_pnl)}")
print(f"Average monthly change in pnl_changes : ${str(round(monthly_change,2))}")
print(f"Greatest Increase in Profits: {month_increase} (${greatest_increase})")
print(f"Greatest Decrease in Profits: {month_decrease} (${greatest_decrease})")
bank_file = os.path.join("..","analysis", "bank_data.txt")
with open(bank_file, 'w') as out_file:
out_file.write("Financial Analysis\n")
out_file.write("----------------------------\n")
out_file.write("Total number of months: {str(len(month)}\n")
out_file.write(f"Total: ${str(total_pnl)}\n")
out_file.write(f"Average Change: ${str(monthly_change)}\n")
out_file.write(f"Greatest Increase in Profits: {month_increase} (${greatest_increase})\n")
out_file.write(f"Greatest Decrease in Losses: {month_decrease} (${greatest_decrease})\n") | [
"noreply@github.com"
] | noreply@github.com |
7df275e453924594ecc556050cc35c4bbab9dfc8 | 4276339542799a7b9bd0c254097d7fe4aa419363 | /home/check_images.py | e65af81cd04860131ad4a7debf20eb1739a53c72 | [] | no_license | Olayiwola7/Artificial-Intelligence-with-Python-Project-1 | 1d4b43b3198b434f713fe6f94189462e062a00d3 | 1e8161e108d701373d71ef9e63788ad5779904a5 | refs/heads/main | 2023-08-03T13:28:50.346309 | 2021-10-01T06:11:45 | 2021-10-01T06:11:45 | 412,344,119 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,374 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# */AIPND-revision/intropyproject-classify-pet-images/check_images.py
#
# TODO 0: Add your information below for Programmer & Date Created.
# PROGRAMMER:Olayiwola Arowolo
# DATE CREATED: 3/14/2021
# REVISED DATE: 20/3/2021
# PURPOSE: Classifies pet images using a pretrained CNN model, compares these
# classifications to the true identity of the pets in the images, and
# summarizes how well the CNN performed on the image classification task.
# Note that the true identity of the pet (or object) in the image is
# indicated by the filename of the image. Therefore, your program must
# first extract the pet image label from the filename before
# classifying the images using the pretrained CNN model. With this
# program we will be comparing the performance of 3 different CNN model
# architectures to determine which provides the 'best' classification.
#
# Use argparse Expected Call with <> indicating expected user input:
# python check_images.py --dir <directory with images> --arch <model>
# --dogfile <file that contains dognames>
# Example call:
# python check_images.py --dir pet_images/ --arch vgg --dogfile dognames.txt
##
# Imports python modules
from time import time, sleep
# Imports print functions that check the lab
from print_functions_for_lab_checks import *
# Imports functions created for this program
from get_input_args import get_input_args
from get_pet_labels import get_pet_labels
from classify_images import classify_images
from adjust_results4_isadog import adjust_results4_isadog
from calculates_results_stats import calculates_results_stats
from print_results import print_results
# Main program function defined below
def main():
    """Run the full pet-image classification pipeline and report timing.

    Pipeline: parse CLI args, derive pet labels from the image filenames,
    classify the images with the chosen pretrained CNN architecture, flag
    which labels are dog breeds, compute summary statistics, and print the
    results. Each stage is followed by its lab-check helper.
    """
    # Collect start time so total program runtime can be measured.
    start_time = time()
    # (Removed the unused `sleep_time = (100)` variable.)

    # Retrieve the 3 command line arguments: --dir, --arch, --dogfile.
    in_arg = get_input_args()
    check_command_line_arguments(in_arg)

    # Build the results dictionary of pet-image labels keyed by filename.
    results = get_pet_labels(in_arg.dir)
    check_creating_pet_image_labels(results)

    # Add classifier labels and label-match flags to the results dictionary.
    classify_images(in_arg.dir, results, in_arg.arch)
    check_classifying_images(results)

    # Mark whether each pet label / classifier label names a dog breed,
    # using the dog-names file supplied on the command line.
    adjust_results4_isadog(results, in_arg.dogfile)
    check_classifying_labels_as_dogs(results)

    # Summarize counts and percentages of correct classifications.
    results_stats = calculates_results_stats(results)
    check_calculating_results(results, results_stats)

    # Print the summary plus (per the True flags) incorrectly classified
    # dogs and incorrectly classified breeds.
    print_results(results, results_stats, in_arg.arch, True, True)

    # Compute and print the overall runtime in hh:mm:ss format.
    end_time = time()
    tot_time = end_time - start_time
    print("\n** Total Elapsed Runtime:",
          str(int((tot_time/3600)))+":"+str(int((tot_time%3600)/60))+":"
          +str(int((tot_time%3600)%60)) )


# Call to main function to run the program
if __name__ == "__main__":
    main()
| [
"45934230+Olayiwola7@users.noreply.github.com"
] | 45934230+Olayiwola7@users.noreply.github.com |
4420d1af4f7b9d796eff20fa1340da4faba48719 | 970d419db064dd5bd079cdbf4482338ae9db7967 | /lenovo/thinkcloud/swagger/views.py | e3c14c816c058db136a358725952301915607a50 | [
"CC-BY-4.0",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] | permissive | onap/multicloud-openstack | 766d2c5646d5c2fd4e3beb3a35d169ad1e496cf1 | d0e9fd36a6fefe632a0d0622585af49be0594ce6 | refs/heads/master | 2023-06-11T22:04:42.824417 | 2022-03-22T07:54:06 | 2022-03-22T08:05:35 | 115,764,883 | 4 | 3 | NOASSERTION | 2021-06-29T19:07:08 | 2017-12-30T01:05:05 | Python | UTF-8 | Python | false | false | 2,049 | py | # Copyright (c) 2017-2018 Lenovo Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from rest_framework.response import Response
from newton_base.swagger import views as newton_json_view
logger = logging.getLogger(__name__)


class SwaggerJsonView(newton_json_view.SwaggerJsonView):
    """Swagger spec endpoint for the thinkcloud v0 API."""

    def get(self, request):
        """Reuse the newton-generated spec, rebasing it for thinkcloud v0."""
        resp = super(SwaggerJsonView, self).get(request)
        spec = resp.data if resp else None
        if not spec:
            return Response(data={'error': 'internal error'}, status=500)
        spec["basePath"] = "/api/multicloud-thinkcloud/v0/"
        spec["info"]["title"] = "Service NBI of MultiCloud plugin for OpenStack Thinkcloud"
        return Response(data=spec, status=200)
class APIv1SwaggerJsonView(newton_json_view.SwaggerJsonView):
    """Swagger spec endpoint for the thinkcloud v1 API."""

    def get(self, request):
        """Reuse the newton-generated spec, rebasing it for thinkcloud v1."""
        base_resp = super(APIv1SwaggerJsonView, self).get(request)
        spec = base_resp.data if base_resp else None
        if not spec:
            return Response(data={'error': 'internal error'}, status=500)
        spec["basePath"] = "/api/multicloud-thinkcloud/v1/"
        spec["info"]["title"] = "Service NBI v1 of MultiCloud plugin for Thinkcloud"
        return Response(data=spec, status=200)
| [
"310397125@qq.com"
] | 310397125@qq.com |
bffd4513031c134591a90b558e1174567f6690bc | 3155c38585c5d1cf27c4d8065cb5821f5b980983 | /package/awesome_panel/database/settings.py | 0c78182ac946b4d7c64acbe8293eb5400c0aa261 | [
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown"
] | permissive | jlstevens/awesome-panel | 460c86ac886a86fa1a3f6ec79b6186f292ca10bc | c67b0f4529a3ce6a8517648f49fef8358e2e2c8b | refs/heads/master | 2020-11-25T03:11:10.018557 | 2019-12-16T20:57:07 | 2019-12-16T20:57:07 | 228,474,317 | 0 | 0 | Apache-2.0 | 2019-12-16T20:55:56 | 2019-12-16T20:55:55 | null | UTF-8 | Python | false | false | 459 | py | """In this module we provide a list of settings"""
GITHUB_URL = "https://github.com/MarcSkovMadsen/awesome-panel/"
GITHUB_BLOB_MASTER_URL = "https://github.com/MarcSkovMadsen/awesome-panel/blob/master/"
GITHUB_RAW_URL = "https://raw.githubusercontent.com/MarcSkovMadsen/awesome-panel/master/"
GITHUB_THUMBNAILS_URL = (
"https://github.com/MarcSkovMadsen/awesome-panel/blob/master/assets/images/thumbnails/"
)
THUMBNAILS_ROOT = "assets/images/thumbnails/"
| [
"MASMA@orsted.dk"
] | MASMA@orsted.dk |
341f3d6642671fb82aeba75ca4bc26459d43bd1f | f1593773b199c435114b316348b81126aa212cd6 | /web_flask/6-number_odd_or_even.py | 55e21f462a589f3a87e7aec051ee81b1abdeeef8 | [] | no_license | ledbagholberton/AirBnB_clone_v2 | 0f0f0889ed7fac9767e45b7fc17eafc388469738 | 8fefc58e76184fcfe86ec16dde1791fd8ff4777f | refs/heads/master | 2020-07-07T02:20:17.093914 | 2019-09-10T06:13:44 | 2019-09-10T06:13:44 | 203,214,786 | 0 | 0 | null | 2019-08-19T17:01:24 | 2019-08-19T17:01:24 | null | UTF-8 | Python | false | false | 1,942 | py | #!/usr/bin/python3
""" start a Flask Web application
"""
from flask import Flask, render_template
app = Flask(__name__)


@app.route('/', strict_slashes=False)
def hello_hbnb():
    """ Display "Hello HBNB!" at the root route """
    return 'Hello HBNB!'


@app.route('/hbnb', strict_slashes=False)
def only_hbnb():
    """ Display "HBNB" """
    return 'HBNB'


@app.route('/c/<text>', strict_slashes=False)
def cissome(text):
    """ Display "C <text>", replacing underscores with spaces """
    return("C {}".format(text.replace("_", " ")))


@app.route('/python/', strict_slashes=False)
@app.route('/python', strict_slashes=False)
def pythonalone():
    """ Display the default "Python is cool" when no text is supplied """
    return("Python is cool")


@app.route('/python/<text>', strict_slashes=False)
def pythonissome(text):
    """ Display "Python <text>", replacing underscores with spaces """
    return("Python {}".format(text.replace("_", " ")))
@app.route('/number/<nummer>', strict_slashes=False)
def numberisint(nummer):
""" Print number if it s a number """
if nummer.isdigit():
return("{} is a number".format(nummer))
@app.route('/number_template/<nummer>', strict_slashes=False)
def number_template(nummer):
""" Print a template with a variable """
if nummer.isdigit():
return render_template('5-number.html', name=nummer)
else:
return render_template('no_found.html'), 404
@app.route('/number_odd_or_even/<nummer>', strict_slashes=False)
def number_even(nummer):
""" Print a template witheven or odd """
if nummer.isdigit():
if (int(nummer) % 2) == 0:
return render_template('6-number_odd_or_even.html',
name=nummer, kind="even")
else:
return render_template('6-number_odd_or_even.html',
name=nummer, kind="odd")
else:
return render_template('no_found.html'), 404
if __name__ == "__main__":
app.run(host='0.0.0.0', port=5000)
| [
"789@holbertonschool.com"
] | 789@holbertonschool.com |
216454819387e5322cad158675193d6819924fe1 | ac2cd550f9e4f60e38176b76497d9016f346a71e | /lesson_7.py | bbaaa1d298f9676188cbb6203fe3f4f0664d6bea | [] | no_license | AndreyevichMak/QA-Automation | f4dafc3a9ba255a527ffc8bb022b4df4776f186f | 862523b88ed9de957b1c07617246ac83b83af6b6 | refs/heads/master | 2022-12-05T01:18:09.291782 | 2020-09-01T12:03:15 | 2020-09-01T12:03:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 210 | py | def test_7(app):
app.login_opencart()
app.click_on_mp3player()
app.click_on_test()
#assert 'There are no products to list in this category.' in driver.find_element_by_css_selector('#content ').text | [
"makarenkoandrew@mail.ru"
] | makarenkoandrew@mail.ru |
25dee2f580534fe5863bb4fc7a0cefb45fdbbc30 | d15b3857b3b5268a9652205364d2636e98803538 | /jingjingc/05_sklearn_and_ml/hall_of_fame_classifier.py | 29e1719ffcec1053e90fcacd7190c00345a2f871 | [] | no_license | bbalin12/DAT5_BOS_students | 1308327e6587d080a9fd1aaffb432f2c2ecd12bf | c33adef5943e026e7aa9b918e20a1c31df294899 | refs/heads/master | 2020-05-30T20:11:30.771193 | 2015-04-29T22:52:41 | 2015-04-29T22:52:41 | 29,398,534 | 6 | 23 | null | 2015-04-29T22:52:44 | 2015-01-17T17:09:00 | Python | UTF-8 | Python | false | false | 10,553 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Feb 14 12:47:56 2015
@author: jchen
"""
# Build a preditive model that predicts whether a player was inducted to the Baseball Hall of Fame
# before 2000 using their batting, pitching, and fielding results- not the number of votes they received.
# Please make sure to use K-fold cross validaiton and grid search to find your best model.
from __future__ import division
# import packages
import numpy as np
import sqlite3
import pandas as pd
from sklearn.neighbors import KNeighborsClassifier
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
import matplotlib.pyplot as plt
# NOTE(review): this script uses Python 2 syntax (print statements) and the
# long-deprecated pandas `.ix` indexer; it will not run under Python 3 /
# modern pandas without porting.

# Fraction of rows held out as the test set.
CROSS_VALIDATION_AMOUNT=.2

# connect to SQLite database
conn = sqlite3.connect('/Users/jchen/Documents/SQLite/lahman2013.sqlite')

# SQL query object
# pull in batting, pitching, and fielding career totals (plus derived OBP,
# SLG, WHIP) for players on pre-2000 Hall of Fame ballots
sql = '''
select m.nameGiven as player_name,
h.inducted,
sum(b.AB) as at_bats,
sum(b.R) as runs,
sum(b.H) as hits,
sum(b.RBI) as rbi,
sum(p.GS) as p_games_started,
sum(p.CG) as p_complete_games,
sum(p.SHO) as shutouts,
sum(p.W) as p_wins,
sum(p.IPOuts) as outs_pitched,
sum(f.PO) as putouts,
sum(f.A) as assists,
sum(f.E) as errors,
(b.H+b.BB+b.HBP)*1.0/(b.AB+b.BB+b.SF+b.HBP) as OBP,
(b.H+b."2B"+(b."3B"*2)+(b.HR*3))*1.0/b.AB as SLG,
(p.W + p.BB)/(p.IPOuts/3) as WHIP
from HallOfFame h
left join Batting b on h.playerID=b.playerID
left join Pitching p on h.playerID=p.playerID
left join Fielding f on h.playerID=f.playerID
left join Master m on h.playerID=m.playerID
where h.yearID < 2000
and h.category='Player'
group by nameGiven, inducted
order by player_name;
'''

# read into data frame
df = pd.read_sql(sql, conn)
# close out connection
conn.close()

# count up null values in each columns (exploratory only; result discarded)
df.isnull().sum() # lots of null values for pitching stats
# drop all-null rows
df.dropna(how='all', inplace=True)

# split up batters and pitchers, since not likely to get into hall of fame on performance of both batting and pitching
# use fielding stats for both
# designate relevant batting and pitching stats
batting_vars=['player_name', 'inducted', 'at_bats', 'runs', 'hits', 'rbi', 'putouts', 'assists', 'errors', 'OBP', 'SLG']
pitching_vars=['player_name', 'inducted', 'p_games_started', 'p_complete_games', 'shutouts', 'p_wins', 'outs_pitched', 'putouts', 'assists', 'errors', 'WHIP']
df_batting = df[pd.notnull(df['at_bats'])][batting_vars]
df_pitching = df[pd.notnull(df['p_games_started'])][pitching_vars]

# check new data frames (exploratory only; results discarded)
df_batting.describe()
df_pitching.describe()

# fill missing values with column means
df_batting.fillna(df_batting.mean(), inplace=True)
df_pitching.fillna(df_pitching.mean(), inplace=True)

# split out response and explanatory variables
batting_response_series = df_batting.inducted
batting_explanatory_variables = df_batting[batting_vars[2:]] # all other variables
pitching_response_series = df_pitching.inducted
pitching_explanatory_variables = df_pitching[pitching_vars[2:]]

# Let's look at batting first
# break up data into test and train (random 20% holdout)
batting_holdout_num = round(len(df_batting.index) * CROSS_VALIDATION_AMOUNT, 0)
batting_test_indices = np.random.choice(df_batting.index, batting_holdout_num, replace = False)
batting_train_indices = df_batting.index[~df_batting.index.isin(batting_test_indices)]
batting_response_train = batting_response_series.ix[batting_train_indices,]
batting_explanatory_train = batting_explanatory_variables.ix[batting_train_indices,]
batting_response_test = batting_response_series.ix[batting_test_indices,]
batting_explanatory_test = batting_explanatory_variables.ix[batting_test_indices,]

# instantiate KNN classifier, with p=2 for Euclidian distance
batting_knn = KNeighborsClassifier(n_neighbors=3, p = 2)
batting_knn.fit(batting_explanatory_train, batting_response_train)
batting_predicted_response = batting_knn.predict(batting_explanatory_test)

# calculating accuracy as the share of correct predictions on the holdout
number_correct = len(batting_response_test[batting_response_test == batting_predicted_response])
total_in_test_set = len(batting_response_test)
accuracy = number_correct / total_in_test_set
print accuracy*100
# not a stellar accuracy: 81.5%
# repeat the random 20% train/test split for pitchers
pitching_holdout_num = round(len(df_pitching.index) * CROSS_VALIDATION_AMOUNT, 0)
pitching_test_indices = np.random.choice(df_pitching.index, pitching_holdout_num, replace = False)
# Bug fix: the exclusion mask previously used batting_test_indices, which
# leaked pitching test rows into the pitching training set (and excluded
# unrelated batting rows instead).
pitching_train_indices = df_pitching.index[~df_pitching.index.isin(pitching_test_indices)]
pitching_response_train = pitching_response_series.ix[pitching_train_indices,]
pitching_explanatory_train = pitching_explanatory_variables.ix[pitching_train_indices,]
pitching_response_test = pitching_response_series.ix[pitching_test_indices,]
pitching_explanatory_test = pitching_explanatory_variables.ix[pitching_test_indices,]
# instantiate KNN classifier, with p=2 for Euclidian distance
pitching_knn = KNeighborsClassifier(n_neighbors=3, p = 2)
pitching_knn.fit(pitching_explanatory_train, pitching_response_train)
pitching_predicted_response = pitching_knn.predict(pitching_explanatory_test)

# holdout accuracy for the pitching model
number_correct = len(pitching_response_test[pitching_response_test == pitching_predicted_response])
total_in_test_set = len(pitching_response_test)
accuracy = number_correct / total_in_test_set
print accuracy*100
# roughly the same, at 79%

###########################
# K-fold CV
###########################
# reinstantiate batting classifier
batting_knn = KNeighborsClassifier(n_neighbors=3, p = 2)
# compute 10-fold cross-validated accuracy scores
batting_scores = cross_val_score(batting_knn, batting_explanatory_variables, batting_response_series, cv=10, scoring='accuracy')
print batting_scores
batting_mean_accuracy = np.mean(batting_scores)
print batting_mean_accuracy*100

# pitching
pitching_knn = KNeighborsClassifier(n_neighbors=3, p = 2)
# compute 10-fold cross-validated accuracy scores
pitching_scores = cross_val_score(pitching_knn, pitching_explanatory_variables, pitching_response_series, cv=10, scoring='accuracy')
print pitching_scores
pitching_mean_accuracy = np.mean(pitching_scores)
print pitching_mean_accuracy*100
# this time pitching is lower

############################
# Grid search for optimal k
############################
# instatiate classifier for batting; search odd k from 1 to 59
batting_knn = KNeighborsClassifier(p = 2)
batting_k_range = range(1, 60, 2)
batting_param_grid = dict(n_neighbors=batting_k_range)
batting_grid = GridSearchCV(batting_knn, batting_param_grid, cv=10, scoring='accuracy')
batting_grid.fit(batting_explanatory_variables, batting_response_series)

# get optimal estimator for batting; plot mean CV accuracy vs. k
batting_grid_scores = batting_grid.grid_scores_
batting_grid_mean_scores = [result[1] for result in batting_grid_scores]
plt.figure()
plt.plot(batting_k_range, batting_grid_mean_scores)
batting_best_oob_score = batting_grid.best_score_
print batting_grid.best_params_ # best k-value at 27 - that's pretty high. might be overfitting here
print batting_best_oob_score
batting_knn_opt = batting_grid.best_estimator_

# repeat grid search for pitching
pitching_knn = KNeighborsClassifier(p = 2)
pitching_k_range = range(1, 60, 2)
pitching_param_grid = dict(n_neighbors=pitching_k_range)
pitching_grid = GridSearchCV(pitching_knn, pitching_param_grid, cv=10, scoring='accuracy')
pitching_grid.fit(pitching_explanatory_variables, pitching_response_series)

# get optimal estimator for pitching; plot mean CV accuracy vs. k
pitching_grid_scores = pitching_grid.grid_scores_
pitching_grid_mean_scores = [result[1] for result in pitching_grid_scores]
plt.figure()
plt.plot(pitching_k_range, pitching_grid_mean_scores)
pitching_best_oob_score = pitching_grid.best_score_
print pitching_grid.best_params_ # best k-value at 23
print pitching_best_oob_score
pitching_knn_opt = pitching_grid.best_estimator_

##############################
# Test model on post-2000 data (the query below selects yearID >= 2000)
##############################
conn = sqlite3.connect('/Users/jchen/Documents/SQLite/lahman2013.sqlite')
# same career-stats query as above, restricted to ballots from 2000 onward
sql = '''
select m.nameGiven as player_name,
h.inducted,
sum(b.AB) as at_bats,
sum(b.R) as runs,
sum(b.H) as hits,
sum(b.RBI) as rbi,
sum(p.GS) as p_games_started,
sum(p.CG) as p_complete_games,
sum(p.SHO) as shutouts,
sum(p.W) as p_wins,
sum(p.IPOuts) as outs_pitched,
sum(f.PO) as putouts,
sum(f.A) as assists,
sum(f.E) as errors,
(b.H+b.BB+b.HBP)*1.0/(b.AB+b.BB+b.SF+b.HBP) as OBP,
(b.H+b."2B"+(b."3B"*2)+(b.HR*3))*1.0/b.AB as SLG,
(p.W + p.BB)/(p.IPOuts/3) as WHIP
from HallOfFame h
left join Batting b on h.playerID=b.playerID
left join Pitching p on h.playerID=p.playerID
left join Fielding f on h.playerID=f.playerID
left join Master m on h.playerID=m.playerID
where h.yearID >= 2000
and h.category='Player'
group by nameGiven, inducted
order by player_name;
'''
df = pd.read_sql(sql, conn)
conn.close()

# same cleaning steps as on the training era
df_batting = df[pd.notnull(df['at_bats'])][batting_vars]
df_pitching = df[pd.notnull(df['p_games_started'])][pitching_vars]

# fill missing values with mean
df_batting.fillna(df_batting.mean(), inplace=True)
df_pitching.fillna(df_pitching.mean(), inplace=True)

# set response and explanatory data
batting_response_series = df_batting.inducted
batting_explanatory_variables = df_batting[batting_vars[2:]] # all other variables
pitching_response_series = df_pitching.inducted
pitching_explanatory_variables = df_pitching[pitching_vars[2:]]

# predict batting with the grid-search-selected estimator
batting_opt_knn_preds = batting_knn_opt.predict(batting_explanatory_variables)
batting_number_correct = len(batting_response_series[batting_response_series == batting_opt_knn_preds])
batting_total_in_test_set = len(batting_response_series)
batting_accuracy = batting_number_correct / batting_total_in_test_set

## compare actual accuracy with accuracy anticipated by grid search.
print batting_accuracy* 100
print batting_best_oob_score* 100
# interestingly enough, higher accuracy on new data

# do the same with pitching
pitching_opt_knn_preds = pitching_knn_opt.predict(pitching_explanatory_variables)
pitching_number_correct = len(pitching_response_series[pitching_response_series == pitching_opt_knn_preds])
pitching_total_in_test_set = len(pitching_response_series)
pitching_accuracy = pitching_number_correct / pitching_total_in_test_set
print pitching_accuracy* 100
print pitching_best_oob_score * 100
# way higher accuracy on out of sample data
# perhaps best taken with a grain of salt
# could be that the metrics used are more influential in determining hall of fame chances in modern times compared with previous decades
# (or could be chance)
"jchen027@gmail.com"
] | jchen027@gmail.com |
2bb5f48e70aa47b02fa50d27b781476cfbf204c7 | 91bd01ea54744c308c95d3615d7db12e736fb18b | /dinbot/core/utils.py | c036cf491f328a53c129de0cc6dc3ec39425d0f6 | [] | no_license | kangfend/dinbot | 5e1ad14ac34604fd2883db4b7abed0b93b9c1886 | 0ace25dc694bc171fd72faf9ba196b04b609bd29 | refs/heads/master | 2021-01-01T18:11:52.668105 | 2014-10-22T14:50:11 | 2014-10-22T14:50:11 | 25,586,020 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,260 | py | from __future__ import unicode_literals
import copy
class Choices(object):
    """
    A class to encapsulate handy functionality for lists of choices
    for a Django model field.

    Each argument to ``Choices`` is a choice, represented as either a
    string, a two-tuple, or a three-tuple.

    If a single string is provided, that string is used as the
    database representation of the choice as well as the
    human-readable presentation.

    If a two-tuple is provided, the first item is used as the database
    representation and the second the human-readable presentation.

    If a triple is provided, the first item is the database
    representation, the second a valid Python identifier that can be
    used as a readable label in code, and the third the human-readable
    presentation. This is most useful when the database representation
    must sacrifice readability for some reason: to achieve a specific
    ordering, to use an integer rather than a character field, etc.

    Regardless of what representation of each choice is originally
    given, when iterated over or indexed into, a ``Choices`` object
    behaves as the standard Django choices list of two-tuples.

    If the triple form is used, the Python identifier names can be
    accessed as attributes on the ``Choices`` object, returning the
    database representation. (If the single or two-tuple forms are
    used and the database representation happens to be a valid Python
    identifier, the database representation itself is available as an
    attribute on the ``Choices`` object, returning itself.)

    Option groups can also be used with ``Choices``; in that case each
    argument is a tuple consisting of the option group name and a list
    of options, where each option in the list is either a string, a
    two-tuple, or a triple as outlined above.
    """

    def __init__(self, *choices):
        """Expand ``*choices`` into the internal lookup structures."""
        # list of choices expanded to triples - can include optgroups
        self._triples = []
        # list of choices as (db, human-readable) - can include optgroups
        self._doubles = []
        # dictionary mapping db representation to human-readable
        self._display_map = {}
        # dictionary mapping Python identifier to db representation
        self._identifier_map = {}
        # set of db representations
        self._db_values = set()
        self._process(choices)

    def _store(self, triple, triple_collector, double_collector):
        """Record one fully-expanded (db, identifier, display) triple."""
        self._identifier_map[triple[1]] = triple[0]
        self._display_map[triple[0]] = triple[2]
        self._db_values.add(triple[0])
        triple_collector.append(triple)
        double_collector.append((triple[0], triple[2]))

    def _process(self, choices, triple_collector=None, double_collector=None):
        """Normalize raw choice specs (strings, pairs, triples, optgroups).

        ``triple_collector``/``double_collector`` default to the top-level
        lists; option groups recurse with their own sub-lists.
        """
        if triple_collector is None:
            triple_collector = self._triples
        if double_collector is None:
            double_collector = self._doubles

        # PEP 8 (E731): use a nested def rather than assigning a lambda.
        def store(triple):
            self._store(triple, triple_collector, double_collector)

        for choice in choices:
            if isinstance(choice, (list, tuple)):
                if len(choice) == 3:
                    store(choice)
                elif len(choice) == 2:
                    if isinstance(choice[1], (list, tuple)):
                        # option group: recurse into the sub-choices
                        group_name = choice[0]
                        subchoices = choice[1]
                        tc = []
                        triple_collector.append((group_name, tc))
                        dc = []
                        double_collector.append((group_name, dc))
                        self._process(subchoices, tc, dc)
                    else:
                        # two-tuple: db value doubles as the identifier
                        store((choice[0], choice[0], choice[1]))
                else:
                    raise ValueError(
                        "Choices can't take a list of length %s, only 2 or 3"
                        % len(choice)
                    )
            else:
                # bare string: db value, identifier, and display all match
                store((choice, choice, choice))

    def __len__(self):
        return len(self._doubles)

    def __iter__(self):
        # Iterating yields standard Django (db value, display) two-tuples.
        return iter(self._doubles)

    def __getattr__(self, attname):
        try:
            return self._identifier_map[attname]
        except KeyError:
            raise AttributeError(attname)

    def __getitem__(self, key):
        # Index by db value; returns the human-readable display.
        return self._display_map[key]

    def __add__(self, other):
        if isinstance(other, self.__class__):
            other = other._triples
        else:
            other = list(other)
        return Choices(*(self._triples + other))

    def __radd__(self, other):
        # radd is never called for matching types, so we don't check here
        other = list(other)
        return Choices(*(other + self._triples))

    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return self._triples == other._triples
        return False

    def __repr__(self):
        return '%s(%s)' % (
            self.__class__.__name__,
            ', '.join(("%s" % repr(i) for i in self._triples))
        )

    def __contains__(self, item):
        # Membership tests against the db values, not the display strings.
        return item in self._db_values

    def __deepcopy__(self, memo):
        return self.__class__(*copy.deepcopy(self._triples, memo))
| [
"kangfend@gmail.com"
] | kangfend@gmail.com |
d897c122c6f3abcf6658e12b25dbc23863ee63f2 | aef3292c8f42b72fccd413bf3a9c9f8b6cf2afc8 | /torchvex/meaningful_perturbation/__init__.py | 8a739372cf4cd886ce844717edd3c2e3e119dee7 | [
"MIT"
] | permissive | vlue-c/Visual-Explanation-Methods-PyTorch | 89bf8029eb5e4c9f1a218900383d444c585ada94 | e3eb592fe3aa0c719ef3e5d0f75238a2a3e84c3d | refs/heads/main | 2023-01-20T12:55:43.175054 | 2020-11-27T08:02:34 | 2020-11-27T08:02:34 | 311,287,634 | 8 | 3 | null | null | null | null | UTF-8 | Python | false | false | 75 | py | from .mask import MeaningfulPerturbation
from .mask import gaussian_filter
| [
"hslee0390@gmail.com"
] | hslee0390@gmail.com |
5766dd55a3b6fec054f0dbaca2ac130393cbe232 | 53f0f5468dfad0a2d456fd08569262ace19f23b3 | /src/profiles_project/profiles_api/migrations/0001_initial.py | b33ffbc20779fcb61eaf29cd6c76632f5fb53f07 | [] | no_license | flyingamy/profiles-rest-api | 3df9e61a589d639f40dbc4fd5a3aeb518d8a52a5 | 500a011dd4b343a1768ffec72c6b0bdb0a6b5253 | refs/heads/master | 2021-05-09T14:49:45.346273 | 2018-01-26T18:49:06 | 2018-01-26T18:49:06 | 119,072,167 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,777 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-01-25 14:32
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the custom ``UserProfile`` user model."""

    initial = True

    dependencies = [
        # Must run after Django's auth migrations so Group/Permission exist.
        ('auth', '0008_alter_user_username_max_length'),
    ]

    operations = [
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                # Email is the login identifier, hence unique.
                ('email', models.EmailField(max_length=255, unique=True)),
                ('name', models.CharField(max_length=255)),
                ('is_active', models.BooleanField(default=True)),
                ('is_staff', models.BooleanField(default=False)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| [
"zhaoxuemei86@gmail.com"
] | zhaoxuemei86@gmail.com |
c1dd70fbc1dce2200deb95adda63bb4f34f84596 | 0cfe86a9678f46c58062d78240f87d9bd458deed | /web-development/engineapp/main.py | d28d93eacd11d8fe07ffd7e7b8f944359696a6b4 | [] | no_license | SIMMI2KUMARI/deep_ml_ALL_merged | e8434a64efceaf00144e8ef83c40c0cb95dcbeac | 813fccd2dcabce49a57ec1e4ea03a2499c74cb97 | refs/heads/main | 2023-02-20T13:57:19.241857 | 2021-01-26T21:27:56 | 2021-01-26T21:27:56 | 333,220,209 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,050 | py |
import os
import webapp2
import jinja2
import hashlib
import re
from string import letters
import urllib2
from xml.dom import minidom
from google.appengine.ext import db
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir),
autoescape = True)
# def hash_str(s):
# return hashlib.md5(s).hexdigest()
# def make_secure_val(s):
# return "%s|%s" % (s, hash_str(s))
# def check_secure_val(h):
# val = h.split('|')[0]
# hashed = h.split('|')[1]
# if hashed == hash_str(val):
# return val
# else:
# return 'None'
# IP_URl = "http://freegeoip.net/xml/"
# def get_coords(ip):
# ip = "4.2.2.2"
# url = IP_URl + ip
# content = None
# try:
# content = urllib2.urlopen(url).read()
# except:
# return
# if content:
# x = minidom.parseString(content)
# lat = x.getElementsByTagName("Latitude")[0].childNodes[0].nodeValue
# lon = x.getElementsByTagName("Longitude")[0].childNodes[0].nodeValue
# return db.GeoPt(lat,lon)
# else:
# return "None"
temp_user = ''
# Define base handler class
class Handler(webapp2.RequestHandler):
    """Base request handler adding Jinja2 template rendering helpers."""

    def write(self, *a, **kw):
        # Thin wrapper over the response output stream.
        self.response.out.write(*a, **kw)

    def render_str(self, template, **params):
        # Render a template to a string without writing it to the response.
        t = jinja_env.get_template(template)
        return t.render(params)

    def render(self, template, **kw):
        # Render a template and write the result to the response.
        self.write(self.render_str(template, **kw))
# Code for sign up
## Email address and Password entered must be part of database
allowed = {'priya': 'sneha', 'marie.mizgala@scotiabank.com': 's6324800'}
class Signup(Handler):
    """Login form: validates credentials against the ``allowed`` whitelist."""

    def get(self):
        self.render("signup-form3.html")

    def post(self):
        # Bugfix: without the global declaration the assignment below created
        # a dead local, so MainHandler never saw the logged-in username.
        global temp_user
        email = self.request.get('email')
        password = self.request.get('password')
        params = dict(email=email,
                      password=password)

        # SECURITY: `allowed` stores plaintext credentials; this should be
        # replaced with salted password hashes.
        if email in allowed:  # `in` instead of Python-2-only dict.has_key()
            have_error = allowed[email] != password
            if have_error:
                params['error_password'] = "That's not a valid password."
        else:
            have_error = True
            params['error_email'] = "That's not a valid email."

        if have_error:
            self.render('signup-form3.html', **params)
        else:
            # Username is everything before the first dot of the email.
            username = email.split('.')[0]
            self.write(username)
            temp_user = username
            self.redirect('/mainpage')
class Case(db.Model):
    """Datastore model for a single insurance case record."""
    application = db.StringProperty(required = True)
    insurer = db.StringProperty(required = True)
    date = db.StringProperty(required = True)
    ins_comment = db.TextProperty(required=True)
    # Investigator comment is optional and may be filled in later.
    inv_comment = db.TextProperty(required = False)
    # Set automatically when the entity is first stored.
    created = db.DateTimeProperty(auto_now_add = True)
def top_case():
    """Return the 50 most recently created cases, newest first."""
    query = db.GqlQuery("SELECT * FROM Case ORDER BY created DESC LIMIT 50 ")
    return list(query)
class MainHandler(Handler):
    """Lists recent cases and accepts new case submissions."""

    def render_front(self, application="", insurer="", date="", ins_comment="",
                     inv_comment="", username="", error=""):
        """Render the case list page, pre-filling the form with any values."""
        cases = db.GqlQuery("SELECT * FROM Case ORDER BY created DESC LIMIT 50 ")
        username = temp_user
        self.render("edd_display2.html", application=application, insurer=insurer,
                    date=date, ins_comment=ins_comment, inv_comment=inv_comment,
                    username=str(username), error=error, cases=cases)

    def get(self):
        return self.render_front()

    def post(self):
        application = self.request.get("application")
        insurer = self.request.get("insurer")
        date = self.request.get("date")
        ins_comment = self.request.get("ins_comment")
        inv_comment = self.request.get("inv_comment") or ""

        if application and insurer and date and ins_comment:
            a = Case(application=application, insurer=insurer, date=date,
                     ins_comment=ins_comment, inv_comment=inv_comment)
            a.put()
            # Bugfix: previously the page was also rendered after redirecting,
            # writing a body onto a redirect response.
            self.redirect("/mainpage")
        else:
            error = "Need first four fields"
            # Bugfix: `error` was passed positionally into the `username`
            # parameter; pass it by keyword so the message actually displays.
            self.render_front(application, insurer, date, ins_comment,
                              inv_comment, error=error)
### Add the new handler
class ModHandler(Handler):
    """Page for modifying an existing case, looked up by application id."""

    def render_front(self, application="", insurer="", date="", ins_comment="",
                     inv_comment="", username="", error=""):
        """Render the modification page with the 50 most recent cases."""
        cases = db.GqlQuery("SELECT * FROM Case ORDER BY created DESC LIMIT 50 ")
        username = temp_user
        self.render("edd_modify2.html", application=application, insurer=insurer,
                    date=date, ins_comment=ins_comment, inv_comment=inv_comment,
                    username=str(username), error=error, cases=cases)

    def get(self):
        return self.render_front()

    def post(self):
        if self.request.get("ex_application"):
            new_application = self.request.get("ex_application")
            new_insurer = self.request.get("ex_insurer")
            new_date = self.request.get("ex_date")
            new_ins_comment = self.request.get("ex_ins_comment")
            new_inv_comment = self.request.get("ex_inv_comment")

            # Bugfix: `cases` was an undefined name here (NameError at
            # runtime); fetch the recent cases before scanning for a match.
            cases = db.GqlQuery(
                "SELECT * FROM Case ORDER BY created DESC LIMIT 50 ")
            for case in cases:
                if case.application != new_application:
                    continue
                if (case.insurer != new_insurer or case.date != new_date or
                        case.ins_comment != new_ins_comment or
                        case.inv_comment != new_inv_comment):
                    # Bugfix: update the stored entity in place. The old code
                    # built an unsaved copy and called delete() on it, which
                    # fails for entities that were never put().
                    case.insurer = new_insurer
                    case.date = new_date
                    case.ins_comment = new_ins_comment
                    case.inv_comment = new_inv_comment
                    case.put()
        self.redirect("/mainpage")
# URL routing table: maps request paths to their handler classes.
app = webapp2.WSGIApplication([ ('/signup', Signup),
                                ('/mainpage', MainHandler),
                                ('/modcases', ModHandler)],
                                debug=True)
| [
"noreply@github.com"
] | noreply@github.com |
2cfe92888dff587c17cf100a3ba9d25a5c0a2633 | fda2c891c6a159c54504e768012e61924abc11a3 | /ejercicios_python/informe_funciones.py | 6673a472f3263a2dba47065efa01e0fc30f17dce | [] | no_license | luciasucunza/unsam_python | 4bf54b8b9f9034c212e30e943815f86b71f7c751 | 26a6e491f88e8212c575a56a79a3dab4cc2497ae | refs/heads/main | 2023-06-03T16:26:08.681480 | 2021-06-17T19:03:09 | 2021-06-17T19:03:09 | 377,932,410 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,456 | py | # informe_funciones.py
import csv
import fileparse as fp
#------------Función:leer_camion------------
def leer_camion(nombre_archivo):
    """Read the truck CSV file and return its rows as a list of dicts."""
    camion = fp.parse_csv(nombre_archivo)
    return camion
#------------Función:leer_precios------------
def leer_precios(nombre_archivo):
    """Return the price list as a dict mapping product name -> float price."""
    # Headerless file; each row is parsed as (name, price).
    return dict(fp.parse_csv(nombre_archivo, types=[str, float], has_headers=False))
#------------Función:hacer_informe()------------
def hacer_informe(lista_cajones, dicc_precios ):
    """Build report rows: (name, crates, '$price', reference price - paid price)."""
    filas = []
    for cajon in lista_cajones:
        nombre = cajon['nombre']
        filas.append((
            nombre,
            cajon['cajones'],
            '$' + str(cajon['precio']),
            dicc_precios[nombre] - cajon['precio'],
        ))
    return filas
#------------Función:imprimir_informe()------------
def imprimir_informe(informe):
    """Print the report as a right-aligned, fixed-width (10-char) table."""
    encabezados = ('Nombre', 'Cajones', 'Precio', 'Cambio')
    print(' '.join('%10s' % titulo for titulo in encabezados))
    print(' '.join(['-' * 10] * 4))
    for fila in informe:
        print('%10s %10d %10s %10.2f' % fila)
#------------Función:imprimir_informe()------------
def informe_camion(nombre_archivo_camion, nombre_archivo_precios):
    """Entry point: load the truck and prices, then build and print the report."""
    camion = leer_camion(nombre_archivo_camion)
    precios = leer_precios(nombre_archivo_precios)
    informe = hacer_informe(camion, precios)
    imprimir_informe(informe)
| [
"luciasucunza@frba.utn.edu.ar"
] | luciasucunza@frba.utn.edu.ar |
b6fd1cb50b6a90fba0a8720a1c21ec4355b2ffec | 38f0c9ac9dd1dfaf2310165ba9fea5b4ad787f6b | /src/cfehome/settings/production.py | 90477df7bdac71c27aa8ca044d31927fb9d097c7 | [] | no_license | Tabas32/knircoding | d58a60f8944d5b879854ddfaeaf143dd6cb5ff12 | 5fb6c6a95848830ea783fb155b0e06c00b8eddd2 | refs/heads/master | 2021-09-02T01:19:31.477419 | 2017-12-29T16:00:15 | 2017-12-29T16:00:15 | 115,107,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,727 | py | """
Django settings for cfehome project.
Generated by 'django-admin startproject' using Django 2.0.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY', 'SOME+RANDOM+KEY(z9+3vnm(jb0u@&w68t#5_e8s9-lbfhv-')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['knircoding.herokuapp.com']
# Application definition
INSTALLED_APPS = [
'datacatcher',
'webapp',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'cfehome.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'cfehome.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
import dj_database_url
# Override the sqlite default with the database described by DATABASE_URL
# (Heroku-style), and keep pooled connections alive for up to 500 seconds.
db_from_env = dj_database_url.config()
DATABASES['default'].update(db_from_env)
DATABASES['default']['CONN_MAX_AGE'] = 500
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
# --- HTTPS hardening (site is served behind a TLS-terminating proxy) ---
CORS_REPLACE_HTTPS_REFERER = True
HOST_SCHEME = "https://"
# Trust the proxy's X-Forwarded-Proto header to detect HTTPS requests.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Redirect all plain-HTTP requests to HTTPS.
SECURE_SSL_REDIRECT = True
# Only send session and CSRF cookies over HTTPS.
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
# HSTS: browsers pin HTTPS for 1,000,000 s (~11.5 days), subdomains included.
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
SECURE_HSTS_SECONDS = 1000000
SECURE_FRAME_DENY = True
| [
"marian.sabat@centrum.sk"
] | marian.sabat@centrum.sk |
499ca439f8deb4c3c382d1c47c6df47873853d24 | e4de060c295fba0d0386d0a7678e744ced18b920 | /build/car_szenario/cmake/car_szenario-genmsg-context.py | 309534aaf9d5f6c3f170188f065ab4b9cae655ff | [] | no_license | jbenzhhn/carla_hhn | af9497d01ce1f34ee0016ca660a0cc5af5f71be8 | abd803bcdd506641c8152ec994468518ea809f1b | refs/heads/master | 2023-04-05T10:50:28.934452 | 2021-04-07T14:31:41 | 2021-04-07T14:31:41 | 355,151,500 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 788 | py | # generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = "/home/automotive/catkin_ws/src/car_szenario/msg/RoadInfo.msg"
services_str = ""
pkg_name = "car_szenario"
dependencies_str = "std_msgs;geometry_msgs;nav_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "car_szenario;/home/automotive/catkin_ws/src/car_szenario/msg;std_msgs;/opt/ros/melodic/share/std_msgs/cmake/../msg;geometry_msgs;/opt/ros/melodic/share/geometry_msgs/cmake/../msg;nav_msgs;/opt/ros/melodic/share/nav_msgs/cmake/../msg;actionlib_msgs;/opt/ros/melodic/share/actionlib_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python2"
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/melodic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
| [
"johannes.benz@hs-heilbronn.de"
] | johannes.benz@hs-heilbronn.de |
b21da1aba578f06baeba015e296dc1269a4c6702 | 46fe34e8ff3a75380fb47d01f5eb758da0ff7df7 | /appbackup.py | 2a0395e5622b3982f068e9a70ec0a29a7cfc5022 | [
"MIT"
] | permissive | Quyzi/autodock | ae12b07ee8ae4757f5429b7a66c3a15505b447df | 56516b62d01d4e9fbf0a6ff62c710fdd84c42838 | refs/heads/master | 2021-01-15T15:05:06.876902 | 2013-12-24T18:24:43 | 2013-12-24T18:24:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,824 | py | '''
Automates the backing up of customer containers
'''
import salt.client
from etcd import Etcd
class AppBackup(object):
    """Automates backing up customer containers to the shared ceph mount."""

    def __init__(self, manager, logger):
        self.etcd = Etcd(logger)
        self.logger = logger
        self.manager = manager
        # Salt is used to run docker commands on the remote host servers.
        self.salt_client = salt.client.LocalClient()

    # Backup this users formation to /mnt/ceph/docker_customer_backups
    def backup_formation(self, user, formation):
        """Commit, save and clean up every container in the user's formation.

        For each app: `docker commit` the running container, `docker save`
        the resulting image to a tar on the ceph mount, then `docker rmi`
        the temporary image. Salt calls time out after 10 minutes.
        """
        self.logger.info('Saving the formation {formation}'.format(
            formation=formation))
        formation = self.manager.load_formation_from_etcd(user, formation)
        for app in formation.application_list:
            self.logger.info('Running commit on {hostname}'.format(
                hostname=app.hostname))
            # Try to commmit the container and wait 10 mins for this to return
            results = self.salt_client.cmd(app.host_server, 'cmd.run',
                ['docker commit {container_id}'.format(container_id=app.container_id)],
                expr_form='list', timeout=600)
            # An empty result (e.g. salt timeout) is silently skipped.
            if results:
                if "Error: No such container" in results[app.host_server]:
                    self.logger.error('Could not find container')
                else:
                    # The commit output is the new image id; the first 12
                    # chars are the short-id form docker accepts everywhere.
                    commit_id = results[app.host_server]
                    self.logger.info('Running save on {hostname}'.format(
                        hostname=app.hostname))
                    self.salt_client.cmd(app.host_server, 'cmd.run',
                        ['docker save {image_id} > /mnt/ceph/docker_customer_backups/'
                        '{hostname}.tar'.format(
                            image_id=commit_id[0:12], hostname=app.hostname)],
                        expr_form='list', timeout=600)
                    self.logger.info('Cleaning up the commit image')
                    self.salt_client.cmd(app.host_server, 'cmd.run',
                        ['docker rmi {image_id}'.format(image_id=commit_id[0:12])],
                        expr_form='list')
        self.logger.info('Done saving app')
"xfactor973@gmail.com"
] | xfactor973@gmail.com |
34a4d7d23e1a7739f3a8b30538d2078ec65dce64 | b801e38bfd1bddb7c328a529f4fdea41c592323a | /pong.py | 242e8ff6c0569a3ffc2a57370955f5d03863ccff | [] | no_license | alexaustin007/Pong-game | 5e6a48140f53325cbbb3421343745e1522082d39 | af817c0ab50baae217257dd4883eb0d3eac8a8fc | refs/heads/main | 2023-04-20T21:43:43.675426 | 2021-05-12T04:55:38 | 2021-05-12T04:55:38 | 366,596,331 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,343 | py | '''import turtle #turtle used for basic graphics especially games
wn = turtle.Screen()
wn.title("pingpong by @alexaustin007")
wn.bgcolor("black")
wn.setup(width=800,height=600)
wn.tracer(0) #it speeds up the game quite a bit
#score
score_a = 0
score_b = 0
# Paddle A
paddle_a = turtle.Turtle()
paddle_a.speed(0) #This is the speed of animation and not the speed that the paddle moves
paddle_a.shape("square")
paddle_a.color("white")
paddle_a.penup() #penup doesnt draw lines by moving because if we dont write this turtle will draw lines when paddle moving
paddle_a.goto(-350,0) # X cordinate
paddle_a.shapesize(stretch_wid= 5,stretch_len=1) #it will stretch the shape because bydefault its 20px by 20px
# Paddle B
paddle_b = turtle.Turtle()
paddle_b.speed(0)
paddle_b.shape("square")
paddle_b.color("white")
paddle_b.penup()
paddle_b.goto(350,0) #Right side of screen
paddle_b.shapesize(stretch_wid = 5,stretch_len = 1)
# ball
ball = turtle.Turtle()
ball.speed(0)
ball.shape("circle")
ball.color("white")
ball.penup()
ball.goto(0,0) # middle of screen
ball.dx = 0.5 # x movement of the ball
ball.dy = 0.5 # y movement of the ball
#Pen
pen = turtle.Turtle() #for naming scoreboard and counting small 't' for module name capital T for class name
pen.penup()
pen.color('white')
pen.goto(0,260)
pen.hideturtle()
pen.speed(0) #animation speed not the movement speed
pen.write("Rohan :0 Austin: 0", align = 'center' , font = ("Courier", 24,"normal"))
#Functions
def paddle_a_up():
y = paddle_a.ycor() #ycor is a turtle module which returns the y cor
y += 20
paddle_a.sety(y)
def paddle_a_down():
y = paddle_a.ycor()
y -= 20
paddle_a.sety(y)
def paddle_b_up():
y = paddle_b.ycor()
y += 20
paddle_b.sety(y)
def paddle_b_down():
y = paddle_b.ycor()
y -= 20
paddle_b.sety(y)
#keyboard binding
wn.listen() #this tells to listen to keyboard inputs
wn.onkeypress(paddle_a_up,"w") #when pressed w in keyboard it will call the function
wn.onkeypress(paddle_a_down,"s")
wn.onkeypress(paddle_b_up,"Up")
wn.onkeypress(paddle_b_down,"Down")
# main game loop
while True:
wn.update()
#Ball moving
ball.setx(ball.xcor() + ball.dx) #The ball moves with the delta speed as set
ball.sety(ball.ycor() + ball.dy) #The ball moves with the delta speed as set
#border checking
if ball.ycor() > 290:
ball.sety(290) #if its greater than 290 it will set the ball at 290
ball.dy *= -1
if ball.ycor() < -290:
ball.sety(-290)
ball.dy *= -1
if ball.xcor() > 390:
ball.setx(390)
ball.dx *= -1
pen.clear()
score_a += 1 #if ball touches the right side player 'a' get points
pen.write("rohan:{} austin:{}".format(score_a,score_b), align = 'center' , font = ("Courier", 24,"normal")) # format is used to store score values in specific player name
if ball.xcor() < -390:
ball.setx(-390)
ball.dx *= -1
score_b += 1 #if ball touches the left side player 'b' get points
pen.clear()
pen.write("rohan:{} austin:{}".format(score_a,score_b), align = 'center' , font = ("Courier", 24,"normal"))
#paddle and ball collision
if (ball.xcor() > 340 and ball.xcor() < 350) and (ball.ycor() < paddle_b.ycor()+ 40 and ball.ycor() > paddle_b.ycor()-40):
ball.setx(340)
ball.dx *= -1
if (ball.xcor() < -340 and ball.xcor() > -350) and (ball.ycor() < paddle_a.ycor()+ 40 and ball.ycor() > paddle_a.ycor()-40):
ball.setx(-340)
ball.dx *= -1'''
'''string = input("Enter the string: ")
liststring = list(string)
len_liststring = list(liststring)
for i in range(len_liststring-1):
for j in range(len_liststring-i-1):
if liststring[j] > liststring[j+1]:
liststring[j],liststring[j+1] = liststring[j+1],liststring[j]
for m in liststring:
new_string += m
print(new_string)'''
'''number = [212,-3837,20,23,5,33,67,3,5,5244,-83]
for i in range(len(number)):
for j in range(i+1,len(number)):
if number[i] > number[j]:
number[i],number[j] = number[j],number[i]
print(number)'''
'''def quick_sort(sequence):
if len(sequence) <= 1:
return sequence
else:
pivot = sequence.pop()
greater = []
lower = []
for item in sequence:
if item > pivot:
greater.append(item)
else:
lower.append(item)
return quick_sort(lower) + [pivot] + quick_sort(greater)
print(quick_sort([2,4,1,1,1,11,2,5335,122,3,112,-727,0,-26726]))'''
'''def binary_search(sequence,item):
begin_index = 0
end_index = len(sequence)-1
while begin_index <= end_index:
midpoint = (begin_index+end_index) // 2
midpoint_value = sequence[midpoint]
if midpoint_value == item:
return midpoint
elif item < midpoint_value:
end_index = midpoint-1
else:
begin_index = midpoint+1
return None
sequence_a = [1,2,3,4,5,7,9,16,500]
item_a = 16
print(binary_search(sequence_a,item_a))'''
| [
"noreply@github.com"
] | noreply@github.com |
3dffb927230c836a6335dd500e68d9cd4c8c3cbe | 8ab78974988f4447b5895fa15a5679e70ea20aee | /hclustfact/__init__.py | b7a50eae5b932f3fa51e98e2936b70d67c7820c4 | [] | no_license | cbongiorno/HclustFactor | 771147dc0171ecfa0e7c63bbca6138efc0a133fe | 0fe5999aa69b81a1141dc396f09b9bc6ebda51b6 | refs/heads/master | 2021-03-24T12:52:42.172348 | 2016-12-06T19:11:18 | 2016-12-06T19:11:18 | 75,723,892 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42 | py | from hclustfact.MainHclF import Hierarchy
| [
"pvofeta@gmail.com"
] | pvofeta@gmail.com |
ad0aff4df6556899c3625912e906f94cb6070946 | a9e00a787b624c1eb5ff7e2236dd5df48f56789c | /12.py | 7790707dbbf04e04a841698406c07f24730030cd | [] | no_license | shreyansbhavsar/compititive-coding- | e1e4ce064719ce60fd792b7e4e21ce8b75a66568 | 9a7f7371d8176282a3b059ec2f2c65a853d176dd | refs/heads/master | 2022-06-22T23:20:58.355990 | 2022-05-25T19:23:50 | 2022-05-25T19:23:50 | 261,390,603 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,138 | py | import numpy as np
from sys import stdin
def show_result(data):
    """Solve the "two-parent scheduling" problem and print one line per case.

    `data` is a list of int-lists: data[0][0] is the number of test cases T;
    each case is one line [n] followed by n lines [start, end]. For each case
    prints "Case #i: <letters>" where letter k assigns task k (in input
    order) to C or J, or "Case #i: IMPOSSIBLE" if no valid assignment exists.

    Bugfix: the original applied `sorted_idx` directly to the answer
    (`np.array(ans)[sorted_idx]`), which permutes in the wrong direction —
    the answer must be scattered back via the INVERSE permutation. Writing
    each letter straight into assignment[original_index] fixes this and
    removes the numpy dependency.
    """
    num_cases = data[0][0]
    idx = 1
    for case in range(num_cases):
        n = data[idx][0]
        tasks = data[idx + 1:idx + 1 + n]
        # Process tasks in order of start time, remembering original indices.
        order = sorted(range(n), key=lambda i: tasks[i][0])

        assignment = [''] * n
        c_busy_until = -1
        j_busy_until = -1
        possible = True
        for i in order:
            start, end = tasks[i]
            # Greedily give the task to C if free, otherwise to J.
            if c_busy_until <= start:
                c_busy_until = end
                assignment[i] = 'C'
            elif j_busy_until <= start:
                j_busy_until = end
                assignment[i] = 'J'
            else:
                possible = False
                break

        result = ''.join(assignment) if possible else 'IMPOSSIBLE'
        print('Case #{}: {}'.format(case + 1, result))
        idx = idx + 1 + n
if __name__ == '__main__':
    # Read all of stdin; each line becomes a list of ints for show_result.
    raw_data = [val.rstrip() for val in stdin.readlines()]
    data = [list(map(int, val.split(' '))) for val in raw_data]
    show_result(data)
"shreyansbhavsar9950@gmail.com"
] | shreyansbhavsar9950@gmail.com |
a615d2b2528d631e64114794df2779334b205637 | 57bfb6d66710adf452a0e2f60398ea33b928f4b4 | /utils/utils.py | 329c872d658f04a6c5b94eb50651d22f5f0bae4e | [
"MIT"
] | permissive | mreq/SublimeMagic | 35402366e11042a767122a4edceee4cdee14855c | dc93c280d27055b4fad85b3c376fcf7ae31a5586 | refs/heads/master | 2021-01-20T18:58:18.705062 | 2017-01-07T23:09:45 | 2017-01-07T23:09:45 | 64,035,290 | 8 | 1 | null | 2016-12-31T14:49:49 | 2016-07-23T20:39:13 | Python | UTF-8 | Python | false | false | 659 | py | import sublime
def find_previous_delimiter(view, line, re_delimiter, start):
    """Scan backwards from `start` for a single-char delimiter within `line`.

    Returns the offset just after the matching character, or None if no
    delimiter occurs before the line (or buffer) start.
    """
    position = start
    while position > line.a and position > 0:
        candidate = sublime.Region(position - 1, position)
        if re_delimiter.match(view.substr(candidate)):
            return position
        position -= 1
    return None
def find_next_delimiter(view, line, re_delimiter, delimiter_length, start):
    """Scan forward from `start` for the next delimiter match within `line`.

    Returns the offset where a window of `delimiter_length` characters
    matches `re_delimiter`, or None if none is found before the line end.
    """
    position = start
    # 999999 mirrors the original hard safety cap on forward scanning.
    while position < line.b and position < 999999:
        window = sublime.Region(position, position + delimiter_length)
        if re_delimiter.match(view.substr(window)):
            return position
        position += 1
    return None
| [
"contact@petrmarek.eu"
] | contact@petrmarek.eu |
abe2db4794797e77069626385bfd10436a5096be | 62f68303f59cc4344d390b3abf91b1a71aa2a4fe | /siki_website_sale_social_share/__openerp__.py | 4168ae099c478a73d57f46c2f56e1c91ee8c710f | [] | no_license | ControlWebManager/Requerimientos_SIKI_SAS | b8b0f514ce979a1b91f63d5f4076bc408fd6bbca | 7d3f9f69de82633b5ddb18657a15da55e71c1d32 | refs/heads/master | 2020-05-20T16:07:59.464118 | 2019-11-27T22:55:55 | 2019-11-27T22:55:55 | 185,659,981 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,108 | py | # -*- coding: utf-8 -*-
{
'name': 'Botones Compartir producto en Redes Social',
'summary': 'Share contents in social networks',
'depends': [
'website',
'website_sale',
],
'author': 'SIKI SAS, Developer Ing Henry Vivas',
'website': 'www.sikisoftware.com',
"support": "controlwebmanager@gmail.com",
'category': 'Website',
'description': """
Share products on social networks
=================================
Social Media
* Facebook,
* Twitter,
* Pinterest
* Whatsapp
List of modifications:
----------------------
* V.-1.0 Modify and improve functionality buttons compatir of products in stores( Req. 1063 )
* V.-1.1 Se adapta funcionalidad de Whatsapp Shared Button
* V.-2.0 Modificación de código para adaptar diseño con diferentes variantes en cuanto aplicaciones instaladas
""",
'data': [
'views/website.xml',
'views/website_sale.xml',
],
'demo': [
],
'test': [
],
'qweb': [
],
'js': [
],
'css': [
],
'installable': True,
}
| [
"noreply@github.com"
] | noreply@github.com |
375d8559fca821199d9f096f090ba521769263c6 | 913a7b6ec21e1e49ab0975ed928e98d35e602a96 | /Python Codewars/Greed is good.py | 5f3ca060eeac229a489ec2f10af30b5729fe57a1 | [] | no_license | akshaymanoj11794/CodeFun | 60a90cbb0f7181cf0948a083703311a5ff356717 | ed96dc12f30d6fb927fbafb45a5417e3fa9abd90 | refs/heads/master | 2021-01-14T05:54:37.003788 | 2020-02-24T01:36:37 | 2020-02-24T01:36:37 | 242,619,088 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,451 | py | # Greed is a dice game played with five six-sided dice. Your mission, should you choose to accept it, is to score a throw according to these rules. You will always be given an array with five six-sided dice values.
# Three 1's => 1000 points
# Three 6's => 600 points
# Three 5's => 500 points
# Three 4's => 400 points
# Three 3's => 300 points
# Three 2's => 200 points
# One 1 => 100 points
# One 5 => 50 point
# A single die can only be counted once in each roll. For example, a "5" can only count as part of a triplet (contributing to the 500 points) or as a single 50 points, but not both in the same roll.
# Example scoring
# Throw Score
# --------- ------------------
# 5 1 3 4 1 50 + 2 * 100 = 250
# 1 1 1 3 1 1000 + 100 = 1100
# 2 4 4 5 4 400 + 50 = 450
# In some languages, it is possible to mutate the input to the function. This is something that you should never do. If you mutate the input, you will not be able to pass all the tests.
def score(dice):
    """Score a Greed roll of five six-sided dice.

    A triple scores face*100 (1000 for three 1s). Dice used in a triple
    cannot be reused; leftover single 1s score 100 each and single 5s 50
    each. All other dice score nothing.

    Fixes: removed the leftover debug print() calls (they polluted stdout)
    and the pointless enumerate() index.
    """
    counts = {face: 0 for face in range(1, 7)}
    total = 0
    for face in dice:
        counts[face] += 1
        if counts[face] == 3:
            # Three 1s are special-cased at 1000; other faces score face*100.
            total += 1000 if face == 1 else face * 100
            counts[face] = 0  # dice consumed by the triple can't be reused
    total += counts[1] * 100  # leftover single 1s
    total += counts[5] * 50   # leftover single 5s
    return total
"akshaymanoj11794@gmail.com"
] | akshaymanoj11794@gmail.com |
c873d90bb0bf04eb663dd8c4746c5467ba2e175a | 622d5683c2e53d378c3c3ba9453d7abd8058581b | /icse38/manage.py | c9bcdf74f316f322ef337946d28ef53fe29a4441 | [
"MIT"
] | permissive | rp17/icse38site | 5afcdd8ee986e9bb4c0fd9aa0dd82245c986117f | eff2f4c33db8f07140d681177148614c7401b976 | refs/heads/master | 2021-01-18T14:15:53.334441 | 2015-01-14T07:24:38 | 2015-01-14T07:24:38 | 26,727,413 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at the local settings module, then dispatch the CLI command.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "icse38.settings.local")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| [
"rp31@txstate.edu"
] | rp31@txstate.edu |
73faa7aa222e5a2139a1e51d55fc948bf578dafc | 059a61afa19361fe2dd3509cda7924a3eb74b8e0 | /bookmanager/book/models.py | 4dab062e1015fd45c479fdd1df213fcc75dbe06f | [
"MIT"
] | permissive | songaiwen/Django2 | bf3628b7dcd1c28b65644ecfb4442091fdf54991 | 685e41a7f90e4d245f361f8fb78992aebd422978 | refs/heads/master | 2020-03-19T17:54:17.669938 | 2018-06-19T01:38:43 | 2018-06-19T01:38:43 | 136,783,715 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 705 | py | from django.db import models
"""
create your models here
定义模型类
模型迁移
操作数据库
"""
#1.定义模型需要集成model.Model
#准备书籍列表信息的模型类
class BookInfo(models.Model):
    # Character field; Django auto-creates an auto-incrementing primary key.
    name = models.CharField(max_length=20)
    def __str__(self):
        # Render the model instance as its name string.
        return self.name
# Model class holding the character list information.
class PeopleInfo(models.Model):
    name = models.CharField(max_length=20)
    gender = models.BooleanField()
    # Foreign-key constraint: which book this character belongs to.
    book = models.ForeignKey(BookInfo)
    def __str__(self):
        return self.name
| [
"576883213@qq.com"
] | 576883213@qq.com |
f78c251f5afd3689e2c6083e1fc40349ec45fb72 | 3fa4aedf320396c3d780ba3cd3c4760ac007ee30 | /nba_api/stats/endpoints/boxscoremiscv2.py | 9a949ea6e5741e796fda0f6a041829728481a016 | [
"MIT"
] | permissive | Fragadule/nba_api | fb4adfe14d355223838df80aa52ab68d2be3c492 | 9df8ba11ade56a1f6b4ff0791adc276052a286c6 | refs/heads/master | 2020-04-13T19:51:43.835155 | 2018-12-28T13:41:47 | 2018-12-28T13:41:47 | 163,414,364 | 0 | 0 | MIT | 2018-12-28T13:37:28 | 2018-12-28T13:37:28 | null | UTF-8 | Python | false | false | 1,841 | py | from nba_api.stats.endpoints._base import Endpoint
from nba_api.stats.library.http import NBAStatsHTTP
from nba_api.stats.library.parameters import EndPeriod, EndRange, RangeType, StartPeriod, StartRange
class BoxScoreMiscV2(Endpoint):
    """Wrapper for the stats.nba.com ``boxscoremiscv2`` endpoint.

    Fetches the miscellaneous box-score stats for one game and exposes the
    two result sets (player-level and team-level) as DataSet objects.
    """
    endpoint = 'boxscoremiscv2'
    # Column layout each result set is expected to return.
    expected_data = {'sqlPlayersMisc': ['GAME_ID', 'TEAM_ID', 'TEAM_ABBREVIATION', 'TEAM_CITY', 'PLAYER_ID', 'PLAYER_NAME', 'START_POSITION', 'COMMENT', 'MIN', 'PTS_OFF_TOV', 'PTS_2ND_CHANCE', 'PTS_FB', 'PTS_PAINT', 'OPP_PTS_OFF_TOV', 'OPP_PTS_2ND_CHANCE', 'OPP_PTS_FB', 'OPP_PTS_PAINT', 'BLK', 'BLKA', 'PF', 'PFD'], 'sqlTeamsMisc': ['GAME_ID', 'TEAM_ID', 'TEAM_NAME', 'TEAM_ABBREVIATION', 'TEAM_CITY', 'MIN', 'PTS_OFF_TOV', 'PTS_2ND_CHANCE', 'PTS_FB', 'PTS_PAINT', 'OPP_PTS_OFF_TOV', 'OPP_PTS_2ND_CHANCE', 'OPP_PTS_FB', 'OPP_PTS_PAINT', 'BLK', 'BLKA', 'PF', 'PFD']}

    def __init__(self,
                 game_id,
                 end_period=EndPeriod.default,
                 end_range=EndRange.default,
                 range_type=RangeType.default,
                 start_period=StartPeriod.default,
                 start_range=StartRange.default):
        # Issue the HTTP request with the endpoint's query parameters.
        self.nba_response = NBAStatsHTTP().send_api_request(
            endpoint=self.endpoint,
            parameters={
                'GameID': game_id,
                'EndPeriod': end_period,
                'EndRange': end_range,
                'RangeType': range_type,
                'StartPeriod': start_period,
                'StartRange': start_range
            },
        )
        # Wrap every returned result set, then expose the two known ones
        # under named attributes for convenience.
        data_sets = self.nba_response.get_data_sets()
        self.data_sets = [Endpoint.DataSet(data=data_set) for data_set_name, data_set in data_sets.items()]
        self.sql_players_misc = Endpoint.DataSet(data=data_sets['sqlPlayersMisc'])
        self.sql_teams_misc = Endpoint.DataSet(data=data_sets['sqlTeamsMisc'])
| [
"swarchon@gmail.com"
] | swarchon@gmail.com |
f9d72241ffe73408acb4f8526964a5d6d7df5baf | b2c420540d6b053fde4404f79fcaecc2f20d4f17 | /Flight/flightRegistrationApp/models.py | 4e72378253d8be1107582edd330b70734f5987f6 | [] | no_license | haruboss/flight-application | 44a2de81e83b69413f9d0402f5fe0fca537d0c1e | 75b7091f201cb025e380b51ac6b602b3bedb0f60 | refs/heads/main | 2023-07-10T09:40:36.554262 | 2021-08-31T11:24:37 | 2021-08-31T11:24:37 | 401,676,833 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 584 | py | from django.db import models
# (stored value, human-readable label) pairs for Flight.trip_category.
TRIP_CHOICE =(
    ("One Way", "One Way"),
    ("Round Way", "Round Way"),
)
class Flight(models.Model):
    """A scheduled flight that users can register for."""
    # Airline / flight name shown to the user and used as the string form.
    name = models.CharField(max_length=50)
    source_city = models.CharField(max_length=50)
    destination_city = models.CharField(max_length=50)
    # One of TRIP_CHOICE; default=None forces an explicit choice in forms.
    trip_category = models.CharField(max_length=50, choices=TRIP_CHOICE, default=None)
    departure_date = models.DateField(auto_now=False)
    departure_time = models.TimeField(auto_now=False)
    # Fare in whole currency units; negative values rejected at the DB level.
    flight_charges = models.PositiveIntegerField()
    def __str__ (self):
        # Human-readable representation (e.g. in the Django admin).
        return self.name
| [
"harshit.saxena2016@gmail.com"
] | harshit.saxena2016@gmail.com |
9e576c530de7906567dbe5b9d96b25f93accd231 | cf8be80fe9d7acfae03d86430d1c8ff8d22a8655 | /ribosome/components/internal/mapping.py | 3f86df4834cd10bddda137ef16920a9828206e20 | [
"MIT"
] | permissive | tek/ribosome-py | 4da2faf3f7c2d646c5a90bf73e81ec12bd360d38 | 8bd22e549ddff1ee893d6e3a0bfba123a09e96c6 | refs/heads/master | 2022-12-21T22:46:49.075358 | 2020-08-31T16:22:51 | 2020-08-31T16:22:51 | 66,086,253 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,570 | py | from typing import Callable
from amino import do, curried, Do, __, _, Either
from amino.lenses.lens import lens
from amino.logging import module_log
from ribosome.nvim.io.state import NS
from ribosome.data.plugin_state import PluginState
from ribosome.nvim.io.compute import NvimIO
from ribosome.compute.program import Program
from ribosome.config.component import Components
from ribosome.nvim.api.command import nvim_command
from ribosome.data.mapping import Mapping, MapMode
log = module_log()
def mapping_handler(mapping: Mapping) -> Callable[[Components], Either[str, Program]]:
    """Return a lookup that resolves the Program bound to `mapping` in a Components registry."""
    def mapping_handler(components: Components) -> Either[str, Program]:
        # `__.mappings.lift(mapping)` probes each component's mapping table;
        # find_map returns the first hit, to_either turns a miss into Left.
        return components.all.find_map(__.mappings.lift(mapping)).to_either(f'no handler for {mapping}')
    return mapping_handler
def mapping_cmd(plugin: str, mapping: Mapping, mode: MapMode) -> NvimIO[None]:
    """Build the vim ``:map`` command that routes `mapping` to the plugin's Map handler."""
    buffer_flag = '<buffer>' if mapping.buffer else ''
    # '<' must be escaped as '<lt>' inside the :call argument string.
    escaped_keys = mapping.keys.replace('<', '<lt>')
    handler_call = f''':call {plugin}Map('{mapping.ident}', '{escaped_keys}')<cr>'''
    command_args = (
        f'{mode.mnemonic}map',
        buffer_flag,
        '<silent>',
        mapping.keys,
        handler_call,
    )
    return nvim_command(*command_args)
@do(NS[PluginState, None])
def activate_mapping(mapping: Mapping) -> Do:
    """Register `mapping`: record its handler in the plugin state and emit the vim map commands."""
    # Resolve the handler program for this mapping from the component registry.
    handler = yield NS.inspect_either(mapping_handler(mapping)).zoom(lens.components)
    # Remember (ident, handler) so incoming `<Plugin>Map` calls can be dispatched.
    yield NS.modify(__.append.active_mappings((mapping.ident, handler)))
    plugin = yield NS.inspect(_.camelcase_name)
    # Issue one :map command per requested map mode.
    yield NS.lift(mapping.modes.traverse(curried(mapping_cmd)(plugin, mapping), NvimIO))
__all__ = ('activate_mapping',)  # public API of this module
| [
"torstenschmits@gmail.com"
] | torstenschmits@gmail.com |
d9ef3d332490ccca0d0304377ebc18ec573ce446 | d69fc7055bb66d379b181f3f59751afe34ae2322 | /freezes.py | 74325098c804f4fe0ffdeea7431bb53a9f3ed6b2 | [] | no_license | marofmar/LearnPyTorch | 816c888b5bf8c3a1248057ee868106adfadd239e | 8dfd2ceb1d53b6c3e38135a288b64aeb3692fd11 | refs/heads/master | 2023-03-15T20:06:33.824233 | 2021-03-11T07:09:46 | 2021-03-11T07:09:46 | 345,507,101 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 431 | py | import torch, torchvision
from torch import nn, optim
# Load an ImageNet-pretrained ResNet-18 (downloads weights on first use).
model = torchvision.models.resnet18(pretrained=True)
# freeze all the params in the network
for param in model.parameters():
    param.requires_grad = False
# A freshly constructed layer has requires_grad=True by default, so only
# this new 10-class head will be trained.
model.fc = nn.Linear(512, 10) # replace the last classifying layer to the linear of 10 classes
optimizer = optim.SGD(model.fc.parameters(), lr = 1e-2, momentum=0.9) # except the model.fc, all layers were frozen
| [
"yjchung@promedius.ai"
] | yjchung@promedius.ai |
cc585b85d4dff7afbef3367121220d3d47856371 | 49d261e9e23f1f15e36f8c40c1d15b281c117efe | /clase 1/_4_tipo_tuple.py | dd01e6c9bc3b918e45c8b05d8d94531ce5e87ef4 | [] | no_license | LiamAlexis/programacion-en-python | 3b2ddb9f54766f74a0692de45bc998114a58d074 | 6a3b883864e7b021c1c79631cd644fc8e9d7f93a | refs/heads/main | 2023-08-12T03:13:04.736775 | 2021-10-15T00:38:39 | 2021-10-15T00:38:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,197 | py | """
Tuples in Python
"""
tupla_1 = 1, 2, 3, 4, 5, 2, 10, 2
tupla_2 = 'hola', 34, True, 2.5
tupla_3 = 1, 2, [9, 6, 3], 'Hola', 'personaje'
tupla_4 = tuple([2, True, 'palabra'])
# Accessing the elements of a tuple
# print(tupla_2[0])
# print(tupla_2[-1])
# print(tupla_3)
# tupla_3[2].append('c')
# print(tupla_3)
# print(type(tupla_3))
# Asking for its length
# print(len(tupla_3))
# Counting the occurrences of an element
# print(tupla_1.count(2))
# Checking whether an element exists
# print(340 in tupla_2)
# tuple unpacking
# tupla_5 = 4, "una palabra", False
# elem_1, elem_2, elem_3 = tupla_5
# print(elem_2)
# Cyclic (iterative) structure
# Iterate over the elements and print them (stops at the first 4)
print(tupla_1)
for elemento in tupla_1:
    if elemento == 4:
        break
    print(elemento)
# Selective (conditional) structure
# Print a message based on a condition
# if 2.5 in tupla_2:
#     print('El elemento 2.5 esta contenido en la tupla 2.')
# We can also run a block of code when the
# condition evaluates to False
# if 101 in tupla_2:
#     print('El elemento 101 existe dentro de la tupla 2')
# else:
#     print('La condición no se cumplio.')
| [
"nikolas090189@gmail.com"
] | nikolas090189@gmail.com |
45bf5ef9b36c9ae30464888f0e2bd5c5942bd3b6 | b5a9d42f7ea5e26cd82b3be2b26c324d5da79ba1 | /tensorflow/contrib/training/python/training/sequence_queueing_state_saver_test.py | 78d7c44ef28e450af4715332642d6a448fb9e068 | [
"Apache-2.0"
] | permissive | uve/tensorflow | e48cb29f39ed24ee27e81afd1687960682e1fbef | e08079463bf43e5963acc41da1f57e95603f8080 | refs/heads/master | 2020-11-29T11:30:40.391232 | 2020-01-11T13:43:10 | 2020-01-11T13:43:10 | 230,088,347 | 0 | 0 | Apache-2.0 | 2019-12-25T10:49:15 | 2019-12-25T10:49:14 | null | UTF-8 | Python | false | false | 28,300 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.SequenceQueueingStateSaver."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.contrib.training.python.training import sequence_queueing_state_saver as sqss
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
class SequenceQueueingStateSaverTest(test.TestCase):
  """Tests for tf.contrib.training.SequenceQueueingStateSaver (SQSS).

  Covers tensor conversion of inputs, basic batching/unrolling with state
  save/restore, input-validation failures, concurrent read/write threads,
  ordering guarantees, dynamic batch sizes, and op name scoping.

  Fixes applied relative to the original:
  - `np.str` (alias of builtin `str`, removed in NumPy >= 1.24) -> `str`.
  - `assertRaisesRegexp` (deprecated alias, removed in Python 3.12)
    -> `assertRaisesRegex`.
  """

  def testSequenceInputWrapper(self):
    """_SequenceInputWrapper converts length/key/sequences/context to tensors."""
    with self.cached_session():
      length = 3
      key = "key"
      padded_length = 4
      sequences = {
          "seq1": np.random.rand(padded_length, 5),
          "seq2": np.random.rand(padded_length, 4, 2)
      }
      context = {"context1": [3, 4]}
      input_wrapper = sqss._SequenceInputWrapper(length, key, sequences,
                                                 context)
      self.assertTrue(isinstance(input_wrapper.length, ops.Tensor))
      self.assertTrue(isinstance(input_wrapper.key, ops.Tensor))
      self.assertTrue(isinstance(input_wrapper.sequences["seq1"], ops.Tensor))
      self.assertTrue(isinstance(input_wrapper.sequences["seq2"], ops.Tensor))
      self.assertTrue(isinstance(input_wrapper.context["context1"], ops.Tensor))

  def testStateSaverWithTwoSimpleSteps(self):
    """Runs two unroll steps and checks keys, sequence slices, and saved state."""
    with self.cached_session() as sess:
      batch_size_value = 2
      batch_size = constant_op.constant(batch_size_value)
      num_unroll = 2
      length = 3
      key = string_ops.string_join([
          "key_", string_ops.as_string(
              math_ops.cast(10000 * random_ops.random_uniform(()),
                            dtypes.int32))
      ])
      padded_length = 4
      sequences = {
          "seq1": np.random.rand(padded_length, 5),
          "seq2": np.random.rand(padded_length, 4, 2)
      }
      context = {"context1": [3, 4]}
      initial_states = {
          "state1": np.random.rand(6, 7),
          "state2": np.random.rand(8)
      }
      state_saver = sqss.SequenceQueueingStateSaver(
          batch_size=batch_size,
          num_unroll=num_unroll,
          input_length=length,
          input_key=key,
          input_sequences=sequences,
          input_context=context,
          initial_states=initial_states,
          capacity=100)

      initial_key_value_0, _ = sess.run((key, state_saver.prefetch_op))
      initial_key_value_1, _ = sess.run((key, state_saver.prefetch_op))

      initial_key_value_0 = initial_key_value_0.decode("ascii")
      initial_key_value_1 = initial_key_value_1.decode("ascii")

      # Step 1
      next_batch = state_saver.next_batch
      (key_value, next_key_value, seq1_value, seq2_value, context1_value,
       state1_value, state2_value, length_value, _, _) = sess.run(
           (next_batch.key, next_batch.next_key, next_batch.sequences["seq1"],
            next_batch.sequences["seq2"], next_batch.context["context1"],
            next_batch.state("state1"), next_batch.state("state2"),
            next_batch.length,
            next_batch.save_state("state1", next_batch.state("state1") + 1),
            next_batch.save_state("state2", next_batch.state("state2") - 1)))

      expected_first_keys = set(
          ("00000_of_00002:%s" % x).encode("ascii")
          for x in (initial_key_value_0, initial_key_value_1))
      expected_second_keys = set(
          ("00001_of_00002:%s" % x).encode("ascii")
          for x in (initial_key_value_0, initial_key_value_1))
      expected_final_keys = set(
          ("STOP:%s" % x).encode("ascii")
          for x in (initial_key_value_0, initial_key_value_1))

      self.assertEqual(set(key_value), expected_first_keys)
      self.assertEqual(set(next_key_value), expected_second_keys)
      self.assertAllEqual(context1_value,
                          np.tile(context["context1"], (batch_size_value, 1)))
      self.assertAllEqual(seq1_value,
                          np.tile(sequences["seq1"][np.newaxis, 0:2, :],
                                  (batch_size_value, 1, 1)))
      self.assertAllEqual(seq2_value,
                          np.tile(sequences["seq2"][np.newaxis, 0:2, :, :],
                                  (batch_size_value, 1, 1, 1)))
      self.assertAllEqual(state1_value,
                          np.tile(initial_states["state1"],
                                  (batch_size_value, 1, 1)))
      self.assertAllEqual(state2_value,
                          np.tile(initial_states["state2"],
                                  (batch_size_value, 1)))
      self.assertAllEqual(length_value, [2, 2])

      # Step 2
      (key_value, next_key_value, seq1_value, seq2_value, context1_value,
       state1_value, state2_value, length_value, _, _) = sess.run(
           (next_batch.key, next_batch.next_key, next_batch.sequences["seq1"],
            next_batch.sequences["seq2"], next_batch.context["context1"],
            next_batch.state("state1"), next_batch.state("state2"),
            next_batch.length,
            next_batch.save_state("state1", next_batch.state("state1") + 1),
            next_batch.save_state("state2", next_batch.state("state2") - 1)))

      self.assertEqual(set(key_value), expected_second_keys)
      self.assertEqual(set(next_key_value), expected_final_keys)
      self.assertAllEqual(context1_value,
                          np.tile(context["context1"], (batch_size_value, 1)))
      self.assertAllEqual(seq1_value,
                          np.tile(sequences["seq1"][np.newaxis, 2:4, :],
                                  (batch_size_value, 1, 1)))
      self.assertAllEqual(seq2_value,
                          np.tile(sequences["seq2"][np.newaxis, 2:4, :, :],
                                  (batch_size_value, 1, 1, 1)))
      # States were incremented/decremented by the step-1 save_state ops.
      self.assertAllEqual(state1_value, 1 + np.tile(initial_states["state1"],
                                                    (batch_size_value, 1, 1)))
      self.assertAllEqual(state2_value, -1 + np.tile(initial_states["state2"],
                                                     (batch_size_value, 1)))
      self.assertAllEqual(length_value, [1, 1])

      # Finished. Let's make sure there's nothing left in the barrier.
      self.assertEqual(0, state_saver.barrier.ready_size().eval())

  def testStateSaverFailsIfPaddedLengthIsNotMultipleOfNumUnroll(self):
    """Prefetch rejects sequences whose padded length is not a num_unroll multiple."""
    with self.cached_session() as sess:
      batch_size = constant_op.constant(32)
      num_unroll = 17
      bad_padded_length = 3
      length = array_ops.placeholder(dtypes.int32)
      key = array_ops.placeholder(dtypes.string)
      sequences = {
          "seq1": array_ops.placeholder(
              dtypes.float32, shape=(None, 5))
      }
      context = {}
      initial_states = {
          "state1": array_ops.placeholder(
              dtypes.float32, shape=())
      }
      state_saver = sqss.SequenceQueueingStateSaver(
          batch_size=batch_size,
          num_unroll=num_unroll,
          input_length=length,
          input_key=key,
          input_sequences=sequences,
          input_context=context,
          initial_states=initial_states)
      with self.assertRaisesOpError(
          "should be a multiple of: 17, but saw value: %d" % bad_padded_length):
        sess.run([state_saver.prefetch_op],
                 feed_dict={
                     length: 1,
                     key: "key",
                     sequences["seq1"]: np.random.rand(bad_padded_length, 5),
                     initial_states["state1"]: 1.0
                 })

  def _testStateSaverFailsIfCapacityTooSmall(self, batch_size):
    """Helper: build an SQSS with capacity=10 and prefetch one example."""
    with self.cached_session() as sess:
      num_unroll = 2
      length = array_ops.placeholder(dtypes.int32)
      key = array_ops.placeholder(dtypes.string)
      sequences = {
          "seq1": array_ops.placeholder(
              dtypes.float32, shape=(None, 5)),
          "seq2": array_ops.placeholder(
              dtypes.float32, shape=(None,))
      }
      context = {}
      initial_states = {
          "state1": array_ops.placeholder(
              dtypes.float32, shape=())
      }
      state_saver = sqss.SequenceQueueingStateSaver(
          batch_size=batch_size,
          num_unroll=num_unroll,
          input_length=length,
          input_key=key,
          input_sequences=sequences,
          input_context=context,
          initial_states=initial_states,
          capacity=10)
      sess.run([state_saver.prefetch_op],
               feed_dict={
                   length: 1,
                   key: "key",
                   sequences["seq1"]: np.random.rand(num_unroll, 5),
                   sequences["seq2"]: np.random.rand(num_unroll),
                   initial_states["state1"]: 1.0
               })

  def testStateSaverFailsIfCapacityTooSmallTensor(self):
    """capacity < batch_size (tensor batch size) fails at run time."""
    batch_size_value = 32
    batch_size = constant_op.constant(batch_size_value)
    with self.assertRaisesOpError(
        ".*capacity needs to be >= batch_size.*"):
      self._testStateSaverFailsIfCapacityTooSmall(batch_size)

  def testStateSaverFailsIfCapacityTooSmallInt(self):
    """capacity < batch_size (python int) fails at graph-construction time."""
    batch_size = 32
    with self.assertRaisesRegex(
        ValueError,
        "capacity %d needs to be >= batch_size %d" % (10, batch_size)):
      self._testStateSaverFailsIfCapacityTooSmall(batch_size)

  def testStateSaverFailsIfInconsistentPaddedLength(self):
    """All sequences fed in one example must share the same padded length."""
    with self.cached_session() as sess:
      batch_size = constant_op.constant(32)
      num_unroll = 17
      length = array_ops.placeholder(dtypes.int32)
      key = array_ops.placeholder(dtypes.string)
      sequences = {
          "seq1": array_ops.placeholder(
              dtypes.float32, shape=(None, 5)),
          "seq2": array_ops.placeholder(
              dtypes.float32, shape=(None,))
      }
      context = {}
      initial_states = {
          "state1": array_ops.placeholder(
              dtypes.float32, shape=())
      }
      state_saver = sqss.SequenceQueueingStateSaver(
          batch_size=batch_size,
          num_unroll=num_unroll,
          input_length=length,
          input_key=key,
          input_sequences=sequences,
          input_context=context,
          initial_states=initial_states)
      with self.assertRaisesOpError(
          "Dimension 0 of tensor labeled sorted_sequences_seq2 "
          "should be: %d, shape received: %d" % (num_unroll, 2 * num_unroll)):
        sess.run([state_saver.prefetch_op],
                 feed_dict={
                     length: 1,
                     key: "key",
                     sequences["seq1"]: np.random.rand(num_unroll, 5),
                     sequences["seq2"]: np.random.rand(2 * num_unroll),
                     initial_states["state1"]: 1.0
                 })

  def testStateSaverFailsIfInconsistentWriteState(self):
    """save_state validates state names and state-value shapes."""
    # TODO(b/26910386): Identify why this infrequently causes timeouts.
    with self.cached_session() as sess:
      batch_size = constant_op.constant(1)
      num_unroll = 17
      length = array_ops.placeholder(dtypes.int32)
      key = array_ops.placeholder(dtypes.string)
      sequences = {
          "seq1": array_ops.placeholder(
              dtypes.float32, shape=(None, 5))
      }
      context = {}
      initial_states = {
          "state1": array_ops.placeholder(
              dtypes.float32, shape=())
      }
      state_saver = sqss.SequenceQueueingStateSaver(
          batch_size=batch_size,
          num_unroll=num_unroll,
          input_length=length,
          input_key=key,
          input_sequences=sequences,
          input_context=context,
          initial_states=initial_states)
      next_batch = state_saver.next_batch
      with self.assertRaisesRegex(KeyError, "state was not declared: state2"):
        save_op = next_batch.save_state("state2", None)
      with self.assertRaisesRegex(ValueError, "Rank check failed for.*state1"):
        save_op = next_batch.save_state("state1", np.random.rand(1, 1))
      with self.assertRaisesOpError(
          r"convert_state1:0 should be: 1, shape received:\] \[1 1\]"):
        state_input = array_ops.placeholder(dtypes.float32)
        with ops.control_dependencies([state_saver.prefetch_op]):
          save_op = next_batch.save_state("state1", state_input)
        sess.run([save_op],
                 feed_dict={
                     length: 1,
                     key: "key",
                     sequences["seq1"]: np.random.rand(num_unroll, 5),
                     initial_states["state1"]: 1.0,
                     state_input: np.random.rand(1, 1)
                 })

  def testStateSaverWithManyInputsReadWriteThread(self):
    """Stress test: concurrent insert and process threads with random inputs."""
    batch_size_value = 32
    num_proc_threads = 100
    with self.cached_session() as sess:
      batch_size = constant_op.constant(batch_size_value)
      num_unroll = 17
      length = array_ops.placeholder(dtypes.int32)
      key = array_ops.placeholder(dtypes.string)
      sequences = {
          "seq1": array_ops.placeholder(
              dtypes.float32, shape=(None, 5)),
          "seq2": array_ops.placeholder(
              dtypes.float32, shape=(None, 4, 2)),
          "seq3": array_ops.placeholder(
              dtypes.float64, shape=(None,))
      }
      context = {
          "context1": array_ops.placeholder(
              dtypes.string, shape=(3, 4)),
          "context2": array_ops.placeholder(
              dtypes.int64, shape=())
      }
      initial_states = {
          "state1": array_ops.placeholder(
              dtypes.float32, shape=(6, 7)),
          "state2": array_ops.placeholder(
              dtypes.int32, shape=())
      }
      state_saver = sqss.SequenceQueueingStateSaver(
          batch_size=batch_size,
          num_unroll=num_unroll,
          input_length=length,
          input_key=key,
          input_sequences=sequences,
          input_context=context,
          initial_states=initial_states)
      next_batch = state_saver.next_batch
      cancel_op = state_saver.close(cancel_pending_enqueues=True)

      update_1 = next_batch.save_state("state1", 1 + next_batch.state("state1"))
      update_2 = next_batch.save_state("state2",
                                       -1 + next_batch.state("state2"))

      original_values = {}

      def insert(which):
        for i in range(20):
          # Insert varying length inputs
          pad_i = num_unroll * (1 + (i % 10))
          length_i = int(np.random.rand() * pad_i)
          key_value = "key_%02d_%04d" % (which, i)
          stored_state = {
              "length": length_i,
              "seq1": np.random.rand(pad_i, 5),
              "seq2": np.random.rand(pad_i, 4, 2),
              "seq3": np.random.rand(pad_i),
              # builtin `str` replaces the removed `np.str` dtype alias.
              "context1": np.random.rand(3, 4).astype(str),
              "context2": np.asarray(
                  100 * np.random.rand(), dtype=np.int32),
              "state1": np.random.rand(6, 7),
              "state2": np.asarray(
                  100 * np.random.rand(), dtype=np.int32)
          }
          original_values[key_value] = stored_state
          sess.run([state_saver.prefetch_op],
                   feed_dict={
                       length: stored_state["length"],
                       key: key_value,
                       sequences["seq1"]: stored_state["seq1"],
                       sequences["seq2"]: stored_state["seq2"],
                       sequences["seq3"]: stored_state["seq3"],
                       context["context1"]: stored_state["context1"],
                       context["context2"]: stored_state["context2"],
                       initial_states["state1"]: stored_state["state1"],
                       initial_states["state2"]: stored_state["state2"]
                   })

      processed_count = [0]

      def process_and_check_state():
        next_batch = state_saver.next_batch
        while True:
          try:
            (got_key, next_key, length, total_length, sequence, sequence_count,
             context1, context2, seq1, seq2, seq3, state1, state2, _,
             _) = (sess.run([
                 next_batch.key, next_batch.next_key, next_batch.length,
                 next_batch.total_length, next_batch.sequence,
                 next_batch.sequence_count, next_batch.context["context1"],
                 next_batch.context["context2"], next_batch.sequences["seq1"],
                 next_batch.sequences["seq2"], next_batch.sequences["seq3"],
                 next_batch.state("state1"), next_batch.state("state2"),
                 update_1, update_2
             ]))
          except errors_impl.OutOfRangeError:
            # SQSS has been closed
            break

          self.assertEqual(len(got_key), batch_size_value)
          processed_count[0] += len(got_key)

          for i in range(batch_size_value):
            key_name = got_key[i].decode("ascii").split(":")[1]
            # We really saved this unique key
            self.assertTrue(key_name in original_values)
            # The unique key matches next_key
            self.assertEqual(key_name,
                             next_key[i].decode("ascii").split(":")[1])
            # Pull out the random values we used to create this example
            stored_state = original_values[key_name]
            self.assertEqual(total_length[i], stored_state["length"])
            self.assertEqual("%05d_of_%05d:%s" %
                             (sequence[i], sequence_count[i], key_name),
                             got_key[i].decode("ascii"))
            expected_length = max(
                0,
                min(num_unroll,
                    stored_state["length"] - sequence[i] * num_unroll))
            self.assertEqual(length[i], expected_length)
            expected_state1 = stored_state["state1"] + sequence[i]
            expected_state2 = stored_state["state2"] - sequence[i]
            expected_sequence1 = stored_state["seq1"][sequence[i] * num_unroll:(
                sequence[i] + 1) * num_unroll]
            expected_sequence2 = stored_state["seq2"][sequence[i] * num_unroll:(
                sequence[i] + 1) * num_unroll]
            expected_sequence3 = stored_state["seq3"][sequence[i] * num_unroll:(
                sequence[i] + 1) * num_unroll]

            self.assertAllClose(state1[i], expected_state1)
            self.assertAllEqual(state2[i], expected_state2)
            # context1 is strings, which come back as bytes
            self.assertAllEqual(context1[i].astype(str),
                                stored_state["context1"])
            self.assertAllEqual(context2[i], stored_state["context2"])
            self.assertAllClose(seq1[i], expected_sequence1)
            self.assertAllClose(seq2[i], expected_sequence2)
            self.assertAllClose(seq3[i], expected_sequence3)

      # Total number of inserts will be a multiple of batch_size
      insert_threads = [
          self.checkedThread(
              insert, args=(which,)) for which in range(batch_size_value)
      ]
      process_threads = [
          self.checkedThread(process_and_check_state)
          for _ in range(num_proc_threads)
      ]

      for t in insert_threads:
        t.start()
      for t in process_threads:
        t.start()
      for t in insert_threads:
        t.join()

      time.sleep(3)  # Allow the threads to run and process for a while
      cancel_op.run()

      for t in process_threads:
        t.join()

      # Each thread processed at least 2 sequence segments
      self.assertGreater(processed_count[0], 2 * 20 * batch_size_value)

  def testStateSaverProcessesExamplesInOrder(self):
    """Batches come out in strict insertion order."""
    with self.cached_session() as sess:
      batch_size_value = 32
      batch_size = constant_op.constant(batch_size_value)
      num_unroll = 17
      length = array_ops.placeholder(dtypes.int32)
      key = array_ops.placeholder(dtypes.string)
      sequences = {
          "seq1": array_ops.placeholder(
              dtypes.float32, shape=(None, 5))
      }
      context = {"context1": array_ops.placeholder(dtypes.string, shape=(3, 4))}
      initial_states = {
          "state1": array_ops.placeholder(
              dtypes.float32, shape=())
      }
      state_saver = sqss.SequenceQueueingStateSaver(
          batch_size=batch_size,
          num_unroll=num_unroll,
          input_length=length,
          input_key=key,
          input_sequences=sequences,
          input_context=context,
          initial_states=initial_states)
      next_batch = state_saver.next_batch
      update = next_batch.save_state("state1", 1 + next_batch.state("state1"))
      get_ready_size = state_saver.barrier.ready_size()
      get_incomplete_size = state_saver.barrier.incomplete_size()

      global_insert_key = [0]

      def insert(insert_key):
        # Insert varying length inputs
        sess.run([state_saver.prefetch_op],
                 feed_dict={
                     length: np.random.randint(2 * num_unroll),
                     key: "%05d" % insert_key[0],
                     sequences["seq1"]: np.random.rand(2 * num_unroll, 5),
                     context["context1"]: np.random.rand(3, 4).astype(str),
                     initial_states["state1"]: 0.0
                 })
        insert_key[0] += 1

      for _ in range(batch_size_value * 100):
        insert(global_insert_key)

      def process_and_validate(check_key):
        true_step = int(check_key[0] / 2)  # Each entry has two slices
        check_key[0] += 1
        got_keys, input_index, _ = sess.run(
            [next_batch.key, next_batch.insertion_index, update])
        decoded_keys = [int(x.decode("ascii").split(":")[-1]) for x in got_keys]
        min_key = min(decoded_keys)
        min_index = int(min(input_index))  # numpy scalar
        max_key = max(decoded_keys)
        max_index = int(max(input_index))  # numpy scalar
        # The current min key should be above the previous min
        self.assertEqual(min_key, true_step * batch_size_value)
        self.assertEqual(max_key, (true_step + 1) * batch_size_value - 1)
        self.assertEqual(2**63 + min_index, true_step * batch_size_value)
        self.assertEqual(2**63 + max_index,
                         (true_step + 1) * batch_size_value - 1)

      # There are now (batch_size * 100 * 2) / batch_size = 200 full steps
      global_step_key = [0]
      for _ in range(200):
        process_and_validate(global_step_key)

      # Processed everything in the queue
      self.assertEqual(get_incomplete_size.eval(), 0)
      self.assertEqual(get_ready_size.eval(), 0)

  def testStateSaverCanHandleVariableBatchsize(self):
    """next_batch honors a feed-time (dynamic) batch size."""
    with self.cached_session() as sess:
      batch_size = array_ops.placeholder(dtypes.int32)
      num_unroll = 17
      length = array_ops.placeholder(dtypes.int32)
      key = array_ops.placeholder(dtypes.string)
      sequences = {
          "seq1": array_ops.placeholder(
              dtypes.float32, shape=(None, 5))
      }
      context = {"context1": array_ops.placeholder(dtypes.string, shape=(3, 4))}
      initial_states = {
          "state1": array_ops.placeholder(
              dtypes.float32, shape=())
      }
      state_saver = sqss.SequenceQueueingStateSaver(
          batch_size=batch_size,
          num_unroll=num_unroll,
          input_length=length,
          input_key=key,
          input_sequences=sequences,
          input_context=context,
          initial_states=initial_states)
      next_batch = state_saver.next_batch
      update = next_batch.save_state("state1", 1 + next_batch.state("state1"))

      for insert_key in range(128):
        # Insert varying length inputs
        sess.run([state_saver.prefetch_op],
                 feed_dict={
                     length: np.random.randint(2 * num_unroll),
                     key: "%05d" % insert_key,
                     sequences["seq1"]: np.random.rand(2 * num_unroll, 5),
                     context["context1"]: np.random.rand(3, 4).astype(str),
                     initial_states["state1"]: 0.0
                 })

      all_received_indices = []
      # Pull out and validate batch sizes 0, 1, ..., 7
      for batch_size_value in range(8):
        got_keys, input_index, context1, seq1, state1, _ = sess.run(
            [
                next_batch.key, next_batch.insertion_index,
                next_batch.context["context1"], next_batch.sequences["seq1"],
                next_batch.state("state1"), update
            ],
            feed_dict={batch_size: batch_size_value})
        # Indices may have come in out of order within the batch
        all_received_indices.append(input_index.tolist())
        self.assertEqual(got_keys.size, batch_size_value)
        self.assertEqual(input_index.size, batch_size_value)
        self.assertEqual(context1.shape, (batch_size_value, 3, 4))
        self.assertEqual(seq1.shape, (batch_size_value, num_unroll, 5))
        self.assertEqual(state1.shape, (batch_size_value,))

      # Each input was split into 2 iterations (sequences size == 2*num_unroll)
      expected_indices = [[], [0], [0, 1], [1, 2, 3], [2, 3, 4, 5],
                          [4, 5, 6, 7, 8], [6, 7, 8, 9, 10, 11],
                          [9, 10, 11, 12, 13, 14, 15]]
      self.assertEqual(len(all_received_indices), len(expected_indices))
      for received, expected in zip(all_received_indices, expected_indices):
        self.assertAllEqual([x + 2**63 for x in received], expected)

  def testStateSaverScopeNames(self):
    """All created ops live under the user-provided name scope."""
    batch_size = constant_op.constant(2)
    sqss_scope_name = "unique_scope_name_for_sqss"
    num_unroll = 2
    length = 3
    key = string_ops.string_join([
        "key_", string_ops.as_string(
            math_ops.cast(10000 * random_ops.random_uniform(()), dtypes.int32))
    ])
    padded_length = 4
    sequences = {
        "seq1": np.random.rand(padded_length, 5),
        "seq2": np.random.rand(padded_length, 4, 2)
    }
    context = {"context1": [3, 4]}
    initial_states = {
        "state1": np.random.rand(6, 7),
        "state2": np.random.rand(8)
    }
    state_saver = sqss.SequenceQueueingStateSaver(
        batch_size=batch_size,
        num_unroll=num_unroll,
        input_length=length,
        input_key=key,
        input_sequences=sequences,
        input_context=context,
        initial_states=initial_states,
        name=sqss_scope_name)
    prefetch_op = state_saver.prefetch_op
    next_batch = state_saver.next_batch
    self.assertTrue(
        state_saver.barrier.barrier_ref.name.startswith("%s/" %
                                                        sqss_scope_name))
    self.assertTrue(prefetch_op.name.startswith("%s/" % sqss_scope_name))
    self.assertTrue(next_batch.key.name.startswith("%s/" % sqss_scope_name))
if __name__ == "__main__":
  test.main()  # run all tests when executed directly
| [
"v-grniki@microsoft.com"
] | v-grniki@microsoft.com |
b9679f711689aefe75d559577c6463c81f3bf6a7 | ccbfc7818c0b75929a1dfae41dc061d5e0b78519 | /aliyun-openapi-python-sdk-master/aliyun-python-sdk-chatbot/aliyunsdkchatbot/request/v20171011/CreateCategoryRequest.py | 81f144f15ac24e87bc0a3e60b668be68a5c5c461 | [
"Apache-2.0"
] | permissive | P79N6A/dysms_python | 44b634ffb2856b81d5f79f65889bfd5232a9b546 | f44877b35817e103eed469a637813efffa1be3e4 | refs/heads/master | 2020-04-28T15:25:00.368913 | 2019-03-13T07:52:34 | 2019-03-13T07:52:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,311 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class CreateCategoryRequest(RpcRequest):
	"""RPC request for the Chatbot ``CreateCategory`` API (version 2017-10-11)."""
	def __init__(self):
		RpcRequest.__init__(self, 'Chatbot', '2017-10-11', 'CreateCategory','beebot')

	def get_ParentCategoryId(self):
		# Returns None until set_ParentCategoryId has been called.
		return self.get_query_params().get('ParentCategoryId')

	def set_ParentCategoryId(self,ParentCategoryId):
		# Id of the parent category under which the new category is created.
		self.add_query_param('ParentCategoryId',ParentCategoryId)

	def get_Name(self):
		# Returns None until set_Name has been called.
		return self.get_query_params().get('Name')

	def set_Name(self,Name):
		# Name of the category to create.
		self.add_query_param('Name',Name) | [
"1478458905@qq.com"
] | 1478458905@qq.com |
4bf9fa6772197b693a84d0fbece8ceef55c47b66 | 8cac2c1f717447643e8e0ba3648d27b163de5299 | /vesicles/helperFunctions.py | 871b93f11647a9aaa5523bd6291a811556adcdc7 | [] | no_license | gelisa/pdmmod | 33af8f1344692085992ff01d127a77086b6ab51e | ab81b6f7a8660c2d989e97a43fc7e663a646d18c | refs/heads/master | 2021-01-18T00:53:43.939311 | 2017-03-10T02:43:02 | 2017-03-10T02:43:02 | 24,741,654 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,247 | py | #!/usr/bin/python
"""
File with helper functions for analyzing and running vesicle simulations
"""
import sys
sys.path.append('../')
import libSimulate
def runInitSimulation(modelNum, path, endTime, timeStep=0.0001): #TESTED
    """
    helper function, which runs simulation to produce different starters initPopulation files
    Args:
        modelNum: int. - number of the epdm model
        path: str. - folder where simulation results will be located
        endTime: float - time to end simulation
        timeStep: float - time step of the simulation
    Returns:
        None
    """
    # Single trajectory run with trajectory recording enabled.
    sDef = libSimulate.Simulation(
        modelNum,
        termCond=('simulateTime', endTime, timeStep),
        rewrite=False,
        specialPath=path,
        numOfRuns=1,
        traj=True,
        log_level='WARNING')
    sDef.runSeveralSeries(paramFile=None, populFile=None)
    return None
def findMatureSeqs(trajFile, startWeight): #TEST
    """
    Receives a trajectory file and start weight, at which vesicles start to grow
    returns sequences at the time when start weight is reached
    Args:
        trajFile: str. - is located in self.path+'test' and named 'traj0'
        startWeight: int. - usually self.matureWeight/2
    Returns:
        timeMature: float. - first time stamp at which total weight >= startWeight
        seqsAtTime: dict. {str. sequence: int. population} - populations at that time
    Raises:
        ValueError: if the trajectory ends before startWeight is reached
    """
    def line2Data(rawList): #TESTED
        """
        gets as input list of data points from a trajectory file, returns a dict. of populations of the sequences
        Args:
            rawList: list. [str 'seq pop'] - element 0 is the time stamp and the
                last element is an empty trailing field, both are skipped
        Returns:
            points: dict. {str. sequence: int. population}
        """
        points = {}
        for item in rawList[1:len(rawList) - 1]:
            # get a couple specie -- its population
            point = item.split(' ')
            points[point[0]] = int(point[1])
        return points

    timeMature = -1
    seqsAtTime = {}
    weight = 0
    # `with` guarantees the file is closed even if parsing raises,
    # unlike the previous explicit open()/close() pair.
    with open(trajFile, 'r') as simRes:
        for line in simRes:
            if line[0] == "#":
                # Header/comment lines in the trajectory are skipped.
                continue
            raw = (line.rstrip('\n')).split(',')
            seqsAtTime = line2Data(raw)
            weight = countWeight(seqsAtTime)
            if weight >= startWeight:
                # raw[0] is the time stamp of this record.
                timeMature = float(raw[0])
                break
    if timeMature == -1 or seqsAtTime == {}:
        raise ValueError(
            "Simulation was too short to get to the specified weight of " + str(startWeight) +
            ". It's only " + str(weight)
        )
    return timeMature, seqsAtTime
def getSeq(seq):
    """
    Strip the trajectory-file marker prefix ('f', optionally followed by '*')
    from a species name.
    Arguments:
        - seq -- str. sequence as depicted in trajectory file
    Returns:
        - str. actual HP sequence
    """
    if 'f' not in seq:
        return seq
    # 'f*XYZ' -> 'XYZ'; 'fXYZ' -> 'XYZ'
    return seq[2:] if '*' in seq else seq[1:]
def makeInitPopFile(seqsAtTime, initFilePath):
    """
    from the dictionary of sequences and their populations makes a initial population file for the simulation run
    Args:
        seqsAtTime: dict. {str. seq: int. pop}
        initFilePath: str. - destination path; an existing file is overwritten
    Returns:
        None
    """
    # A single 'w' open truncates and writes in one pass (the old code opened
    # the file twice — once to truncate, once to append — and leaked the
    # handle on error); `with` guarantees the file is closed.
    with open(initFilePath, 'w') as initFile:
        for seq, pop in seqsAtTime.items():
            initFile.write(seq + ' ' + str(pop) + '\n')
    return None
def countWeight(seqsAtTime):
    """
    Calculates the weight of the sequences in the current times.

    Args:
        seqsAtTime: dict. {str. sequences: int. population}
    Returns:
        weight: int. total weight of the sequences as a number of monomers
            in them (chain length of each species times its population)
    """
    # getSeq drops any marker prefix so only real monomers are counted.
    return sum(len(getSeq(species)) * population
               for (species, population) in seqsAtTime.items())
def readPopulations(popFile):
    """
    Converts an initial-population file into a dictionary.

    Args:
        popFile: str. population file path; each line holds 'seq pop'
    Returns:
        sequences: dict. {str. seq: int. pop}
    """
    with open(popFile) as handle:
        # Indexing (not tuple unpacking) keeps the original tolerance for
        # lines that carry more than two space-separated fields.
        rows = (entry.rstrip('\n').split(' ') for entry in handle)
        return {fields[0]: int(fields[1]) for fields in rows}
| [
"elizaveta.guseva@stonybrook.edu"
] | elizaveta.guseva@stonybrook.edu |
7e0aa58038722f83d48668b52a74d447e3367432 | d54a4454c1893b98d49381aa1182ca5c1031935a | /api.py | 3c72729a7a1a0f4c1de363b24e494f25248ffd10 | [
"Apache-2.0",
"MIT"
] | permissive | CodeLeom/pandelytics | 8866913e656fc0036afebe995bc60bf2558d4502 | 5950d4a95595dadf076ac9270be0dbcdcfa59a1a | refs/heads/master | 2022-06-23T16:52:02.355863 | 2020-05-11T08:26:37 | 2020-05-11T08:26:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 165 | py | from pandelytics import search
from fastapi import FastAPI
app = FastAPI()
@app.get("/")
async def root():
news = search.search("jhu", "Data")
return news | [
"noreply@github.com"
] | noreply@github.com |
ec8e3c52a8028fbf811d28b44f8aa0bc0585ac61 | 01d6deac62519413f620e4f8291cecd7c2017cc5 | /Hackathon 9/main.py | 64236ac4f7ef1ff8faa59981e731030522047adc | [] | no_license | ecustodio123/Hackathon-9 | 1ff455adb85a991dd3b803e8edcd86c41fb94554 | e86fbec9e2bd1d3bb982aed30bc10a144a56f7bf | refs/heads/master | 2022-12-27T01:05:10.166469 | 2020-10-04T10:38:51 | 2020-10-04T10:38:51 | 301,102,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,424 | py | from helpers.menu import Menu
from controllers.rol_controller import Roles_controller
from controllers.almacen_controller import Almacen_controller
from controllers.almacen_total_controller import Almacen_total_controller
from controllers.cajero_controller import Cajero_controller
def iniciar_app():
    """
    Console entry point for the minimarket system.

    Shows the role-selection menu, authenticates the chosen role through
    Roles_controller and dispatches to the admin, cashier or warehouse
    controller. Sub-menus that want to return to the start re-invoke
    iniciar_app() recursively (each nesting level prints the farewell
    message as the stack unwinds). KeyboardInterrupt and controller errors
    are caught so the program always exits with a printed message.

    Returns:
        None
    """
    try:
        print('''
        ===============================
            Minimarket - El Grupo 03
        ===============================
        ''')
        print("Bienvenido, por favor indique su cargo\n")
        opciones_cargo = ["Administrador", "Cajero", "Almacen", "Salir"]
        opcion = Menu(opciones_cargo).show()
        if opcion == 1:
            # Administrator: second login, then the admin sub-menu.
            acceso = Roles_controller()
            acceso.inicio_admin()
            if acceso.admin:
                opciones_admin = ["Actualizar y/o modificar los registros", "Ver Productos", "Ver Reporte de ventas por día", "Ver reporte de ventas por mes", "Salir"]
                opcion = Menu(opciones_admin).show()
                if opcion == 1:
                    acceso = Roles_controller()
                    acceso.menu()
                    if acceso.salir:
                        iniciar_app()
                elif opcion == 2:
                    inventario = Almacen_total_controller()
                    inventario.listar_productos()
                    iniciar_app()
                elif opcion == 3:
                    ventas = Cajero_controller()
                    ventas.reporte_dia()
                    iniciar_app()
                elif opcion == 4:
                    ventas = Cajero_controller()
                    ventas.reporte_mes()
                    iniciar_app()
        elif opcion == 2:
            # Cashier role.
            acceso = Roles_controller()
            acceso.inicio_cajero()
            if acceso.cajero:
                caja = Cajero_controller()
                caja.menu()
                if caja.salir:
                    iniciar_app()
        elif opcion == 3:
            # Warehouse role.
            acceso = Roles_controller()
            acceso.inicio_almacen()
            if acceso.almacen:
                deposito = Almacen_controller()
                deposito.menu()
                if deposito.salir:
                    iniciar_app()
        print("\nGracias por utilizar el sistema\n")
    except KeyboardInterrupt:
        print('\n Se interrumpio la aplicación')
    except Exception as e:
        print(f'{str(e)}')


# Launch the application when the module is executed.
iniciar_app()
"ecustodioflores@gmail.com"
] | ecustodioflores@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.