blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
55affa6ed141e902c3a98495c36a4b86168d8cfa | c054034f106dc2fae681cff4d1fcfca1ffbea27c | /ROS/rosserial/install/lib/python2.7/dist-packages/rosserial_msgs/msg/_myTest.py | aa79fd6af0a4008ef8ee3bc38a1080b9adb87c5c | [] | no_license | rvariverpirate/WiFly | 8117fec671801bae8204fd7590873e99e97f4e3a | eeb9815de91d86205984e2456aeaa5d430823caa | refs/heads/master | 2023-04-28T07:06:10.709854 | 2021-05-19T03:28:02 | 2021-05-19T03:28:02 | 328,256,759 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,736 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from rosserial_msgs/myTest.msg. Do not edit."""
import sys
# A comparison already yields a bool; the `True if ... else False` wrapper was redundant.
python3 = sys.hexversion > 0x03000000
import genpy
import struct
class myTest(genpy.Message):
    # genpy-generated ROS message class for rosserial_msgs/myTest ("Do not edit").
    # Wire format: two length-prefixed strings, then uint8 age + uint32 score.
    _md5sum = "19b7d3627de9a4555d2aaa19dbf70a1d"
    _type = "rosserial_msgs/myTest"
    _has_header = False  # flag to mark the presence of a Header object
    _full_text = """string my_name
string last_name
uint8 age
uint32 score
"""
    __slots__ = ['my_name','last_name','age','score']
    _slot_types = ['string','string','uint8','uint32']

    def __init__(self, *args, **kwds):
        """
        Constructor. Any message fields that are implicitly/explicitly
        set to None will be assigned a default value. The recommend
        use is keyword arguments as this is more robust to future message
        changes. You cannot mix in-order arguments and keyword arguments.
        The available fields are:
        my_name,last_name,age,score
        :param args: complete set of field values, in .msg order
        :param kwds: use keyword arguments corresponding to message field names
        to set specific fields.
        """
        if args or kwds:
            super(myTest, self).__init__(*args, **kwds)
            # message fields cannot be None, assign default values for those that are
            if self.my_name is None:
                self.my_name = ''
            if self.last_name is None:
                self.last_name = ''
            if self.age is None:
                self.age = 0
            if self.score is None:
                self.score = 0
        else:
            self.my_name = ''
            self.last_name = ''
            self.age = 0
            self.score = 0

    def _get_types(self):
        """
        internal API method
        """
        return self._slot_types

    def serialize(self, buff):
        """
        serialize message into buffer
        :param buff: buffer, ``StringIO``
        """
        try:
            # Each string is written as a little-endian uint32 length prefix
            # followed by its UTF-8 bytes.
            _x = self.my_name
            length = len(_x)
            if python3 or type(_x) == unicode:
                _x = _x.encode('utf-8')
                length = len(_x)
            buff.write(struct.pack('<I%ss'%length, length, _x))
            _x = self.last_name
            length = len(_x)
            if python3 or type(_x) == unicode:
                _x = _x.encode('utf-8')
                length = len(_x)
            buff.write(struct.pack('<I%ss'%length, length, _x))
            _x = self
            # age (uint8) and score (uint32) packed together as <BI.
            buff.write(_get_struct_BI().pack(_x.age, _x.score))
        except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
        except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

    def deserialize(self, str):
        """
        unpack serialized message in str into this message instance
        :param str: byte array of serialized message, ``str``
        """
        try:
            end = 0
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            if python3:
                self.my_name = str[start:end].decode('utf-8')
            else:
                self.my_name = str[start:end]
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            if python3:
                self.last_name = str[start:end].decode('utf-8')
            else:
                self.last_name = str[start:end]
            _x = self
            start = end
            end += 5  # 1 byte age + 4 bytes score
            (_x.age, _x.score,) = _get_struct_BI().unpack(str[start:end])
            return self
        except struct.error as e:
            raise genpy.DeserializationError(e)  # most likely buffer underfill

    def serialize_numpy(self, buff, numpy):
        """
        serialize message with numpy array types into buffer
        :param buff: buffer, ``StringIO``
        :param numpy: numpy python module
        """
        # Identical to serialize(): this message has no array fields, so the
        # numpy module argument is unused.
        try:
            _x = self.my_name
            length = len(_x)
            if python3 or type(_x) == unicode:
                _x = _x.encode('utf-8')
                length = len(_x)
            buff.write(struct.pack('<I%ss'%length, length, _x))
            _x = self.last_name
            length = len(_x)
            if python3 or type(_x) == unicode:
                _x = _x.encode('utf-8')
                length = len(_x)
            buff.write(struct.pack('<I%ss'%length, length, _x))
            _x = self
            buff.write(_get_struct_BI().pack(_x.age, _x.score))
        except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
        except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

    def deserialize_numpy(self, str, numpy):
        """
        unpack serialized message in str into this message instance using numpy for array types
        :param str: byte array of serialized message, ``str``
        :param numpy: numpy python module
        """
        # Identical to deserialize(): no array fields, numpy is unused.
        try:
            end = 0
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            if python3:
                self.my_name = str[start:end].decode('utf-8')
            else:
                self.my_name = str[start:end]
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            if python3:
                self.last_name = str[start:end].decode('utf-8')
            else:
                self.last_name = str[start:end]
            _x = self
            start = end
            end += 5
            (_x.age, _x.score,) = _get_struct_BI().unpack(str[start:end])
            return self
        except struct.error as e:
            raise genpy.DeserializationError(e)  # most likely buffer underfill
# Shared pre-compiled uint32 unpacker used for the string length prefixes.
_struct_I = genpy.struct_I
def _get_struct_I():
    global _struct_I
    return _struct_I
_struct_BI = None
def _get_struct_BI():
global _struct_BI
if _struct_BI is None:
_struct_BI = struct.Struct("<BI")
return _struct_BI
| [
"cannellajs2@vcu.edu"
] | cannellajs2@vcu.edu |
445617559274b877d9caaaded1e30307947b51ec | ac9e79b04eadb95497b99c30444d952e6068f18f | /dockermap/map/policy/__init__.py | 45f1833b73ce0c9c2dd07d072c74315426a84c27 | [
"MIT"
] | permissive | vijayshan/docker-map | ff58f5c8aba15b8d157478a6614c6d6681de1e61 | a222c92947cbc22aef727c12f39fb93b0b192bc7 | refs/heads/master | 2021-01-17T03:16:31.693681 | 2015-09-14T08:20:55 | 2015-09-14T08:20:55 | 42,375,505 | 1 | 0 | null | 2015-09-12T22:31:07 | 2015-09-12T22:31:07 | null | UTF-8 | Python | false | false | 204 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .actions import ACTION_ATTACHED_FLAG, ACTION_DEPENDENCY_FLAG
from .simple import SimplePolicy
from .resume import ResumeUpdatePolicy
| [
"matthias@erll.de"
] | matthias@erll.de |
4d3f3def02a2cfd603f45cd0d0fc6c279e282c46 | 7dc47da21e52d9da6b0b26eeaee4b4953e058c4a | /capture_raw_images_Malaga_yaw90.py | 636b8ab3a67d07cce7479d739338c20e137bf684 | [] | no_license | saiabinesh/opencv-addons | 987718e068aa9c552691149bd39fece5ee1daa32 | 7d0e32ddd902346251ec03b664026ce75892825f | refs/heads/master | 2020-12-01T20:53:07.866719 | 2019-12-29T15:23:06 | 2019-12-29T15:23:06 | 230,766,178 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,694 | py | # In settings.json first activate computer vision mode:
# https://github.com/Microsoft/AirSim/blob/master/docs/image_apis.md#computer-vision-mode
# Working on the newer environement with new houses, cars and river (extended by Sean).
# changed image folder in line 39. Commenting out
import setup_path
import airsim
import os
import numpy as np
client = airsim.VehicleClient()
client.confirmConnection()
# below code didn't work beacause currently in computerVision mode
# client.moveToPositionAsync(273, -7167, 1036, 10, 0).join() # , DrivetrainType.ForwardOnly, YawMode(False,0), 20, 1)
#Change the code below to move the camera to desired position
i =1
# for z in np.linspace(-30,-100,10):
# for x in np.linspace(2.93,-37.37, 10):
# for y in np.linspace(-88.17, -192.27,10):
###############################Trying to change weather ###############################################
# client.simEnableWeather(True)
# airsim.wait_key('Press any key to enable rain at 25%')
# client.simSetWeatherParameter(airsim.WeatherParameter.Rain, 0.25);
# exit()
#Trying different cameras with just 8 images
# for camera_number in ["0","1","2","3","4"]:
# for z in np.linspace(-30,-100,3):
# for x in np.linspace(-150,-30, 3):
# for y in np.linspace(-60,-240,3):
# client.simSetVehiclePose(airsim.Pose(airsim.Vector3r(x,y,z), airsim.to_quaternion(0,0,0)), True)
# #print(client.simGetCameraInfo("0"))
# responses = client.simGetImages([
# # airsim.ImageRequest("0", airsim.ImageType.Segmentation, True), #depth in perspective projection
# # airsim.ImageRequest("0", airsim.ImageType.Segmentation, False, False)]) #scene vision image in uncompressed RGBA array
# airsim.ImageRequest(camera_number, airsim.ImageType.Scene, False, False)])
# # airsim.ImageRequest("3", airsim.ImageType.Segmentation, False, False)])
# #print('Retrieved images: %d', len(responses))
# #save segmentation images in various formats
# for idx, response in enumerate(responses):
# filename = 'D:/AirSim/New/Images/Images_master_v2/image_' +str(i)+'_raw'
# if response.pixels_as_float:
# #print("Type %d, size %d" % (response.image_type, len(response.image_data_float)))
# airsim.write_pfm(os.path.normpath(filename + '.pfm'), airsim.get_pfm_array(response))
# elif response.compress: #png format
# #print("Type %d, size %d" % (response.image_type, len(response.image_data_uint8)))
# airsim.write_file(os.path.normpath(filename + '.png'), response.image_data_uint8)
# else: #uncompressed array - numpy demo
# #print("Type %d, size %d" % (response.image_type, len(response.image_data_uint8)))
# img1d = np.fromstring(response.image_data_uint8, dtype=np.uint8) #get numpy array
# img_rgba = img1d.reshape(response.height, response.width, 4) #reshape array to 4 channel image array H X W X 4
# img_rgba = np.flipud(img_rgba) #original image is flipped vertically
# airsim.write_png(os.path.normpath(filename + '.png'), img_rgba) #write to png
# if i%10 == 0:
# print("Image count = ", i)
# i = i +1
# # exit()
import math

ninety_degrees_in_radians = (math.pi) / 2
forty_five_degrees_in_radians = (math.pi) / 4
# Pitch/roll 0, yaw 45 deg on camera 0.  NOTE(review): the file name says
# "yaw90" but the sweep below actually uses 45 degrees — confirm intent.
client.simSetCameraOrientation(0, airsim.to_quaternion(0, 0, forty_five_degrees_in_radians))  # radians

# Sweep the vehicle over a 4 x 10 x 10 grid of poses and save one Scene
# image from camera "3" at each pose.
for z in np.linspace(-10, -50, 4):
    for x in np.linspace(-150, 0, 10):
        for y in np.linspace(-60, -240, 10):
            client.simSetVehiclePose(airsim.Pose(airsim.Vector3r(x, y, z), airsim.to_quaternion(0, 0, forty_five_degrees_in_radians)), True)
            responses = client.simGetImages([
                airsim.ImageRequest("3", airsim.ImageType.Scene, False, False)])
            for idx, response in enumerate(responses):
                filename = 'D:/AirSim/New/Images/Images_master_v4_yaw_45/image_' + str(i) + '_raw'
                if response.pixels_as_float:
                    airsim.write_pfm(os.path.normpath(filename + '.pfm'), airsim.get_pfm_array(response))
                elif response.compress:  # png format
                    airsim.write_file(os.path.normpath(filename + '.png'), response.image_data_uint8)
                else:  # uncompressed RGBA byte array
                    # BUG FIX: np.fromstring is deprecated/removed for binary
                    # input; np.frombuffer is the supported equivalent.
                    img1d = np.frombuffer(response.image_data_uint8, dtype=np.uint8)
                    img_rgba = img1d.reshape(response.height, response.width, 4)  # H x W x 4
                    img_rgba = np.flipud(img_rgba)  # original image is flipped vertically
                    airsim.write_png(os.path.normpath(filename + '.png'), img_rgba)
            if i % 10 == 0:
                print("Image count = ", i)
            i = i + 1
# # #find unique colors
# # print(np.unique(img_rgba[:,:,0], return_counts=True)) #red
# # print(np.unique(img_rgba[:,:,1], return_counts=True)) #green
# # print(np.unique(img_rgba[:,:,2], return_counts=True)) #blue
# # print(np.unique(img_rgba[:,:,3], return_counts=True)) #blue
# print(x,y,z)
| [
"Sai Abinesh@rocsafe4.it.nuigalway.ie"
] | Sai Abinesh@rocsafe4.it.nuigalway.ie |
4d18b04e23392f0889611e78983edc29acd208c6 | e881b61bf15aa945902055ca5aeae4c261c660fb | /src/fiesta/core/schema.py | 935e9b648c955195389f7367d8bcfae35905babc | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | lerooze/django-fiesta | 10653da761bb6b903a809af5ca353ddd038fa3f9 | d521f50bcdd3d40e91f0474ec2fa7e256758e0a5 | refs/heads/master | 2021-06-28T12:30:58.843384 | 2019-10-30T23:09:01 | 2019-10-30T23:09:01 | 186,468,978 | 0 | 0 | NOASSERTION | 2020-11-18T22:14:08 | 2019-05-13T17:48:52 | Python | UTF-8 | Python | false | false | 1,308 | py | import os.path
from lxml import etree
from rest_framework.exceptions import ParseError
from ..settings import api_settings
from .exceptions import NotImplementedError
class Schema:
    """Wraps a parsed SDMX-ML message root and resolves the XML schema used
    to validate it."""

    def __init__(self, root):
        # `root` is the element at the top of the parsed message tree.
        self.root = root

    @property
    def schema(self):
        return self.get_schema(self.root)

    def get_schema(self, root):
        """Returns xml schema to validate message
        Must redefine in subclasses"""

    # NOTE(review): defined without `self` although placed among the methods —
    # presumably an unfinished stub; confirm intended scope before calling.
    def gen_structure_specific_schema(structures):
        pass

    def get_main_schema(self, version, schema_file):
        """Returns the schema to validate files"""
        path = os.path.join(
            api_settings.DEFAULT_SCHEMA_PATH, 'sdmx', 'ml', version, schema_file)
        try:
            tree = etree.parse(path)
        except (etree.ParseError, ValueError) as exc:
            # Surface schema-file problems as a DRF ParseError.
            raise ParseError('XML schema parse error - %s' % exc)
        return etree.XMLSchema(tree)
class Schema21(Schema):
    """Schema resolver for SDMX-ML 2.1 messages."""

    def get_schema(self, root):
        qname = etree.QName(root.tag)
        if qname.localname.endswith('StructureSpecific'):
            # BUG FIX: the original literal lacked the f-prefix (so the
            # placeholder was never interpolated) and referenced
            # `tag.localname` on a plain string; bind the QName instead.
            raise NotImplementedError(
                detail=f'Schema generation for a {qname.localname} not yet implemented')
        else:
            schema = self.get_main_schema('2_1', 'SDMXMessage.xsd')
        return schema
| [
"al459@columbia.edu"
] | al459@columbia.edu |
23a539d75ebc5301644d07cab9022a2f157fbbf4 | 4640d218953e7d342c6eb49288e545e247c6183b | /app.py | cf441e203a6d8f3280e91fa9e3c8c0ca44ee19a8 | [] | no_license | jeffroche/jamhop | 1bba1f9b597ed67d477cb2750ee9c6cb4d5ab172 | 2bdb956a462f7500fe5bc1511fd2a4e1a20fecb7 | refs/heads/master | 2021-01-20T12:21:16.241050 | 2015-09-01T01:55:17 | 2015-09-01T01:55:17 | 41,710,848 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,432 | py | import datetime as dt
from flask import Flask, render_template, abort, request, redirect, url_for
import lastfm
import logging
app = Flask(__name__)

# Set up logging: one stream handler with a timestamped, file/line format,
# at INFO level by default.
handler = logging.StreamHandler()
log_fmt = logging.Formatter(
    '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]')
handler.setFormatter(log_fmt)
app.logger.addHandler(handler)
app.logger.setLevel(logging.INFO)
@app.route('/', methods=['GET', 'POST'])
def home():
    """Landing page: GET renders the form; POST redirects to the user page."""
    if request.method == 'POST' and request.form['username']:
        url = url_for('user_page', username=request.form['username'])
        # BUG FIX: was a stray Python 2 `print url` statement (a SyntaxError
        # under Python 3); route debug output through the app logger instead.
        app.logger.debug("Redirecting to %s", url)
        return redirect(url)
    return render_template('home.html')
@app.route('/<username>/')
def user_page(username):
    """Render a user's chart page, surfacing Last.fm failures inline.

    Unknown users yield a 404; other Last.fm errors are shown on the page.
    """
    charts = None
    error_message = None
    try:
        charts = lastfm_snapshot(username)
    except lastfm.LastFMException as exc:
        error_message = str(exc)
        app.logger.error("%s - %s" % (username, error_message))
        if 'User not found' in error_message:
            return abort(404)
    app.logger.info("Loaded data for %s" % username)
    return render_template('user.html', username=username, charts=charts,
                           error=error_message)
def lastfm_snapshot(username):
    """Build top-album charts for several look-back windows.

    Returns a dict keyed by period name; each value is the result of
    lastfm.top_albums() for a start date that many days in the past.
    """
    # Fetch the chart list once and reuse it for every period (the original
    # also shadowed this variable with the result dict).
    chart_list = lastfm.chart_list(username)
    today = dt.date.today()
    # Period name -> look-back length in days (6 "months" of 30 days, then
    # 1-5 years of 365 days, matching the original hard-coded dates).
    periods = {
        'six_months': 6 * 30,
        'one_year': 365,
        'two_year': 2 * 365,
        'three_year': 3 * 365,
        'four_year': 4 * 365,
        'five_year': 5 * 365,
    }
    return {
        name: lastfm.top_albums(username, today - dt.timedelta(days=days),
                                charts=chart_list)
        for name, days in periods.items()
    }
if __name__ == '__main__':
    # BUG FIX: set the level BEFORE app.run() — run() blocks until the
    # server stops, so the original setLevel(DEBUG) call never took effect.
    app.logger.setLevel(logging.DEBUG)
    app.run(debug=True)
| [
"jeff.roche@gmail.com"
] | jeff.roche@gmail.com |
a7a8503738839b5d8221d5dd9c72fd2f04c4d38c | 7e0f5ac29cea20ad258b9069ef46ee694b1aed61 | /pp1/01-02 zad/Types & Variables/26.py | 7179b386783374c30abc17058fa1f8e6f1384874 | [] | no_license | lukasz009/pp1 | 33f951bc4cfc7943b0f65a7da99d8f27e193d23d | e6a3e240cc1704fd86508bd9979b0805689cac8b | refs/heads/master | 2020-08-20T23:31:32.399572 | 2019-12-08T18:46:58 | 2019-12-08T18:46:58 | 216,077,678 | 1 | 0 | null | 2019-10-18T17:43:34 | 2019-10-18T17:43:34 | null | UTF-8 | Python | false | false | 828 | py | # BMI
def oblicz_bmi(waga, wzrost):
    """Return (BMI value, Polish category label) for weight in kg, height in cm."""
    bmi = waga / (wzrost / 100) ** 2
    # The thresholds are checked in ascending order, so each elif only needs
    # the upper bound.
    if bmi < 18.5:
        kategoria = 'niedowaga'
    elif bmi < 25:
        kategoria = 'waga prawidłowa'
    elif bmi < 30:
        kategoria = 'nadwaga'
    else:
        kategoria = 'otyłość'
    return bmi, kategoria
class LessThanOneError(Exception):
    """Raised when a user-supplied value is <= 0."""
    pass
# Interactive entry point: read height and weight, validate them, print BMI.
try:
    wzrost = float(input('Podaj wzrost w cm: '))
    if wzrost <= 0:
        raise LessThanOneError
    waga = float(input('Podaj wagę w kg: '))
    if waga <= 0:
        raise LessThanOneError
    # NOTE(review): oblicz_bmi is evaluated twice here; the pair could be
    # computed once and unpacked instead.
    print('Wskaźnik BMI: {0:.2f} ({1})'.format(oblicz_bmi(waga,wzrost)[0],oblicz_bmi(waga,wzrost)[1]))
except ValueError:
    print('Podana wartość jest nieprawidłowa!')
except LessThanOneError:
    print('Wartość nie może być mniejsza od 1!')
"noreply@github.com"
] | noreply@github.com |
8c5e18b01ac0690c7c7bb348b0e0d6c88a0697eb | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/222/users/4084/codes/1578_1331.py | 4c3e362ab144d591bf2ec7a790a959912f033491 | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 58 | py | g = 3
m = 50
# divmod yields the integer quotient and remainder of m / g in one call,
# instead of computing them separately with // and %.
conta, i = divmod(m, g)
print(conta)
print(i)
| [
"jvlo@icomp.ufam.edu.br"
] | jvlo@icomp.ufam.edu.br |
83d191a418f9d45dec785f304f061904fb28c794 | 94ea2ed3dbba96ac1edaa5460bda78ed17e0fafa | /Day_9/Day_9.py | 600296253840709b6c00597b53f20901f5bed1d9 | [] | no_license | bray4168/AoC-2020 | 576ef4a7fc1656410457f7015ffca243197dcf28 | 713648df3ee71918e04d33fba11b29f0def55bbe | refs/heads/master | 2023-02-01T09:38:24.553618 | 2020-12-15T05:30:17 | 2020-12-15T05:30:17 | 317,672,704 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,340 | py | file_array = []
# Read the puzzle input into a list of stripped strings, one number per line.
with open('Day_9/input.txt', 'r') as file:
    for line in file:
        file_array.append(line.strip())

# XMAS rule: each value must be a pair-sum from the previous 25 numbers.
preamble_range = 25
# Filled in by puzzle 1 below, then consumed by puzzle 2.
invalid_number = 0
def form_sum_array(index):
    """Every pairwise sum of the preamble window before file_array[index]."""
    window = range(index - preamble_range, index)
    sums = []
    for first in window:
        for second in window:
            # Skip pairing an element with itself; (a, b) and (b, a) both
            # appear, matching the original's ordering.
            if first != second:
                sums.append(int(file_array[first]) + int(file_array[second]))
    return sums
def find_number_set(index):
    """Accumulate a contiguous run of file_array values starting at `index`.

    If the run sums exactly to invalid_number, return min+max of the run;
    if it overshoots first, return 0 so the caller tries the next start.
    """
    total = 0
    run = []
    while total < invalid_number:
        value = int(file_array[index])
        run.append(value)
        total += value
        index += 1
    # BUG FIX: the original tracked min/max with an if/elif chain that never
    # let the first element reach the `max` branch (and shadowed the builtins
    # sum/min/max), so a run whose first value was its largest returned a
    # wrong answer.  min()/max() over the collected run is always correct.
    if total == invalid_number and run:
        return max(run) + min(run)
    return 0
# Puzzle 1: find the first number that is NOT a sum of two distinct values
# drawn from the 25 numbers immediately before it.
for index, number in enumerate(file_array):
    if index < preamble_range:
        continue
    else:
        sum_array = form_sum_array(index)
        if int(number) in sum_array:
            continue
        else:
            invalid_number = int(number)
            break
# Puzzle 2: scan every start index for a contiguous run summing to the
# invalid number; find_number_set returns 0 when no run starts there.
# NOTE(review): the loop variable `sum` shadows the builtin of that name.
puzzle_2_value = 0
for index, number in enumerate(file_array):
    sum = find_number_set(index)
    if sum == 0:
        continue
    else:
        puzzle_2_value = sum
        break

print("Puzzle 1 solution: " + str(invalid_number))
print("Puzzle 2 solution: " + str(puzzle_2_value))
"bray4168@yahoo.com"
] | bray4168@yahoo.com |
c43bf63dd431b624bc239ea512a4bc391cfc03e6 | 34ff5b80155b0d5af4e49fd0ad542c13c6e06e40 | /한유경/[21.01.19]1002.py | 2b26dc8649a8a85c9d93bd1767da73a87ed8e50f | [] | no_license | 4RG0S/2021-Winter-Jookgorithm | 6a8d0ef8d320ed2abbd94fe4d03f4fe7f4f69a11 | 65c39147d6b403a44f2fc67c09c2b38344659b39 | refs/heads/main | 2023-03-23T05:31:35.672637 | 2021-03-15T11:24:11 | 2021-03-15T11:24:11 | 326,657,590 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 715 | py | # 터렛
import sys
import math
input = sys.stdin.readline
N = int(input())
for _ in range(N):
    x1, y1, r1, x2, y2, r2 = map(int, input().split())
    # BUG FIX: the original compared sqrt-based floats with ==, which can
    # misjudge the tangent cases.  Comparing squared distances keeps the
    # whole computation in exact integer arithmetic.
    d2 = (x1 - x2) ** 2 + (y1 - y2) ** 2
    sum2 = (r1 + r2) ** 2
    diff2 = (r1 - r2) ** 2
    if d2 == 0 and r1 == r2:
        print(-1)  # concentric with equal radii: infinitely many points
    elif d2 > sum2 or d2 < diff2:
        print(0)   # separate circles, or one strictly inside the other
    elif d2 == sum2 or d2 == diff2:
        print(1)   # externally or internally tangent
    else:
        print(2)   # two intersection points
"ygo65312@naver.com"
] | ygo65312@naver.com |
0b4cebed18de8ee30fcdb48f5e451a52446ad676 | 5b93e6cab9f054dd3bcb33dfe9072b0f20a6e7b8 | /train.py | c035a7351c526ae98b5d2ea9e35bee49058e78b9 | [] | no_license | SKHHHshike/Mouth_img_seg | 38567458fbc5e7f0ed57a6aac4e173fd422adac2 | 9c94101a9af827c8f64afa8f3dbce195fbcefb90 | refs/heads/master | 2021-05-24T07:11:32.738875 | 2020-05-27T03:38:29 | 2020-05-27T03:38:29 | 253,447,625 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,509 | py | from model import *
from model2 import *
from data import *
from keras.callbacks import ModelCheckpoint
from model import unet
#os.environ["CUDA_VISIBLE_DEVICES"] = "0"
from IntraoralArea_project.data import trainGenerator
from IntraoralArea_project.model import get_model
n_classes = 2
# Set the input width/height, the model variant to build, and the number of
# segmentation classes.  (Translated from the original Chinese comment.)
model = get_model(width =256, height = 128, model_name = 'unet_mini', n_classes = n_classes)
# NOTE(review): Keras Model.summary() prints the summary and returns None,
# so `param` is presumably always None — confirm before relying on it.
param = model.summary()
# model = unet()
def train():
    """Train the segmentation model with on-the-fly augmentation.

    Pulls batches from data/intraoralArea/train_252_0.5 via trainGenerator
    and checkpoints the lowest-loss weights to intraoralArea.hdf5.
    """
    # Augmentation parameters forwarded to the Keras ImageDataGenerator.
    data_gen_args = dict(rotation_range=0.2,
                         width_shift_range=0.05,
                         height_shift_range=0.05,
                         shear_range=0.05,
                         zoom_range=0.05,
                         horizontal_flip=True,
                         fill_mode='nearest')
    # Images are resized to the model's input resolution, masks to its
    # (possibly different) output resolution.
    img_target_size = (model.input_height,model.input_width)
    mask_target_size = (model.output_height,model.output_width)
    myGene = trainGenerator(2,'data/intraoralArea/train_252_0.5','image','label',
                            data_gen_args, n_classes=n_classes,target_size = img_target_size,
                            mask_target_size = mask_target_size, save_to_dir = None)
    # Save only when the monitored training loss improves.
    model_checkpoint = ModelCheckpoint('intraoralArea.hdf5', monitor='loss',verbose=1, save_best_only=True)
    model.fit_generator(myGene, steps_per_epoch=300, epochs=8, callbacks=[model_checkpoint])


if __name__=='__main__':
    train()
"471791040@qq.com"
] | 471791040@qq.com |
f714c1d17e7f59e7bbd574152622c59b0c6c4889 | 2a84e915ba499e97c6d087305aa928c7602aee76 | /LineSketch/LineSketch.py | ffa4032bbac1d72dc701e09deefe78a2de4d4702 | [
"MIT"
] | permissive | linny006/Amazing-Python-Scripts | 05c5e9a5e694f92da2749be6139f1148404c6f98 | d3369b9bba2cf7820513465ee05189573f13bb9d | refs/heads/master | 2023-08-28T08:57:30.015410 | 2021-11-03T16:17:40 | 2021-11-03T16:17:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 560 | py | import turtle as t
# Simple keyboard-driven sketching turtle:
#   f/d move, c/a rotate (held keys repeat), x clears and re-centres.
pen = t.Turtle()
screen = t.Screen()


def move_forward():
    pen.forward(10)


def move_backward():
    pen.backward(10)


def clockwise():
    pen.setheading(pen.heading() - 10)


def anticlockwise():
    pen.setheading(pen.heading() + 10)


def clear():
    """Erase the drawing and return the pen to the origin without a trail."""
    pen.clear()
    pen.penup()
    pen.home()
    pen.pendown()


screen.listen()
screen.onkey(key="f", fun=move_forward)
screen.onkey(key="d", fun=move_backward)
screen.onkeypress(key="c", fun=clockwise)
screen.onkeypress(key="a", fun=anticlockwise)
screen.onkey(key="x", fun=clear)
screen.exitonclick()
| [
"anushka.pathak17@gmail.com"
] | anushka.pathak17@gmail.com |
71964f4f03a88c44231adb44e53196613a314941 | 9b6133ea731aaa6260f4babcc749ce4fa033b85c | /lesson03/hw_3_3.py | 5ce8aa423d46a89efe377ed3b3d5f36b699515c2 | [] | no_license | Alexkvintin/PyBase08 | 1585031c80d8430e0fbadf68bfb30ce715b7f72b | f29337065cd7e4e79911d46ab8e8bbd7e2bd2817 | refs/heads/master | 2020-07-29T20:40:43.076130 | 2019-10-22T18:23:48 | 2019-10-22T18:23:48 | 209,952,032 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,179 | py | def circle(x):
y = int(x ** 2)
s = 3.14 * y
print("площадь круга равна:", s)
def triangle(x, y, z):
    """Print the area of a triangle with sides x, y, z via Heron's formula."""
    import math
    polowa_obwodu = float((x + y + z) / 2)
    pole = math.sqrt(polowa_obwodu * (polowa_obwodu - x)
                     * (polowa_obwodu - y) * (polowa_obwodu - z))
    print("площадь треугольника равна:", pole)
def rectangle(x, y):
    """Print the area of an x-by-y rectangle, truncated to an integer."""
    pole = int(x * y)
    print("Площадь прямоугольника равна", pole)
# Interactive menu: pick a shape, enter its dimensions, print the area.
# Pressing ENTER at the menu makes int('') raise ValueError, which exits.
f = input("для начала работы програмы нажмите ENTER ")
while f is not None:
    try:
        a = int(input(""" Площадь какой фигуры вы хотите вычеслить ?
Круг (1)
Треугольник (2)
Квадрат (3)
Для выхода нажмите ENTER"""))
        if a == 1:
            b = float(input("Ввудите радиус круга:"))
            circle(b)
            k = int(input("для продолжения нажмите 1 для выхода 0: "))
            if k == 1:
                continue
            elif k == 0:
                break
        elif a == 2:
            b = float(input("Введите размер первой стороны треугольника:"))
            c = float(input("Введите размер второй стороны треугольника:"))
            d = float(input("Введите размер третей стороны треугольника:"))
            triangle(b, c, d)
            k = int(input("для продолжения нажмите 1 для выхода 0: "))
            if k == 1:
                continue
            elif k == 0:
                break
        elif a == 3:
            d = float(input("Введите длину прямоугольника:"))
            c = float(input("Введите ширину прямоугольника:"))
            rectangle(d, c)
            k = int(input("для продолжения нажмите 1 для выхода 0: "))
            if k == 1:
                continue
            elif k == 0:
                break
    except ValueError:
        # BUG FIX: was a bare `except:` that silently swallowed everything,
        # including KeyboardInterrupt and real bugs.  Only a failed numeric
        # conversion (the intended "press ENTER to quit" path) exits now.
        break
print("работа программы завершена")
| [
"noreply@github.com"
] | noreply@github.com |
e2a9c106808551355d57ac6954bab4a7a5b9a666 | 440332ab0e7fee9ce1a27a6796629a1fbb0385cf | /Go/tests/test_get_neighbours.py | 4f0b0fdbac27504c301e46d0d55f8b0c05f497ab | [] | no_license | SveinungOverland/ZeroGO | f7123419499599e6ff350006e5ad8e9c84809eea | e979026711407ecc480e7d20ee27077bf693c1ea | refs/heads/master | 2020-08-15T06:32:34.749694 | 2019-11-21T18:02:15 | 2019-11-21T18:02:15 | 215,293,642 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 729 | py | from ..go import get_neighbours
from .test_find_group import group_in_set
import numpy as np
def test_get_neighbours():
    """Neighbours of (2, 2) carry each adjacent point's coordinates and value."""
    grid = np.array([
        [0, 0, 0, 0],
        [0, 1, 0, 0],
        [0, 1, 1, 1],
        [0, 0, 1, 0],
    ])
    neighbours = get_neighbours(grid, 2, 2)
    print(neighbours)
    expected = {(2, 1, 1), (2, 3, 1), (1, 2, 0), (3, 2, 1)}
    assert group_in_set(neighbours, expected)
def test_get_neighbours_with_point_type():
    """Filtering with point_type=1 drops the empty (1, 2, 0) neighbour.

    BUG FIX: this was a second `def test_get_neighbours()` that shadowed the
    first one, so pytest only ever collected one of the two tests; renamed
    so both run.
    """
    board = np.array([
        [0, 0, 0, 0],
        [0, 1, 0, 0],
        [0, 1, 1, 1],
        [0, 0, 1, 0],
    ])
    group = get_neighbours(board, 2, 2, point_type=1)
    print(group)
    assert group_in_set(group, {
        (2, 1, 1),
        (2, 3, 1),
        (3, 2, 1),
    })
| [
"andershallemiversen@hotmail.com"
] | andershallemiversen@hotmail.com |
c9cf7689a50286a5c1017bfd446fa36d67ab48be | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02663/s577432279.py | fb0a161b678de985a05e44a3e1290554f0f0f831 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 121 | py | import sys
input = sys.stdin.readline

# Convert both clock times to minutes-since-midnight, then subtract the
# K minutes the task takes from the span between waking and sleeping.
h1, m1, h2, m2, k = map(int, input().split())
wake = h1 * 60 + m1
sleep = h2 * 60 + m2
print(sleep - wake - k)
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
5d9851eedf62c0ea69485d51ed48d96036158b5a | 717d369ce117dc8938a4dee13a62364e13fcd40b | /vodafone_sample_api/vodafone_api/api/resources/user.py | 734ec9f8ad43616e56f9d1bda776326827f51e69 | [] | no_license | tentengr/sample_app | 750b1e615d695c6a01733d1c6bdb3f6e385852fd | 994b9787a6b9782d3122942eaf364ae2cffcf877 | refs/heads/master | 2022-04-02T03:17:28.373935 | 2020-02-06T01:45:34 | 2020-02-06T01:45:34 | 237,822,053 | 0 | 0 | null | 2020-02-06T01:45:36 | 2020-02-02T19:11:37 | Python | UTF-8 | Python | false | false | 3,942 | py | from flask import request
from flask_restful import Resource
from flask_jwt_extended import jwt_required
from vodafone_api.models import User
from vodafone_api.extensions import ma, db
from vodafone_api.commons.pagination import paginate
class UserSchema(ma.ModelSchema):
    # Primary key is server-assigned: serialized out, never accepted in.
    id = ma.Int(dump_only=True)
    # Write-only: required on input, never serialized back to the client.
    password = ma.String(load_only=True, required=True)

    class Meta:
        model = User
        sqla_session = db.session
class UserResource(Resource):
    """Single object resource
    ---
    get:
      tags:
        - api
      parameters:
        - in: path
          name: user_id
          schema:
            type: integer
      responses:
        200:
          content:
            application/json:
              schema:
                type: object
                properties:
                  user: UserSchema
        404:
          description: user does not exists
    put:
      tags:
        - api
      parameters:
        - in: path
          name: user_id
          schema:
            type: integer
      requestBody:
        content:
          application/json:
            schema:
              UserSchema
      responses:
        200:
          content:
            application/json:
              schema:
                type: object
                properties:
                  msg:
                    type: string
                    example: user updated
                  user: UserSchema
        404:
          description: user does not exists
    delete:
      tags:
        - api
      parameters:
        - in: path
          name: user_id
          schema:
            type: integer
      responses:
        200:
          content:
            application/json:
              schema:
                type: object
                properties:
                  msg:
                    type: string
                    example: user deleted
        404:
          description: user does not exists
    """

    # Every verb on this resource requires a valid JWT.
    method_decorators = [jwt_required]

    def get(self, user_id):
        # 404s when the id does not exist; otherwise serialize the row.
        schema = UserSchema()
        user = User.query.get_or_404(user_id)
        return {"user": schema.dump(user)}

    def put(self, user_id):
        # partial=True lets the client send only the fields being changed.
        schema = UserSchema(partial=True)
        user = User.query.get_or_404(user_id)
        user = schema.load(request.json, instance=user)
        db.session.commit()
        return {"msg": "user updated", "user": schema.dump(user)}

    def delete(self, user_id):
        user = User.query.get_or_404(user_id)
        db.session.delete(user)
        db.session.commit()
        return {"msg": "user deleted"}
class UserList(Resource):
    """Creation and get_all
    ---
    get:
      tags:
        - api
      responses:
        200:
          content:
            application/json:
              schema:
                allOf:
                  - $ref: '#/components/schemas/PaginatedResult'
                  - type: object
                    properties:
                      results:
                        type: array
                        items:
                          $ref: '#/components/schemas/UserSchema'
    post:
      tags:
        - api
      requestBody:
        content:
          application/json:
            schema:
              UserSchema
      responses:
        201:
          content:
            application/json:
              schema:
                type: object
                properties:
                  msg:
                    type: string
                    example: user created
                  user: UserSchema
    """

    # Every verb on this resource requires a valid JWT.
    method_decorators = [jwt_required]

    def get(self):
        # Paginated listing of all users.
        schema = UserSchema(many=True)
        query = User.query
        return paginate(query, schema)

    def post(self):
        # Validate and deserialize the payload, persist it, echo it back.
        schema = UserSchema()
        user = schema.load(request.json)
        db.session.add(user)
        db.session.commit()
        return {"msg": "user created", "user": schema.dump(user)}, 201
| [
"grigorios.markopoulos@underwriteme.co.uk"
] | grigorios.markopoulos@underwriteme.co.uk |
a9d382ed0329400bd7906b0287707b561ddd30cc | c38e00c81aad18fb31707f864c1aabe79f6eff4e | /figures/energyCostGraph.py | ad7f35a3a82afa3507a26440381c6e4a807c241a | [] | no_license | SEL-Columbia/lab_measurements | 3ef2a62ef45f44650a2de9e7d136b6a89c52c04a | af30ec0bf6b070e3e5ec72ee7a18da995287637a | refs/heads/master | 2021-01-01T17:47:58.825081 | 2012-01-26T23:51:14 | 2012-01-26T23:51:14 | 2,440,445 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,183 | py | from __future__ import division
import matplotlib.pyplot as plt
def addEntry(key, cost, energy):
    # Record one energy source in the module-level `dict` registry
    # (note: that global shadows the builtin ``dict``).
    entry = {'cost': cost, 'energy': energy}
    entry['cpkWh'] = entry['cost'] / entry['energy']
    dict[key] = entry
# Module-level registry of energy sources, filled by addEntry().
# NOTE(review): the name shadows the builtin ``dict``.
dict = {}

addEntry('Cell Phone Charge', cost = 0.25, energy = 0.005)
#addEntry('Kerosene', cost = 1.20, energy = 45/3.6)
addEntry('D Cell', cost = 0.30, energy = 0.0022)
addEntry('Car Battery Charge', cost = 1.0, energy = 0.12)
#addEntry('SharedSolar', cost = 1.0, energy = 0.2)
addEntry('Grid Mali', cost = 16, energy = 16/0.2)

fig = plt.figure()
axes = fig.add_subplot(111)
# One labelled point per energy source on log-log axes:
# x = purchase cost (USD), y = cost per kWh.
for key in dict.keys():
    axes.loglog(dict[key]['cost'], dict[key]['cpkWh'], 'ko')
    axes.text(dict[key]['cost'], dict[key]['cpkWh'], key)

from matplotlib.patches import Rectangle
# Shaded region marking the SharedSolar cost/energy envelope.
rect = Rectangle((1, 1), 4, 5, facecolor="#dddddd")
axes.add_artist(rect)
#rect.set_clip_box(axes.bbox)
axes.text(1.3,2,'Shared Solar')

plt.xlim((0.1,50))
plt.ylim((0.01,500))
plt.title('Unit Cost of Energy and Purchase Price')
plt.xlabel('Purchase Cost (USD)')
plt.ylabel('Cost Per kWh')
plt.grid()
plt.savefig('costVsEnergy.pdf')
plt.show()
plt.close()
"no140@columbia.edu"
] | no140@columbia.edu |
9c1baafdaefea09ba51c9303e42aa722840cf9de | b2f58607ab7dc003781496d1222b8538de01816a | /opennmt-baseline/utils/loss.py | ce7239450b7fb20352a5d94b4228c81f1683fe03 | [] | no_license | Amazing-J/structural-transformer | 3ab99a79a52a26ef73b887ce9fcd01ba3aec0c66 | daef1f28cce74ecf984603dbfe796c5f71b1b39c | refs/heads/master | 2020-07-09T12:59:49.788084 | 2019-12-25T05:46:41 | 2019-12-25T05:46:41 | 203,974,442 | 79 | 11 | null | null | null | null | UTF-8 | Python | false | false | 11,249 | py | """
This includes: LossComputeBase and the standard NMTLossCompute, and
sharded loss compute stuff.
"""
from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
import onmt
import onmt.constants as Constants
from utils.misc import use_gpu
from utils.statistics import Statistics
def build_loss_compute(model, tgt_vocab, opt, train=True):
    """Build an NMTLossCompute wrapping the appropriate criterion.

    During training with opt.label_smoothing > 0 a label-smoothed
    KL-divergence criterion is used; otherwise a summed NLL loss that
    ignores padding.  The returned object computes loss statistics and,
    when sharded, performs the backward pass itself.
    """
    device = torch.device("cuda" if use_gpu(opt) else "cpu")
    padding_idx = tgt_vocab.stoi[Constants.PAD_WORD]

    if train and opt.label_smoothing > 0:
        criterion = LabelSmoothingLoss(
            opt.label_smoothing, len(tgt_vocab), ignore_index=padding_idx
        )
    else:
        criterion = nn.NLLLoss(ignore_index=padding_idx, reduction='sum')

    # The generator maps decoder states to a distribution over the target
    # vocabulary.  If a loss ever operates on raw logits instead of
    # probabilities (e.g. sparsemax), only the first part of the generator
    # should be handed over here.
    loss_gen = model.generator
    compute = NMTLossCompute(criterion, loss_gen)
    compute.to(device)

    return compute
class LossComputeBase(nn.Module):
    """
    Class for managing efficient loss computation. Handles
    sharding next step predictions and accumulating multiple
    loss computations

    Users can implement their own loss computation strategy by making
    subclass of this one. Users need to implement the _compute_loss()
    and make_shard_state() methods.

    Args:
        generator (:obj:`nn.Module`) :
             module that maps the output of the decoder to a
             distribution over the target vocabulary.
        tgt_vocab (:obj:`Vocab`) :
             torchtext vocab object representing the target output
        normalzation (str): normalize by "sents" or "tokens"
    """

    def __init__(self, criterion, generator):
        super(LossComputeBase, self).__init__()
        self.criterion = criterion
        self.generator = generator

    @property
    def padding_idx(self):
        # The criterion's ignore_index doubles as the padding token id.
        return self.criterion.ignore_index

    def _make_shard_state(self, batch, output, range_, attns=None):
        """
        Make shard state dictionary for shards() to return iterable
        shards for efficient loss computation. Subclass must define
        this method to match its own _compute_loss() interface.
        Args:
            batch: the current batch.
            output: the predict output from the model.
            range_: the range of examples for computing, the whole
                    batch or a trunc of it?
            attns: the attns dictionary returned from the model.
        """
        # NOTE(review): this *returns* the exception class instead of
        # raising it; subclasses always override, so it is latent, but
        # `raise NotImplementedError` would be the conventional form.
        return NotImplementedError

    def _compute_loss(self, batch, output, target, **kwargs):
        """
        Compute the loss. Subclass must define this method.

        Args:
            batch: the current batch.
            output: the predict output from the model.
            target: the validate target to compare output with.
            **kwargs(optional): additional info for computing loss.
        """
        # NOTE(review): same as above — returned, not raised.
        return NotImplementedError

    def monolithic_compute_loss(self, batch, output, attns):
        """
        Compute the forward loss for the batch.

        Args:
          batch (batch): batch of labeled examples
          output (:obj:`FloatTensor`):
              output of decoder model `[tgt_len x batch x hidden]`
          attns (dict of :obj:`FloatTensor`) :
              dictionary of attention distributions
              `[tgt_len x batch x src_len]`
        Returns:
            :obj:`onmt.utils.Statistics`: loss statistics
        """
        # Whole-batch, forward-only evaluation: no sharding, no backward.
        range_ = (0, batch.tgt.size(0))
        shard_state = self._make_shard_state(batch, output, range_, attns)
        _, batch_stats = self._compute_loss(batch, **shard_state)

        return batch_stats

    def sharded_compute_loss(self, batch, output, attns,
                             cur_trunc, trunc_size, shard_size,
                             normalization):
        """Compute the forward loss and backpropagate.  Computation is done
        with shards and optionally truncation for memory efficiency.

        Also supports truncated BPTT for long sequences by taking a
        range in the decoder output sequence to back propagate in.
        Range is from `(cur_trunc, cur_trunc + trunc_size)`.

        Note sharding is an exact efficiency trick to relieve memory
        required for the generation buffers. Truncation is an
        approximate efficiency trick to relieve the memory required
        in the RNN buffers.

        Args:
          batch (batch) : batch of labeled examples
          output (:obj:`FloatTensor`) :
              output of decoder model `[tgt_len x batch x hidden]`
          attns (dict) : dictionary of attention distributions
              `[tgt_len x batch x src_len]`
          cur_trunc (int) : starting position of truncation window
          trunc_size (int) : length of truncation window
          shard_size (int) : maximum number of examples in a shard
          normalization (int) : Loss is divided by this number

        Returns:
            :obj:`onmt.utils.Statistics`: validation loss statistics
        """
        batch_stats = Statistics()
        range_ = (cur_trunc, cur_trunc + trunc_size)
        shard_state = self._make_shard_state(batch, output, range_, attns)
        # Each shard's (normalized) loss is backpropagated immediately;
        # shards() then folds the per-shard gradients back into the full
        # tensors after the last shard.
        for shard in shards(shard_state, shard_size):
            loss, stats = self._compute_loss(batch, **shard)
            loss.div(float(normalization)).backward()
            batch_stats.update(stats)

        return batch_stats

    def _stats(self, loss, scores, target):
        """
        Args:
            loss (:obj:`FloatTensor`): the loss computed by the loss criterion.
            scores (:obj:`FloatTensor`): a score for each possible output
            target (:obj:`FloatTensor`): true targets

        Returns:
            :obj:`onmt.utils.Statistics` : statistics for this batch.
        """
        # Accuracy is counted over non-padding positions only.
        pred = scores.max(1)[1]
        non_padding = target.ne(self.padding_idx)
        num_correct = pred.eq(target).masked_select(non_padding).sum().item()
        num_non_padding = non_padding.sum().item()
        return Statistics(loss.item(), num_non_padding, num_correct)

    def _bottle(self, _v):
        # Collapse (len, batch, hidden) -> (len * batch, hidden).
        return _v.view(-1, _v.size(2))

    def _unbottle(self, _v, batch_size):
        # Inverse of _bottle: (len * batch, hidden) -> (len, batch, hidden).
        return _v.view(-1, batch_size, _v.size(1))
class LabelSmoothingLoss(nn.Module):
    """KL-divergence loss against a label-smoothed target distribution.

    Minimizes KL(q_smoothed_ground_truth(w) || p_model(w)), where the
    smoothed target puts `1 - label_smoothing` mass on the true token and
    spreads the rest uniformly over the remaining non-padding tokens.
    """

    def __init__(self, label_smoothing, tgt_vocab_size, ignore_index=-100):
        assert 0.0 < label_smoothing <= 1.0
        self.ignore_index = ignore_index
        super(LabelSmoothingLoss, self).__init__()

        # Uniform share for every token except the true label and padding
        # (hence vocab size minus 2).
        uniform_mass = label_smoothing / (tgt_vocab_size - 2)
        template = torch.full((tgt_vocab_size,), uniform_mass)
        template[self.ignore_index] = 0
        self.register_buffer('one_hot', template.unsqueeze(0))

        self.confidence = 1.0 - label_smoothing

    def forward(self, output, target):
        """
        output (FloatTensor): batch_size x n_classes (log-probabilities)
        target (LongTensor): batch_size
        """
        smoothed = self.one_hot.repeat(target.size(0), 1)
        smoothed.scatter_(1, target.unsqueeze(1), self.confidence)
        # Rows whose target is padding contribute nothing.
        smoothed.masked_fill_((target == self.ignore_index).unsqueeze(1), 0)

        return F.kl_div(output, smoothed, reduction='sum')
class NMTLossCompute(LossComputeBase):
    """Standard NMT loss computation."""

    def __init__(self, criterion, generator, normalization="sents"):
        super(NMTLossCompute, self).__init__(criterion, generator)

    def _make_shard_state(self, batch, output, range_, attns=None):
        # Targets are shifted by one: predict token t+1 from decoder
        # state t within the given truncation window.
        start, end = range_
        return {"output": output, "target": batch.tgt[start + 1: end]}

    def _compute_loss(self, batch, output, target):
        flat_output = self._bottle(output)
        scores = self.generator(flat_output)
        gtruth = target.view(-1)

        loss = self.criterion(scores, gtruth)
        stats = self._stats(loss.clone(), scores, gtruth)

        return loss, stats
def filter_shard_state(state, shard_size=None):
    """Yield loss-state entries, optionally pre-split into shards.

    With ``shard_size is None`` (evaluation path) every ``(key, value)``
    pair is passed through untouched.  Otherwise each non-None value is
    yielded as ``(key, (value, chunks))`` where ``chunks`` are
    ``shard_size``-sized splits of the tensor, cloned with gradient
    tracking so each shard can be backpropagated independently.
    """
    for k, v in state.items():
        if shard_size is None:
            yield k, v
            # Bug fix: the original fell through into the splitting branch,
            # which would call torch.split(v, None) (a TypeError) for
            # tensors and double-yield the key for other values.
            continue

        if v is not None:
            v_split = []
            if isinstance(v, torch.Tensor):
                for v_chunk in torch.split(v, shard_size):
                    v_chunk = v_chunk.data.clone()
                    v_chunk.requires_grad = v.requires_grad
                    v_split.append(v_chunk)
            yield k, (v, v_split)
def shards(state, shard_size, eval_only=False):
    """
    Args:
        state: A dictionary which corresponds to the output of
               *LossCompute._make_shard_state(). The values for
               those keys are Tensor-like or None.
        shard_size: The maximum size of the shards yielded by the model.
        eval_only: If True, only yield the state, nothing else.
                   Otherwise, yield shards.

    Yields:
        Each yielded shard is a dict.

    Side effect:
        After the last shard, this function does back-propagation.
    """
    if eval_only:
        yield filter_shard_state(state)
    else:
        # non_none: the subdict of the state dictionary where the values
        # are not None.
        non_none = dict(filter_shard_state(state, shard_size))

        # Now, the iteration:
        # state is a dictionary of sequences of tensor-like but we
        # want a sequence of dictionaries of tensors.
        # First, unzip the dictionary into a sequence of keys and a
        # sequence of tensor-like sequences.
        keys, values = zip(*((k, [v_chunk for v_chunk in v_split])
                             for k, (_, v_split) in non_none.items()))

        # Now, yield a dictionary for each shard. The keys are always
        # the same. values is a sequence of length #keys where each
        # element is a sequence of length #shards. We want to iterate
        # over the shards, not over the keys: therefore, the values need
        # to be re-zipped by shard and then each shard can be paired
        # with the keys.
        for shard_tensors in zip(*values):
            yield dict(zip(keys, shard_tensors))

        # Assumed backprop'd
        # By now the caller has backpropagated each shard's loss into the
        # cloned chunks; gather (original-slice, chunk-gradient) pairs and
        # push those gradients into the original tensors in one call.
        variables = []
        for k, (v, v_split) in non_none.items():
            if isinstance(v, torch.Tensor) and state[k].requires_grad:
                variables.extend(zip(torch.split(state[k], shard_size),
                                     [v_chunk.grad for v_chunk in v_split]))
        inputs, grads = zip(*variables)
        torch.autograd.backward(inputs, grads)
| [
"zhujie951121@gmail.com"
] | zhujie951121@gmail.com |
570d5c279ef4c76aad6ed5844ecc6bea270092c3 | c7fb854c1e787adfb066d485a86cb384848c8a89 | /app/models/comment.py | e9ac0428df2289dcdb6cb4db5c99cc2ffd2342a1 | [] | no_license | longfe1yang/flask_blog | 51218bec01bc706a088c6bf6d1000389936670ea | 3b54e417878742e886d0c629230d72b4dbd24045 | refs/heads/master | 2021-06-08T09:15:52.040002 | 2016-09-04T09:39:22 | 2016-09-04T09:39:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 994 | py | from . import db
from . import ReprMixin
import time
class Comment(db.Model, ReprMixin):
    """SQLAlchemy model for a comment attached to a tweet."""
    __tablename__ = 'comments'

    id = db.Column(db.Integer, primary_key=True)
    content = db.Column(db.String())
    # NOTE(review): despite the name this column is Integer — presumably it
    # stores the replied-to user's id; confirm against callers.
    reply_username = db.Column(db.Integer)
    tweet_id = db.Column(db.Integer, db.ForeignKey('tweets.id'))
    # Unix timestamp (seconds) captured once at construction time.
    created_time = db.Column(db.Integer)

    def __init__(self, form):
        # `form` is a dict-like request payload; missing keys default to ''.
        self.content = form.get('content', '')
        self.created_time = int(time.time())
        self.tweet_id = form.get('tweet_id', '')

    def json(self):
        # Touch `id` first — presumably to make SQLAlchemy load the row's
        # attributes into __dict__ before it is dumped; confirm.
        self.id
        d = {k: v for k, v in self.__dict__.items() if k not in self.blacklist()}
        return d

    def blacklist(self):
        # Internal SQLAlchemy bookkeeping keys excluded from json().
        b = [
            '_sa_instance_state',
        ]
        return b

    def save(self):
        db.session.add(self)
        db.session.commit()

    def delete(self):
        db.session.delete(self)
        db.session.commit()
"""
时间问题没有解决,依旧是只用了一个时间
"""
| [
"yanglongfei71937315@gmail.com"
] | yanglongfei71937315@gmail.com |
8a68a48afac4164eb404a1c903aea21610418776 | 4133ecadccf991de5e2cb2aade0d61dc55cb1b57 | /ProTwo/settings.py | b62f137cc9248c0323c4227159a01b214015972e | [] | no_license | EmonZaman/Web-Development | f357035772e41b8f65c9265d70236d3c6b24c173 | 8a51813469cdef39096e525d9b3b4933570c9bbe | refs/heads/main | 2023-06-26T10:04:26.644356 | 2021-07-29T09:08:13 | 2021-07-29T09:08:13 | 381,151,748 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,380 | py | """
Django settings for ProTwo project.
Generated by 'django-admin startproject' using Django 3.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

# Project-level static assets folder, registered in STATICFILES_DIRS below.
STATIC_DIR = os.path.join(BASE_DIR,"static")

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-wv()pj@*7vjit7=-i-wy&uu5)_zsa)9g^bzlas!y5@*jwu%g4a'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Project application.
    'AppTwo'
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'ProTwo.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level templates directory, in addition to per-app dirs.
        'DIRS': [os.path.join(BASE_DIR,'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'ProTwo.wsgi.application'


# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}


# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/

STATIC_URL = '/static/'

STATICFILES_DIRS = [
    STATIC_DIR,
]

# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field

DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
"aagontukemon@gmail.com"
] | aagontukemon@gmail.com |
1750d92d1dc355447d3f4c59c6a8905eb0f2bb15 | 23a1faa037ddaf34a7b5db8ae10ff8fa1bb79b94 | /TCS_Practice/TCS_CodeVita_Problems/Constellation.py | e77bcc038db671a3100235e6c5e1bd94fd310097 | [] | no_license | Pyk017/Competetive-Programming | e57d2fe1e26eeeca49777d79ad0cbac3ab22fe63 | aaa689f9e208bc80e05a24b31aa652048858de22 | refs/heads/master | 2023-04-27T09:37:16.432258 | 2023-04-22T08:01:18 | 2023-04-22T08:01:18 | 231,229,696 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,823 | py | '''
Three characters { #, *, . } represents a constellation of stars and galaxies in space. Each galaxy is demarcated by # characters. There can be one or many stars in a given galaxy. Stars can only be in shape of vowels { A, E, I, O, U } . A collection of * in the shape of the vowels is a star. A star is contained in a 3x3 block. Stars cannot be overlapping. The dot(.) character denotes empty space.
Given 3xN matrix comprising of { #, *, . } character, find the galaxy and stars within them.
Note: Please pay attention to how vowel A is denoted in a 3x3 block in the examples section below.
Constraints
3 <= N <= 10^5
Input
Input consists of single integer N denoting number of columns.
Output
Output contains vowels (stars) in order of their occurrence within the given galaxy. Galaxy itself is represented by # character.
Example 1
Input
18
* . * # * * * # * * * # * * * . * .
* . * # * . * # . * . # * * * * * *
* * * # * * * # * * * # * * * * . *
Output
U#O#I#EA
Explanation
As it can be seen that the stars make the image of the alphabets U, O, I, E and A respectively.
Example 2
Input
12
* . * # . * * * # . * .
* . * # . . * . # * * *
* * * # . * * * # * . *
Output
U#I#A
Explanation
As it can be seen that the stars make the image of the alphabet U, I and A.
Possible solution:
Input:
12
* . * # . * * * # . * .
* . * # . . * . # * * *
* * * # . * * * # * . *
'''
# 3x3 star patterns (row-major) for each vowel-shaped star.
VOWEL_PATTERNS = {
    ('.', '*', '.',
     '*', '*', '*',
     '*', '.', '*'): 'A',
    ('*', '*', '*',
     '*', '*', '*',
     '*', '*', '*'): 'E',
    ('*', '*', '*',
     '.', '*', '.',
     '*', '*', '*'): 'I',
    ('*', '*', '*',
     '*', '.', '*',
     '*', '*', '*'): 'O',
    ('*', '.', '*',
     '*', '.', '*',
     '*', '*', '*'): 'U',
}


def decode_constellation(grid):
    """Return the vowels and galaxy separators encoded in a 3xN grid.

    grid: three equal-length lists of tokens from {'#', '*', '.'}.
    A column of '#' maps to '#', an all-'.' column is empty space, and
    any other column starts a 3x3 star block decoded via VOWEL_PATTERNS.
    """
    n = len(grid[0])
    out = []
    col = 0
    while col < n:
        column = (grid[0][col], grid[1][col], grid[2][col])
        if column == ('#', '#', '#'):
            out.append('#')
            col += 1
        elif column == ('.', '.', '.'):
            col += 1
        else:
            if col + 3 > n:  # malformed trailing block; stop defensively
                break
            block = tuple(grid[row][col + c] for row in range(3) for c in range(3))
            out.append(VOWEL_PATTERNS.get(block, ''))
            col += 3  # a star always occupies a full 3x3 block
    return ''.join(out)


if __name__ == "__main__":
    # Bug fixes vs. the original script: it parsed the rows with
    # map(int, ...) (crashes on '*'/'.'/'#'), indexed with an undefined
    # variable `j`, and tried to skip decoded blocks by reassigning the
    # for-loop variable (`i = i + 2`), which has no effect in Python.
    n = int(input())
    galaxy = [input().split() for _ in range(3)]
    print(decode_constellation(galaxy))
| [
"prakharkumar506978@gmail.com"
] | prakharkumar506978@gmail.com |
e0401fc292b6f962226021e0e3f636419bf5068e | 958b6de6be5fb8bce876373cec29677259c6ceb3 | /hypergan/train_hooks/experimental/weight_penalty_train_hook.py | 7a672346f3853a2b0d4a457e359240de7e35efd9 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | RandomStudio/HyperGAN | 712679b9121ad414d2f91205a82370d54a930120 | 586cefe69805f5ffa8dcb11aaf346f6b3dcf3ac9 | refs/heads/master | 2020-06-22T22:43:04.884557 | 2019-07-23T12:17:58 | 2019-07-23T12:17:58 | 198,420,256 | 0 | 0 | null | 2019-07-23T11:52:00 | 2019-07-23T11:51:59 | null | UTF-8 | Python | false | false | 2,923 | py | #From https://gist.github.com/EndingCredits/b5f35e84df10d46cfa716178d9c862a3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.framework import ops
from tensorflow.python.training import optimizer
import tensorflow as tf
import hyperchamber as hc
import numpy as np
import inspect
from operator import itemgetter
from hypergan.train_hooks.base_train_hook import BaseTrainHook
class WeightPenaltyTrainHook(BaseTrainHook):
    """Train hook that adds weight-based regularization penalties
    (L2NN and/or orthogonality) to both discriminator and generator
    losses, driven by `config.l2nn_penalty` / `config.ortho_penalty`."""

    def __init__(self, gan=None, config=None, trainer=None, name="WeightPenaltyTrainHook", memory_size=2, top_k=1):
        super().__init__(config=config, gan=gan, trainer=trainer, name=name)
        d_losses = []

        weights = self.gan.weights()
        if config.only_d:
            # NOTE(review): `self.discriminator` is not defined in this
            # class; presumably this should be `self.gan.discriminator`
            # (or `self.gan.discriminator()`) — confirm before relying on
            # the only_d branch.
            weights = self.discriminator.weights()

        if config.l2nn_penalty:
            l2nn_penalties = []
            if len(weights) > 0:
                for w in weights:
                    # Flatten each weight tensor to 2-D before forming
                    # W^T W and W W^T.
                    w = tf.reshape(w, [-1, self.ops.shape(w)[-1]])
                    wt = tf.transpose(w)
                    wtw = tf.matmul(wt,w)
                    wwt = tf.matmul(w,wt)
                    def _l(m):
                        # Penalize column-sums of |m| exceeding 1.
                        m = tf.abs(m)
                        m = tf.reduce_sum(m, axis=0,keep_dims=True)
                        m = tf.maximum(m-1, 0)
                        m = tf.reduce_max(m, axis=1,keep_dims=True)
                        return m
                    l2nn_penalties.append(tf.minimum(_l(wtw), _l(wwt)))
                print('l2nn_penalty', self.config.l2nn_penalty, l2nn_penalties)
                l2nn_penalty = self.config.l2nn_penalty * tf.add_n(l2nn_penalties)
                self.add_metric('l2nn_penalty', self.gan.ops.squash(l2nn_penalty))
                d_losses.append(l2nn_penalty)

        if config.ortho_penalty:
            penalties = []
            for w in self.gan.weights():
                print("PENALTY", w)
                w = tf.reshape(w, [-1, self.ops.shape(w)[-1]])
                wt = tf.transpose(w)
                wtw = tf.matmul(wt,w)
                wwt = tf.matmul(w,wt)
                mwtw = tf.matmul(w, wtw)
                mwwt = tf.matmul(wt, wwt)
                def _l(w,m):
                    # Mean absolute deviation of W from W·(W^T W) —
                    # small when W is close to orthonormal.
                    l = tf.reduce_mean(tf.abs(w - m))
                    l = self.ops.squash(l)
                    return l
                penalties.append(tf.minimum(_l(w, mwtw), _l(wt, mwwt)))
            penalty = self.config.ortho_penalty * tf.add_n(penalties)
            self.add_metric('ortho_penalty', self.gan.ops.squash(penalty))
            print("PENALTY", penalty)
            # Broadcast the scalar penalty to one entry per batch element.
            penalty = tf.reshape(penalty, [1,1])
            penalty = tf.tile(penalty, [self.gan.batch_size(), 1])
            d_losses.append(penalty)

        self.loss = self.ops.squash(d_losses)

    def losses(self):
        # The same penalty term is added to both D and G losses.
        return [self.loss, self.loss]

    def after_step(self, step, feed_dict):
        pass

    def before_step(self, step, feed_dict):
        pass
| [
"mikkel@255bits.com"
] | mikkel@255bits.com |
a88decbf53c4ea080eaa46061a56e303ee56c2ab | 3002a71450c9f26c674f0f3949a7dfb39b95f968 | /mysite/myapi/models.py | 756e726823521920e527142baaeda13d485360b4 | [] | no_license | Gabriel0110/Django-Basic-REST_API | a4d4906aabb6ae095d53f4630746fc1b707f9ad2 | 2aeb1331ca0a0a0d15b2d8e9075f48d7277ffb79 | refs/heads/main | 2023-03-28T19:01:44.340793 | 2021-04-05T12:50:01 | 2021-04-05T12:50:01 | 354,688,271 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 375 | py | from django.db import models
# Create your models here.
class Hero(models.Model):
    """A hero with a public name and a secret alias."""
    name = models.CharField(max_length=60)
    alias = models.CharField(max_length=60)

    def __str__(self):
        # Shown in the admin and anywhere the object is rendered as text.
        return self.name
class Spell(models.Model):
    """A spell with a name and a free-text description."""
    name = models.CharField(max_length=60)
    description = models.TextField()

    def __str__(self):
        # Shown in the admin and anywhere the object is rendered as text.
        return self.name
"gabrieltomberlin14@gmail.com"
] | gabrieltomberlin14@gmail.com |
f85f6f39aa12d9bd44917d0f830d724ec3d6f956 | c42f7f7a8421103cc3ca8ee44673704f7eea22b1 | /src/utils/routes.py | 01fe170c5eaa9ad4e3f0cf77beb0b1f34279b976 | [
"MIT"
] | permissive | styleolder/fp-server | fe585fe73014eb0421b25d5579191d24276df250 | ae405e7c37a919bd73be567e3e098e7fe5524097 | refs/heads/master | 2020-03-21T12:05:36.250998 | 2018-06-24T13:07:51 | 2018-06-24T13:07:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,770 | py | # -*- coding:utf-8 -*-
"""
http uri 路由装饰器
"""
from utils import log as logger
class route(object):
    """
    @route('/some/path')
    class SomeRequestHandler(RequestHandler):
        pass

    @route('/some/path', name='other')
    class SomeOtherRequestHandler(RequestHandler):
        pass

    my_routes = route.make_routes(['api'])
    """
    # Class-level registry shared by every decorated handler.
    _routes = []

    def __init__(self, uri, name=None):
        """ Decorator.
        @param uri  the URI to register; URI regular expressions are supported
        @param name alias for the registered URI (defaults to the URI with
                    '/' replaced by '-')
        """
        self.uri = uri
        if not name:
            name = '-'.join(uri.split('/'))
        self.name = name

    def __call__(self, _handler):
        """ gets called when we class decorate
        """
        # Warn about duplicate URIs/aliases before appending the new entry.
        for item in self._routes:
            if item.get('uri') == self.uri:
                logger.error('uri aleady exists! uri:', self.uri, 'name:', self.name, 'handler:', _handler, caller=self)
            if item.get('name') == self.name:
                logger.warn('name aleady exists! uri:', self.uri, 'name:', self.name, 'handler:', _handler, caller=self)
        self._routes.append({'uri': self.uri, 'name': self.name, 'handler': _handler})
        return _handler

    @classmethod
    def make_routes(cls, dirs):
        """ Register and return all handlers.
        @param dirs list of module paths whose handler modules should be
                    imported so their @route decorators run
        """
        for dir in dirs:
            # Importing the module executes its @route decorators, which
            # populate cls._routes as a side effect.
            s = 'import %s' % dir
            exec(s)

        routes = []
        for handler_dic in cls._routes:
            logger.info('register uri:', handler_dic['uri'], 'handler:', handler_dic.get('handler'), caller=cls)
            routes.append((handler_dic.get('uri'), handler_dic.get('handler')))
        return routes
| [
"valesail7@gmail.com"
] | valesail7@gmail.com |
fb49c099273a55d0a4bc9d8f41449699f033d46f | 445c652693214fb358ca07b1283a3873c4a7fe69 | /potential_energy_build_DNN.py | 40c809cfae4a2dc3c24828c6fb5a1c854121a9f7 | [] | no_license | syang-lab/Potential-ML | c80d2890c80ac6a1f5dc5fae6253a9a29e6cd009 | 7cdbc1392ea020db4ad617c9838c3d924416723c | refs/heads/master | 2022-11-09T10:55:51.495216 | 2020-06-22T00:10:08 | 2020-06-22T00:10:08 | 273,999,519 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,832 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 12 14:51:46 2020
@author: shanyang
"""
# import library
import numpy as np
from sklearn.model_selection import train_test_split;
from tensorflow import keras
from keras.models import Sequential
from keras.layers import Dense
from keras import initializers
from keras import optimizers
import matplotlib.pyplot as plt
# fit the total energy with AIMD go get potential energy surface at finite temperatures
# preprocess data
class load_data:
    """Loader for VASP AIMD outputs: XDATCAR trajectory + energy file."""

    def read_xdatcar(self,path):
        """ read the XDARCAR file from AIMD (VASP) """
        try:
            with open(path,'r') as xdatcar:
                # Header: system title, lattice scale, three lattice vectors.
                self._system=xdatcar.readline()
                self._scale_supercell=float(xdatcar.readline().rstrip('\n').rstrip());
                self._a_supercell_vector=np.array([float(i)*self._scale_supercell for i in xdatcar.readline().rstrip('\n').split()])
                self._b_supercell_vector=np.array([float(i)*self._scale_supercell for i in xdatcar.readline().rstrip('\n').split()])
                self._c_supercell_vector=np.array([float(i)*self._scale_supercell for i in xdatcar.readline().rstrip('\n').split()])
                self._latticevector_matrix_supercell=np.round(np.stack((self._a_supercell_vector,self._b_supercell_vector,self._c_supercell_vector)),6)
                # Element names and per-element counts; total atoms per frame.
                self._element_names = [name for name in xdatcar.readline().rstrip('\n').split()]
                self._element_numbers = np.array([int(number) for number in xdatcar.readline().rstrip('\n').split()])
                self._total_number = np.sum(self._element_numbers)
                self._xdatcar=[]
                self._count = 0
                # Collect coordinate lines only; _isfloat filters out
                # "Direct configuration=" separator lines between frames.
                while True:
                    line=xdatcar.readline().rstrip('\n').split();
                    if not line:
                        break
                    if (self._isfloat(*[items for items in line])):
                        self._xdatcar.append(line)
                        self._count +=1
                #self._xdatcar_fract = np.asarray(self._xdatcar,dtype = float)
                self._steps = int(self._count/self._total_number)
        except FileNotFoundError as e:
            print('XDARCAR file does not exist:{}'.format(e))
            raise e

        """ reshape the data from XDATCAR to 3D matrix steps * atoms * xyz(direction)"""
        # One row per MD step; each row is the frame's fractional
        # coordinates flattened to length total_atoms * 3.
        self._xdatcar_fract = np.zeros((self._steps,self._total_number*3));
        for t in range(self._steps):
            self._xdatcar_fract[t,:] = np.asarray(self._xdatcar,dtype = float)[t*self._total_number:(t+1)*self._total_number,:].flatten();
        return self._xdatcar_fract;

    def _isfloat(self,*value):
        # True only if every token parses as a float.
        for it in value:
            try:
                float(it)
            except ValueError:
                return False
        return True;

    def read_energy(self,path):
        """Load the per-step total energies (plain-text file via np.loadtxt)."""
        try:
            self._energy = np.loadtxt(path);
        except FileNotFoundError as e:
            print('Energy file does not exist:{}'.format(e))
            raise e
        return self._energy;

    def get_total_steps(self):
        # Number of MD steps parsed from XDATCAR (set by read_xdatcar).
        return self._steps

    def get_total_atoms(self):
        # Total atom count across all species (set by read_xdatcar).
        return self._total_number;
# Inputs: one row per MD snapshot (flattened fractional coordinates);
# targets: the corresponding total energies.
x=np.loadtxt('x')
y=np.loadtxt('y')

# train_test split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25, shuffle = True ,random_state=42);

# Create the deep neural network model
model = Sequential();
#total_atoms = data_loader.get_total_atoms();
# Hard-coded atom count; must match the XDATCAR used to build 'x'.
total_atoms = 96;

# Three tanh hidden layers, linear scalar output (energy regression).
init = initializers.RandomNormal(mean=0, stddev=1, seed=None)
model.add(Dense(32, input_shape=(total_atoms*3,), activation='tanh',kernel_initializer= init));
model.add(Dense(32, activation='tanh',kernel_initializer= init));
model.add(Dense(32,activation='tanh',kernel_initializer= init));
model.add(Dense(1,activation=None));

optimizer_adam = optimizers.Adam(lr=0.005,decay = 0.01)
model.compile(loss='mean_absolute_error', optimizer=optimizer_adam)
model.summary()

# Train the model
# call back
# Stop when validation loss plateaus and restore the best weights seen.
early_stopping_cb = keras.callbacks.EarlyStopping(monitor='val_loss', patience = 30,restore_best_weights = True)
hist=model.fit(x_train,y_train,epochs =1000, batch_size =30, validation_split=0.2,callbacks=[early_stopping_cb])
mse_test = model.evaluate(x_test, y_test)

# plot loss function
plt.figure()
plt.plot(hist.history['loss'])
plt.plot(hist.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()

# Test on the test set
y_pred = model.predict(x_test)
plt.figure()
plt.plot(y_pred)
plt.plot(y_test)
plt.title('prediction and test')
plt.ylabel('Y')
plt.xlabel('configurations')
plt.legend(['y_pred', 'y_test'], loc='upper left')
plt.show()

# save the model
model.save("Potential_model.h5")
| [
"noreply@github.com"
] | noreply@github.com |
eda9a5843285cbebe07129c87ad4d25678886ef8 | e7b8c608fbb56417d532972fb24ec7d34f11edff | /hw3_release/utils.py | 0ab7856ca8345270f692bdefc93d8a753bb93354 | [
"MIT"
] | permissive | rudrakshkapil/CS131 | 26c03667a7e9aed1f3ecda60a6ff8937e5eafe31 | 2aa847b6ed181717d5fded343b4c0deb8241c895 | refs/heads/main | 2023-02-06T17:01:13.827853 | 2021-01-01T13:53:48 | 2021-01-01T13:53:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,838 | py | import numpy as np
from scipy.ndimage import affine_transform
# Functions to convert points to homogeneous coordinates and back
# Functions to convert points to homogeneous coordinates and back.
# PEP 8: named lambdas should be plain `def`s (documentable, better
# tracebacks); the callables' names and behavior are unchanged.
def pad(x):
    """Append a ones column: (N, d) Cartesian -> (N, d+1) homogeneous."""
    return np.hstack([x, np.ones((x.shape[0], 1))])


def unpad(x):
    """Drop the last column: homogeneous -> (N, d) Cartesian."""
    return x[:, :-1]
def plot_matches(ax, image1, image2, keypoints1, keypoints2, matches,
                 keypoints_color='k', matches_color=None, only_matches=False):
    """Plot matched features.

    Parameters
    ----------
    ax : matplotlib.axes.Axes
        Matches and image are drawn in this ax.
    image1 : (N, M [, 3]) array
        First grayscale or color image.
    image2 : (N, M [, 3]) array
        Second grayscale or color image.
    keypoints1 : (K1, 2) array
        First keypoint coordinates as ``(row, col)``.
    keypoints2 : (K2, 2) array
        Second keypoint coordinates as ``(row, col)``.
    matches : (Q, 2) array
        Indices of corresponding matches in first and second set of
        descriptors, where ``matches[:, 0]`` denote the indices in the first
        and ``matches[:, 1]`` the indices in the second set of descriptors.
    keypoints_color : matplotlib color, optional
        Color for keypoint locations.
    matches_color : matplotlib color, optional
        Color for lines which connect keypoint matches. By default the
        color is chosen randomly.
    only_matches : bool, optional
        Whether to only plot matches and not plot the keypoint locations.
    """
    # Bug fix: the original called .astype(np.float32) and discarded the
    # result (ndarray.astype returns a copy); bind the converted arrays.
    image1 = image1.astype(np.float32)
    image2 = image2.astype(np.float32)

    # Pad both images to a common height/width so they can be placed side
    # by side on one canvas.
    new_shape1 = list(image1.shape)
    new_shape2 = list(image2.shape)

    if image1.shape[0] < image2.shape[0]:
        new_shape1[0] = image2.shape[0]
    elif image1.shape[0] > image2.shape[0]:
        new_shape2[0] = image1.shape[0]

    if image1.shape[1] < image2.shape[1]:
        new_shape1[1] = image2.shape[1]
    elif image1.shape[1] > image2.shape[1]:
        new_shape2[1] = image1.shape[1]

    # Bug fix: the original compared a list to a tuple (`new_shape1 !=
    # image1.shape`), which is always True, forcing a pointless copy.
    if tuple(new_shape1) != image1.shape:
        new_image1 = np.zeros(new_shape1, dtype=image1.dtype)
        new_image1[:image1.shape[0], :image1.shape[1]] = image1
        image1 = new_image1

    if tuple(new_shape2) != image2.shape:
        new_image2 = np.zeros(new_shape2, dtype=image2.dtype)
        new_image2[:image2.shape[0], :image2.shape[1]] = image2
        image2 = new_image2

    # Side-by-side canvas; image2's keypoints are shifted right by
    # image1's width (offset[1]).
    image = np.concatenate([image1, image2], axis=1)

    offset = image1.shape

    if not only_matches:
        ax.scatter(keypoints1[:, 1], keypoints1[:, 0],
                   facecolors='none', edgecolors=keypoints_color)
        ax.scatter(keypoints2[:, 1] + offset[1], keypoints2[:, 0],
                   facecolors='none', edgecolors=keypoints_color)

    ax.imshow(image, interpolation='nearest', cmap='gray')
    ax.axis((0, 2 * offset[1], offset[0], 0))

    for i in range(len(matches)):
        idx1 = matches[i, 0]
        idx2 = matches[i, 1]

        # One line per match; random color unless a fixed one was given.
        if matches_color is None:
            color = np.random.rand(3)
        else:
            color = matches_color

        ax.plot((keypoints1[idx1, 1], keypoints2[idx2, 1] + offset[1]),
                (keypoints1[idx1, 0], keypoints2[idx2, 0]),
                '-', color=color)
def get_output_space(img_ref, imgs, transforms):
    """Compute the bounding canvas for a reference image and warped images.

    Args:
        img_ref: reference image (2D array).
        imgs: images to be transformed.
        transforms: list of affine transformation matrices; transforms[i]
            maps points in imgs[i] to points in img_ref (row-vector
            convention: p' = p @ H[:2, :2] + H[2, :2]).
    Returns:
        (output_shape, offset): integer (rows, cols) extent covering every
        corner, and the minimum corner used as the canvas offset.
    """
    assert len(imgs) == len(transforms)

    def _corners(shape):
        rows, cols = shape
        return np.array([[0, 0], [rows, 0], [0, cols], [rows, cols]])

    all_corners = [_corners(img_ref.shape)]
    for img, H in zip(imgs, transforms):
        warped = _corners(img.shape).dot(H[:2, :2]) + H[2, :2]
        all_corners.append(warped)

    # Extents of the reference image together with every warped image.
    stacked = np.vstack(all_corners)
    corner_min = stacked.min(axis=0)
    corner_max = stacked.max(axis=0)

    # The canvas must have an integer shape large enough for all corners.
    output_shape = np.ceil(corner_max - corner_min).astype(int)
    return output_shape, corner_min
def warp_image(img, H, output_shape, offset):
    """Warp ``img`` by transform ``H`` into a canvas of ``output_shape``.

    ``affine_transform`` pulls values: each output pixel index vector o is
    read from the input at ``matrix @ o + offset``, so the inverse of H is
    supplied. Pixels that fall outside the source image are filled with -1.
    """
    inverse = np.linalg.inv(H).T
    return affine_transform(
        img.astype(np.float32),
        inverse[:2, :2],
        inverse[:2, 2] + offset,
        output_shape,
        cval=-1,
    )
| [
"noreply@github.com"
] | noreply@github.com |
413db64783d10d4a0dc8cc64cb9d5212ebd955ff | 07e9dad7af5468961af5e24a34aa5544cc777b42 | /src/supergrass-reporter.py | a556318e2f5addee73ce03d61aed11e1e37b30f2 | [] | no_license | jspc/supergrass | 1807691f715d7eb3bddd9d25d6725c1d12122388 | 1d4fa297655d1c91934a6acae978a762ebc31fe2 | refs/heads/master | 2021-01-20T18:07:31.154657 | 2016-08-01T16:42:43 | 2016-08-01T16:42:43 | 64,684,276 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,113 | py | from supergrass import app, health
import json
import time
import os
class SupergrassReporter(app.App):
    """Builds an FT-standard health-check report for the Flex video platform.

    Workflow run data comes from ``self.workflows`` (provided by ``app.App``
    -- assumed; confirm against the base class).
    """

    def report(self):
        """Assemble every health check and return the serialised report."""
        healthchecks = health.HealthChecks()
        healthchecks.system_code = 393
        healthchecks.name = 'Flex'
        healthchecks.description = 'FT video team workflow and publishing engine'
        healthchecks.checks = []
        for m in [self.failure_proportion(),
                  self.bytes_per_second_proportion('project-workflow'),
                  self.bytes_per_second_proportion('ingest-workflow'),
                  self.bytes_per_second_proportion('publish-workflow')]:
            healthchecks.checks.append(m)
        return healthchecks.dump()

    def failure_proportion(self):
        """Check the ratio of failed to successful workflow runs (last hour)."""
        h = health.HealthCheck()
        h.name = 'Workflow Failure Rates'
        h.severity = 3
        h.impact = 'Videos will not be archived and/or published automatically'
        h.summary = 'Tests the proportion of failed workflows to successful workflows'
        h.panic_guide = 'N/a'
        totals = self.workflows_by_time(3600)
        if totals['failed'] == 0:
            h.ok = True
            if totals['success'] == 0:
                h.check_output = 'No workflows seem to have run.'
            else:
                h.check_output = 'All workflows succeeding.'
        else:
            if totals['success'] == 0:
                h.check_output = 'Every workflow is failing.'
                h.ok = False
            else:
                # failed/success expressed as a percentage; >10% is unhealthy.
                percentage = (float(totals['failed']) / float(totals['success'])) * 100
                if percentage > 10:
                    h.check_output = 'More than 10% of workflows have failed.'
                    h.ok = False
                else:
                    h.check_output = 'Less than 10% of workflows failed.'
                    h.ok = True
        h.check_output += " Succesful Workflow Runs: {}, Failed Workflow Runs: {}".format(totals['success'], totals['failed'])
        return h

    def bytes_per_second_proportion(self, wf):
        """Check that recent throughput for workflow ``wf`` has not dropped.

        Compares bytes-per-second over the last hour against the all-time
        figure; a drop below 85% of the historical value is unhealthy.
        """
        h = health.HealthCheck()
        h.name = 'Processing time per kb for workflow: {}'.format(wf)
        h.severity = 3
        h.impact = 'Waiting time for videos will be higher than expected'
        h.summary = 'Tests that workflows are being completed in a timely manner'
        h.panic_guide = 'N/a'
        h.ok = True
        all_workflows = self.workflows.by_time(wf)
        recent_workflows = self.workflows.time_period(all_workflows, length=3600)
        if len(all_workflows) == 0 or len(recent_workflows) == 0:
            h.check_output = 'Not enough data to determine whether workflows take too long.'
        else:
            all_bytes_per_second = self.bytes_per_second(all_workflows)
            recent_bytes_per_second = self.bytes_per_second(recent_workflows)
            if all_bytes_per_second == 0:
                # No successful runs in the historical sample: there is no
                # baseline to compare against (also avoids ZeroDivisionError).
                h.check_output = 'Not enough data to determine whether workflows take too long.'
            else:
                diff = recent_bytes_per_second / all_bytes_per_second
                if diff < 0.85:
                    h.ok = False
                    h.check_output = 'Throughput has dropped to {}% of previous values'.format(diff*100)
        return h

    def workflows_by_time(self, check_period=3600):
        """Count failed and successful runs within the last ``check_period`` seconds."""
        failed = self.workflows.failed()
        success = self.workflows.successful()
        return {'failed': len(self.workflows.time_period(failed, length=check_period)),
                'success': len(self.workflows.time_period(success, length=check_period))}

    def bytes_per_second(self, workflows):
        """Average throughput (asset bytes per processing second) of ``workflows``.

        Failed runs are skipped so they do not distort the figure. Bug fix:
        the original wrote ``if wf['Failed'] == False: next`` -- a bare
        ``next`` is a no-op expression in Python (not ``continue``), so no
        run was ever skipped; the condition was also inverted. Returns 0.0
        when no processing time was accumulated instead of raising
        ZeroDivisionError.
        """
        total_time = 0
        total_size = 0
        for wf in workflows:
            if wf['Failed']:
                continue
            total_time += wf['ProcessTime']
            total_size += wf['AssetSize']
        if total_time == 0:
            return 0.0
        return float(total_size) / float(total_time)
def load(event, context):
    """Lambda-style entry point: build a reporter from ``event`` and run it.

    ``context`` is accepted for handler-signature compatibility but unused.
    Falls back to local defaults (and the MIO_PASSWORD environment variable)
    when keys are absent from ``event``.
    """
    reporter = SupergrassReporter(
        mio=event.get('mio', 'http://localhost:9898'),
        username=event.get('username', 'masteruser'),
        password=event.get('password', os.environ.get('MIO_PASSWORD')))
    return reporter.report()
if __name__ == '__main__':
    # Python 2 print statement: dump the report JSON when run as a script.
    print json.dumps(load({},{}), ensure_ascii=False)
| [
"james@zero-internet.org.uk"
] | james@zero-internet.org.uk |
46f20c311de9662ae52e0d17c54e38adf2809320 | 163eae2f1e83140ba8f082909793de9a1212371b | /ileriseviyeprogramlama/karakterfrekansı.py | 080518131d29fccfacadc8e79b7f57a04632fdfd | [] | no_license | MusabBayram/Python-Studies | b2ba99487c9509c05c85904cc77d3df35775ea38 | e2b388c0d05c30852a80eaac966d5b4a12f6d85f | refs/heads/master | 2021-05-17T11:57:36.777335 | 2020-03-28T10:23:20 | 2020-03-28T10:23:20 | 250,763,552 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 174 | py |
# Build a character-frequency table for the string below.
dize = "ProgramlamaÖdeviİleriSeviyeVeriYapılarıveObjeleripynb"
say = {}
for harf in dize:
    # dict.get replaces the explicit membership test of the original.
    say[harf] = say.get(harf, 0) + 1
print(say) | [
"Musab.bayram@hotmail.com"
] | Musab.bayram@hotmail.com |
bb7645b996dd70bb11bceb7fa31190757f205a92 | 141d1fb160fcfb4294d4b0572216033218da702d | /exec -l /bin/zsh/google-cloud-sdk/lib/surface/composer/environments/run.py | b81165e938f3ff95fea3676709e9be6e342bacc4 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | sudocams/tech-club | 1f2d74c4aedde18853c2b4b729ff3ca5908e76a5 | c8540954b11a6fd838427e959e38965a084b2a4c | refs/heads/master | 2021-07-15T03:04:40.397799 | 2020-12-01T20:05:55 | 2020-12-01T20:05:55 | 245,985,795 | 0 | 1 | null | 2021-04-30T21:04:39 | 2020-03-09T08:51:41 | Python | UTF-8 | Python | false | false | 7,255 | py | # -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to run an Airflow CLI sub-command in an environment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import argparse
from googlecloudsdk.api_lib.composer import environments_util as environments_api_util
from googlecloudsdk.api_lib.composer import util as api_util
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.composer import resource_args
from googlecloudsdk.command_lib.composer import util as command_util
from googlecloudsdk.core import log
from googlecloudsdk.core.console import console_io
# Substring used to locate an Airflow worker pod in the environment's GKE
# cluster.
WORKER_POD_SUBSTR = 'worker'
# Name of the container, inside the worker pod, that runs the Airflow CLI.
WORKER_CONTAINER = 'airflow-worker'
# Shown before subcommands that mutate the managed Airflow metadata database;
# '{}' is filled with the subcommand name.
DEPRECATION_WARNING = ('Because Cloud Composer manages the Airflow metadata '
                       'database for your environment, support for the Airflow '
                       '`{}` subcommand is being deprecated. '
                       'To avoid issues related to Airflow metadata, we '
                       'recommend that you do not use this subcommand unless '
                       'you understand the outcome.')
@base.ReleaseTracks(base.ReleaseTrack.GA)
class Run(base.Command):
  """Run an Airflow sub-command remotely in a Cloud Composer environment.
  Executes an Airflow CLI sub-command remotely in an environment. If the
  sub-command takes flags, separate the environment name from the sub-command
  and its flags with ``--''. This command waits for the sub-command to
  complete; its exit code will match the sub-command's exit code.
  ## EXAMPLES
  The following command:
  {command} myenv trigger_dag -- some_dag --run_id=foo
  is equivalent to running the following command from a shell inside the
  *my-environment* environment:
  airflow trigger_dag some_dag --run_id=foo
  """
  @staticmethod
  def Args(parser):
    # Registers the environment resource, the whitelisted Airflow subcommand,
    # and a REMAINDER catch-all for the subcommand's own arguments.
    resource_args.AddEnvironmentResourceArg(
        parser, 'in which to run an Airflow command')
    parser.add_argument(
        'subcommand',
        metavar='SUBCOMMAND',
        choices=command_util.SUBCOMMAND_WHITELIST,
        help=('The Airflow CLI subcommand to run. Available subcommands '
              'include: {} (see https://airflow.apache.org/cli.html for more '
              'info). Note that delete_dag is available from Airflow 1.10.1, '
              'and list_dag_runs, next_execution are available from Airflow '
              '1.10.2.').format(', '.join(command_util.SUBCOMMAND_WHITELIST)))
    parser.add_argument(
        'cmd_args',
        metavar='CMD_ARGS',
        nargs=argparse.REMAINDER,
        help='Command line arguments to the subcommand.',
        example='{command} myenv trigger_dag -- some_dag --run_id=foo')
  def BypassConfirmationPrompt(self, args):
    """Bypasses confirmations with "yes" responses.
    Prevents certain Airflow CLI subcommands from presenting a confirmation
    prompting (which can hang the gcloud CLI). When necessary, bypass
    confirmations with a "yes" response.
    Args:
      args: argparse.Namespace, An object that contains the values for the
        arguments specified in the .Args() method.
    """
    prompting_subcommands = ['delete_dag']
    if args.subcommand in prompting_subcommands and set(
        args.cmd_args).isdisjoint({'-y', '--yes'}):
      args.cmd_args.append('--yes')
  def DeprecationWarningPrompt(self, args):
    """Prompts the user before running a deprecated subcommand.

    Raises via cancel_on_no if the user declines; returns True otherwise.
    """
    response = True
    if args.subcommand in command_util.SUBCOMMAND_DEPRECATION:
      response = console_io.PromptContinue(
          message=DEPRECATION_WARNING.format(args.subcommand),
          default=False, cancel_on_no=True)
    return response
  def ConvertKubectlError(self, error, env_obj):
    """Hook for subclasses to rewrite kubectl errors; GA passes them through."""
    del env_obj  # Unused argument.
    return error
  def Run(self, args):
    """Executes the Airflow subcommand inside the environment's worker pod."""
    self.DeprecationWarningPrompt(args)
    running_state = (
        api_util.GetMessagesModule(release_track=self.ReleaseTrack())
        .Environment.StateValueValuesEnum.RUNNING)
    env_ref = args.CONCEPTS.environment.Parse()
    env_obj = environments_api_util.Get(
        env_ref, release_track=self.ReleaseTrack())
    # Commands are executed via kubectl exec, which needs a live cluster.
    if env_obj.state != running_state:
      raise command_util.Error(
          'Cannot execute subcommand for environment in state {}. '
          'Must be RUNNING.'.format(env_obj.state))
    cluster_id = env_obj.config.gkeCluster
    cluster_location_id = command_util.ExtractGkeClusterLocationId(env_obj)
    with command_util.TemporaryKubeconfig(cluster_location_id, cluster_id):
      try:
        kubectl_ns = command_util.FetchKubectlNamespace(
            env_obj.config.softwareConfig.imageVersion)
        pod = command_util.GetGkePod(
            pod_substr=WORKER_POD_SUBSTR, kubectl_namespace=kubectl_ns)
        log.status.Print(
            'Executing within the following kubectl namespace: {}'.format(
                kubectl_ns))
        self.BypassConfirmationPrompt(args)
        kubectl_args = [
            'exec', pod, '-tic', WORKER_CONTAINER, 'airflow', args.subcommand
        ]
        if args.cmd_args:
          # Add '--' to the argument list so kubectl won't eat the command args.
          kubectl_args.extend(['--'] + args.cmd_args)
        command_util.RunKubectlCommand(
            command_util.AddKubectlNamespace(kubectl_ns, kubectl_args),
            out_func=log.status.Print)
      except command_util.KubectlError as e:
        raise self.ConvertKubectlError(e, env_obj)
@base.ReleaseTracks(base.ReleaseTrack.BETA, base.ReleaseTrack.ALPHA)
class RunBeta(Run):
  """Run an Airflow sub-command remotely in a Cloud Composer environment.
  Executes an Airflow CLI sub-command remotely in an environment. If the
  sub-command takes flags, separate the environment name from the sub-command
  and its flags with ``--''. This command waits for the sub-command to
  complete; its exit code will match the sub-command's exit code.
  ## EXAMPLES
  The following command:
  {command} myenv trigger_dag -- some_dag --run_id=foo
  is equivalent to running the following command from a shell inside the
  *my-environment* environment:
  airflow trigger_dag some_dag --run_id=foo
  """
  # NOTE: the class docstring above doubles as the gcloud help text, so it is
  # kept identical to the GA command's.
  def ConvertKubectlError(self, error, env_obj):
    # For private-IP environments, kubectl failures are most often caused by
    # missing network access, so append a pointer to the setup guide.
    private_config = env_obj.config.privateEnvironmentConfig
    if not (private_config and private_config.enablePrivateEnvironment):
      return error
    return command_util.Error(
        str(error) +
        ' Make sure you have followed https://cloud.google.com/composer/docs/how-to/accessing/airflow-cli#running_commands_on_a_private_ip_environment '
        'to enable access to your private Cloud Composer environment from '
        'your machine.')
| [
"yogocamlus@gmail.com"
] | yogocamlus@gmail.com |
efe6000e08a97a745405e6fd91c9d473d802d800 | 9b72185bacfeec4c44c85d4ef1b0a23295db2769 | /TestInvoice.py | ed05480a51cf5ece2dabac58471bbc66b375650a | [] | no_license | A-ozmez/lab2Part4Group | a69e47747c100313f1d23afce9a9146a632e6ac2 | 9aae974119313217c40b2b1ea25f78104e7e4b3c | refs/heads/master | 2021-04-03T23:50:20.618534 | 2020-03-19T04:01:30 | 2020-03-19T04:01:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,297 | py | import pytest
from Invoice import Invoice
@pytest.fixture()
def products():
    """Two sample line items: 10 pens and 5 notebooks."""
    return {
        'Pen': {'qnt': 10, 'unit_price': 3.75, 'discount': 5},
        'Notebook': {'qnt': 5, 'unit_price': 7.5, 'discount': 10},
    }
@pytest.fixture()
def invoice():
    """A fresh Invoice instance for each test."""
    return Invoice()
def test_CanCalculateTotalImpurePrice(invoice, products):
    """Gross total: 10 * 3.75 + 5 * 7.5 == 75.

    The original called totalImpurePrice twice, discarding the first
    result; a single call in the assertion is sufficient.
    """
    assert invoice.totalImpurePrice(products) == 75
def test_CanCalculateTotalDiscount(invoice, products):
    """Total discount: 37.5 * 5% + 37.5 * 10% = 5.625.

    The expected value 5.62 presumably reflects Invoice's 2-decimal
    rounding -- confirm against the Invoice implementation. (The original
    called totalDiscount twice, discarding the first result.)
    """
    assert invoice.totalDiscount(products) == 5.62
def test_CanCalculateTotalPurePrice(invoice, products):
    """Net total: 75 - 5.62 == 69.38 after discount.

    The original called totalPurePrice twice, discarding the first
    result; a single call in the assertion is sufficient.
    """
    assert invoice.totalPurePrice(products) == 69.38
# Tests that quantities are not negative.
def test_PositiveQuantity(invoice, products):
    """Fail the test when any quantity is negative.

    The original only printed a message, so the test could never fail;
    an assert makes the check effective.
    """
    for name, item in products.items():
        assert item['qnt'] >= 0, "qnt cannot be negative for %s" % name
# Tests that prices and discounts are not negative.
def test_PositivePriceAndDiscount(invoice, products):
    """Fail the test when any price or discount is negative.

    The original only printed messages, so the test could never fail;
    asserts make the checks effective.
    """
    for name, item in products.items():
        assert item['discount'] >= 0, "discount cannot be negative for %s" % name
        assert item['unit_price'] >= 0, "price cannot be negative for %s" % name
| [
"noreply@github.com"
] | noreply@github.com |
fd5834d71ffff170665dd44f08b74372e8c33d67 | 85431b353749dd8f6ea308b439b9e5c42b2a7352 | /UnicornLog/UnicornLog/asgi.py | 1604c11dd14442cce813b200b702b615d37ef0d4 | [] | no_license | evansimmons/DjangoUnicorn | d018b229f7468e371a939ffb8fbf6a4afc2f1903 | 5e5c5fb6401daeabeb12404ebc390248cb29324d | refs/heads/main | 2023-06-19T19:36:57.036735 | 2021-07-14T19:52:38 | 2021-07-14T19:52:38 | 381,471,923 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | """
ASGI config for UnicornLog project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings before building the application.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'UnicornLog.settings')
# Module-level ASGI callable picked up by ASGI servers (daphne/uvicorn).
application = get_asgi_application()
| [
"etscodelancer@gmail.com"
] | etscodelancer@gmail.com |
ae587eaf8f61b8ea33ef42bd8a1a5fb0c40cd54e | 7866e1a7c043e5d1b539949f1d472c63003eb135 | /stringToCamelCase.py | 0ff3a534280f7232b2e04112eadf6a29aaae4b72 | [] | no_license | sindhujacoder/guvi | ce7362db7d87ad708ae69fcf3316d9c1a43ac002 | 5500625ed748358da76201645aa2a0780163786a | refs/heads/master | 2021-01-19T12:15:42.429802 | 2016-09-24T10:10:57 | 2016-09-24T10:10:57 | 69,040,570 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | testcases = input()
# Python 2: run once per test case (count read from the first stdin line).
while testcases:
    word = raw_input()
    # NOTE(review): despite the filename (stringToCamelCase), str.title()
    # produces Title Case, not camelCase -- confirm the intended output.
    word = word.title()
    print word
    testcases = testcases - 1
| [
"noreply@github.com"
] | noreply@github.com |
52d71685ffeb1d6312ac1a58a2bf9d34f0c1a4a3 | a90bb6995f15a864708c493ae8539f9c6bb0a1ea | /Zip - V.4.3.1.py | e99f4d551773775b3cf3f25b8fbf5859df2bb3cb | [] | no_license | ComplexChris/VigenereArchive | aac7f9cf06a955f7cf1484db9ea5e7641f4b6edc | 1e7b7288e3dff4293b568979396882b81d89f49b | refs/heads/master | 2016-09-14T13:16:31.986337 | 2016-05-12T09:48:42 | 2016-05-12T09:48:42 | 57,876,954 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,605 | py | # coding: utf-8
## Container Archive Encryption Program
## Written and Devoloped by Chris J. Nguyen
import os, sys, zipfile, StringIO, string, random, shutil, time
# Program metadata shown in the "Info" screen.
__version__ = "4.3.1"
__author__ = "Chris J. Nguyen"
__date__ = "April 6th, 2016"
__copyright__ = "(C) 2016-2018 Chris J. Nguyen. GNU GPL 3."
## freeNote.net
## irc
## filename encryption python
# NOTE(review): tmp_s/tmp_k appear to be leftover test fixtures; they are not
# referenced anywhere in this module -- confirm before removing.
tmp_s = 'There he was...'
tmp_k = 'password'
#TmpZip = zipfile.ZipFile('Test.zip', 'w')
# Decorative separator used throughout the console UI.
_Bricks = "~-"*16
_Fancy = "\n\n%s\n\n" % _Bricks
# 8 == zipfile.ZIP_DEFLATED; BLOCK is the cipher block size in bytes.
COMP_TYPE = 8
BLOCK = 256
# __file__ is undefined in some interactive contexts; fall back to argv[0].
try:
    _ScriptLoc = os.path.dirname(os.path.abspath(__file__))
except NameError:
    _ScriptLoc = os.path.dirname(os.path.abspath(sys.argv[0]))
# Well-known paths relative to the script: log file, temp cache, archives.
_LogLoc = os.path.join(_ScriptLoc, 'Log.txt' )
_CacheLoc = os.path.join(_ScriptLoc, '_Cache')
_UsersLoc = os.path.join(_ScriptLoc, '_Users')
# Help text written to FAQ.txt and shown for the "FAQ"/"HELP" inputs.
FAQ = """
Q: How do I add my own document to my Archive? \nA: Cut/Move a file from your computer and paste it into the temporary directory that is opened when you "Start" your archive in the Main Menu!
\nQ: How do I Quit the program? \nA: Simpily input the corresponding option relating to "Exit" or "Logout" or just type "EXIT" when promted to input text!
\nQ: How do I thank or ask the Devolopers? \nA: Email him at: ChrisNguyen180@gmail.com!
\nQ: What type of files may I import to my archive? \nA: Any file less than a couple MBs. As the archive uses a high compression ratio which may take up to a minute per MB!
\nQ: How long did it take to make this program? \nA: Apx <1 Month.
\nQ: How many lines of code is this program? \nA: About 500 lines.
\nQ: What are other acceptable inputs? \nA: "LS" or "DIR", "PWD", "FAQ" or "HELP", "START" or "OPEN", "CD" followed by a path """
# Files seeded into every newly created archive.
DEFAULTS = {
    'MyPasswords.txt': 'Facebook = "Bob1992" ',
    'MyNotes.txt': 'To do: Decrypt Russian Nuclear Launch Codes "XMG-01 (WMD)" ',
    "FAQ.txt": FAQ}
class SAK():
    """Swiss-Army-Knife utility class (Python 2): console I/O helpers,
    filesystem helpers, and the home-grown Vigenere-style byte cipher."""
    # Any 4-character substring of this string exits the program (see
    # Smart_Input); note "FUCK" is included as an exit code.
    ExitCode = "STOP, EXIT, QUIT, FUCK"
    Sleep = lambda self, zzz=1: time.sleep(zzz)
    Exists = lambda self, path: os.path.exists(path)
    #GetSize = lambda self, path: os.path.getsize(path)
    CD = lambda self, path: os.chdir(path) if self.Exists(path) else 0
    def GetSize(self, path):
        # Directories are sized recursively; files via os.path.getsize.
        if os.path.isdir(path):
            total = 0
            for root, dirs, files in os.walk(path):
                for f in files:
                    total += os.path.getsize( os.path.join(root, f))
            return total
        else: return os.path.getsize(path)
    def GetDetails(self):
        # Prints program, path and interpreter details (the "Info" screen).
        cwd = os.getcwd()
        sVer = sys.version
        items = [_Fancy,
            "\nScript Location: %s \nCache Location: %s \nArchive Location: %s " % (_ScriptLoc, _CacheLoc, _UsersLoc),
            "Current Directory: %s \n\nProgram Version: %s \nSystem Version %s " % (cwd, __version__, sVer),
            "\nRelease Date: %s \nCopyright: %s \nAuthor: %s " % (__date__, __copyright__, __author__)
            ]
        for line in items: print line
    def Clean(self, Path, verbose=True):
        # Removes a file or directory tree; returns False on failure
        # (and implicitly None on success).
        try:
            if os.path.isdir(Path):
                shutil.rmtree(Path)
            else:
                os.remove(Path)
        except Exception as E:
            if verbose: print '\nUnable to remove file(s)!\nPlease close any open processes!\n', E
            return False
    def Log(self, Stat, LogName= _LogLoc ):
        self.WriteFile(LogName, '\n\n'+Stat, 'a')
    def ReadFile(self, FileName, mode='rb'):
        # Python 2 built-in file() is used here (removed in Python 3).
        with file(FileName, mode) as f:
            return f.read()
    def WriteFile(self, FileName, Content, mode='wb'):
        with file(FileName, mode) as f:
            f.write(Content)
            f.close()
    def RandomString(self, length=10, CanExist=False ):
        """ Generates a random alphanumeric string; by default retries until
        the string does not collide with an existing filesystem path. """
        BaseChars = (string.ascii_letters + string.digits) * random.randint(4,8)
        while True:
            Out = ''.join( random.sample( BaseChars, length ) )
            if self.Exists(Out) == CanExist:
                return Out
    def Smart_Input(self, msg='(Y/N)'):
        """ Extended raw_input with built-in shell-like commands (LS, PWD,
        FAQ, START, CD, and the EXIT codes). Any other input is returned. """
        while True:
            Out = raw_input(msg)
            OutUp = Out.upper()
            LOO = len(Out)
            path = Out[2:].strip(' ') ## Used for "CD"
            if path.upper() in ["HOME"]: path = _ScriptLoc
            # Substring test: any 4-char slice of ExitCode triggers exit.
            if OutUp in self.ExitCode and len(Out)==4:
                raise SystemExit
            elif OutUp in ["LS", "DIR"]:
                print _Fancy, os.listdir('.')
            elif OutUp in ["GWD", "PWD"]:
                print _Fancy, os.getcwd()
            elif OutUp in ["FAQ", "HELP"]:
                print _Fancy, FAQ
            elif OutUp in ["START", "OPEN"]:
                os.startfile('.')
            elif OutUp[:2]=="CD" and self.Exists(path):
                os.chdir(path)
                print "\nChanging directories to: \n%s \n" % os.getcwd()
            else:
                return Out
    def Raw_Choice(self, Msg='(Y/N)', Options='YN', Length=1):
        """ Prompt until the (upper-cased) answer is one of Options. """
        while True:
            Answer = self.Smart_Input(Msg+'> ').upper()
            if Answer in Options and len(Answer)==Length:
                return Answer
    def MakeASCII(self, String):
        """ Maps every non-alphanumeric character onto an alphanumeric one
        (ord(c) mod 62) so the result is filesystem-safe. """
        # The first Bad assignment is immediately overwritten; both are unused.
        Bad = '/\:*?<>"|'
        Bad = string.punctuation
        BaseChars = (string.ascii_letters + string.digits)
        LBS = len(BaseChars)
        out = ''
        for c in String:
            if c not in BaseChars:
                out += BaseChars[ord(c) % LBS ]
            else: out += c
        return out
    def GetLogin(self, Confirm=True):
        """ Gets login credentials from user input; with Confirm=True the
        password must be entered twice (mismatch raises EnvironmentError). """
        while True:
            print _Fancy
            UN = self.Smart_Input('Enter User Name >>> ').upper()
            PW = self.Smart_Input('Enter Password >>> ')
            if Confirm:
                PW2 = self.Smart_Input('Confirm Password >>> ')
                if PW == PW2: Confirm=False
                else:
                    print '\nPasswords do not match!'
                    raise EnvironmentError
            if Confirm==0:
                return UN, PW
    def GetFileName(self, UN, PW):
        """ Mix variables and the Collatz algorithm to produce a one-way
        archive file name derived from the username and password. """
        DefaultPhrase = self.Vigenere('_CoMpLeXiTy_314', UN)
        P1 = self.Vigenere(DefaultPhrase, UN, True)
        P2 = self.Vigenere(P1, PW, True)
        Name = ''
        for char in P1+P2:
            cInt = self.Collatz( ord(char) * 4 )%256
            # Characters that hash to 0 are dropped entirely.
            Name += chr( cInt ) if cInt>0 else ''
        return Name #(Name, P1, P2)
    def Collatz(self, n):
        """ Returns the maximum value reached by the Collatz sequence
        starting at abs(n) (or n itself when n <= 2). """
        ## Basic algorithm
        n = abs(n); numb=[]
        if n<=2: return n
        while n!=1:
            numb.append(n)
            if n%2==0 and n!=0: n/=2
            else: n=(n*3)+1
        return max(numb)
    def Vigenere(self, string, key, encrypt=None):
        """ Per-byte polyalphabetic cipher. encrypt=True subtracts the key
        byte, encrypt=False adds it (mod 256), so True/False are inverses.
        NOTE(review): the encrypt=None branch mixes min/max arithmetic and
        does not look invertible -- confirm before relying on it. """
        #return string
        LP = len(key)
        out = ''; x=0
        los = len(string)
        if "" in (string, key): return string
        for x in range( los ):
            val1, val2 = ord( string[x] ), ord( key[x % LP ] )
            if encrypt != None:
                val3 = val1+val2 if encrypt==False else val1-val2 ## if True else (False)
            else:
                a, b = min([val1, val2]), max([val1, val2])
                val3 = val1+val2 if (val2<val1 and val1+val2<256) else b-a
            v3 = val3%256
            out += chr(v3)
        return out
    def VigLarge(self, string, key, encrypt=None, Blocks=512):
        """ Encrypts only the first and last `Blocks` bytes of the string;
        the middle section is passed through unencrypted. """
        if False: #len(string) < Blocks/2.5:
            print "\nLarge going to straight..."
            return self.Vigenere(string, key, encrypt)
        # middle/a/b are computed but unused below.
        middle = len(string)/2
        a,b = middle-Blocks, middle+Blocks
        modified = string[:Blocks] + string[-Blocks:]
        viged = self.Vigenere(modified, key, encrypt)
        out = viged[:Blocks] + string[Blocks:-Blocks] + viged[-Blocks:]
        #print middle, a,b
        return out
    def MakeBlocks(self, String, Size):
        """ Generator yielding consecutive `Size`-byte slices of String. """
        ## Creates blocks from string based on "Size"
        x=0; LOS=len(String)
        while x*Size < LOS: #for x in range(1, (len(String) / Size) ) :
            x += 1
            a, b = ((x-1)*Size), (x*Size)
            #print "The X: ", x, " |\tA, B : ", a, b
            yield String[ a : b ]
    def VigBlocks(self, String, PW, mode=True, Size=2**15, Path=None):
        """ Block-wise cipher for large inputs. Only every other block
        (even indices) is run through VigLarge; odd blocks pass through
        unchanged. """
        ## Encrypts blocks for large strings
        if False: #len(String) < 2**20 :
            print "\nBlocks going to large..."
            return self.VigLarge(String, PW, mode, Size)
        LOS = len(String)
        x=-1
        mass = ''
        for part in self.MakeBlocks(String, Size):
            x+=1
            #frag = String[ (x-1)*BlockSize : x*BlockSize ]
            if x % 2 == 0:
                chunk = Tools.VigLarge(part, PW, mode, Size/8) # Tools.VigLarge( part, PW, mode, cap )
            else:
                chunk = part
            mass += chunk
            #Tools.WriteFile(Path, chunk, "ab")
        return mass
    def AutoVig(self, string, key, encrypt=None, Blocks=512):
        """ Dispatches to the block, large, or primary cipher method based
        on the input size (>1MiB, >32KiB, or smaller respectively). """
        LOS = len(string)
        if LOS > 2**20:
            print "\nUsing Blocks Method..."
            product = self.VigBlocks(string, key, encrypt, 2048)
        elif LOS > 2**15:
            print "\nUsing Large Method..."
            product = self.VigLarge(string, key, encrypt, 512)
        else:
            print "\nUsing Primary Method..."
            product = self.Vigenere(string, key, encrypt)
        #product = self.Vig(string, key, encrypt, limit=2056, Blocks=512)[0]
        return product
# Shared toolbox instance used throughout the module (also referenced from
# inside SAK itself, e.g. VigBlocks).
Tools = SAK()
class ArcZip():
    """ Class for handling StringIO and zipfile instances: building,
    encrypting, decrypting and extracting the in-memory archives. """
    def __init__(self, Active=True):
        pass
    def WalkDir(self, path):
        """ Generator yielding the full path of every file under `path`. """
        ## Generator object for directory walks
        for root, dirs, files in os.walk(path):
            for f in files:
                yield os.path.join(root, f)
    def CreateZip(self, ZipName, Password):
        """ Creates an encrypted archive seeded with the DEFAULTS files. """
        ## Creates archive file in local directory
        FileInst = StringIO.StringIO()
        with zipfile.ZipFile( FileInst, mode='w', compression=COMP_TYPE ) as ZipF:
            for item in DEFAULTS:
                # Prefer an on-disk copy of the default file when one exists.
                if Tools.Exists(item):
                    print "\nCopying default file..."
                    ZipF.write(item, item)
                else: ZipF.writestr(item, DEFAULTS[item])
            ZipF.close()
        # Encrypt the whole zip byte stream before writing it to disk.
        Encrypted = Tools.AutoVig( FileInst.getvalue(), Password, True, BLOCK)
        Tools.WriteFile(ZipName, Encrypted )
    def OpenZip(self, ZipName, Password):
        """ Decrypts an archive and returns a readable ZipFile instance,
        or False when decryption does not yield a valid zip (bad password
        or corrupt file). """
        ## Opens and decrypts zip file and returns decrypted zip file instance
        raw_zip = Tools.ReadFile(ZipName)
        Decrypted = Tools.AutoVig(raw_zip, Password, False, BLOCK)
        buff = StringIO.StringIO(Decrypted)
        if zipfile.is_zipfile( buff ):
            print "\nProcessing Archive IO Instance..."
            ZipInst = zipfile.ZipFile( buff, 'r', compression=COMP_TYPE )
            return ZipInst
        else: return False
    def WriteZip(self, TmpDir, Password=None, Destination=None):
        """ Zips the full contents of the temp directory and, when both a
        Password and Destination are given, encrypts and writes the result. """
        ## Reads all content of temp directory
        ## and writes to zip IO instance
        FileInst = StringIO.StringIO()
        with zipfile.ZipFile( FileInst, mode='w', compression=COMP_TYPE ) as ZipInst:
            for raw_file in self.WalkDir( TmpDir ):
                ZipInst.write( raw_file, os.path.relpath(raw_file, TmpDir) ) # os.path.join(root, raw_file))
        if (Destination and Password) != (None, None):
            Path = os.path.join(_ScriptLoc, Destination)
            Encrypt = Tools.AutoVig(FileInst.getvalue(), Password, True, BLOCK)
            Tools.WriteFile(Path, Encrypt)
    def Decompress(self, ZipInst, ToDir = None ):
        """ Extracts all files from a decrypted zipfile instance into ToDir
        (defaults to self.ToDir, set by the User subclass). """
        #Extracts all files in decrypted Zip IO instance
        if ToDir==None: ToDir = self.ToDir
        ZipInst.extractall( ToDir )
class User(ArcZip):
""" Primary UI class for Zip Archive Usage """
def __init__(self, Active=True):
if Active:
os.chdir(_ScriptLoc)
Tools.CD(_ScriptLoc)
if Tools.Exists(_CacheLoc):
Tools.Clean (_CacheLoc)
if Tools.Exists(_CacheLoc)==False:
os.mkdir(_CacheLoc)
if Tools.Exists(_UsersLoc)==False:
os.mkdir(_UsersLoc)
if Tools.Exists("FAQ.txt"):
Tools.WriteFile("FAQ.txt", FAQ, 'w')
self.Username, self.Password = '', ''
self.ZipName, self.ZipInst = '', None
self.Start()
def Depart(self, Cleanup=True, verbose=True):
print _Fancy, '\nGoodBye!' if verbose else ""
shutil.rmtree(_CacheLoc)
sys.exit()
## Delete Tmp Dir...
def UpdateUserVar(self, Switch):
""" Gathers primary user variables """
self.Username, self.Password = Tools.GetLogin( Switch ) #'CHRIS', 'ares'
Name = Tools.MakeASCII(Tools.GetFileName(self.Username, self.Password) )[:20]+'.ADF'
self.ZipName = os.path.join( _ScriptLoc, "_Users", Name)
self.ZipBackup = os.path.join( _ScriptLoc, "_Users", Name.replace(".ADF", ".ABDF"))
RawDir = Name.rstrip('.ADF')
self.ToDir = os.path.join(_ScriptLoc, '_Cache', ".$"+RawDir) ##'.$'
def Start(self):
""" First menu interface to start archive process """
Menu = "a. Log in \t\tb. Create Account \nc. Exit"
while True:
Tools.Sleep(1.5)
print _Fancy, Menu
Choice = Tools.Raw_Choice('(A/B/C)', 'ABC')
if Choice=='C': self.Depart()
else:
## Complex method of assigning boolean values (Encrypt, Decrypt)
Either = False if Choice=='A' else True
self.UpdateUserVar(Either)
ArcStat = Tools.Exists(self.ZipName)
ArcStatBak = Tools.Exists(self.ZipBackup)
if Either==False:
if ArcStat or ArcStatBak:
if ArcStat:
shutil.copy(self.ZipName, self.ZipBackup)
self.ZipInst = self.OpenZip(self.ZipName, self.Password )
else:
print "\nRestoring from backup file..."
shutil.copy(self.ZipBackup, self.ZipName)
self.ZipInst = self.OpenZip(self.ZipName+".ADF", self.Password )
if self.ZipInst != False: self.Run()
else: print "\nUnable to proccess archive!"
else:
print "\nAccount not found!"
else:
if ArcStat==0:
self.CreateZip(self.ZipName, self.Password)
print "\nAccount created successfully! \nPlease re-login to continue!"
else:
print "\nArchive already exists. Please log in instead."
def Run(self):
""" UI for Archive instances once variables are established """
Menu1 = "MAIN MENU: \na. Start Archive \tb. Save Archive \nc. More \t\td. Logout"
Menu2 = "EXTENDED MENU: \na. Clear Cache \t\tb. Delete Archive \nc. Info \t\td. Back"
Msg, Options = "(A/B/C/D)", "ABCD"
MainMenu = True
print _Fancy, "\nWelcome %s!" % self.Username.title()
while True:
Tools.Sleep(2)
print _Fancy, Menu1 if MainMenu else Menu2
Choice = Tools.Raw_Choice(Msg, Options)
if MainMenu == True:
if Choice=="C": MainMenu=False
else: self.MainMenu(Choice)
else:
if Choice=="D": MainMenu=True
self.ExtendedMenu(Choice)
def ExtendedMenu(self, Choice):
""" Extended menu for alternitive options """
if Choice == "A":
if Tools.Exists(self.ToDir):
print "\nWarning: Any unsaved changes to your archive will be lost!"
c = Tools.Raw_Choice("Continue? (Y/N) > ")
if c=="N": return
else: print "\nProceeding..."
for x in range(100):
Tools.Clean( _CacheLoc, False )
if Tools.Exists(_CacheLoc)==0:
print "\nCache has been cleared!"
break
elif Choice == "B":
self.RemoveArchive()
elif Choice=="C":
Tools.GetDetails()
def MainMenu(self, Choice):
""" Main menu with basic archive control """
if Choice == 'A':
self.StartArchive()
elif Choice == 'B':
self.SaveArchive()
elif Choice=="D":
if Tools.Exists(self.ToDir):
print "\nWarning! Did you want to save your archive first?"
Ans = Tools.Raw_Choice()
if Ans == "Y": self.SaveArchive()
raise EnvironmentError
def RemoveArchive(self):
""" Removes Archive and temp directory if one exists """
print "\nConfirm removale of entire archive \nby entering the Captcha between > and <:"
code = Tools.RandomString(6, False)
attempt = raw_input("@#!>%s<?*@ \n> " % code)
OVW = "SUOVW" ; CONT=False
if attempt==code or attempt==OVW:
if attempt != OVW:
print "\nNow re-login: "
a, b = Tools.GetLogin(False)
if (a,b) == (self.Username, self.Password):
CONT = True
else: print "\nIncorrect login!"
else: CONT=True
if CONT==True:
stat = Tools.Clean(self.ZipName)
if stat != False:
print "\nArchive has been removed!"
raise EnvironmentError
else:
print "\nUnable to remove archive at this time!"
else: print "\nIncorrect Captcha!"
def StartArchive(self):
""" Opens zipfile and decrompresses content into temp directory """
if Tools.Exists(self.ToDir):
print "\nAn archive is already started. Please Save or manually remove direcory: \n%s " % self.ToDir
else:
os.mkdir(self.ToDir)
self.Decompress(self.ZipInst, self.ToDir)
print "\nArchive has been opened at: \n%s " % (self.ToDir)
try: os.startfile(self.ToDir)
except: pass
	def SaveArchive(self):
		""" Reads, encrypts, and writes files back into zipfile container.

		Warns when the temp directory shrank relative to the current archive
		(possible accidental deletion), then rewrites the zip, removes the
		temp directory, refreshes the in-memory archive handle and copies a
		backup of the zip.
		"""
		if Tools.Exists(self.ToDir): # and Update==False:
			a, b = Tools.GetSize(self.ToDir), Tools.GetSize(self.ZipName)
			# a < b: the edited content is smaller than what is archived.
			if a < b and b>1:
				print "\nDetected changes are less than original archive file size!"
				if a<1:
					print "\nAll file(s) will be removed!\n"
				if Tools.Raw_Choice("Proceed with changes? (Y/N) >")=="N": return
			print '\nSaving...'
			ZipInstByte = self.WriteZip(self.ToDir, self.Password, self.ZipName )
			RmStat = Tools.Clean( self.ToDir )
			print "\nArchive has been saved!"
			print "...and the temporary directory %s removed" % ("WAS" if RmStat!=False else "WAS NOT")
			print "Location: ", os.path.relpath( self.ToDir )
			# Re-open the zip so the in-memory handle reflects what was written.
			self.ZipInst = self.OpenZip(self.ZipName, self.Password) ## Update
			shutil.copy(self.ZipName, self.ZipBackup)
		else:
			print "\nArchive not started! \nNo changes made to archive!"
if __name__=="__main__":
	# Session loop: EnvironmentError restarts login (logout / archive
	# removed); SystemExit clears the cache and terminates the program.
	while True:
		try:
			c = User()
		except EnvironmentError:
			pass
		except SystemExit:
			if Tools.Exists(_CacheLoc):
				print "\nClearing...\n"
				Tools.Clean(_CacheLoc)
			print "\nExiting...\n"
			break
| [
"ChrisNguyen180@gmail.com"
] | ChrisNguyen180@gmail.com |
8177573260147c1b6a0fc39e0e1977682266b7b6 | 50dd2a43daa8316fc11e0c176b5872738fcc5dde | /Learning/130_Fluent_Python/fp2-utf8/freeinteractive/freeinteractive 103.py | fedc3048dc3417682a8981be5ae2035e8e4eed63 | [] | no_license | FrenchBear/Python | 58204d368e3e72071eef298ff00d06ff51bd7914 | b41ab4b6a59ee9e145ef2cd887a5fe306973962b | refs/heads/master | 2023-08-31T18:43:37.792427 | 2023-08-26T15:53:20 | 2023-08-26T15:53:20 | 124,466,047 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24 | py | >>> b = 6
>>> f1(3)
3
6
| [
"FrenchBear38@outlook.com"
] | FrenchBear38@outlook.com |
3e67c476deabc53331ccd7582f0feff94455d632 | 31741f4807f857675f9304088b689af9b043e7b1 | /chp10/django_ecommerce/contact/views.py | 8160a48a0c26f4c0a9a9857aa9771927481e3ab1 | [] | no_license | ujrc/Realpython_book3 | c487ff0569f90b0e21c2c51cf951d6aad4755541 | aaff8db074b8dd33d6c7305ac0a94c2ef161c847 | refs/heads/master | 2021-01-10T02:02:11.247279 | 2016-01-11T17:06:59 | 2016-01-11T17:06:59 | 48,060,189 | 0 | 0 | null | 2015-12-31T16:48:52 | 2015-12-15T18:03:47 | Python | UTF-8 | Python | false | false | 755 | py | from django.http import HttpResponse, HttpResponseRedirect
from django.template import RequestContext, loader
from .forms import ContactView
from django.contrib import messages
def contact(request):
if request.method == 'POST':
form = ContactView(request.POST)
if form.is_valid():
our_form = form.save(commit=False)
our_form.save()
messages.add_message(
request, messages.INFO, 'Your message has been sent. Thank you.'
)
return HttpResponseRedirect('/')
else:
form = ContactView()
t = loader.get_template('contact/contact.html')
c = RequestContext(request, {'form': form, })
return HttpResponse(t.render(c))# Create your views here.
| [
"uwjearc@yahoo.com"
] | uwjearc@yahoo.com |
d417b2b14d4f4e28483f2fafa8effd65731f1db4 | e38b2cc76373c54a766485c9ca659e6c6e598620 | /Tuple_Teerapat.py | 3f061bb279ce68485442360e32b60226ba0457c0 | [] | no_license | icelake10/- | d660a3cc5c93c880c815f80168dceda1d3f6995f | d089f8f5311232d3b6ac6d0c114ea1620395aa97 | refs/heads/master | 2020-07-30T08:52:13.986406 | 2019-09-22T14:41:23 | 2019-09-22T14:41:23 | 210,162,628 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,479 | py | #!/usr/bin/env python
# coding: utf-8
# 1.กำหนดให้ brand_cars = ("Toyota", "Honda", "Benz", "BMW", "Tesla", "Ford", "KIA", "Volvo" )
#
# 1.1 ให้เขียนคำสั่งโปรแกรมแสดงตำแหน่งของ Benz, Ford และ Volvo
# 1.2 ให้เขียนคำสั่งโปรแกรมแสดงจำนวนข้อมูลทั้งหมดในทูเพิล
# 1.3 ให้เขียนคำสั่งโปรแกรมตรวจสอบมียี่ห้อรถ Suzuki, Ferrari, Ford อยู่ใน cars หรือไม่
# In[3]:
# Fixed set of car brands under test; a tuple because the data is read-only.
brand_cars = ("Toyota", "Honda", "Benz", "BMW", "Tesla", "Ford", "KIA", "Volvo" )
# 1.1 — tuple.index() gives the 0-based position of each brand.
print("ตำแหน่งของ Benz คือ",brand_cars.index("Benz"))
print("ตำแหน่งของ Ford คือ",brand_cars.index("Ford"))
print("ตำแหน่งของ Volvo คือ",brand_cars.index("Volvo"))
# 1.2 — len() counts all items in the tuple.
print("จำนวนข้อมูลทั้งหมดในทูเพิล คือ",len(brand_cars),"แบน")
# 1.3 — the `in` operator tests membership for each candidate brand.
print("มีแบนรถยน Suzuki อยู่ใน brand_cars หรือไม่ =","Suzuki" in brand_cars)
print("มีแบนรถยน Ferrari อยู่ใน brand_cars หรือไม่ =","Ferrari" in brand_cars)
print("มีแบนรถยน Ford อยู่ใน brand_cars หรือไม่ =","Ford" in brand_cars)
# In[ ]:
| [
"noreply@github.com"
] | noreply@github.com |
403e6416ef4d218912c9202b4988ddf498e0997d | 594102aed31b51ddc54e429bcde76fd733faf18f | /test_request_teacher/api_page/address_page.py | fb94cb949687c65fb4e714479d7f200157eddc46 | [] | no_license | niujiama/LilyGithub | 995c81b0b3574e587008b0807ec92e8f3a1106c6 | 91881dd57a139198c459fc33a41e286cdeaad51f | refs/heads/master | 2023-04-19T12:50:36.377748 | 2020-11-23T10:56:41 | 2020-11-23T10:56:41 | 285,243,545 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,572 | py | from test_request_teacher.api_page.base_api import BaseApi
from test_request_teacher.api_page.wework_utils import WeWorkUtils
class AddressPage(BaseApi):
    """WeWork contact-book API wrapper: query, create, delete and update members."""

    def __init__(self):
        # Exchange the fixed app secret for an access token once, up front;
        # every request below reuses self.token.
        _corpsecret = "6e-e9pUV0QZnJppDPwzRZA9PCtKb9urb9TWNf-6v5fA"
        self.token = WeWorkUtils().get_token(_corpsecret)

    def get_member_info(self):
        """Fetch the detail record of member ``labixiaoxin``."""
        query = {"access_token": self.token, "userid": "labixiaoxin"}
        return self.send_api({
            "method": "get",
            "url": "https://qyapi.weixin.qq.com/cgi-bin/user/get",
            "params": query,
        })

    def add_member(self):
        """Create member ``labixiaoxin`` (蜡笔小新) under department 1."""
        member = {"userid": "labixiaoxin", "name": "蜡笔小新",
                  "mobile": "10111111115", "department": [1]}
        return self.send_api({
            "method": "post",
            "url": "https://qyapi.weixin.qq.com/cgi-bin/user/create",
            "params": {"access_token": self.token},
            "json": member,
        })

    def delete_member(self):
        """Remove member ``labixiaoxin``."""
        endpoint = f"https://qyapi.weixin.qq.com/cgi-bin/user/delete?access_token={self.token}&userid=labixiaoxin"
        return self.send_api({
            "url": endpoint,
            "method": "get",
        })

    def update_member(self):
        """Rename member ``labixiaoxin`` to ``wangwu``."""
        endpoint = f"https://qyapi.weixin.qq.com/cgi-bin/user/update?access_token={self.token}"
        return self.send_api({
            "method": "post",
            "url": endpoint,
            "json": {"userid": "labixiaoxin", "name": "wangwu"},
        })
"13426251727@163.com"
] | 13426251727@163.com |
47f0356ebef2eca13e87788037bd242513af73be | cec3584cb40c3f762f8e550231c646efb8905004 | /main.py | 00436c722fa86f195e4f6332a2413482a410f514 | [] | no_license | XXX-CODER/pythondiary-1 | a40f22dc10d8ecf119430be39cba31c7e97a0d1e | 60e3ca72f047aa78485e8be6e8d7c1db13943627 | refs/heads/master | 2020-07-06T09:13:37.343064 | 2019-08-18T06:20:23 | 2019-08-18T06:20:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 908 | py | from flask import Flask, render_template
import glob
from datetime import datetime
import os
app = Flask("Elwing")
@app.route("/category/<c>")
def category(c):
    """Render every article in category *c*.

    Each entry is (index, article name, last-modified UTC timestamp, text).
    """
    fs = glob.glob("articles/" + c + "/*.txt")
    fill = []
    for i, f in enumerate(fs):
        # Context manager guarantees the handle is closed even if read() raises
        # (the original open/read/close left the file open on error).
        with open(f) as fh:
            article = fh.read()
        # Strip directory and extension to get the article's display name.
        fp = f.split("/")[-1].replace(".txt", "")
        utc = datetime.utcfromtimestamp(os.path.getmtime(f))
        fill.append((i, fp, str(utc), article))
    return render_template("category.html", cat=fill, title=c)
@app.route("/")
def home():
    """Front page: list every category together with its article count."""
    fill = [
        (path.replace("articles/", ""), len(glob.glob(path + "/*.txt")))
        for path in glob.glob("articles/*")
    ]
    return render_template("index.html", cat=fill)
if __name__ == "__main__":
    # 0.0.0.0 exposes the dev server on all interfaces (hosted environments).
    app.run(debug=True, host="0.0.0.0", port="3000")
| [
"noreply@github.com"
] | noreply@github.com |
0bc10e89e9c0f58d8de8e45fd99cba81ea6fcae8 | 94583800a07384bebcde02e147066b0ed8636234 | /spider/内蒙贷_完全版1.0.py | 08066c0e748411fcfb5e5c5465e249f422a413d7 | [] | no_license | jk123415/python_some_js | 2f3323143cf932e67411e29aab28cdb2fffd1014 | b5bcdda35bf8bf9e11680d55c5efc91bfe802fa4 | refs/heads/master | 2020-04-24T14:09:07.977279 | 2019-02-22T06:51:16 | 2019-02-22T06:51:16 | 172,010,361 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,488 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2018-07-10 11:14:08
# @Author : leopython (hailson@sina.com)
# @Link : http://www.cnblogs.com/leopython/
# @Version : $Id$
import os
import requests
import pprint
import re
import sqlite3
from bs4 import BeautifulSoup
import lxml.html
import jsonpath
# 处理头消息
def header(stri):
    """Parse a raw header blob (one ``Key: Value`` pair per line) into a dict.

    Returns ``{}`` for the empty string, a key -> value mapping for any other
    str, and falls through (implicitly returning None) for non-str input,
    matching the original contract relied on by callers.
    """
    if stri == '':
        return dict()
    elif isinstance(stri, str):
        data = {}
        for line in stri.split('\n'):
            # maxsplit=1: header values routinely contain ": " themselves
            # (timestamps, User-Agent strings); only the first separator
            # delimits key from value.
            parts = line.split(": ", 1)
            data[parts[0]] = parts[1]
        return data
    else:
        pass
# 初始化数据库生成Content表
def initialize_db(db_file):
    """Create the Content table in *db_file* unless it already exists.

    The table columns are the Chinese display names (this dict's values);
    the English keys are kept only for symmetry with publish34()'s mapping.
    """
    post = {'title': "标题",
            'borrowid': "编号",
            'siteid': '网站编号',
            'lastpostdate': '时间',
            'daystr': '借款期限',
            'typeimg': '类型图片',
            'posttype': '回复类型',
            'postdata': '回复内容',
            'money': '借款金额',
            'rate': '年利率',
            'senddate': '发标时间',
            'username': '作者',
            'jiangli': '投标奖励',
            'jianglimoney': '奖励金额',
            'ratetype': '利率类型',
            'repayment_type': '还款方式',
            'borrow_url': '网址',
            'sex': '性别',
            'age': '年龄',
            'industry': '所属行业',
            'df': '所在地',
            'organization': '发布机构',
            'borrow_use': '借款用途',
            'borrower_type': '借款类别',
            'borrow_info': '借款详情', }
    print('''start initialize database: ''', db_file)
    conn = sqlite3.connect(db_file)
    cursor = conn.cursor()
    cursor.execute("SELECT name FROM sqlite_master WHERE type='table'")
    if ('Content',) in cursor.fetchall():
        print('already exist table: Content in ', db_file)
    else:
        print('create table: Content in ', db_file)
        # One ", <name> TEXT" fragment per scraped field, in dict order.
        extra_columns = ''.join(', ' + column + ' TEXT' for column in post.values())
        cursor.execute(
            '''CREATE TABLE Content(ID INTEGER PRIMARY KEY AUTOINCREMENT,已采 TINYINT(1),已发 TINYINT(1){})'''.format(extra_columns))
    conn.commit()
    conn.close()
# 更新数据库Content表列名,根据传入的lst
def update_db_field(db_file, lst=()):
    """Add any column names from *lst* that are missing on the Content table.

    SQLite cannot drop columns, so this only ever ALTERs new ones in;
    existing columns are silently skipped.

    The default was changed from the mutable ``[]`` to the immutable ``()``
    (same iteration behavior, no shared-default pitfall).
    """
    print('start update table Content fields')
    conn = sqlite3.connect(db_file)
    cursor = conn.cursor()
    cursor.execute('''PRAGMA table_info([Content])''')
    # Row layout of table_info: (cid, name, type, notnull, dflt_value, pk).
    existing = {row[1] for row in cursor.fetchall()}
    for column in lst:
        if column in existing:
            continue
        cursor.execute('''ALTER TABLE Content ADD {} TEXT'''.format(column))
        print(column, ' is yes')
    print('Table Content fields already update')
    conn.commit()
    conn.close()
# 根据传入的字典更新数据库Content表信息
def update_db(db_file, dic={}, conditions=''):
    """Write one scraped record into the Content table.

    With ``conditions == ''`` the record in *dic* is inserted, unless a row
    with the same 编号 (record id) already exists — then it is skipped with a
    message. Otherwise every key/value pair in *dic* is applied as an UPDATE
    restricted by the raw SQL fragment *conditions*.

    *dic* maps (Chinese) column names to values and must contain 编号 and 标题.
    Fixes over the original: the connection is now always closed (the
    duplicate-record early return used to leak it), and the local that
    shadowed the ``str`` builtin was renamed.
    """
    conn = sqlite3.connect(db_file)
    db = conn.cursor()
    try:
        if conditions == '':
            columns = ', '.join(dic.keys())
            placeholders = ', '.join('?' * len(dic))
            # Dedup on the record id so re-running the spider never inserts twice.
            db.execute('select * from Content where 编号=?', (dic['编号'],))
            if db.fetchall() == []:
                statement = 'insert into Content({}) values ({})'.format(
                    columns, placeholders)
                db.execute(statement, tuple(dic.values()))
            else:
                print(dic['标题'], ' 已经采集过')
                return
        else:
            # NOTE(review): *conditions* and the values are interpolated into
            # raw SQL; callers must only pass trusted data (injection risk).
            for column, val in dic.items():
                stmt = "UPDATE Content set {0}='{1}' where {2}".format(
                    column, val, conditions)
                db.execute(stmt)
        conn.commit()
    finally:
        db.close()
        conn.close()
    print(dic['标题'], ' is done')
# 发布数据方法
def publish34(db_file):
    """POST every collected-but-unpublished Content row to the remote
    insert endpoint, then mark it 已发=1 (ok) or 已发=2 (failed).

    The *post* dict maps the remote form-field names (keys) to the local
    Chinese column names (values); it must stay in sync with initialize_db.
    """
    post = {'title': "标题",
            'borrowid': "编号",
            'siteid': '网站编号',
            'lastpostdate': '时间',
            'daystr': '借款期限',
            'typeimg': '类型图片',
            'posttype': '回复类型',
            'postdata': '回复内容',
            'money': '借款金额',
            'rate': '年利率',
            'senddate': '发标时间',
            'username': '作者',
            'jiangli': '投标奖励',
            'jianglimoney': '奖励金额',
            'ratetype': '利率类型',
            'repayment_type': '还款方式',
            'borrow_url': '网址',
            'sex': '性别',
            'age': '年龄',
            'industry': '所属行业',
            'df': '所在地',
            'organization': '发布机构',
            'borrow_use': '借款用途',
            'borrower_type': '借款类别',
            'borrow_info': '借款详情', }
    # The endpoint answers with a body containing "ok" on success.
    reg = re.compile('ok')
    post_uri = 'http://101.201.75.34/curl/insert.php'
    colculmus = ','.join(post.values())
    conn = sqlite3.connect(db_file)
    db = conn.cursor()
    # 已采=1: fully scraped; 已发=0: not yet pushed to the remote site.
    db.execute('''SELECT {} FROM Content WHERE 已采=1 AND 已发=0'''.format(colculmus,))
    # print(db.fetchall())
    lst = db.fetchall()
    if lst == []:
        print('Need post data is 0')
        conn.close()
        return
    else:
        for postval in lst:
            # Pair the remote field names with this row's values (same order
            # as the SELECT column list built from post.values()).
            publish_data = dict(zip(post.keys(), postval))
            # print(publish_data)
            rr = requests.post(post_uri, data=publish_data)
            if re.search(reg, rr.text):
                print(publish_data['title'], ' issued successfull')
                db.execute('''UPDATE Content SET 已发=1 WHERE 编号="{}"'''.format(
                    publish_data['borrowid']))
            else:
                print(publish_data['title'], ' issued failed')
                db.execute('''UPDATE Content SET 已发=2 WHERE 编号="{}"'''.format(
                    publish_data['borrowid']))
    conn.commit()
    conn.close()
# 标签类
class Tag():
def __init__(self, name, value, entry, *body):
self.name = name
self.alias = value['name']
self.value = value['value']
if value['extend'] != {}:
# print(value['extend'])
for extend_name, extend_value in value['extend'].items():
if extend_name == 'xpath' and extend_value[0] == 'text_content()':
html = lxml.html.fromstring(str(entry))
s = html.xpath(extend_value[1])
try:
f = s[0].text_content()
self.value = f.strip(' ')
except:
print(value['name'], ' 获取异常')
elif extend_name == 'css':
# print(str(entry))
var = entry.select(extend_value[1])
if extend_value[0] == 'string':
# print(var)
self.value = str(var[0].string)
elif extend_value[0] == 'href':
self.value = str(var[0].get('href'))
elif extend_value[0] == 'remove_html_tag':
rr = re.compile('<.*?>')
# print(type(regx))
s = rr.subn('', str(var[0]))
self.value = s[0]
# print(s[0])
elif extend_name == 'calculate':
if extend_value[0] == '+' and extend_value[2] == 'value':
self.value = extend_value[1] + self.value
elif extend_name == 'substitute':
try:
rr = re.compile(extend_value[0])
s = re.search(rr, self.value)
self.value = s.group(extend_value[1])
except:
print(value['name'], ' 获取异常')
# print(s.group(1))
elif extend_name == 'to_10000':
if re.search('万', self.value):
s = self.value.replace('万', '')
self.value = str(float(s) * 10000)
# print(s)
elif extend_name == 'replace' and extend_value[0] == 'pay_method':
# print(extend_value)
try:
rr = re.compile('到期还本')
if re.search(rr, self.value):
self.value = 4
except:
print(value['name'], ' 获取异常')
elif extend_name == 'loop_match':
# print(str(entry))
rr = re.compile(extend_value[0])
# print(extend_value[0])
#rr = re.compile(r'''<tr class="list_record tab1" >(?:.|\n)*?<td width="35%" style="text-align:left;">(.*?)</td>(?:.|\n)*?<td width="30%" style="">(.*?)</td>(?:.|\n)*?<td width="35%" style="text-align:right;">(.*?)</td>(?:.|\n)*?</tr>''')
# print(body[0])
n = rr.findall(body[0])
# print(n)
records = ''
for investrecord in n:
# print(investrecord)
# print(extend_value[1])
s = extend_value[1].format(t=investrecord)
records += s
# print(records)
self.value = records
# elif extend_name == 'time_first':
# elif extend_name == 'website_match':
class Entry():
    def __init__(self, entry, variable):
        """Scrape one listing entry end-to-end and persist it.

        Extracts the page_0 (list page) fields from *entry*, fetches the
        detail page at the extracted 网址 (URL), extracts the page_1 fields,
        post-processes the id/time placeholders, flags the record as
        collected (已采) or not, and hands complete records to update_db.
        dic_0 is keyed by Chinese alias, dic_1 by English field name.
        """
        #uri_1 = ''
        dic_0 = {}
        dic_1 = {}
        for tag_name, tag_value in variable['page_0']['tag'].items():
            tag = Tag(tag_name, tag_value, entry)
            ety = getattr(tag, '__dict__')
            # print(ety)
            dic_0[ety['alias']] = ety['value']
            dic_1[ety['name']] = ety['value']
        # print(dic_0)
        # Follow the extracted URL to the detail page.
        uri_1 = dic_0['网址']
        body = requests.get(uri_1).text
        content = BeautifulSoup(body, 'lxml')
        for tag_name, tag_value in variable['page_1']['tag'].items():
            tag = Tag(tag_name, tag_value, content, body)
            ety = getattr(tag, '__dict__')
            # print(ety)
            dic_0[ety['alias']] = ety['value']
            dic_1[ety['name']] = ety['value']
        # Record id (编号) is derived from the detail-page URL:
        # "<prefix>-<borrow_id>" per the website_match config.
        # print(dic_0['编号']=='website_match')
        if dic_0['编号'] == 'website_match':
            # print(variable['page_1']['tag']['borrowid']['extend']['website_match'][0])
            rr = re.compile(variable['page_1']['tag']
                            ['borrowid']['extend']['website_match'][0])
            s = re.search(rr, dic_0['网址'])
            # print(s.group(1))
            # print(type(dic_0))
            dic_0['编号'] = variable['page_1']['tag']['borrowid']['extend']['website_match'][1] + \
                '-' + str(s.group(1))
        # The timestamp (时间) is taken from the first reply record embedded
        # in the 回复内容 field.
        if dic_0['时间'] == 'time_first':
            rr = re.compile(r'{.*?postdate=(.*?)\|status=全部通过}.*')
            s = re.search(rr, dic_0['回复内容'])
            dic_0['时间'] = s.group(1)
        # print(dic_0['完成度'])
        # sqlite3 persistence ===================================================
        #
        #conn = sqlite3.connect(variable['base']['db_file'])
        # 已采 (collected) is 1 only for fully funded records (完成度 == '1')
        # with no field still holding the 'noNone' placeholder.
        if dic_0['完成度'] != '1':
            print(dic_0['标题'], '--未完成')
            dic_0['已采'] = 0
        elif 'noNone' in dic_0.values():
            dic_0['已采'] = 0
        else:
            dic_0['已采'] = 1
        dic_0['已发'] = 0
        # if dic_0['完成度'] !='1':
        # print(dic_0['标题'],'未完成不采集')
        # pprint.pprint(dic_0)
        # Persist complete records into the Content table.
        if dic_0['已采'] == 1:
            update_db(variable['base']['db_file'], dic_0)
class Spyder():
    def __init__(self):
        # Full scraping configuration; populated by the caller before spider().
        self.variable = {}

    # Crawl every configured list page and scrape each entry on it.
    def spider(self):
        """Prepare the database, then walk the paginated list pages and
        build an Entry (scrape + persist) for each listing node found."""
        # Collect every configured field name ($..name) for the DB schema.
        scope = {}
        scope['b'] = []
        scope['a'] = jsonpath.jsonpath(self.variable, expr='$..name')
        for i in scope['a']:
            scope['b'].append(i)
        # print(scope['b'])
        lts = scope['b']
        # Create the Content table if needed...
        initialize_db(self.variable['base']['db_file'])
        # ...and make sure it has a column for every configured field.
        update_db_field(self.variable['base']['db_file'], lts)
        # uri is (url_template, (first_page, last_page_exclusive)).
        for var in range(self.variable['page_0']['uri'][1][0], self.variable['page_0']['uri'][1][1]):
            # print(var)
            # print(self.variable['page_0']['uri'][0])
            url = self.variable['page_0']['uri'][0].format(var)
            # print(url)
            req_body = (requests.get(url)).text
            # print(req_body)
            # The listing entries live inside <div class="content3">;
            # whitespace-only children are removed before iterating.
            soup = BeautifulSoup(req_body, 'lxml').find(
                'div', 'content3').contents
            soup.remove('\n')
            soup.remove(' ')
            #index = 0
            #s = (soup[0])
            # pprint.pprint(s)
            for ety in soup:
                # print(type(ety))
                # try:
                Entry(ety, self.variable)
                # except:
                # print('数据获取异常,联系技术人员')
# 发布数据
# --- Site-specific configuration and entry point (nmgdai.com spider) -------
# 'page_0' describes the paginated list page, 'page_1' the detail page.
# Each tag entry is {'name': Chinese column, 'value': initial placeholder,
# 'extend': ordered extraction rules interpreted by Tag.__init__}.
spyder = Spyder()
spyder.variable = {
    'base': {'db_file': r'C:\Users\Administrator\Desktop\spider\db_file\Nmd.db3',  # 内蒙贷
             'headers': ''''''},
    'page_0': {'uri': ('https://www.nmgdai.com/Invest/investList/p/{}', (1, 7)),
               'tag': {'borrow_url': {'name': '网址', 'value': 'noNone', 'extend': {'css': ('href', '.block_r_txt1 > a'),
                                                                                  'calculate': ('+', 'https://www.nmgdai.com', 'value'), }},
                       'title': {'name': '标题', 'value': 'noNone', 'extend': {'css': ('string', '.block_r_txt1 > a'), }},
                       'progress': {'name': '完成度', 'value': 'noNone', 'extend': {'css': ('string', '.pertxt'), }}, }},
    'page_1': {'tag': {'money': {'name': '借款金额', 'value': 'noNone', 'extend': {'css': ('remove_html_tag', '.investDetail_span_money'),
                                                                               'substitute': (r'借款金额(.*?)元', 1),
                                                                               'to_10000': (), }},
                       'rate': {'name': '年利率', 'value': 'noNone', 'extend': {'xpath': ('text_content()', '/html/body/div[5]/div[1]/div[2]/span[2]/font/b'), }},
                       'daystr': {'name': '借款期限', 'value': 'noNone', 'extend': {'xpath': ('text_content()', '/html/body/div[5]/div[1]/div[2]/span[3]'),
                                                                                'substitute': (r'期限(.*)', 1)}},
                       'username': {'name': '作者', 'value': 'noNone', 'extend': {'xpath': ('text_content()', '/html/body/div[5]/div[1]/div[2]/span[4]'), }},
                       'senddate': {'name': '发标时间', 'value': 'noNone', 'extend': {'xpath': ('text_content()', '/html/body/div[5]/div[1]/div[3]/div[6]'),
                                                                                  'substitute': (r'上线时间:(.*)', 1)}},
                       'repayment_type': {'name': '还款方式', 'value': 'noNone', 'extend': {'xpath': ('text_content()', '/html/body/div[5]/div[1]/div[3]/div[2]'),
                                                                                        'substitute': (r'还款方式:(.*)', 1),
                                                                                        'replace': ('pay_method',)}},
                       'posttype': {'name': '回复类型', 'value': 1, 'extend': {}},
                       'postdata': {'name': '回复内容', 'value': 'noNone', 'extend': {'loop_match': (r'''<tr class="list_record tab1" >(?:.|\n)*?<td width="35%" style="text-align:left;">(.*?)</td>(?:.|\n)*?<td width="30%" style="">(.*?)</td>(?:.|\n)*?<td width="35%" style="text-align:right;">(.*?)</td>(?:.|\n)*?</tr>''', '{{username={t[1]}|rate=-1|postmoney={t[2]}|money={t[2]}|postdate={t[0]}|status=全部通过}}'), }},
                       'lastpostdate': {'name': '时间', 'value': 'time_first', 'extend': {'time_first': ()}},
                       'siteid': {'name': '网站编号', 'value': '5730', 'extend': {'time_first': ()}},
                       'borrowid': {'name': '编号', 'value': 'website_match', 'extend': {'website_match': (r'https://www.nmgdai.com/Invest/investDetail/borrow_id/(\w+)', '内蒙贷')}},
                       }},
    'page_2': {},
}
# Scrape everything, then push freshly-collected rows to the remote endpoint.
spyder.spider()
print(spyder.variable['base']['db_file'])
publish34(spyder.variable['base']['db_file'])
#input('请按回车键结束脚本: ')
| [
"289891598@qq.com"
] | 289891598@qq.com |
9d9c75dc71a08948292a19969a209d9e9e35aaba | 04f194dfd80367756cc3971b57b48065b2edbfb3 | /topics/number_line.py | 37386cf3172b78370d0618372e8485ba120c6294 | [] | no_license | arthurtcl/manim | de2bfcf495981fb332e036b63e7c074e0db50624 | ad05030641483b7f99b382cf6492bebcd4aa6d18 | refs/heads/master | 2021-01-17T11:44:48.968626 | 2017-03-04T01:34:05 | 2017-03-04T01:34:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,977 | py | from helpers import *
from mobject import Mobject1D
from mobject.vectorized_mobject import VMobject, VGroup
from mobject.tex_mobject import TexMobject
from topics.geometry import Line, Arrow
from scene import Scene
class NumberLine(VMobject):
    """A horizontal number line with tick marks and optional number labels.

    The line represents [x_min, x_max] in "number" units; space_unit_to_num
    scales numbers to scene units, and the point for number_at_center is
    shifted onto the origin.
    """
    CONFIG = {
        "color" : BLUE,
        "x_min" : -SPACE_WIDTH,
        "x_max" : SPACE_WIDTH,
        "space_unit_to_num" : 1,
        "tick_size" : 0.1,
        "tick_frequency" : 1,
        "leftmost_tick" : None, #Defaults to ceil(x_min)
        "numbers_with_elongated_ticks" : [0],
        "numbers_to_show" : None,
        "longer_tick_multiple" : 2,
        "number_at_center" : 0,
        "propogate_style_to_family" : True
    }

    def __init__(self, **kwargs):
        digest_config(self, kwargs)
        # Resolved here rather than in CONFIG because it depends on x_min.
        if self.leftmost_tick is None:
            self.leftmost_tick = np.ceil(self.x_min)
        VMobject.__init__(self, **kwargs)

    def generate_points(self):
        # Build the line and ticks in number coordinates, then scale and
        # shift the whole mobject into scene coordinates.
        self.main_line = Line(self.x_min*RIGHT, self.x_max*RIGHT)
        self.tick_marks = VMobject()
        self.add(self.main_line, self.tick_marks)
        for x in self.get_tick_numbers():
            self.add_tick(x, self.tick_size)
        for x in self.numbers_with_elongated_ticks:
            self.add_tick(x, self.longer_tick_multiple*self.tick_size)
        self.stretch(self.space_unit_to_num, 0)
        self.shift(-self.number_to_point(self.number_at_center))

    def add_tick(self, x, size):
        """Add one vertical tick of half-height *size* at number *x*."""
        self.tick_marks.add(Line(
            x*RIGHT+size*DOWN,
            x*RIGHT+size*UP,
        ))
        return self

    def get_tick_marks(self):
        return self.tick_marks

    def get_tick_numbers(self):
        # Inclusive of x_max thanks to the extra tick_frequency in the stop.
        return np.arange(
            self.leftmost_tick, self.x_max + self.tick_frequency,
            self.tick_frequency
        )

    def number_to_point(self, number):
        """Map a number to its scene point by interpolating along main_line."""
        alpha = float(number-self.x_min)/(self.x_max - self.x_min)
        return interpolate(
            self.main_line.get_start(),
            self.main_line.get_end(),
            alpha
        )

    def point_to_number(self, point):
        """Inverse of number_to_point: project *point* onto the line."""
        left_point, right_point = self.main_line.get_start_and_end()
        full_vect = right_point-left_point
        def distance_from_left(p):
            return np.dot(p-left_point, full_vect)/np.linalg.norm(full_vect)

        return interpolate(
            self.x_min, self.x_max,
            distance_from_left(point)/distance_from_left(right_point)
        )

    def default_numbers_to_display(self):
        if self.numbers_to_show is not None:
            return self.numbers_to_show
        return np.arange(self.leftmost_tick, self.x_max, 1)

    def get_vertical_number_offset(self, direction = DOWN):
        # How far below (or in *direction* of) the line labels are placed.
        return 4*direction*self.tick_size

    def get_number_mobjects(self, *numbers, **kwargs):
        #TODO, handle decimals
        if len(numbers) == 0:
            numbers = self.default_numbers_to_display()
        result = VGroup()
        for number in numbers:
            mob = TexMobject(str(int(number)))
            mob.scale_to_fit_height(3*self.tick_size)
            mob.shift(
                self.number_to_point(number),
                self.get_vertical_number_offset(**kwargs)
            )
            result.add(mob)
        return result

    def add_numbers(self, *numbers, **kwargs):
        """Attach number labels (all defaults when *numbers* is empty)."""
        self.numbers = self.get_number_mobjects(
            *numbers, **kwargs
        )
        self.add(*self.numbers)
        return self
class UnitInterval(NumberLine):
    """NumberLine specialized to [0, 1] with ticks every 0.1 and elongated
    ticks at the endpoints."""
    CONFIG = {
        "x_min" : 0,
        "x_max" : 1,
        "space_unit_to_num" : 6,
        "tick_frequency" : 0.1,
        "numbers_with_elongated_ticks" : [0, 1],
        "number_at_center" : 0.5,
    }
class Axes(VGroup):
    """An x/y axis pair: two NumberLines, the vertical one rotated 90°."""
    CONFIG = {
        "propogate_style_to_family" : True
    }

    def __init__(self, **kwargs):
        # (sic: "propogate" is the key name the framework looks up)
        VGroup.__init__(self)
        horizontal = NumberLine(**kwargs)
        vertical = NumberLine(**kwargs)
        vertical.rotate(np.pi/2)
        self.x_axis = horizontal
        self.y_axis = vertical
        self.add(self.x_axis, self.y_axis)
class NumberPlane(VMobject):
    """A 2D coordinate grid: axes, main grid lines, and fainter secondary
    lines, with helpers to convert between (x, y) pairs and scene points."""
    CONFIG = {
        "color" : BLUE_D,
        "secondary_color" : BLUE_E,
        "axes_color" : WHITE,
        "secondary_stroke_width" : 1,
        "x_radius": SPACE_WIDTH,
        "y_radius": SPACE_HEIGHT,
        "space_unit_to_x_unit" : 1,
        "space_unit_to_y_unit" : 1,
        "x_line_frequency" : 1,
        "y_line_frequency" : 1,
        "secondary_line_ratio" : 1,
        "written_coordinate_height" : 0.2,
        "written_coordinate_nudge" : 0.1*(DOWN+RIGHT),
        "num_pair_at_center" : (0, 0),
        "propogate_style_to_family" : False,
    }

    def generate_points(self):
        self.axes = VMobject()
        self.main_lines = VMobject()
        self.secondary_lines = VMobject()
        # One tuple per direction: (radius, main-line frequency, the two
        # endpoints of each line, and the unit vector to step along).
        tuples = [
            (
                self.x_radius,
                self.x_line_frequency,
                self.y_radius*DOWN,
                self.y_radius*UP,
                RIGHT
            ),
            (
                self.y_radius,
                self.y_line_frequency,
                self.x_radius*LEFT,
                self.x_radius*RIGHT,
                UP,
            ),
        ]
        for radius, freq, start, end, unit in tuples:
            main_range = np.arange(0, radius, freq)
            # secondary_line_ratio extra lines between each pair of mains.
            step = freq/float(freq + self.secondary_line_ratio)
            for v in np.arange(0, radius, step):
                line1 = Line(start+v*unit, end+v*unit)
                line2 = Line(start-v*unit, end-v*unit)
                if v == 0:
                    self.axes.add(line1)
                elif v in main_range:
                    self.main_lines.add(line1, line2)
                else:
                    self.secondary_lines.add(line1, line2)
        self.add(self.secondary_lines, self.main_lines, self.axes)
        self.stretch(self.space_unit_to_x_unit, 0)
        self.stretch(self.space_unit_to_y_unit, 1)
        #Put x_axis before y_axis
        y_axis, x_axis = self.axes.split()
        self.axes = VMobject(x_axis, y_axis)

    def init_colors(self):
        VMobject.init_colors(self)
        self.axes.set_stroke(self.axes_color, self.stroke_width)
        self.main_lines.set_stroke(self.color, self.stroke_width)
        self.secondary_lines.set_stroke(
            self.secondary_color, self.secondary_stroke_width
        )
        return self

    def get_center_point(self):
        return self.num_pair_to_point(self.num_pair_at_center)

    def num_pair_to_point(self, pair):
        """Map an (x, y) number pair to the corresponding scene point."""
        pair = np.array(pair) + self.num_pair_at_center
        result = self.axes.get_center()
        result[0] += pair[0]*self.space_unit_to_x_unit
        result[1] += pair[1]*self.space_unit_to_y_unit
        return result

    def point_to_num_pair(self, point):
        """Inverse of num_pair_to_point."""
        # NOTE(review): new_point is computed but never used; x and y are
        # derived from the raw *point* instead — looks like a latent bug,
        # confirm against callers before changing.
        new_point = point-self.get_center()
        center_x, center_y = self.num_pair_at_center
        x = center_x + point[0]/self.space_unit_to_x_unit
        y = center_y + point[1]/self.space_unit_to_y_unit
        return x, y

    def get_coordinate_labels(self, x_vals = None, y_vals = None):
        """Build small TexMobject labels for the given axis values
        (integer ranges over both radii when none are given)."""
        result = []
        if x_vals == None and y_vals == None:
            x_vals = range(-int(self.x_radius), int(self.x_radius))
            y_vals = range(-int(self.y_radius), int(self.y_radius))
        for index, vals in enumerate([x_vals, y_vals]):
            num_pair = [0, 0]
            for val in vals:
                num_pair[index] = val
                point = self.num_pair_to_point(num_pair)
                num = TexMobject(str(val))
                num.scale_to_fit_height(
                    self.written_coordinate_height
                )
                # Anchor the label's upper-left corner just off the point.
                num.shift(
                    point-num.get_corner(UP+LEFT),
                    self.written_coordinate_nudge
                )
                result.append(num)
        return result

    def get_axes(self):
        return self.axes

    def get_axis_labels(self, x_label = "x", y_label = "y"):
        x_axis, y_axis = self.get_axes().split()
        x_label_mob = TexMobject(x_label)
        y_label_mob = TexMobject(y_label)
        x_label_mob.next_to(x_axis, DOWN)
        x_label_mob.to_edge(RIGHT)
        y_label_mob.next_to(y_axis, RIGHT)
        y_label_mob.to_edge(UP)
        return VMobject(x_label_mob, y_label_mob)

    def add_coordinates(self, x_vals = None, y_vals = None):
        self.add(*self.get_coordinate_labels(x_vals, y_vals))
        return self

    def get_vector(self, coords, **kwargs):
        # NOTE(review): *point* is computed but unused; the arrow is built
        # straight from *coords* — confirm before cleaning up.
        point = coords[0]*RIGHT + coords[1]*UP
        arrow = Arrow(ORIGIN, coords, **kwargs)
        return arrow

    def prepare_for_nonlinear_transform(self, num_inserted_anchor_points = 50):
        """Densify every sub-mobject's anchors so a nonlinear map of the
        grid still looks smooth, then re-smooth the curves."""
        for mob in self.family_members_with_points():
            num_anchors = mob.get_num_anchor_points()
            if num_inserted_anchor_points > num_anchors:
                mob.insert_n_anchor_points(num_inserted_anchor_points-num_anchors)
                mob.make_smooth()
        return self
| [
"grantsanderson7@gmail.com"
] | grantsanderson7@gmail.com |
07d6dbce7c8ee75f85135f932a4b99236ba69e9e | 52d6cacfc6df5d0f5ed6724bd92ff0239dea682e | /manage.py | 6f8028621eaa81b9b5e8bb643876da1cf59929dc | [] | no_license | scyilsamrat/GST-Printing-Billing-webapp | eff26f06169853a48427657d89321b8e691c37cd | 8915c41d48996919523edc62da7c9bf1461682dc | refs/heads/master | 2022-04-27T22:25:32.167991 | 2020-04-28T18:17:24 | 2020-04-28T18:17:24 | 257,631,308 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 806 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Django's command-line entry point: announce startup, point Django at
    the project settings, then dispatch whatever was passed on argv."""
    print("Please Wait Scyil pvt Ltd server is starting on /Dettol Software /user 09253 ")
    print("Please Wait Scyil pvt Ltd server is starting on /Dettol Software /user 09255 ")
    # setdefault: an externally exported DJANGO_SETTINGS_MODULE still wins.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dettol.settings')
    try:
        # Imported lazily so a missing Django produces the helpful message
        # below instead of a bare ImportError at module load.
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)

if __name__ == '__main__':
    main()
"samratsahil180@gmail.com"
] | samratsahil180@gmail.com |
6755be04d36dd75036f3b787a0ebebb8b8d709d7 | e41d8daac285e37551e17778fa1d31698d707311 | /Project4/etl.py | 8eb9b3a90698232a2aed3aa054f58558f6190cdd | [] | no_license | Johannes-Handloser/Data-Engineer-Nanodegree | d0224011b28ce1fdd9d9cc9b037032216cffc39a | 5da2123c5b0eff436ff570281be6ed3161d95c2a | refs/heads/master | 2022-11-26T19:47:31.795613 | 2020-08-08T10:34:53 | 2020-08-08T10:34:53 | 271,854,680 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,904 | py | import configparser
from datetime import datetime
import os
from pyspark.sql import SparkSession
from pyspark.sql.functions import year, month, dayofmonth, hour, weekofyear, dayofweek, date_format, monotonically_increasing_id, udf, col, to_date
from pyspark.sql.types import TimestampType
# Read AWS credentials from dl.cfg and export them as environment variables
# so hadoop-aws (s3a://) can authenticate the S3 reads/writes below.
config = configparser.ConfigParser()
config.read('dl.cfg')
os.environ['AWS_ACCESS_KEY_ID'] = config['AWS']['AWS_ACCESS_KEY_ID']
os.environ['AWS_SECRET_ACCESS_KEY'] = config['AWS']['AWS_SECRET_ACCESS_KEY']
def create_spark_session():
    """Build (or reuse) a SparkSession wired with the hadoop-aws package
    so it can read and write s3a:// paths."""
    builder = SparkSession.builder
    builder = builder.config(
        "spark.jars.packages", "org.apache.hadoop:hadoop-aws:2.7.0"
    )
    return builder.getOrCreate()
def process_song_data(spark, input_data, output_data):
    """Transform the S3 song_data dataset into the songs and artists
    dimension tables, written back to S3 as parquet.

    Songs are partitioned by (year, artist_id); artists are deduplicated
    and their columns renamed to the star-schema names.
    """
    # get filepath to song data file
    song_data = input_data + "song_data/*/*/*/*.json"

    # read song data file
    df = spark.read.json(song_data)

    # extract columns to create songs table
    songs_table = df.select("song_id", "title", "artist_id", "year", "duration").dropDuplicates()
    songs_table.createOrReplaceTempView("songs_table")

    # write songs table to parquet files partitioned by year and artist
    songs_table.write.mode("overwrite").partitionBy("year", "artist_id").parquet(output_data+"songs_table/songs.parquet")

    # extract columns to create artists table, renaming artist_* columns
    # to the dimension-table names
    artists_table = df.select("artist_id", "artist_latitude", "artist_location", "artist_longitude", "artist_name").withColumnRenamed("artist_name", "name") \
        .withColumnRenamed("artist_latitude", "latitude") \
        .withColumnRenamed("artist_longitude", "longitude") \
        .withColumnRenamed("artist_location", "location") \
        .dropDuplicates()
    artists_table.createOrReplaceTempView("artists_table")

    # write artists table to parquet files
    artists_table.write.mode("overwrite").parquet(output_data + "artists_table/artists.parquet")
def process_log_data(spark, input_data, output_data):
    """Transform the S3 log_data (song-play events) into the users and time
    dimension tables and the songplays fact table, written as parquet.

    Only "NextSong" page events are kept; the epoch-millisecond ts column is
    converted to a timestamp, and songplays joins logs to song metadata on
    artist name.
    """
    # get filepath to log data file
    log_data = input_data + "log_data/*/*/*.json"

    # read log data file
    df = spark.read.json(log_data)

    # filter by actions for song plays
    df = df.filter(df.page == "NextSong")

    # extract columns for users table
    users_table = df.select("userId", "firstName", "lastName", "gender", "level").dropDuplicates()

    # write users table to parquet files
    users_table.write.mode("overwrite").parquet(output_data + "users_table/users.parquet")

    # create timestamp column from original (epoch milliseconds) ts column
    get_timestamp = udf(lambda x: datetime.fromtimestamp(x / 1000), TimestampType())
    df = df.withColumn("start_time", get_timestamp(df.ts))

    # extract columns to create time table (one row per distinct start_time)
    time_table = df.select("start_time") \
        .withColumn("hour", hour("start_time")) \
        .withColumn("day", dayofmonth("start_time")) \
        .withColumn("week", weekofyear("start_time")) \
        .withColumn("month", month("start_time")) \
        .withColumn("year", year("start_time")) \
        .withColumn("weekday", dayofweek("start_time")) \
        .dropDuplicates()

    # write time table to parquet files partitioned by year and month
    time_table.write.mode("overwrite").partitionBy("year", "month").parquet(output_data+"time_table/time.parquet")

    # read in song data to use for songplays table
    song_df = spark.read.json(input_data + "song_data/*/*/*/*.json")

    # extract columns from joined song and log datasets to create songplays
    # table; the join key is the artist name as it appears in both datasets
    joined_df = df.join(song_df, song_df.artist_name == df.artist, "inner")
    songplays_table = joined_df.select(
        col("start_time"),
        col("userId").alias("user_id"),
        col("level"),
        col("song_id"),
        col("artist_id"),
        col("sessionId").alias("session_id"),
        col("location"),
        year("start_time").alias("year"),
        month("start_time").alias("month"),
        col("userAgent").alias("user_agent"))\
        .withColumn("songplay_id", monotonically_increasing_id())

    # write songplays table to parquet files partitioned by year and month
    songplays_table.write.mode("overwrite").partitionBy("year", "month").parquet(output_data+"songplays/songplays.parquet")
# write songplays table to parquet files partitioned by year and month
songplays_table.write.mode("overwrite").partitionBy("year", "month").parquet(output_data+"songplays/songplays.parquet")
def main():
    """Run the full ETL: build a Spark session, then process song and log data."""
    session = create_spark_session()
    source_bucket = "s3a://udacity-dend/"
    target_bucket = "s3a://udacity-jh-dend/"
    # both steps read from the source bucket and write parquet to the target bucket
    for etl_step in (process_song_data, process_log_data):
        etl_step(session, source_bucket, target_bucket)
if __name__ == "__main__":
    main()
| [
"johannes.handloser@bmw.de"
] | johannes.handloser@bmw.de |
7acc3c35279f863cec0ecbea75665131958ceafb | 6007b761a47ca3f437f8bb038714fccb6d3592db | /api/migrations/0003_alter_estudiante_id.py | 049b165d2feeefab8d1c100865396b48f0e56aa4 | [] | no_license | Mauville/DjangoBoilerplate | 165c3ef0386b33ce47b9249a4d8df0059c486e31 | 89ba241dffd6f8255b7c7806e215de45d1382958 | refs/heads/main | 2023-05-15T03:19:56.217191 | 2021-05-31T03:41:14 | 2021-05-31T03:41:14 | 368,931,021 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | # Generated by Django 3.2 on 2021-04-30 02:20
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: rebuild ``estudiante.id`` as a BigAutoField."""
    # Must be applied after the previous alteration of the same field.
    dependencies = [
        ('api', '0002_alter_estudiante_id'),
    ]
    operations = [
        migrations.AlterField(
            model_name='estudiante',
            name='id',
            # BigAutoField: 64-bit auto-incrementing primary key (Django 3.2 default).
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
    ]
| [
"daniel@semantyk.com"
] | daniel@semantyk.com |
32cda06d0f7da3b981aa0d0550102cd54a804c9c | 633c3b9827501b8c6512f0cca6c5534506c2b1e8 | /main.py | cb4782f9abf5a13b19ccd200b06ec8d8d44fcb6f | [] | no_license | roliver7878/project2 | 21d56947c920a1556b337b95e08d7f92d8c2b25d | f48d8225225caf00b7c4bd630c91b63d122a4347 | refs/heads/master | 2022-12-24T15:08:05.518984 | 2020-10-05T00:10:43 | 2020-10-05T00:10:43 | 300,168,246 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 676 | py | import addressfind
import addresstotext
import converttoaudio
# This is my main method
def main():
    """Ask the user for a postal code, resolve it to an address and speak it aloud."""
    # prompt the user for a postal code (CEP)
    postal_code = input('Write here your postal code: ')
    # resolve the postal code to a full address via a free lookup API
    address = addressfind.find(postal_code)
    # flatten the JSON address into plain text suitable for speech
    spoken_text = addresstotext.totext(address)
    # finally, render the text as audio
    converttoaudio.convert(spoken_text)
if __name__ == "__main__":
    main()
| [
"ubuntu@ip-172-31-4-18.ec2.internal"
] | ubuntu@ip-172-31-4-18.ec2.internal |
82d322d9d2a24a4f17977671c69823b4c05dcae3 | 523f8f5febbbfeb6d42183f2bbeebc36f98eadb5 | /207_3.py | a0c0add9e9275d90f0930004027fe8138ec29417 | [] | no_license | saleed/LeetCode | 655f82fdfcc3000400f49388e97fc0560f356af0 | 48b43999fb7e2ed82d922e1f64ac76f8fabe4baa | refs/heads/master | 2022-06-15T21:54:56.223204 | 2022-05-09T14:05:50 | 2022-05-09T14:05:50 | 209,430,056 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,013 | py | class Solution(object):
def canFinish(self, numCourses, prerequisites):
"""
:type numCourses: int
:type prerequisites: List[List[int]]
:rtype: bool
"""
graph={}
indegree=[0]*numCourses
for i in range(numCourses):
graph[i]=[]
for pair in prerequisites:
graph[pair[1]].append(pair[0])
indegree[pair[0]]+=1
res=[]
while True:
flag=0
for node in range(len(indegree)):
if indegree[node]==0:
indegree[node]=float("inf")
res.append(node)
for n in graph[node]:
indegree[n]-=1
del graph[node]
flag=1
break
if flag==0:
break
return len(res)==numCourses
# ad-hoc smoke test: a solvable ordering, then a two-course cycle
a=Solution()
presp=[[1,0]]
num=2
print(a.canFinish(num,presp))
nums=2
psp=[[1,0],[0,1]]
# NOTE(review): this call reuses `num`; `nums` above is never used (likely a typo).
print(a.canFinish(num,psp))
"1533441387@qq.com"
] | 1533441387@qq.com |
550337ec8fec2fdd14b71387c7975aba6339e251 | c984113e37ac5ca4e56a89bc6ec4806e0a899d2a | /old/temperatura_precipitacion_acidez_example.py | 0dca9e8c3a78a84be828cdde567f014fcfb93d4c | [] | no_license | josprimen/TFG | ba6066cbc8a04bba175cfe72e647826cebe43028 | 0e5c60189ed2d3d6d01b65825f806f227c598969 | refs/heads/master | 2020-04-23T21:32:48.166183 | 2020-02-08T14:34:08 | 2020-02-08T14:34:08 | 171,473,477 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,216 | py | import numpy as np
import math
from pandas import read_csv
from datetime import datetime
from math import sqrt
from numpy import concatenate
from matplotlib import pyplot
from pandas import DataFrame
from pandas import concat
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import mean_squared_error
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
datos_clima_2015 = read_csv('files/datos_clima_abril_mayo_2015.csv', usecols=[1,2], engine='python')
datos_clima_2015 = datos_clima_2015.values
datos_clima_2015df = DataFrame(datos_clima_2015)
datos_clima_2015c1 = datos_clima_2015df[0].values
datos_clima_2015c2 = datos_clima_2015df[1].values
datos_clima_2016 = read_csv('files/datos_clima_abril_mayo_2016.csv', usecols=[1,2], engine='python')
datos_clima_2016 = datos_clima_2016.values
datos_clima_2016df = DataFrame(datos_clima_2016)
datos_clima_2016c1 = datos_clima_2016df[0].values
datos_clima_2016c2 = datos_clima_2016df[1].values
datos_clima_2017 = read_csv('files/datos_clima_abril_mayo_2017.csv', usecols=[1,2], engine='python')
datos_clima_2017 = datos_clima_2017.values
datos_clima_2017df = DataFrame(datos_clima_2017)
datos_clima_2017c1 = datos_clima_2017df[0].values
datos_clima_2017c2 = datos_clima_2017df[1].values
datos_clima_2018 = read_csv('files/datos_clima_abril_mayo_2018.csv', usecols=[1,2], engine='python')
datos_clima_2018 = datos_clima_2018.values
datos_clima_2018df = DataFrame(datos_clima_2018)
datos_clima_2018c1 = datos_clima_2018df[0].values
datos_clima_2018c2 = datos_clima_2018df[1].values
datos_precipitacion = np.concatenate((datos_clima_2015c1, datos_clima_2016c1, datos_clima_2017c1, datos_clima_2018c1))
datos_precipitacion = DataFrame(datos_precipitacion)
datos_precipitacion = datos_precipitacion.values
datos_temperatura = np.concatenate((datos_clima_2015c2, datos_clima_2016c2, datos_clima_2017c2, datos_clima_2018c2))
datos_temperatura = DataFrame(datos_temperatura)
datos_temperatura = datos_temperatura.values
datos_acidez_2015 = read_csv('files/media_acidez_dias_2015.csv', usecols=[1], engine='python')
datos_acidez_2015 = datos_acidez_2015.values[0:61]
''' Salen los datos de manera distinta por eso aqui arriba pasamos a dataframe y de nuevo a values
datos_acidez_2015 = read_csv('files/media_acidez_dias_2015.csv', usecols=[1], engine='python')
datos_acidez_2015 = datos_acidez_2015.values
datos_acidez_2015df = DataFrame(datos_acidez_2015)
datos_acidez_2015 = datos_acidez_2015df[0].values
Con esto tenemos un array normal [] y con lo que hay tenemos un [[]]
'''
datos_acidez_2016 = read_csv('files/media_acidez_dias_2016.csv', usecols=[1], engine='python')
datos_acidez_2016 = datos_acidez_2016.values[0:61]
datos_acidez_2017 = read_csv('files/media_acidez_dias_2017.csv', usecols=[1], engine='python')
datos_acidez_2017 = datos_acidez_2017.values[0:61]
datos_acidez_2018 = read_csv('files/media_acidez_dias_2018.csv', usecols=[1], engine='python')
datos_acidez_2018 = datos_acidez_2018.values[0:61]
datos_acidez = np.concatenate((datos_acidez_2015, datos_acidez_2016, datos_acidez_2017, datos_acidez_2018))
groups = [datos_temperatura, datos_precipitacion, datos_acidez]
aux = 1
pyplot.figure()
for group in groups:
pyplot.subplot(3, 1, aux)
pyplot.plot(group)
if group[0]==datos_temperatura[0]:
pyplot.title('Temperatura abril-mayo')
if group[0] == datos_precipitacion[0]:
pyplot.title('Precipitacion abril-mayo')
else:
pyplot.title('acidez')
aux = aux+1
pyplot.show()
conjunto = concatenate((datos_acidez, datos_temperatura, datos_precipitacion), axis=1)
scaler = MinMaxScaler(feature_range=(0, 1))
conjunto_normalizado = scaler.fit_transform(conjunto)
def datosX(conjunto):
    """Build the LSTM input tensor: one (1, 3) sample per row, excluding the last row."""
    frame = DataFrame(conjunto)
    samples = [[[frame[0][idx], frame[1][idx], frame[2][idx]]]
               for idx in range(len(conjunto) - 1)]
    return np.array(samples)
def datosY(conjunto):
    """Build the LSTM target vector: column 0 shifted forward by one time step."""
    frame = DataFrame(conjunto)
    targets = [frame[0][idx + 1] for idx in range(len(conjunto) - 1)]
    return np.array(targets)
tamaño_entrenamiento = int(len(conjunto_normalizado) * 0.75)
tamaño_test = len(conjunto_normalizado) - tamaño_entrenamiento
entrenamiento = conjunto_normalizado[0:tamaño_entrenamiento]
test = conjunto_normalizado[tamaño_entrenamiento:len(conjunto_normalizado)]
entrenamientoX, entrenamientoY = datosX(entrenamiento), datosY(entrenamiento)
testX, testY = datosX(test), datosY(test)
print('DATOS entrenamientoX')
print(entrenamientoX)
print('DATOS entrenamientoY')
print(entrenamientoY)
print('DATOS testX')
print(testX)
print('DATOS testY')
print(testY)
"""
################################################################
da1 = read_csv('files/datos_aceituna_tratados_2015_2016.csv', usecols=[1], engine='python')
da2 = read_csv('files/datos_aceituna_tratados_2016_2017.csv', usecols=[1], engine='python')
da3 = read_csv('files/datos_aceituna_tratados_2017_2018.csv', usecols=[1], engine='python')
da4 = read_csv('files/datos_aceituna_tratados_2018_2019.csv', usecols=[1], engine='python')
da1 = da1.values
da2 = da2.values
da3 = da3.values
da4 = da4.values
da_prueba = np.concatenate((da1,da2,da3,da4))
da_prueba_df = DataFrame(da_prueba)
da_prueba_df.to_csv('files/datos_aceituna_tratados.csv')
################################################################
"""
model = Sequential()
model.add(LSTM(50, input_shape=(1, 3)))
model.add(Dense(1))
model.compile(loss='mae', optimizer='adam')
# fit network
history = model.fit(entrenamientoX, entrenamientoY, epochs=25, validation_data=(testX, testY), verbose=2)
# plot history
pyplot.plot(history.history['loss'], label='train')
pyplot.plot(history.history['val_loss'], label='test')
pyplot.legend()
pyplot.show()
#Hacer las predicciones sobre la acidez
prediccion_test = model.predict(testX)
print('DATO prediccion_test')
print(prediccion_test)
#Invertir el normalizado para tener los datos en la escala original
#1 Hacer el array del mismo tamaño que el de la salida para poder concatenar
testX = testX.reshape((testX.shape[0], testX.shape[2]))
testY = testY.reshape((len(testY), 1))
#2 Concatenar
concatenado_test_real = concatenate((testY, testX[:, 1:]), axis=1)
concatenado_test_prediccion = concatenate((prediccion_test, testX[:, 1:]), axis=1)
#3 Invertir el normalizado
inversion_test_real = scaler.inverse_transform(concatenado_test_real)
inversion_test_prediccion = scaler.inverse_transform(concatenado_test_prediccion)
#4 Obtener las predicciones invertidas
datos_real_test = inversion_test_real[:, 0]
print('Datos acidez test')
print(datos_real_test)
datos_prediccion_test = inversion_test_prediccion[:, 0]
print('datos prediccion test')
print(datos_prediccion_test)
#Calcular el error cuadrático medio
testScore = sqrt(mean_squared_error(datos_real_test, datos_prediccion_test))
print('Test Score: %.2f RMSE' % (testScore))
#Comparamos graficamente lo real y lo predicho
results = [[datos_real_test, datos_prediccion_test]]
aux = 1
pyplot.figure()
for result in results:
pyplot.subplot(2, 1, aux)
pyplot.plot(result[0])
pyplot.plot(result[1])
aux = aux+1
pyplot.show() | [
"36623141+josprimen@users.noreply.github.com"
] | 36623141+josprimen@users.noreply.github.com |
0901daeeca261a63c8a9570fd5b8251a328976fe | f5b48cba3562e5efd32f1379fba69fe27c2c54cb | /ex014.py | 66a414ae70e9a2f803d2753527c59e9f29815aae | [] | no_license | GGMagenta/exerciciosMundo123 | e9596eee260dc7876efad1684ef88b36340b3a4d | 3764b706bd5b7d24e270a6c0ea006f2fbf8ebcbb | refs/heads/main | 2023-01-30T07:43:21.002259 | 2020-12-15T15:59:39 | 2020-12-15T15:59:39 | 316,284,825 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 160 | py | #pegue uma temperatura em °C e converta em °F
ce = float(input('temperatura em °C '))
fa = (ce * 9/5)+ 32
print('{}°C equivale a {}°F'.format(ce, fa))
| [
"noreply@github.com"
] | noreply@github.com |
6fbd126342d2762103a2aff7486d0ce1305afb29 | 28297b7172bad2e427db185d449056340be2a429 | /src/join_pairs.py | 3bca92e18b11ac0a0c113c6e7492e6e049cf7c5b | [] | no_license | audy/cd-hit-that | 6a3480c01c7930751325acbd716202ad514562da | 27922835ebace8bcdcf8d7118ec2e05e11e5e9fa | refs/heads/master | 2021-01-01T15:31:01.604403 | 2011-08-02T20:33:47 | 2011-08-02T20:33:47 | 1,357,454 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,420 | py | #!/usr/bin/env python
# outputs a FASTQ file but with its filename in the header (sorta)
# Also puts paired reads together with their 5' ends touching
# This is for clustering
# Takes input from STDIN
import sys
import os
from itertools import cycle
import string
_complement = string.maketrans('GATCRYgatcry','CTAGYRctagyr')
c = cycle([0, 1])
seq = { 0: '', 1: ''}
i = 0
infile = sys.argv[1]
minimum_read_length = int(sys.argv[2])
f_num = int(infile.split('_')[-1].split('.')[0])
kept, skipped = 0, 0
with open(infile) as handle:
for line in handle:
if line.startswith('>'):
n = c.next()
i += 1
if n == 1:
header = '>%s:%s' % (f_num, hex(i)[2:])
else:
seq[n] += line.strip()
if n == 1:
# Reverse-complement 3' pair
seq[1] = seq[1].translate(_complement)[::-1]
# Make sure reads are minimum length
if (len(seq[0]) >= minimum_read_length) \
and (len(seq[1]) >= minimum_read_length):
print header
print '%s%s' % (seq[1], seq[0])
kept +=1
else:
skipped +=1
seq = { 0: '', 1: ''}
print >> sys.stderr, "kept: %.2f percent of pairs (%s : %s)" % (float(kept)/(skipped + kept), skipped, kept) | [
"harekrishna@gmail.com"
] | harekrishna@gmail.com |
337d900284082e21087ff98ddb9d2bb64e6b8248 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_banked.py | 08d044b1996c25d485b9094eb3beb1112231d788 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py |
#calss header
class _BANKED():
def __init__(self,):
self.name = "BANKED"
self.definitions = bank
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['bank']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
088cc5b497205e602ec432a2cd905342facda67c | afca7ccc222dc73aefed14907b798a68b0e1b03a | /dsutils/plot/_path_utils.py | 7165db91f6e0b52ec4d9e239b78da6d6f8f7879d | [] | no_license | kazetof/dsci-utils | c195476753ed2f8b266c3500f14d30d5596ae2da | 2f158ac130e816d89f0d3684f5af38598e82d057 | refs/heads/master | 2020-03-27T17:50:15.146217 | 2018-09-01T14:47:30 | 2018-09-01T14:47:30 | 146,878,106 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 183 | py | import os
def _make_savedir_from_savename(savename: str) -> None:
savedir = os.path.dirname(savename)
os.makedirs(savedir, exist_ok=True)
if __name__ == '__main__':
pass | [
"fukasawakaze@gmail.com"
] | fukasawakaze@gmail.com |
5f7d5b6011e270fd215d3af403eabbe953cbe368 | a9aaa9befdee0aedbffd7e86cced93a3f98ad90a | /blog/migrations/0004_comment.py | 2716048249bbe17d0cdca54060b43df26b43a747 | [] | no_license | mksime/my-first-blog | bcb22367fd6576bacb6f1031e938de59d130dc87 | 80fe29d9584c827e0262ca1ae38c8c4f9e0dd867 | refs/heads/master | 2020-03-17T22:49:21.431028 | 2018-06-13T13:57:20 | 2018-06-13T13:57:20 | 134,019,480 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 983 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2018-06-02 02:34
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated Django 1.9 migration: create the blog ``Comment`` model."""
    # Builds on the previous blog schema state.
    dependencies = [
        ('blog', '0003_auto_20180525_1431'),
    ]
    operations = [
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('author', models.CharField(max_length=200)),
                ('text', models.TextField()),
                # defaults to creation time; timezone-aware via django.utils.timezone
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                # comments start unapproved and must be moderated
                ('approved_comment', models.BooleanField(default=False)),
                # deleting a Post cascades to its comments; reverse accessor is post.comments
                ('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='blog.Post')),
            ],
        ),
    ]
| [
"marcelo.sime@gmail.com"
] | marcelo.sime@gmail.com |
3ff10af42ce6a02892caa8712ebe2280e07e04d3 | 0d64b67478dd8a3817cb411d5e7b674cbc267917 | /app/views/main.py | 139bca0a9eeb936f748db6a83446c0bed1e06d41 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Kbman99/Compute-Cross-Product | 9a3345c22b681be11296cce7b9f09733a6e193b6 | b19f63829d12809acbb42a3041aaa44d6e35bea8 | refs/heads/master | 2020-03-26T19:09:14.892769 | 2018-12-30T21:49:59 | 2018-12-30T21:49:59 | 145,251,503 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,646 | py | from flask import render_template, Blueprint, jsonify, request
from flask_cors import CORS
from app.models import ResultList
from app.core import db
from app.schemas import ValidationSchema, ResultListSchema, AllResultsListSchema
from app.toolbox.results import AllResults
api = Blueprint('api', __name__, static_folder='../static', template_folder='../templates')
CORS(api)
@api.route('/', defaults={'path': ''})
@api.route('/<path:path>')
def catch_all(path):
    """Fallback route: any unmatched path renders the error page with HTTP 404."""
    return render_template('error.html', message='404 not found'), 404
@api.route('/calculate', methods=['POST', 'GET'])
def calculate():
    """POST: validate the JSON payload, persist a ResultList for the two
    vectors and return its serialized form.
    GET: return every previously stored result.
    """
    if request.method == 'POST':
        req_data = request.get_json()
        data, errors_validation = ValidationSchema().load(req_data)
        if errors_validation:
            return jsonify(errors_validation), 400
        result = ResultList(req_data['vector1'], req_data['vector2'])
        db.session.add(result)
        try:
            db.session.commit()
        except Exception as e:
            # this shouldn't happen; str(e) because an Exception instance is
            # not JSON-serializable
            return jsonify({'error': str(e)}), 500
        final_result, errors_results = ResultListSchema().dump(result)
        if errors_results:
            # FIX: the original built this error response but never returned it
            return jsonify(errors_results), 400
        return jsonify(final_result)
    else:
        results = ResultList.query.all()
        results_obj = AllResults(results)
        all_results, errors_all_results = AllResultsListSchema().dump(results_obj)
        if errors_all_results:
            # FIX: same missing-return defect as the POST branch
            return jsonify(errors_all_results), 400
        return jsonify(all_results)
@api.route('/health', methods=['GET'])
def health():
    """Liveness probe: always answers 200 with a fixed JSON payload."""
    return jsonify({'message': 'okay'})
| [
"Kylebowman99@gmail.com"
] | Kylebowman99@gmail.com |
0b86b5dcd14fc780f3a0c39b0fbadb7e2b44011c | 61f3996cc11b6103f248cc0e38ecae7d8b847bbd | /python-libs/behavior.py | e02f1416ffe7b0729165f45b0a3f4b835ded697f | [
"MIT"
] | permissive | massimo-nocentini/competitive-programming | c289625cfc25b4a5b82fc05b1de9aefaa0413b9f | fc54589c765141f0c51c0c5feb750ed72333f42e | refs/heads/master | 2021-01-19T02:02:06.402717 | 2018-06-28T14:14:22 | 2018-06-28T14:14:22 | 22,633,722 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 974 | py |
def dispatch(*args, table, default=lambda k, e: k):
    '''
    Dispatch behavior in *even* positions within `args` against mapping `table`.

    It accepts a variable list of arguments, however of even length, where
    *hashable* objects in *even* positions are used in the key for dispatching against
    logic container `table`, namely a mapping of functions; in parallel, objects
    in *odd* positions within `args` are used as values, respectively.

    Keyword argument `default` is a function that consumes two arguments:
    the former is the key not found in the dispatch `table`; the latter one
    is the caught exception, if re-raising would be performed. Its default
    behavior is to return the key as it is.
    '''
    key = tuple(args[0::2])
    values = args[1::2]
    try:
        method = table[key]
    except KeyError as e:
        # key not present in the dispatch table: delegate to `default`
        return default(key, e)
    # FIX: call OUTSIDE the try so a KeyError raised by `method` itself
    # propagates to the caller instead of being mistaken for a missing key
    return method(*values)
| [
"massimo.nocentini@gmail.com"
] | massimo.nocentini@gmail.com |
96f9a31921da3a30bc91014997b41594bbd7ab9d | fa24efc43ab0d8d0e7cf3d538e4733130b760f32 | /day_3/day_3_part_2_test.py | 2c78e3afe464e48dce0867c35afd9deb6110d61b | [] | no_license | nqeron/advent_of_code_2019 | 7d2176eafc1f0437a1863192a471d2fbbda735bc | 35f7b7f7faa87bc5cdd27cebc9eabec4fd6cd6a0 | refs/heads/master | 2020-09-30T05:38:37.349274 | 2019-12-27T18:03:44 | 2019-12-27T18:03:44 | 227,217,766 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,062 | py | def produce_points(directions) -> set:
points = set()
pos = (0, 0)
for d in directions:
bearing = d[0]
dist = int(d[1:])
x_bear = ()
y_bear = ()
if bearing == "L":
x_bear = range(pos[0]-dist, pos[0]+1)
y_bear = (pos[1] for _ in range(dist))
pos = (pos[0] - dist, pos[1])
elif bearing == "R":
x_bear = range(pos[0], pos[0] + dist + 1)
y_bear = (pos[1] for _ in range(dist))
pos = (pos[0] + dist, pos[1])
elif bearing == "U":
y_bear = range(pos[1], pos[1] + dist + 1)
x_bear = (pos[0] for _ in range(dist))
pos = (pos[0], pos[1] + dist)
elif bearing == "D":
y_bear = range(pos[1] - dist, pos[1] + 1)
x_bear = (pos[0] for _ in range(dist))
pos = (pos[0], pos[1] - dist)
for x, y in zip(x_bear, y_bear):
points.add((x, y))
return points
def gen_points(directions):
pos = (0, 0)
for d in directions:
bearing = d[0]
dist = int(d[1:])
x_bear = ()
y_bear = ()
if bearing == "L":
x_bear = range(pos[0], pos[0] - dist - 1, -1)
y_bear = (pos[1] for _ in range(dist))
pos = (pos[0] - dist, pos[1])
elif bearing == "R":
x_bear = range(pos[0], pos[0] + dist + 1)
y_bear = (pos[1] for _ in range(dist))
pos = (pos[0] + dist, pos[1])
elif bearing == "U":
y_bear = range(pos[1], pos[1] + dist + 1)
x_bear = (pos[0] for _ in range(dist))
pos = (pos[0], pos[1] + dist)
elif bearing == "D":
y_bear = range( pos[1], pos[1] - dist - 1, -1)
x_bear = (pos[0] for _ in range(dist))
pos = (pos[0], pos[1] - dist)
for x, y in zip(x_bear, y_bear):
yield (x, y)
yield pos
def m_dist(point_1: tuple, point_2: tuple) -> int:
    """Manhattan distance between two 2-D grid points."""
    dx = abs(point_1[0] - point_2[0])
    dy = abs(point_1[1] - point_2[1])
    return dx + dy
def analyze(file):
    """Scratchpad comparison of the two example wires (AoC 2019 day 3)."""
    with open(file) as f:
        directions_1 = f.readline().split(",")
        directions_2 = f.readline().split(",")
    # NOTE(review): directions_1/directions_2 read from the file are never used
    # below; the hard-coded example wires from the puzzle statement are analyzed.
    points_1 = produce_points("R75,D30,R83,U83,L12,D49,R71,U7,L72".split(","))
    points_temp = gen_points("R75,D30,R83,U83,L12,D49,R71,U7,L72".split(","))
    #print(set(points_temp))
    #print(points_1)
    #print(set(points_temp) - points_1)
    # rebuild the generator (a generator can only be iterated once)
    points_temp = gen_points("R75,D30,R83,U83,L12,D49,R71,U7,L72".split(","))
    #print([i for i in points_temp])
    points_2 = produce_points("U62,R66,U55,R34,D71,R55,D58,R83".split(","))
    points_2_temp = gen_points("U62,R66,U55,R34,D71,R55,D58,R83".split(","))
    #print(points_2 - set(points_2_temp))
    # crossings according to each of the two point-producing implementations
    intersections = points_1.intersection(points_2)
    int_temp = set(points_temp).intersection(set(points_2_temp))
    print(intersections)
    print(int_temp)
    #print(min( (m_dist(intersection, (0, 0)) for intersection in intersections)))
if __name__ == '__main__':
    analyze("../inputs/day_3.txt")
"nqeron@gmail.com"
] | nqeron@gmail.com |
54d72878eac09a4ed9b40f9ef8fdc315b10a7f4d | 99259216f11b15ec60446b4a141b3592a35560ce | /wex-python-api/test/test_json_node.py | c75004f2b70287a8c291d81ea8751f09dcf73ca6 | [] | no_license | adam725417/Walsin | 296ba868f0837077abff93e4f236c6ee50917c06 | 7fbefb9bb5064dabccf4a7e2bf49d2a43e0f66e9 | refs/heads/master | 2020-04-12T14:14:07.607675 | 2019-03-05T01:54:03 | 2019-03-05T01:54:03 | 162,546,202 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 977 | py | # coding: utf-8
"""
WEX REST APIs
Authentication methods - Basic Auth - JSON Web Token - [POST /api/v1/usermgmt/login](#!/User/signinUser) - [POST /api/v1/usermgmt/logout](#!/User/doLogout) - Python client sample [Download](/docs/wex-python-api.zip)
OpenAPI spec version: 12.0.2.417
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import ibmwex
from ibmwex.rest import ApiException
from ibmwex.models.json_node import JsonNode
class TestJsonNode(unittest.TestCase):
""" JsonNode unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testJsonNode(self):
"""
Test JsonNode
"""
# FIXME: construct object with mandatory attributes with example values
#model = ibmwex.models.json_node.JsonNode()
pass
if __name__ == '__main__':
unittest.main()
| [
"adamtp_chen@walsin.com"
] | adamtp_chen@walsin.com |
dd292841168cd88e929dbb617056733aa1f5364b | 4a934b1d646cc6660b1b38ac09fc0cb7b343445c | /Palta/generateTextCollection.py | 99af87240e93f99248a0a58f22847b860c8267b6 | [] | no_license | chobeat/mapredush | 85aece9d4b4e8ebfcba610b1f13ce95358dee9ef | c827e7ecd1076a30ab3cd5395d79bcd155ca2f6b | refs/heads/master | 2021-01-19T03:23:21.116562 | 2014-06-12T18:05:19 | 2014-06-12T18:05:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 209 | py | from faker import *
from pymongo import *
# connect to the local MongoDB instance and select the target collection
client=MongoClient()
db=client.db
tc=db['textCollection3']
# generate 1000 fake text snippets (max 140 characters each)
f=Faker()
l=[f.text(140) for i in range(1000)]
# store each snippet with its list index as the document _id
# NOTE(review): Collection.insert() is deprecated in pymongo 3.x; insert_one()
# is the modern equivalent — confirm the installed pymongo version.
for i in range(len(l)):
    tc.insert({"_id":i,"text":l[i]})
| [
"simone.robutti@gmail.com"
] | simone.robutti@gmail.com |
9eff05ce12331a82a2df274f4ab930c8296168b5 | 7ddffbc0b183e880e6f440bad44e49aceddf3b6a | /Alexa/alexaDomain/apps.py | 62f7c382fad2088b6b984876b3e61ce90bc0f217 | [] | no_license | mabraca/Alexa | bd15e0d044609b80abf43232e2c38b19b956cac3 | cc666b729f5e1feb39744b642a35115f4f7d905e | refs/heads/master | 2020-04-01T18:31:20.767238 | 2018-10-22T06:29:40 | 2018-10-22T06:29:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 97 | py | from django.apps import AppConfig
class AlexadomainConfig(AppConfig):
    """Django application configuration for the ``alexaDomain`` app."""
    name = 'alexaDomain'
| [
"mabraca18@gmail.com"
] | mabraca18@gmail.com |
6e887cf7d59d6082baba621dd168862f85c5d1b5 | d3bee7fead4c206116e855b91c7024222a01208d | /dpAPI/MPGW.py | 1da2436a6e6230ad7b95233f2e6ab46f1b716b07 | [] | no_license | itayb1/dp-python | d5cbe5bae44f411dc4ee2b86458b1e5c75aab029 | 2b481640619c2fd8e472e11f9f579d64f2fa7418 | refs/heads/master | 2020-05-15T16:52:15.975891 | 2020-05-04T13:19:24 | 2020-05-04T13:19:24 | 182,396,451 | 2 | 1 | null | 2020-04-27T12:32:49 | 2019-04-20T11:16:47 | Python | UTF-8 | Python | false | false | 2,192 | py | from .const import API_PATH, MPGW_request_body, policy_attachment_request_body
from .base import api_call
from copy import deepcopy
from .DPEndpoint import DPEndpoint
class MPGW(DPEndpoint):
    """REST helper for DataPower ``Multi Protocol Gateway`` (MPGW) objects."""
    def __init__(self, auth, base_url, domain):
        # configure the generic endpoint machinery for the MPGW resource type
        DPEndpoint.__init__(self, auth=auth, base_url=base_url, domain=domain)
        self.parent_key = "MultiProtocolGateway"
        self.api_path = API_PATH["mpgw"]
    def create(self, name, front_handlers, xml_manager, style_policy, state="enabled", **kwargs):
        """Creates a new ``Multi Protocol Gateway``

        Parameters:
            name (str): The name of the mpgw
            front_handlers (list): A list of strings representing front handlers to be attached to the mpgw
            xml_manager (str): The name of the xml manager to be attached to the mpgw
            style_policy (str): The name of the style policy to be attached to the mpgw
            state (str): The state of the mpgw (default is enabled)
            **kwargs: extra attributes merged into the request body by ``_append_kwargs``

        Returns:
            dict: the request body that was POSTed for the new mpgw
            (NOTE: this is the locally-built payload, not the server response)
        """
        request_body = deepcopy(MPGW_request_body)
        # a policy-attachment object with the same name must exist before the
        # gateway can reference it below
        self.__create_mpgw_policy_attachment(name)
        request_body[self.parent_key]["name"] = name
        request_body[self.parent_key]["FrontProtocol"] = [ { "value": handler } for handler in front_handlers ]
        request_body[self.parent_key]["mAdminState"] = state
        request_body[self.parent_key]["XMLManager"] = { "value": xml_manager }
        request_body[self.parent_key]["StylePolicy"] = { "value": style_policy }
        request_body[self.parent_key]["PolicyAttachments"] = { "value": name }
        self._append_kwargs(request_body, **kwargs)
        # NOTE(review): the HTTP response is discarded, so POST failures are
        # not surfaced to the caller — confirm whether that is intentional.
        response = api_call.post(self.base_url + (self.api_path).format(domain=self.domain), auth=self.auth, data=request_body)
        return request_body[self.parent_key]
    def __create_mpgw_policy_attachment(self, name):
        # create the companion PolicyAttachments object for this gateway name
        request_body = deepcopy(policy_attachment_request_body)
        request_body["PolicyAttachments"]["name"] = name
        return api_call.post(self.base_url + (API_PATH["policy_attachments"]).format(domain=self.domain), auth=self.auth, data=request_body)
| [
"itay4445@gmail.com"
] | itay4445@gmail.com |
45789a6830e3cc51c628742349487ac491954b52 | df82b5aff1985cf89c8a4e08cd9befe28c946b25 | /06-Segmentation/Answers/segmentByClustering.py | f39c879499bd3f98741988d498f07c1d78241e23 | [] | no_license | steff456/IBIO4680 | ca2cee74277fe17321c2229d69cfe99cd317738a | 23125aa14d7213231ab9b1b6dcc5fddc0c7b5a1c | refs/heads/master | 2021-09-14T07:44:04.872265 | 2018-05-09T16:46:44 | 2018-05-09T16:46:44 | 119,931,948 | 0 | 0 | null | 2018-02-02T04:29:30 | 2018-02-02T04:29:30 | null | UTF-8 | Python | false | false | 6,250 | py | #!/usr/local/bin/python3
#main function performing segmentation
def segmentByClustering(rgbImage, featureSpace, clusteringMethod, numberOfClusters):
    """Segment an RGB image by clustering per-pixel feature vectors.

    Parameters:
        rgbImage: H x W x 3 RGB image (anything ``np.array`` accepts).
        featureSpace (str): 'rgb', 'lab', 'hsv', 'rgb+xy', 'lab+xy' or 'hsv+xy'.
        clusteringMethod (str): 'kmeans', 'gmm', 'hierarchical' or 'watershed'.
        numberOfClusters (int): number of segments to produce.

    Returns:
        H x W integer label map (one cluster label per pixel).
    """
    # imports are kept function-local, as in the original, so the module can be
    # imported without these heavy dependencies installed
    import numpy as np
    from skimage import color
    import cv2
    import numpy.matlib
    from sklearn.cluster import KMeans
    from sklearn import mixture
    from sklearn import cluster
    from scipy import ndimage as ndi
    from skimage.morphology import watershed
    from skimage.feature import peak_local_max
    import scipy

    # make sure we work on an ndarray
    rgbImage = np.array(rgbImage)
    # upper bounds used when rescaling color channels vs. the x/y channels;
    # their ratio controls how much spatial position weighs against color
    colmax = 5
    spacemax = 5

    def _rescale(channels, top):
        # rescale every channel to [0, top] as float32 (min-max normalization)
        return cv2.normalize(channels, np.zeros((channels.shape), dtype=np.uint8),
                             alpha=0, beta=top, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)

    def _xy_grids(img):
        # x holds the column index of every pixel, y the row index, both
        # rescaled to [0, spacemax]
        x = np.matlib.repmat(np.array(range(img.shape[1])), img.shape[0], 1)
        y = np.matlib.repmat(np.transpose([np.array(range(img.shape[0]))]), 1, img.shape[1])
        return _rescale(x, spacemax), _rescale(y, spacemax)

    # --- build the per-pixel feature image --------------------------------
    if featureSpace == 'rgb':
        image = _rescale(rgbImage, colmax)
    elif featureSpace == 'lab':
        image = _rescale(color.rgb2lab(rgbImage), colmax)
    elif featureSpace == 'hsv':
        image = _rescale(color.rgb2hsv(rgbImage), colmax)
    elif featureSpace == 'rgb+xy':
        # FIX: the original normalized xcoord FROM ycoord in this branch, so
        # both spatial channels carried the row index; the lab+xy/hsv+xy
        # branches did it correctly and this now matches them.
        xcoord, ycoord = _xy_grids(rgbImage)
        image = _rescale(rgbImage, colmax)
        image = np.stack((image[:, :, 0], image[:, :, 1], image[:, :, 2], xcoord, ycoord), axis=2)
    elif featureSpace == 'lab+xy':
        image = color.rgb2lab(rgbImage)
        xcoord, ycoord = _xy_grids(image)
        image = _rescale(image, colmax)
        image = np.stack((image[:, :, 0], image[:, :, 1], image[:, :, 2], xcoord, ycoord), axis=2)
    elif featureSpace == 'hsv+xy':
        image = color.rgb2hsv(rgbImage)
        xcoord, ycoord = _xy_grids(image)
        image = _rescale(image, colmax)
        image = np.stack((image[:, :, 0], image[:, :, 1], image[:, :, 2], xcoord, ycoord), axis=2)

    # --- cluster the feature vectors --------------------------------------
    if clusteringMethod == 'kmeans':
        # flatten to (num_pixels, num_features)
        imager = np.reshape(image, (1, image.shape[0] * image.shape[1], image.shape[2]))[0]
        kmeans = KMeans(n_clusters=numberOfClusters, random_state=0).fit(imager)
        seg = np.reshape(kmeans.labels_, (image.shape[0], image.shape[1]))
        return seg
    elif clusteringMethod == 'gmm':
        imager = np.reshape(image, (1, image.shape[0] * image.shape[1], image.shape[2]))[0]
        gmm = mixture.GaussianMixture(n_components=numberOfClusters, covariance_type='full').fit(imager)
        assig = gmm.predict(imager)
        seg = np.reshape(assig, (image.shape[0], image.shape[1]))
        return seg
    elif clusteringMethod == 'hierarchical':
        # agglomerative clustering is O(n^2) in pixels: shrink to 100x100,
        # cluster, then upsample the label map back to the original size
        original_shape = image.shape
        image = np.array(cv2.resize(image, (100, 100)))
        imager = np.reshape(image, (1, image.shape[0] * image.shape[1], image.shape[2]))[0]
        hierClus = cluster.AgglomerativeClustering(n_clusters=numberOfClusters, affinity='euclidean')
        assig = hierClus.fit_predict(imager)
        seg = np.reshape(assig, (100, 100))
        # NOTE(review): scipy.misc.imresize was removed in SciPy >= 1.3;
        # confirm the pinned SciPy version before running this branch.
        seg = scipy.misc.imresize(seg, (original_shape[0], original_shape[1]), interp='nearest')
        return seg
    elif clusteringMethod == 'watershed':
        # watershed operates on a single intensity channel
        if featureSpace == 'rgb+xy' or featureSpace == 'rgb':
            image = np.mean(image[:, :, 0:3], axis=2)
        else:
            image = image[:, :, 0]
        # seeds: the numberOfClusters deepest minima of the channel
        local_max = peak_local_max(-1 * image, indices=False, num_peaks=numberOfClusters, num_peaks_per_label=1)
        markers = ndi.label(local_max)[0]
        # watershed labels start at 1; shift to 0-based like the other methods
        seg = watershed(image, markers) - 1
        return seg
"sergioalgl2@gmail.com"
] | sergioalgl2@gmail.com |
67ceb865d11bf7d82086694f8879b057f68bf848 | 864285315c3a154639355f14ab1ff14633576405 | /mapclientplugins/segmentationstep/tools/handlers/abstractselection.py | 4315d4f55f509d3c4b410e9a7a07ad7b29f48cb1 | [] | no_license | hsorby/segmentationstep | 774dc537967c9643bd0094dc4e64eefa472588b0 | 321505374f9434ac0ae832b0b00398c2d4ac1fbe | refs/heads/main | 2021-09-28T09:06:07.197158 | 2015-08-14T07:59:55 | 2015-08-14T07:59:55 | 21,375,254 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,527 | py | '''
MAP Client, a program to generate detailed musculoskeletal models for OpenSim.
Copyright (C) 2012 University of Auckland
This file is part of MAP Client. (http://launchpad.net/mapclient)
MAP Client is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
MAP Client is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with MAP Client. If not, see <http://www.gnu.org/licenses/>..
'''
from PySide import QtCore
from mapclientplugins.segmentationstep.tools.handlers.abstracthandler import AbstractHandler
from mapclientplugins.segmentationstep.zincutils import setGlyphSize, setGlyphOffset, COORDINATE_SYSTEM_LOCAL, \
createSelectionBox
from mapclientplugins.segmentationstep.undoredo import CommandSelection
from mapclientplugins.segmentationstep.definitions import SELECTION_BOX_3D_GRAPHIC_NAME
class SelectionMode(object):
    """Constants describing the current selection interaction state."""
    NONE = -1       # no selection gesture in progress
    EXCULSIVE = 0   # replace the current selection (sic: misspelling of 'exclusive' kept for compatibility)
    ADDITIVE = 1    # toggle picked nodes in/out of the current selection
class AbstractSelection(AbstractHandler):
    """Base handler adding node selection on top of AbstractHandler.

    Supports rubber-band (rectangle) and single-click picking.  A selection
    gesture starts with Shift+LeftButton; holding Alt as well switches from
    exclusive (replace) to additive (toggle) mode.  Every completed selection
    change is recorded on the undo/redo stack as a CommandSelection.
    """

    def __init__(self, plane, undo_redo_stack):
        super(AbstractSelection, self).__init__(plane, undo_redo_stack)
        # Screen-space rectangle graphic that gives rubber-band feedback.
        self._selection_box = createSelectionBox(plane.getRegion(), SELECTION_BOX_3D_GRAPHIC_NAME)
        self._selection_mode = SelectionMode.NONE
        # [x, y] window coordinates of the mouse-down that started the gesture.
        self._selection_position_start = None

    def mousePressEvent(self, event):
        """Begin a selection gesture on Shift+LeftButton; otherwise defer to base."""
        self._selection_mode = SelectionMode.NONE
        if event.modifiers() & QtCore.Qt.SHIFT and event.button() == QtCore.Qt.LeftButton:
            self._selection_position_start = [event.x(), event.y()]
            self._selection_mode = SelectionMode.EXCULSIVE
            if event.modifiers() & QtCore.Qt.ALT:
                self._selection_mode = SelectionMode.ADDITIVE
            # Snapshot the selection so the undo command can restore it later.
            self._start_selection = self._model.getCurrentSelection()
        else:
            super(AbstractSelection, self).mousePressEvent(event)

    def mouseMoveEvent(self, event):
        """Resize the on-screen selection box while a gesture is in progress."""
        if self._selection_mode != SelectionMode.NONE:
            x = event.x()
            y = event.y()
            xdiff = float(x - self._selection_position_start[0])
            ydiff = float(y - self._selection_position_start[1])
            # Guard against a zero-sized drag so the offset division below
            # cannot divide by zero.
            if abs(xdiff) < 0.0001:
                xdiff = 1
            if abs(ydiff) < 0.0001:
                ydiff = 1
            xoff = float(self._selection_position_start[0]) / xdiff + 0.5
            yoff = float(self._selection_position_start[1]) / ydiff + 0.5
            scene = self._selection_box.getScene()
            # Batch the glyph edits into one scene change notification.
            scene.beginChange()
            # Negative y size: presumably window y grows downwards while the
            # glyph's axis grows upwards -- TODO confirm against zincutils.
            setGlyphSize(self._selection_box, [xdiff, -ydiff, 0.999])
            setGlyphOffset(self._selection_box, [xoff, yoff, 0])
            self._selection_box.setVisibilityFlag(True)
            scene.endChange()
        else:
            super(AbstractSelection, self).mouseMoveEvent(event)

    def mouseReleaseEvent(self, event):
        """Finish the gesture: pick nodes, update the group, push an undo command."""
        if self._selection_mode != SelectionMode.NONE:
            x = event.x()
            y = event.y()
            # Construct a small frustum to look for nodes in.
            region = self._model.getRegion()
            region.beginHierarchicalChange()
            self._selection_box.setVisibilityFlag(False)
            selection_group = self._model.getSelectionGroupField()
            # A genuine drag (both coordinates moved) is a rectangle pick;
            # otherwise fall through to nearest-node (single click) picking.
            if (x != self._selection_position_start[0] and y != self._selection_position_start[1]):
                left = min(x, self._selection_position_start[0])
                right = max(x, self._selection_position_start[0])
                bottom = min(y, self._selection_position_start[1])
                top = max(y, self._selection_position_start[1])
                self._zinc_view.setPickingRectangle(COORDINATE_SYSTEM_LOCAL, left, bottom, right, top)
                if self._selection_mode == SelectionMode.EXCULSIVE:
                    # Replace mode: empty the group before adding the picks.
                    selection_group.clear()
                self._zinc_view.addPickedNodesToFieldGroup(selection_group)
            else:
                node = self._zinc_view.getNearestNode(x, y)
                if self._selection_mode == SelectionMode.EXCULSIVE and not node.isValid():
                    # Clicking empty space in replace mode clears everything.
                    selection_group.clear()
                if node.isValid():
                    group = self._model.getSelectionGroup()
                    if self._selection_mode == SelectionMode.EXCULSIVE:
                        # Clicking the only selected node deselects it.
                        remove_current = group.getSize() == 1 and group.containsNode(node)
                        selection_group.clear()
                        if not remove_current:
                            group.addNode(node)
                    elif self._selection_mode == SelectionMode.ADDITIVE:
                        # Toggle membership of the picked node.
                        if group.containsNode(node):
                            group.removeNode(node)
                        else:
                            group.addNode(node)
            # Record the before/after selection for undo/redo.
            end_selection = self._model.getCurrentSelection()
            c = CommandSelection(self._model, self._start_selection, end_selection)
            self._undo_redo_stack.push(c)
            region.endHierarchicalChange()
            self._selection_mode = SelectionMode.NONE
        else:
            super(AbstractSelection, self).mouseReleaseEvent(event)
| [
"h.sorby@auckland.ac.nz"
] | h.sorby@auckland.ac.nz |
525be32e2f93e6b78338a34c3165291754017af7 | 8ad05f071487a25649ed51c42f5eb76ecccffa79 | /main.py | 5ba9fbd703737330b8ba15b567eecea8b1ff3d48 | [] | no_license | combjellly/WaveDash | 541595aece0a0e668a538fff346ffcf3631ec863 | e388469e1beebb8f1c8608ea85c1b6ecb7bb4ae2 | refs/heads/main | 2023-02-16T08:49:36.545841 | 2021-01-16T02:47:54 | 2021-01-16T02:47:54 | 328,087,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,793 | py | '''wavenum = input("how many voices? ")
length = input("how long for cycle: seconds ")
slowest = input("voice with least amount of cycles? # of cycles ")
fastest = input("voice with most amount of cycles? # of cycles ")
'''
import tkinter as tk
import pdfileformat as pdff
# Default parameters; overwritten from the GUI entries when <Return> is pressed.
wavenum = 20
length = 20
slowest = 2
fastest = 100

# Create/empty the output file up front, closing the handle immediately.
# (Bug fix: the original kept this handle open for the program's lifetime.)
open("pendulum.txt", "w+").close()

d = {}  # NOTE(review): unused at module level; wavenumdesignation uses a local dict

root = tk.Tk()
root.title('PENDULUM CALCULATOR')
root.geometry("400x400")
root.configure(bg="brown")

# Read-only entry mirrors this StringVar to display the per-voice results.
output = tk.StringVar()
output.set("Generate list of voices in ms\n\n\n\n\n\n\n\n\n\n")
mytext = tk.StringVar(value='test ' * 30)  # NOTE(review): never attached to any widget
myentry = tk.Entry(root, textvariable=output, state='readonly')
myscroll = tk.Scrollbar(root, orient='horizontal', command=myentry.xview)
myentry.config(xscrollcommand=myscroll.set)

#######GUI#############################
# LABELS
lvoices = tk.Label(root, text="Number of voices")
llength = tk.Label(root, text="Length of cycle (seconds)")
lslowest = tk.Label(root, text="Slowest voice (# of cycles) ")
lfastest = tk.Label(root, text="Fastest voice (# of cycles) ")
loutput = tk.Label(root, textvariable= output)
# ENTRY
evoices = tk.Entry(root, text="Number of voices")
elength = tk.Entry(root, text="Length of cycle")
eslowest = tk.Entry(root, text="Slowest voices")
efastest = tk.Entry(root, text="Fastest voices")
#PACKING
lvoices.pack()
evoices.pack()
llength.pack()
elength.pack()
lslowest.pack()
eslowest.pack()
lfastest.pack()
efastest.pack()
loutput.pack()
# Bug fix: the original read `myscroll.pack` without parentheses -- a no-op
# attribute access, so the scrollbar was never actually packed.
myscroll.pack()
###FUNCTIONS##################
#gets values from entry widgets
def getvalues(event):
    """Read the four GUI entries and trigger the pendulum calculation.

    Bound to <Return>; *event* is the Tk event object (unused).  The module
    global ``length`` is updated, matching the original behaviour.
    """
    global length
    try:
        wavenum = int(evoices.get())
        length = int(elength.get())
        slowest = int(eslowest.get())
        fastest = int(efastest.get())
    except ValueError:
        # Robustness fix: non-numeric input used to raise an uncaught
        # ValueError inside the Tk callback; report it in the GUI instead.
        output.set("Please enter whole numbers in all four fields.")
        return
    incrementcalc(wavenum, length, slowest, fastest)
# Recompute whenever the user presses <Return> anywhere in the window.
root.bind("<Return>", getvalues)
# calculates how each voice should increment in (?)
def incrementcalc(wavenum, length, slowest, fastest):
    """Derive the per-voice cycle increment and forward it down the chain.

    The increment spreads the cycle counts evenly between the slowest and
    fastest voices across ``wavenum`` voices.
    """
    cycle_span = int(fastest) - int(slowest)
    steps_between_voices = int(wavenum) - 1
    increment = cycle_span / steps_between_voices
    changeifstatements(wavenum, length, slowest, fastest, increment)
    print(increment)
# tbh idr
def changeifstatements(wavenum, length, slowest, fastest, increment):
    """Pass-through step between increment calculation and voice generation.

    The original implementation computed a ``change`` flag by comparing
    ``increment * wavenum`` against ``fastest`` but never used or forwarded
    it, so that dead code has been removed; behaviour is unchanged.
    """
    wavenumdesignation(wavenum, length, slowest, fastest, increment)
# calculates n writes voice BPM
def wavenumdesignation(wavenum, length, slowest, fastest, increment):
    """Compute each voice's metro period in milliseconds, then publish it.

    Voice 1 runs at ``slowest`` cycles per ``length`` seconds; each later
    voice adds ``increment`` cycles.  The per-voice period is
    ``length / cycles * 1000`` ms.  The resulting list is shown in the GUI,
    written to ``pendulum.txt`` and handed to the Pd file formatter.
    ``fastest`` is unused here (it is already folded into ``increment``).
    """
    # Bug fix: voice 1 was keyed by the *string* '1' while all other voices
    # used int keys; an int key keeps the mapping consistent.  Iteration
    # order (insertion order) and the produced values are unchanged.
    cycles = {1: slowest}
    for x in range(1, wavenum):
        cycles[x + 1] = (x * increment) + slowest
    lines = []
    for voice in cycles:
        seconds = length / cycles[voice]
        seconds = seconds * 1000  # one cycle's period, in milliseconds
        # NOTE: despite the historical 'bpm' naming, these are metro periods
        # (ms), not beats per minute; the unused bpm computation was removed.
        lines.append(str(seconds) + "\n")
    bpmlist = "".join(lines)
    output.set(bpmlist)
    # Context manager replaces the manual open/write/close triple.
    with open("pendulum.txt", "w") as out_file:
        out_file.write(bpmlist)
    pdff.format()
root.mainloop() | [
"noreply@github.com"
] | noreply@github.com |
4466065ff3a5aad40065cdac02eaf15802721295 | de52ebdb1b4ccd54ec75686f9dc0bbf5f0548b1d | /ex_dictionary.py | 7da8fa0e9b314983af398f667b9c16942e134fec | [] | no_license | ask4git/BUFS-Post-Processing-Module | 12282a088296c9014dbffed2dd413d5b6c54fd3d | 5e811b3b7a1e129dc16567d6b1751b9aecaef519 | refs/heads/master | 2020-05-13T16:33:28.298874 | 2019-04-22T11:35:53 | 2019-04-22T11:35:53 | 181,640,117 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,470 | py |
# -*- coding: utf-8 -*-
import pickle
class ExDictionary:
    """Build and persist the exception-expression dictionary.

    Each line of the source file has the form ``surface/TAG+surface/TAG+...``
    and is parsed into a pair of parallel lists ``[[surface, ...], [tag, ...]]``.
    """

    @staticmethod
    def make_ex_dictionary(path='.\\res\\exception_expression.txt'):
        """Parse *path* into a list of ``[surfaces, tags]`` entries.

        Generalized: *path* is now a parameter defaulting to the original
        hard-coded location, so alternative dictionary files can be loaded.
        The first line of the file is treated as a header and dropped.
        Returns None (after printing the error) if any line lacks a
        '/'-separated tag, matching the original behaviour.
        """
        dictionary = list()
        with open(path, 'r', encoding='utf-8-sig') as ex_data_file:
            for each_line in ex_data_file:
                try:
                    # 'ab/NNG+cd/JKS' -> surfaces ['ab', 'cd'], tags ['NNG', 'JKS']
                    morphemes = [token.split('/') for token in each_line.strip().split('+')]
                    surfaces = [morpheme[0] for morpheme in morphemes]
                    tags = [morpheme[1] for morpheme in morphemes]
                    dictionary.append([surfaces, tags])
                except IndexError as error:
                    print(error)
                    return None
        # Drop the header entry.
        return dictionary[1:]

    @staticmethod
    def save_dictionary(data, path):
        """Pickle *data* to *path*."""
        with open(path, 'wb') as dictionary_file:
            pickle.dump(data, dictionary_file)

    @staticmethod
    def load_dictionary(path):
        """Unpickle and return the dictionary stored at *path*."""
        with open(path, 'rb') as dictionary_file:
            return pickle.load(dictionary_file, encoding='utf-8')
| [
"ask4git@gmail.com"
] | ask4git@gmail.com |
f0284d22965f628a9a0b899b316fe6e649b59ee5 | ce76b3ef70b885d7c354b6ddb8447d111548e0f1 | /same_place/own_number/bad_way_or_year/different_week_or_able_work.py | 66bfb32cdb2de9f544d9b8a983695a71eb049913 | [] | no_license | JingkaiTang/github-play | 9bdca4115eee94a7b5e4ae9d3d6052514729ff21 | 51b550425a91a97480714fe9bc63cb5112f6f729 | refs/heads/master | 2021-01-20T20:18:21.249162 | 2016-08-19T07:20:12 | 2016-08-19T07:20:12 | 60,834,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py |
#! /usr/bin/env python
def place(str_arg):
find_world_by_case(str_arg)
print('point_and_little_problem')
def find_world_by_case(str_arg):
print(str_arg)
if __name__ == '__main__':
place('last_day')
| [
"jingkaitang@gmail.com"
] | jingkaitang@gmail.com |
ac4d6e76ee26b19ee2ff04a77b386ed4cf0059c9 | f7c1282dd377b95621436587fd2a6cb28a455d74 | /om_hr_payroll/__manifest__.py | 2c39fcc961672ef5c2c5369414d6b86b5a869f74 | [] | no_license | odoomates/odooapps | a22fa15346694563733008c42549ebc0da7fc9f6 | 459f3b25d31da24043523e72f8be09af9a1e67e9 | refs/heads/master | 2023-08-11T15:25:28.508718 | 2022-10-14T07:58:36 | 2022-10-14T07:58:36 | 173,598,986 | 182 | 306 | null | 2023-08-10T17:58:46 | 2019-03-03T16:20:23 | Python | UTF-8 | Python | false | false | 1,550 | py | # -*- coding:utf-8 -*-
{
    # Identity and listing information shown in the Odoo apps store.
    'name': 'Odoo 16 HR Payroll',
    'category': 'Generic Modules/Human Resources',
    'version': '16.0.1.0.0',
    'sequence': 1,
    'author': 'Odoo Mates, Odoo SA',
    'summary': 'Payroll For Odoo 16 Community Edition',
    'live_test_url': 'https://www.youtube.com/watch?v=0kaHMTtn7oY',
    'description': "Odoo 16 Payroll, Payroll Odoo 16, Odoo Community Payroll",
    'website': 'https://www.odoomates.tech',
    'license': 'LGPL-3',
    # Modules that must be installed before this one.
    'depends': [
        'mail',
        'hr_contract',
        'hr_holidays',
    ],
    # Data files loaded on install/upgrade, in order (security rules first).
    'data': [
        'security/hr_payroll_security.xml',
        'security/ir.model.access.csv',
        'data/hr_payroll_sequence.xml',
        'data/hr_payroll_category.xml',
        'data/hr_payroll_data.xml',
        'wizard/hr_payroll_payslips_by_employees_views.xml',
        'views/hr_contract_type_views.xml',
        'views/hr_contract_views.xml',
        'views/hr_salary_rule_views.xml',
        'views/hr_payslip_views.xml',
        'views/hr_employee_views.xml',
        'views/hr_payroll_report.xml',
        'wizard/hr_payroll_contribution_register_report_views.xml',
        'views/res_config_settings_views.xml',
        'views/report_contribution_register_templates.xml',
        'views/report_payslip_templates.xml',
        'views/report_payslip_details_templates.xml',
        'views/hr_contract_history_views.xml',
        'views/hr_leave_type_view.xml',
        'data/mail_template.xml',
    ],
    'images': ['static/description/banner.png'],
    # Presented as a full application in the apps menu, not a plain module.
    'application': True,
}
| [
"odoomates@gmail.com"
] | odoomates@gmail.com |
30f8760515740badc64e653bb429b07937a5b028 | 00020e9785085a14a3ced97301ec8638e770eca3 | /embark/uploader/test_models.py | 2ccfd581526c64b8f41b8b317bb67f9214ca2418 | [
"MIT"
] | permissive | MaximilianWagner/amos-ss2021-emba-service | cb1114bf65aba9c4ab945f45e54370bef9684d1b | afda9ec5a180c8462a1fed2422df2c0e2b7c2c7a | refs/heads/main | 2023-05-28T19:33:06.508271 | 2021-06-16T10:58:54 | 2021-06-16T10:58:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,049 | py | from django.test import TestCase
from .models import Firmware
class test_models(TestCase):
    """Unit tests for Firmware.get_flags()."""

    # Boolean flag attributes, in the order the original tests assigned them.
    _FLAG_ATTRS = (
        'cwe_checker', 'docker_container', 'deep_extraction', 'log_path',
        'grep_able_log', 'relative_paths', 'ANSI_color', 'web_reporter',
        'emulation_test', 'dependency_check', 'multi_threaded',
    )

    def setUp(self):
        self.fw_file = None

    # TODO: add timeout
    def test_get_flags_all_true(self):
        """
        test get_flags() if all flags set to String/True
        """
        firmware = Firmware(firmware=self.fw_file)
        firmware.version = "version"
        firmware.vendor = "vendor"
        firmware.device = "device"
        firmware.notes = "notes"
        firmware.firmware_Architecture = "x64"
        for attr in self._FLAG_ATTRS:
            setattr(firmware, attr, True)
        expected_string = " -X version -Y vendor -Z device -N notes -a x64 -c -x -i -g -s -z -W -E -F -t"
        self.assertEqual(firmware.get_flags(), expected_string)

    def test_get_flags_all_false(self):
        """
        test get_flags() if all flags set to None/False
        """
        firmware = Firmware(firmware=self.fw_file)
        firmware.version = None
        firmware.vendor = None
        firmware.device = None
        firmware.notes = None
        firmware.firmware_Architecture = None
        for attr in self._FLAG_ATTRS:
            setattr(firmware, attr, False)
        expected_string = ""
        self.assertEqual(firmware.get_flags(), expected_string)
| [
"wagnermaximilian@aol.com"
] | wagnermaximilian@aol.com |
178960390eb252c06cada38f3ada6b9792f26a8f | c12171e8838f70a02d8ed52bcdb620ffd1864a8d | /uh_paper_analysis.py | 1aeb6e9e4ed893d89c15cae3498f6d0870a10273 | [] | no_license | berdakh/source-Imaging | 02b6ecb226685610b8b4ea924a46f3bef8bee135 | 9d1ccbec64fc29da587fe126e3af25a885145c56 | refs/heads/master | 2021-06-12T22:12:03.677295 | 2021-04-09T06:40:53 | 2021-04-09T06:40:53 | 184,751,119 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,219 | py | import numpy as np
from numpy.random import randn
from scipy import stats as stats
import os
import mne
import matplotlib.pyplot as plt
from mne.minimum_norm import (apply_inverse,apply_inverse_epochs,make_inverse_operator)
from mne.connectivity import seed_target_indices, spectral_connectivity
import glob
"""
The goal of the analysis is to make a statistical appraisal of the neural
activation evoked by the BMI-triggered exoskeleton at movement onset, as detected by the Robot event.
1) The question is whether evidence can be found against the null hypothesis that
neural activation (plasticity) on the (somatosensory and motor) cortex
a) does not depend on whether or not the BMI is inactive (subject is idle)
b) does not change across sessions
2) Can we find any evidence against the null hypothesis that neural activation patterns
on the cortex remain the same across sessions?
* In other words, that there is no evidence of cortical reorganization.
"""
#%%##############################################################################
# Read the EEG data
from uhClass import MRCP
mainDir = 'C:\\uhdata\\freesurfer'
subject = 'S9011'
os.chdir(os.path.join(mainDir, subject))
# NOTE(review): [0] assumes at least one '*-epoall.pickle' file exists in the
# subject folder; glob order is arbitrary if there are several -- confirm.
eegfiles = glob.glob('*-epoall.pickle')[0]
#%%
import pickle
# 'eeg' appears to be an iterable of mne Epochs objects (one per condition,
# given the .average() calls below) -- TODO confirm against uhClass pickling.
with open(eegfiles, 'rb') as pickle_file:
    eeg = pickle.load(pickle_file)
#%% Plot EEG
#from uhClass import MRCP
#filename = eegfilename.split('_s')[0]
# Average each condition's epochs into an Evoked response.
ev =[]
for ep in eeg:
    print(ep)
    evoked = ep.average()
    ev.append(evoked)
#%% STEP 1: COMPUTE THE SOURCE SPACE (SOURCE GRID ON MRI)
"""
The source space defines the position of the candidate source locations.
The following code compute such a cortical source space with an
OCT-5 resolution.
"""
# this is a freesurfer folder name
#src = mne.setup_source_space(subject, spacing='oct5', subjects_dir = mainDir)
#%%
#src.plot(head=True, brain=True, skull = True, subjects_dir = mainDir)
#%% READ FORWARD SOLUTION
fname_fwd = os.path.join(mainDir, subject) + '\\' + subject + '-fwd.fif'
fwd = mne.read_forward_solution(fname_fwd)
#%%
# Noise covariance from the pre-movement baseline window (-0.5..0 s).
# NOTE(review): the covariance is estimated from eeg[1] only, yet it is used
# to invert every evoked response in the loop below -- confirm this is intended.
noise_cov = mne.compute_covariance(eeg[1], keep_sample_mean=True, tmin=-0.5, tmax=-0.0)
stcs = []
# Build one minimum-norm source estimate per evoked response (ii is unused).
for ii, epoch in enumerate(ev):
    # tmin, tmax = epoch.time_as_index([-1, -0.5])
    # calculate noise covariance matrix
    # make inverse operator
    info = epoch.info
    inverse_operator = make_inverse_operator(info, fwd, noise_cov, loose=0.2, depth=None)
    # MNE requires an average EEG reference projector before applying the inverse.
    epoch.set_eeg_reference(ref_channels = "average", projection=True)
    # apply inverse solution
    method = 'MNE'
    snr = 3.
    lambda2 = 1. / snr ** 2
    stc = apply_inverse(epoch, inverse_operator, lambda2,method= method, pick_ori="normal",)
    stcs.append(stc)
#%%
#e1 = copy.deepcopy(evoked)
#e2 = copy.deepcopy(evoked)
#
#e1.crop(-0.5, 0)
#e2.crop(0, 0.5)
#%%
import copy
# condition 1 -- > baseline
cond1 = []
# condition 2 ---> Movement detected by Robot
cond2 = []
# Split every source estimate into a baseline window (-0.5..0 s) and a
# movement window (0..0.5 s); deepcopy because crop() works in place.
for source in stcs:
    cond1.append(copy.deepcopy(source).crop(-0.5, 0))
    cond2.append(copy.deepcopy(source).crop(0, 0.5))
#%%
n_vertices_sample, n_times = cond1[2].data.shape
n_subjects = len(cond2)
#%%
# Drop any X left over from a previous interactive (cell-by-cell) run;
# a NameError on the first run is the expected, harmless case.
try:
    del X
except Exception:
    pass
# X layout: vertices x times x observations x condition (0=baseline, 1=movement).
X = np.zeros([n_vertices_sample, n_times, n_subjects, 2])
for ii, c1 in enumerate(cond1):
    X[:,:,ii,0] = c1.data
for ii, c2 in enumerate(cond2):
    X[:,:,ii,1] = c2.data
""""
X = np.zeros([n_vertices_sample, n_times, n_subjects])
for ii, c in enumerate(cond2):
    X[:,:, ii] = c.data
"""
#%%############################################################################
# Finally, we want to compare the overall activity levels in each condition,
# the diff is taken along the last axis (condition). The negative sign makes
# it so condition1 > condition2 shows up as "red blobs" (instead of blue).
X = np.abs(X) # only magnitude
X = X[:, :, :, 0] - X[:, :, :, 1] # make paired contrast
#%%############################################################################
src_fname = os.path.join(mainDir, subject) + '\\'+ subject + '-src.fif'
# Read the source space we are morphing to
src = mne.read_source_spaces(src_fname)
fsave_vertices = [s['vertno'] for s in src]
#%%############################################################################
# Compute statistic
# -----------------
# To use an algorithm optimized for spatio-temporal clustering, we
# just pass the spatial connectivity matrix (instead of spatio-temporal)
print('Computing connectivity.')
# NOTE(review): 'connectivity' is computed here but the clustering call below
# passes connectivity=None, so this matrix goes unused -- confirm intent.
connectivity = mne.spatial_src_connectivity(src)
#X1 = X[:,:,:,1]
# Note that X needs to be a multi-dimensional array of shape
# samples (subjects) x time x space, so we permute dimensions
X1 = np.transpose(X, [2, 1, 0])
# Now let's actually do the clustering. This can take a long time...
#%% Here we set the threshold quite high to reduce computation.
# Two-tailed t threshold used as the cluster-forming statistic.
p_threshold = 0.05
#t_threshold = -stats.distributions.t.ppf(p_threshold / 2., n_subjects - 1)
t_threshold = -stats.distributions.t.ppf(p_threshold / 2., n_subjects - 1)
#%%
tstep = cond2[0].tstep
from mne.stats import (spatio_temporal_cluster_1samp_test, summarize_clusters_stc)
print('Clustering.')
T_obs, clusters, cluster_p_values, H0 = clu = \
    spatio_temporal_cluster_1samp_test(X1, connectivity=None, n_jobs=1,
                                       threshold=t_threshold)
# Now select the clusters that are sig. at p < 0.05 (note that this value
# is multiple-comparisons corrected).
good_cluster_inds = np.where(cluster_p_values < 0.05)[0]
#%%
#from mne.stats import spatio_temporal_cluster_test
#
#a = spatio_temporal_cluster_test(
#    X, threshold=None, n_permutations=1024, tail=0, stat_fun=None,
#    connectivity=None, verbose=None, n_jobs=1, seed=None, max_step=1,
#    spatial_exclude=None, step_down_p=0, t_power=1, out_type='indices',
#    check_disjoint=False, buffer_size=1000)
#%%############################################################################
# Visualize the clusters
# ----------------------
print('Visualizing clusters.')
# Now let's build a convenient representation of each cluster, where each
# cluster becomes a "time point" in the SourceEstimate
#fsave_vertices = [np.arange(X.shape[0]), np.arange(X.shape[0])]
stc_all_cluster_vis = summarize_clusters_stc(clu, p_thresh=0.05, tstep=tstep,
                                             vertices=fsave_vertices, subject=subject)
#%% Let's actually plot the first "time point" in the SourceEstimate, which
# shows all the clusters, weighted by duration
# NOTE(review): os.path.join with a single argument is a no-op, so
# subjects_dir == mainDir.
subjects_dir = os.path.join(mainDir)
# blue blobs are for condition A < condition B, red for A > B
brain = stc_all_cluster_vis.plot(surface='inflated',
                                 hemi='both', views='lateral', subjects_dir=subjects_dir,
                                 time_label='Duration significant (ms)', size=(800, 800),
                                 smoothing_steps=10)
# brain.save_image('clusters.png')
#%% Statistics
"""
1) Parametric Hypothesis Testing assumes Normal distribution or iid
2) Non-parametric Hypothesis Testing does not rely on any assumption and is usually done by
methods such as Bootstrap analysis & Permutation Test
a) Paired data has dependent samples since the data is acquired from the same subject (multiple data)
Paired data: there are two measurements from each patient, one before treatment and one after treatment.
These two measurements relate to one another, we are interested in the difference between the two measurements (the
log ratio) to determine whether a gene has been up-regulated or down-regulated in breast cancer following that treatment.
b) Unpaired data has independent samples, as the data is acquired from two different/distinct subjects
c) Complex data has more than two Groups (ANOVA)
###########################
The significance level (alpha) is related to the degree of certainty you require in
order to reject the null hypothesis in favor of the alternative e.g. alpha = 0.05
The p-value is the probability of observing the given sample result under the
assumption that the null hypothesis is true.
If the p-value is less than alpha, then you reject the null hypothesis.
For example, if alpha = 0.05 and the p-value is 0.03, then you reject the null hypothesis
##########################
Confidence intervals: a range of values that have a chosen probability of
containing the true hypothesized quantity.
###########################
Steps of Hypothesis testing:
1. Determine the null and alternative hypothesis, using
mathematical expressions if applicable.
2. Select a significance level (alpha).
3. Take a random sample from the population of interest.
4. Calculate a test statistic from the sample that provides
information about the null hypothesis.
5. Decision
>>> If the value of the statistic is consistent with the null hypothesis
then do not reject H0.
>>> If the value of the statistic is not consistent with the null
hypothesis, then reject H0 and accept the alternative hypothesis.
##########################
"""
#%%
#fname = 'S9017_ses1_cond1_block0001-rh.stc'
#stc = mne.read_source_estimate(fname) | [
"noreply@github.com"
] | noreply@github.com |
1fe26cb3b013a3d92b2e14e647855da39908047c | cf5d54f3808036c2d68508fe7b5f0bbf336ef6a9 | /regression/compactiv/src/play.py | 944675fe6086b08be7b81484ae666a55f6ad69b5 | [] | no_license | pedroceles/maters_db | 96eeb05172b4a10ca3f5355d55c835e9b4cb2f13 | f33d731685ab194c661088cfb1fb37d69ef084cf | refs/heads/master | 2021-01-19T00:28:22.560260 | 2016-06-21T14:57:49 | 2016-06-21T14:57:49 | 61,642,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | import os
import pandas as pd
from imputation.data_treatment import BaseTreatment
class CustomTreatment(BaseTreatment):
    """Imputation treatment for the bundled 'compactiv' dataset."""

    def __init__(self, source_path=None, *args, **kwargs):
        """Initialise the treatment.

        Bug fix: the original unconditionally overwrote *source_path*, so a
        caller-supplied path was silently ignored.  It is now honoured, and
        the bundled ``compactiv.dat`` file is used only as the default.
        """
        if source_path is None:
            dir_ = os.path.dirname(__file__)
            source_path = os.path.join(dir_, '../original/compactiv.dat')
        super(CustomTreatment, self).__init__(source_path, *args, **kwargs)

    def read_file(self):
        """Read the headerless CSV into self._df and return it.

        The last column is shifted by +1 -- presumably to move the target
        variable off zero; TODO confirm against the imputation pipeline.
        """
        self._df = pd.read_csv(self._source_path, header=None, index_col=None)
        self._df.iloc[:, -1] += 1
        return self._df
| [
"pedro.celes123@gmail.com"
] | pedro.celes123@gmail.com |
820340e47e5849f5a80c36ab04b86e1b9e1ccccb | 1042cf47b885d08075e9f1bafba202726745d0e4 | /pysnips/audio/utils.py | 19ae2a2722f3e639c8cdf78d4ea0614ac9c33713 | [
"MIT"
] | permissive | phenyque/python-snippets | a7da4d32900cc665dd3e74c5f88fa2b9d3bd8326 | b8feecfd87663af13db54784c245b3fba0c93202 | refs/heads/master | 2022-03-17T19:38:12.421745 | 2019-12-08T11:02:40 | 2019-12-08T11:02:40 | 114,359,224 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,706 | py | """
Helper functions to solve common problems/tasks when
messing with audio files
"""
import numpy as np
import soundfile as sf
import itertools
from collections import OrderedDict
# Public API of this module; the underscore helper stays private.
__all__ = ['ambisonics_reorder_channels',
           'extract_channels_from_wav',
           'monofiles_to_multitrack']

# Channel-name sequences for the two common ambisonics channel orderings,
# as mappings {channel index: component name}, up to 3rd order (16 channels).
FUMA = OrderedDict(enumerate(['w',
                              'x', 'y', 'z',
                              'r', 's', 't', 'u', 'v',
                              'k', 'l', 'm', 'n', 'o', 'p', 'q']))

ACN = OrderedDict(enumerate(['w',
                             'y', 'z', 'x',
                             'v', 't', 'r', 's', 'u',
                             'q', 'o', 'm', 'k', 'l', 'n', 'p']))

# Lookup of the supported ordering names used by ambisonics_reorder_channels.
AMBISONICS_ORDERINGS = {'fuma': FUMA, 'acn': ACN}
def _ambisonics_channel_count_from_order(order, three_dim=True):
"""
Helper function that computes the number of channels for a given ambisonics order.
"""
return (order + 1)**2 if three_dim else (2 * order + 1)
def ambisonics_reorder_channels(signal_array, order, input_ordering, output_ordering):
    """
    Reorder ambisonics signals from one channel ordering to another.

    signal_array - Array with the signals as given by soundfile.read from a wav file
    order - order of the ambisonics signals, full sphere representation is assumed
    input_ordering - name of channel ordering of the array ['fuma', 'acn']
    output_ordering - desired output ordering ['fuma', 'acn']
    """
    channel_count = _ambisonics_channel_count_from_order(order)
    assert(signal_array.shape[1] == channel_count)

    # Truncate both ordering tables to the channels actually present.
    src_names = dict(itertools.islice(AMBISONICS_ORDERINGS[input_ordering].items(), channel_count))
    dst_names = dict(itertools.islice(AMBISONICS_ORDERINGS[output_ordering].items(), channel_count))
    # Invert the source table so a component name maps back to its column.
    column_of = {name: idx for idx, name in src_names.items()}
    # For each output position, pick the source column holding that component.
    permutation = [column_of[dst_names[position]] for position in dst_names]
    return signal_array[:, permutation]
def extract_channels_from_wav(filename, channels, write_file=None):
    """Read a wav file and keep only the specified channel numbers.

    channels may be a single int or a sequence of column indices; if
    write_file is given, the extracted channels are written there at the
    original sample rate.  Returns the extracted signal array.
    """
    s, fs = sf.read(filename)
    # Idiom fix: isinstance() instead of an exact type comparison, so int
    # subclasses are also accepted.
    if isinstance(channels, int):
        channels = [channels]
    s = s[:, channels]
    if write_file is not None:
        sf.write(write_file, s, fs)
    return s
def monofiles_to_multitrack(monofiles, new_filename):
    """Read mono wav files and combine them into a multitrack wavfile.

    All inputs must be the same length and sample rate; the rate written is
    the one read from the last input file.
    """
    # TODO: this causes MemoryError when signals are very long
    # Robustness fix: an empty input list previously crashed with a confusing
    # NameError on 'fs'; fail fast with a clear message instead.
    if not monofiles:
        raise ValueError('monofiles_to_multitrack() needs at least one input file')
    signals = []
    for f in monofiles:
        s, fs = sf.read(f)
        signals.append(s)
    multitrack_array = np.asarray(signals).T
    sf.write(new_filename, multitrack_array, fs)
| [
"jankiene@onlinehome.de"
] | jankiene@onlinehome.de |
42a1f97987615325f30edc75f358e38ff7f7ba56 | 450916eee7580beb928ed8f387db4f0a8c1aa508 | /src/amuse/community/petar/__init__.py | 4b06f767ad173c110590200c195514cd334c5292 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | amusecode/amuse | 42095545893f5a86ea79c2a52ce54d3ce8eb204f | b57c1e2fda1457d5025307be105c2aa59b19b574 | refs/heads/main | 2023-08-31T04:50:48.880044 | 2023-08-30T12:00:20 | 2023-08-30T12:00:20 | 18,516,331 | 158 | 118 | Apache-2.0 | 2023-08-30T12:00:22 | 2014-04-07T12:35:07 | AMPL | UTF-8 | Python | false | false | 29 | py | from .interface import Petar
| [
"steven@rieder.nl"
] | steven@rieder.nl |
4ffe1c4e6d09d5cf2a2e002c6ba1082879a802a5 | 84f428469eb718e4c8d3e75f9f61a46662e24a7a | /seahub/api2/endpoints/group_discussions.py | 81c5960b9f27f6b761469db7303317e01de4cca0 | [
"Apache-2.0"
] | permissive | peckjerry/seahub | 52a324a9898dc70082b971ca510c56c8d80ec0fc | 49ac5b9ec26844ec7cee26285c7438f894853182 | refs/heads/master | 2020-12-31T03:17:07.186293 | 2016-03-15T10:19:12 | 2016-03-15T10:19:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,220 | py | import json
from django.core.paginator import EmptyPage, InvalidPage
from django.http import HttpResponse
from django.utils.dateformat import DateFormat
from rest_framework import status
from rest_framework.authentication import SessionAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from seahub.api2.authentication import TokenAuthentication
from seahub.api2.permissions import IsGroupMember
from seahub.api2.throttling import UserRateThrottle
from seahub.api2.utils import api_error
from seahub.group.models import GroupMessage
from seahub.utils.paginator import Paginator
from .utils import api_check_group
json_content_type = 'application/json; charset=utf-8'
class GroupDiscussions(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated, IsGroupMember)
throttle_classes = (UserRateThrottle, )
@api_check_group
def get(self, request, group_id, format=None):
"""List all group discussions. Only group members can perform this op.
"""
# 1 <= page, defaults to 1
try:
page = int(request.GET.get('page', '1'))
except ValueError:
page = 1
if page < 0:
page = 1
# 1 <= per_page <= 100, defaults to 20
try:
per_page = int(request.GET.get('per_page', '20'))
except ValueError:
per_page = 20
if per_page < 1 or per_page > 100:
per_page = 20
paginator = Paginator(GroupMessage.objects.filter(
group_id=group_id).order_by('-timestamp'), per_page)
try:
group_msgs = paginator.page(page)
except (EmptyPage, InvalidPage):
group_msgs = paginator.page(paginator.num_pages)
msgs = []
for e in group_msgs:
msgs.append({
"group_id": group_id,
"discussion_id": e.pk,
"user": e.from_email,
"content": e.message,
"created_at": e.timestamp.strftime("%Y-%m-%dT%H:%M:%S") + DateFormat(e.timestamp).format('O'),
})
return HttpResponse(json.dumps(msgs), status=200,
content_type=json_content_type)
@api_check_group
def post(self, request, group_id, format=None):
"""Post a group discussions. Only group members can perform this op.
"""
content = request.data.get('content', '')
if not content:
return api_error(status.HTTP_400_BAD_REQUEST, 'Content can not be empty.')
username = request.user.username
discuss = GroupMessage.objects.create(group_id=group_id,
from_email=username,
message=content)
return Response({
"group_id": group_id,
"discussion_id": discuss.pk,
"user": username,
"content": discuss.message,
"created_at": discuss.timestamp.strftime("%Y-%m-%dT%H:%M:%S") + DateFormat(discuss.timestamp).format('O'),
}, status=201)
| [
"xiez1989@gmail.com"
] | xiez1989@gmail.com |
79663e1e776c4e22aac164836fee33a006dcd6c3 | 67c18922f98936713be1b2d173797eaf70570630 | /models/library_book.py | 39cb0a63f4b49744596fdf16abde087477d0dd6b | [] | no_license | edinsonlenin/my_library | 4fd301f83b2bd5d0afc6b3be5d1aca405218c7dd | a6f36baa861ffcfaeab0395db1a059d478ed3f49 | refs/heads/main | 2023-08-15T23:41:06.310649 | 2021-10-15T14:31:32 | 2021-10-15T14:31:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 275 | py | from odoo import models, fields
class LibraryBook(models.Model):
_name = 'library.book'
name = fields.Char('Title', required=True)
date_release = fields.Date('Release Date')
author_ids = fields.Many2many(
'res.partner',
string='Authors'
) | [
"edinsonlen@hotmail.com"
] | edinsonlen@hotmail.com |
1ae237d50f3b39abb4962276db742147c966c2c6 | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/l3ext/domdef.py | d549d045a6b6b4cb4b650cffb5b307b2cc6edf07 | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,636 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class DomDef(Mo):
"""
This is generated and used only by internal processes.
"""
meta = ClassMeta("cobra.model.l3ext.DomDef")
meta.moClassName = "l3extDomDef"
meta.rnFormat = "l3dom-%(name)s"
meta.category = MoCategory.REGULAR
meta.label = "Outside L3 Domain"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x80384001000601
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.childClasses.add("cobra.model.infra.RtDomAtt")
meta.childClasses.add("cobra.model.fault.Counts")
meta.childClasses.add("cobra.model.extnw.RtL3InstPToDomP")
meta.childClasses.add("cobra.model.infra.RsVlanNs")
meta.childClasses.add("cobra.model.extnw.RtL3DomAtt")
meta.childClasses.add("cobra.model.infra.RtDomRef")
meta.childClasses.add("cobra.model.fault.Inst")
meta.childClasses.add("cobra.model.extnw.LblCont")
meta.childClasses.add("cobra.model.infra.RtLDevDomP")
meta.childClasses.add("cobra.model.infra.RtDomP")
meta.childClasses.add("cobra.model.infra.RsVipAddrNs")
meta.childClasses.add("cobra.model.infra.RtDynPathAtt")
meta.childClasses.add("cobra.model.extnw.RsOut")
meta.childClasses.add("cobra.model.health.Inst")
meta.childClasses.add("cobra.model.infra.RsDomVxlanNsDef")
meta.childClasses.add("cobra.model.infra.RsVlanNsDef")
meta.childClasses.add("cobra.model.infra.RtExtDevDomP")
meta.childClasses.add("cobra.model.infra.RtNicProfToDomP")
meta.childClasses.add("cobra.model.fault.Delegate")
meta.childClasses.add("cobra.model.infra.RtDomDef")
meta.childNamesAndRnPrefix.append(("cobra.model.infra.RtNicProfToDomP", "rtextdevNicProfToDomP-"))
meta.childNamesAndRnPrefix.append(("cobra.model.extnw.RtL3InstPToDomP", "rtl3extL3InstPToDomP-"))
meta.childNamesAndRnPrefix.append(("cobra.model.infra.RtDynPathAtt", "rtl3extDynPathAtt-"))
meta.childNamesAndRnPrefix.append(("cobra.model.extnw.RtL3DomAtt", "rtl3extL3DomAtt-"))
meta.childNamesAndRnPrefix.append(("cobra.model.infra.RtExtDevDomP", "rtedmExtDevDomP-"))
meta.childNamesAndRnPrefix.append(("cobra.model.infra.RsDomVxlanNsDef", "rsdomVxlanNsDef"))
meta.childNamesAndRnPrefix.append(("cobra.model.infra.RtDomDef", "rtextdevDomDef-"))
meta.childNamesAndRnPrefix.append(("cobra.model.infra.RtLDevDomP", "rtvnsLDevDomP-"))
meta.childNamesAndRnPrefix.append(("cobra.model.infra.RtDomRef", "rtedmDomRef-"))
meta.childNamesAndRnPrefix.append(("cobra.model.infra.RtDomAtt", "rtfvDomAtt-"))
meta.childNamesAndRnPrefix.append(("cobra.model.infra.RsVipAddrNs", "rsvipAddrNs"))
meta.childNamesAndRnPrefix.append(("cobra.model.infra.RsVlanNsDef", "rsvlanNsDef"))
meta.childNamesAndRnPrefix.append(("cobra.model.infra.RsVlanNs", "rsvlanNs"))
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Counts", "fltCnts"))
meta.childNamesAndRnPrefix.append(("cobra.model.extnw.LblCont", "lblcont"))
meta.childNamesAndRnPrefix.append(("cobra.model.infra.RtDomP", "rtdomP-"))
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Inst", "fault-"))
meta.childNamesAndRnPrefix.append(("cobra.model.extnw.RsOut", "rsout-"))
meta.childNamesAndRnPrefix.append(("cobra.model.health.Inst", "health"))
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))
meta.parentClasses.add("cobra.model.fv.RtdEpP")
meta.superClasses.add("cobra.model.infra.ADomP")
meta.superClasses.add("cobra.model.infra.DomP")
meta.superClasses.add("cobra.model.l3ext.ADomP")
meta.superClasses.add("cobra.model.pol.Obj")
meta.superClasses.add("cobra.model.pol.Dom")
meta.superClasses.add("cobra.model.naming.NamedObject")
meta.superClasses.add("cobra.model.fv.ADomP")
meta.superClasses.add("cobra.model.pol.Cont")
meta.superClasses.add("cobra.model.extnw.DomP")
meta.rnPrefixes = [
('l3dom-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "configIssues", "configIssues", 4941, PropCategory.REGULAR)
prop.label = "Configuration Issues"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "none"
prop._addConstant("cdp-lldp-collision", "both-cdp-policy-and-lldp-policy-are-configured-for-attach-entity-profile", 16)
prop._addConstant("enhanced-lacp-lag-creation-skipped", "enhanced-lacp-lag-policy-creation-skipped,-dvs-has-lacp-v1-enabled", 4096)
prop._addConstant("invalid-mcast-addr", "missing-multicast-address-for-vxlan-mode", 512)
prop._addConstant("invalid-port", "invalid-port-for-fabric-interface", 1024)
prop._addConstant("invalid-vxlan-ns-range", "vxlan-range-below-0x800000-is-not-valid-for-n1kv-ns-mode", 128)
prop._addConstant("missing-assoc-attEntP", "domain-is-missing-association-from-attach-entity-profile", 8)
prop._addConstant("missing-encap", "invalid-or-missing-encapsulation", 1)
prop._addConstant("missing-encapblk", "invalid-or-missing-encapsulation-blocks", 4)
prop._addConstant("missing-epg", "association-to-end-point-group-not-specified", 2)
prop._addConstant("missing-internal-vlan-blk", "missing-internal-vlan-encapsulation-blocks", 2048)
prop._addConstant("missing-ns-assoc", "invalid-or-missing-association-to-vlan-or-vxlan-namespace", 256)
prop._addConstant("multiple-cdp", "more-than-one-cdp-policy-found-for-attach-entity-profile", 64)
prop._addConstant("multiple-lldp", "more-than-one-lldp-policy-found-for-attach-entity-profile", 32)
prop._addConstant("none", "n/a", 0)
meta.props.add("configIssues", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "monPolDn", "monPolDn", 14212, PropCategory.REGULAR)
prop.label = "Monitoring policy attached to this observable object"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("monPolDn", prop)
prop = PropMeta("str", "name", "name", 6853, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
prop.range = [(1, 64)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("name", prop)
prop = PropMeta("str", "nameAlias", "nameAlias", 28417, PropCategory.REGULAR)
prop.label = "Name alias"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 63)]
prop.regex = ['[a-zA-Z0-9_.-]+']
meta.props.add("nameAlias", prop)
prop = PropMeta("str", "ownerKey", "ownerKey", 15232, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("ownerKey", prop)
prop = PropMeta("str", "ownerTag", "ownerTag", 15233, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("ownerTag", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "targetDscp", "targetDscp", 1625, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.defaultValue = 64
prop.defaultValueStr = "unspecified"
prop._addConstant("AF11", "af11-low-drop", 10)
prop._addConstant("AF12", "af12-medium-drop", 12)
prop._addConstant("AF13", "af13-high-drop", 14)
prop._addConstant("AF21", "af21-low-drop", 18)
prop._addConstant("AF22", "af22-medium-drop", 20)
prop._addConstant("AF23", "af23-high-drop", 22)
prop._addConstant("AF31", "af31-low-drop", 26)
prop._addConstant("AF32", "af32-medium-drop", 28)
prop._addConstant("AF33", "af33-high-drop", 30)
prop._addConstant("AF41", "af41-low-drop", 34)
prop._addConstant("AF42", "af42-medium-drop", 36)
prop._addConstant("AF43", "af43-high-drop", 38)
prop._addConstant("CS0", "cs0", 0)
prop._addConstant("CS1", "cs1", 8)
prop._addConstant("CS2", "cs2", 16)
prop._addConstant("CS3", "cs3", 24)
prop._addConstant("CS4", "cs4", 32)
prop._addConstant("CS5", "cs5", 40)
prop._addConstant("CS6", "cs6", 48)
prop._addConstant("CS7", "cs7", 56)
prop._addConstant("EF", "expedited-forwarding", 46)
prop._addConstant("VA", "voice-admit", 44)
prop._addConstant("unspecified", "unspecified", 64)
meta.props.add("targetDscp", prop)
meta.namingProps.append(getattr(meta.props, "name"))
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Path"
meta.deploymentQueryPaths.append(DeploymentPathMeta("ADomPToEthIf", "Interface", "cobra.model.l1.EthIf"))
def __init__(self, parentMoOrDn, name, markDirty=True, **creationProps):
namingVals = [name]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"bkhoward@live.com"
] | bkhoward@live.com |
20ac3af37c2096403f4ddadec0e36b4260828f98 | df59c7216f90b529ab54d0d0ec92d6b268701c36 | /tests/dice/operators/test_advantage_operator.py | c03eb749c17181e489b5884e9ddf98df024749c9 | [
"MIT"
] | permissive | extesla/dice-python | 98ddff658c56f4158e0829fe490f066fb108ba6c | d2ce0c59c5ff985759f18f280a82a8df79772daa | refs/heads/master | 2022-06-03T03:12:59.254841 | 2022-05-30T17:55:27 | 2022-05-30T17:55:27 | 52,057,843 | 0 | 1 | null | 2016-10-27T01:22:31 | 2016-02-19T03:14:00 | Python | UTF-8 | Python | false | false | 2,983 | py | # The MIT License (MIT)
#
# Copyright (c) 2016 Sean Quinn
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from dice.operators import Advantage
from dice.tokens import Dice
import pytest
def test_instantiate_advantage_operator():
operator = Advantage([5, 17])
assert operator.original_operands == ([5, 17],)
assert operator.operands == ([5, 17],)
def test_repr():
"""
Test that the string representation of the operator is what is
expected.
Given an instance of the Advantage operator on operands
When the method __repr__ is called
Then the result should be "Advantage"
"""
operator = Advantage([5, 17])
assert repr(operator) == "Advantage([5, 17])"
def test_advantage_function_when_choosing_from_empty_array():
operator = Advantage()
with pytest.raises(IndexError):
operator.function([])
def test_advantage_function_with_invalid_iterable():
operator = Advantage()
with pytest.raises(TypeError):
operator.function(1)
def test_advantage_function_with_no_iterable():
operator = Advantage()
with pytest.raises(TypeError):
operator.function(None)
def test_evaluate_advantage_with_single_value_in_scalar_array():
operator = Advantage([5, 17])
actual = operator.evaluate()
assert actual == 17
assert operator.result == 17
assert actual == operator.result
def test_evaluate_advantage_with_multiple_values_in_scalar_array():
operator = Advantage([13, 5, 17])
actual = operator.evaluate()
assert actual == 17
assert operator.result == 17
assert actual == operator.result
def test_evaluate_advantage_with_dice_token_value(mocker):
mock_random = mocker.patch("dice.tokens.mt_rand")
mock_random.side_effect = [5, 17]
dice_token = Dice(sides=20, rolls=2)
operator = Advantage(dice_token)
actual = operator.evaluate()
assert actual == 17
assert operator.result == 17
assert actual == operator.result
| [
"swquinn@gmail.com"
] | swquinn@gmail.com |
44f758bb7c8d4183146ac4198ba226b5ea1ab1a6 | ea515ab67b832dad3a9b69bef723bd9d918395e7 | /03_Implementacao/DataBase/true_or_false_question_while_and_for_cicles/question/version_2/answers_program.py | bce77d50a8726979edc4b446b00a9c0313e7c11d | [] | no_license | projeto-exercicios/Exercicios-Python-de-correccao-automatica | b52be3211e75d97cb55b6cdccdaa1d9f9d84f65b | a7c80ea2bec33296a3c2fbe4901ca509df4b1be6 | refs/heads/master | 2022-12-13T15:53:59.283232 | 2020-09-20T21:25:57 | 2020-09-20T21:25:57 | 295,470,320 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | answer_1_true = while_cicle(48)
answer_2_true = p
answer_3_true = print_indexes(69)
print(answer_1_true)
print(answer_2_true)
print(answer_3_true)
| [
"ruski@milo.com"
] | ruski@milo.com |
08e3afd279cb019ded71e39480f949b6f845e6d0 | 78eac5efb55400a07d6fc2a2893002e8bc25b54f | /05. The FOR loop.py | f3a1c872f1d11e8a3bd44da169b1f0a725ac98da | [
"MIT"
] | permissive | martaksx/UdemyPythonForDataScience | fc5f76a4faf2fefe7c8a6542a025366609c8f62b | 32069ca5fcc3532afbc021ecaf6459a2ec6214af | refs/heads/master | 2020-03-27T09:30:17.803314 | 2018-08-27T21:25:26 | 2018-08-27T21:25:26 | 146,346,949 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 435 | py |
# coding: utf-8
# In[1]:
counter = 1
while counter < 12:
print (counter)
counter = counter + 1
# ---
# In[2]:
for i in range(5):
print("Hello Python")
# ---
# In[3]:
range(5)
# In[4]:
list(range(5))
# In[5]:
for i in range(7):
print("Hello Python: ", i)
# In[6]:
#Another way
mylist = [10,100,1000]
# In[7]:
print (mylist)
# In[8]:
for jj in mylist:
print("jj is equal to: ", jj)
| [
"noreply@github.com"
] | noreply@github.com |
d6c6ae9b0e537df0cfeac2c3f9ea0d72304b2934 | 7f437365978a3e8701c81dfaa210cfb83666218e | /huffman_compressor.py | d02a66c9052670d1c169d20bb3bfa5e6bde2f14d | [] | no_license | haoccc/JPGCompression | 1ba0497c934615d9b949edf93dcef1585fbb9a52 | bab4c6c8224dd3379ea4dd9d17e6087e20a41a6b | refs/heads/master | 2023-03-16T11:51:09.122860 | 2020-07-28T03:32:54 | 2020-07-28T03:32:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,929 | py | """
Implementation of archivator using Huffman coding
works only with alphanumeric symbols, ASCII 0-255
"""
from collections import Counter
from queue import PriorityQueue
import os
class HuffmanNode:
def __init__(self, char, freq=0, left=None, right=None):
self.char = char
self.freq = freq
self.left = left
self.right = right
def __lt__(self, other):
return self.freq < other.freq
def encode(text):
"""Returns encoded string code with format [encoded_huffman_tree][extra_zeros_num][encoded_text]"""
frequencies = Counter(text)
queue = PriorityQueue()
code_table = {}
for char, f in frequencies.items():
queue.put(HuffmanNode(char, f))
# merge nodes
while queue.qsize() > 1:
l, r = queue.get(), queue.get()
queue.put(HuffmanNode(None, l.freq + r.freq, l, r))
huffman_tree = queue.get()
_fill_code_table(huffman_tree, "", code_table)
encoded_text_code = ""
for c in text:
encoded_text_code += code_table[c]
encoded_tree_code = _encode_huffman_tree(huffman_tree, "")
# add extra zeros, as in python it is not possible read
# file bit by bit (min byte) so extra zeros will be
# added automatically which cause a loss of information
num = 8 - (len(encoded_text_code) + len(encoded_tree_code)) % 8
if num != 0:
encoded_text_code = num * "0" + encoded_text_code
print(f"frequencies: {frequencies}")
print(f"encoded huffman tree code: {encoded_tree_code}")
print(f"encoded text code: {encoded_text_code}")
return f"{encoded_tree_code}{num:08b}{encoded_text_code}"
def decode(encoded_text):
"""Returns decoded string"""
encoded_text_ar = list(encoded_text)
encoded_tree = _decode_huffman_tree(encoded_text_ar)
# remove extra zeros
number_of_extra_0_bin = encoded_text_ar[:8]
encoded_text_ar = encoded_text_ar[8:]
number_of_extra_0 = int("".join(number_of_extra_0_bin), 2)
encoded_text_ar = encoded_text_ar[number_of_extra_0:]
# decode text
text = ""
current_node = encoded_tree
for char in encoded_text_ar:
current_node = current_node.left if char == '0' else current_node.right
if current_node.char is not None:
text += current_node.char
current_node = encoded_tree
return text
def decompress(input_path, output_path):
"""Save decoded text to output file"""
with open(input_path, "rb") as in_file, open(output_path, "w") as out_file:
encoded_text = ""
byte = in_file.read(1)
while len(byte) > 0:
encoded_text += f"{bin(ord(byte))[2:]:0>8}"
byte = in_file.read(1)
decoded_text = decode(encoded_text)
out_file.write(decoded_text)
def compress(input_path, output_path):
"""Save encoded text to output file"""
with open(input_path) as in_file, open(output_path, "wb") as out_file:
text = in_file.read()
encoded_text = encode(text)
b_arr = bytearray()
for i in range(0, len(encoded_text), 8):
b_arr.append(int(encoded_text[i:i+8], 2))
out_file.write(b_arr)
def _fill_code_table(node, code, code_table):
"""Fill code table, which has chars and corresponded codes"""
if node.char is not None:
code_table[node.char] = code
else:
_fill_code_table(node.left, code + "0", code_table)
_fill_code_table(node.right, code + "1", code_table)
def _encode_huffman_tree(node, tree_text):
"""Encode huffman tree to save it in the file"""
if node.char is not None:
tree_text += "1"
tree_text += f"{ord(node.char):08b}"
else:
tree_text += "0"
tree_text = _encode_huffman_tree(node.left, tree_text)
tree_text = _encode_huffman_tree(node.right, tree_text)
return tree_text
def _decode_huffman_tree(tree_code_ar):
"""Decoding huffman tree to be able to decode the encoded text"""
# need to delete each use bit as we don't know the length of it and
# can't separate it from the text code
code_bit = tree_code_ar[0]
del tree_code_ar[0]
if code_bit == "1":
char = ""
for _ in range(8):
char += tree_code_ar[0]
del tree_code_ar[0]
return HuffmanNode(chr(int(char, 2)))
return HuffmanNode(None, left=_decode_huffman_tree(tree_code_ar), right=_decode_huffman_tree(tree_code_ar))
def _print_ratio(input_path, output_path):
before_size = os.path.getsize(input_path)
after_size = os.path.getsize(output_path)
compression_percent = round(100 - after_size / before_size * 100, 1)
print(f"before: {before_size}bytes, after: {after_size}bytes, "
f"compression {compression_percent}%")
'''file_to_compress, decompressed, compressed = "image.txt", "decompressed.txt", "compressed.bin"
compress(file_to_compress, compressed)'''
| [
"noreply@github.com"
] | noreply@github.com |
be64af51a20cd47518f646958ef4f87f88915da1 | dbdea54c3fba47939a5f8a2a8135b0f2ade56cf6 | /LeetCode/pythonSols/Tree/binarySearchTreeIterator.py | fa1876d0c02a01a5e97fdcedd1087e6ce11ea576 | [] | no_license | abhitrip/scratchpad | 3bb76686616483f082f9120d1b82175d990a730b | 45e6ba66104bb43efcce39adc92a4904f50c605d | refs/heads/master | 2021-01-17T19:11:40.366951 | 2017-03-06T01:52:53 | 2017-03-06T01:52:53 | 65,786,746 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 937 | py | # Definition for a binary tree node
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class BSTIterator(object):
def __init__(self, root):
"""
:type root: TreeNode
"""
self.stk = []
while root!=None:
self.stk.append(root)
root = root.left
def hasNext(self):
"""
:rtype: bool
"""
return len(self.stk)!=0
def next(self):
"""
:rtype: int
"""
node = self.stk.pop()
if node.right!=None:
self.stk.append(node.right)
left = node.right.left
while left!=None:
self.stk.append(left)
left = left.left
return node.val
# Your BSTIterator will be called like this:
# i, v = BSTIterator(root), []
# while i.hasNext(): v.append(i.next())
| [
"atripath@eng.ucsd.edu"
] | atripath@eng.ucsd.edu |
196bd5b3ef54d44d683ca77d74119b504f386560 | 9481096eb3e6469a2190c94673e95484086e7e6b | /project8.py | 558b77e851c891d1f6898c3a7efc86079826f839 | [] | no_license | ArthurMelo9/100daysofCode | 806113d99e7a4579b620f9462e0dc2d7efa7d039 | 99440e3777ecb947b9eb8d2de22852bace965b93 | refs/heads/master | 2023-06-08T02:02:30.950615 | 2021-06-20T23:16:46 | 2021-06-20T23:16:46 | 378,754,917 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,796 | py | alphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
direction = input("Type 'encode' to encrypt, type 'decode' to decrypt:\n")
text = input("Type your message:\n").lower()
shift = int(input("Type the shift number:\n"))
def encrypt(plain_text, shift_amount):
cipher_text = ""
for letter in plain_text:
position = alphabet.index(letter)
new_position = position + shift_amount
cipher_text += alphabet[new_position]
print(f"The encoded text is {cipher_text}")
#TODO-1: Create a different function called 'decrypt' that takes the 'text' and 'shift' as inputs.
def decrypt (plain_text, shift_amount):
decrypt_text=""
for letter in plain_text:
position= alphabet.index(letter)
new_position= position - shift_amount
decrypt_text += alphabet[new_position]
print(f"The decrypted code is {decrypt_text}")
#TODO-2: Inside the 'decrypt' function, shift each letter of the 'text' *backwards* in the alphabet by the shift amount and print the decrypted text.
#e.g.
#cipher_text = "mjqqt"
#shift = 5
#plain_text = "hello"
#print output: "The decoded text is hello"
#TODO-3: Check if the user wanted to encrypt or decrypt the message by checking the 'direction' variable. Then call the correct function based on that 'drection' variable. You should be able to test the code to encrypt *AND* decrypt a message.
if direction =="encode":
encrypt(plain_text=text, shift_amount=shift)
elif direction == "decode":
decrypt(plain_text
=text, shift_amount=shift)
else:
print("Enter 'encode' or 'decode' to encrypt or decrypt your message.") | [
"arthurneuro7@gmail.com"
] | arthurneuro7@gmail.com |
9fc3bf3f4eee04e15d6b0b09d93ef4d0144d9c7b | 5f0e9f5a33b1b768f95afe22ab426e185b913c69 | /app/migrations/0002_imagen_imagencolor.py | a8ad6b3445442744f9931980a0458b61db390db3 | [] | no_license | ericksulca/b-b2RGB | 2c59b29ec50b16d288b2cbb83b6d32a375b30fae | 927ce588523a6e3c20e94593c3e5eeea98cd4926 | refs/heads/main | 2023-07-30T16:28:50.165068 | 2021-09-24T23:22:45 | 2021-09-24T23:22:45 | 409,697,208 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 446 | py | # Generated by Django 3.2.7 on 2021-09-24 17:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='imagen',
name='imagencolor',
field=models.ImageField(default='no-imagen.png', null=True, upload_to='', verbose_name='Fotografía a color'),
),
]
| [
"ejyp259@hotmail.com"
] | ejyp259@hotmail.com |
08215192ca88cabc81c9fd1342f9dfdf50821767 | 0ca811fefeba82b2a6ca8f6bf054aa2b7d150e0b | /code.py | db7b4958614cb7f7a439c38a4087df87d20c5550 | [] | no_license | PECNAS/Tokanomir | ba8b06c4ba25c2edafcabbe449ab5b24e021d2a4 | cd8e7c032b41c0a49660cf17111bcc7cc54010f2 | refs/heads/master | 2022-05-30T09:25:16.630157 | 2020-04-28T21:36:15 | 2020-04-28T21:36:15 | 259,051,695 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,066 | py | '''
Данная программа анализирует ТЕКСТОВУЮ базу данных с больными
Больные имеют пять параметров:
имя, класс, симптом, наличие прививки и дата поступления
Просто гусь
+-------------------------------------------------+
| ░░░░░▄▀▀▀▄░░░░░░░░░ |
| ▄███▀░◐░░░▌░░░░░░░ |
| ░░░░▌░░░░░▐░░░░░░░ |
| ░░░░▐░░░░░▐░░░░░░░ |
| ░░░░▌░░░░░▐▄▄░░░░░ |
| ░░░░▌░░░░▄▀▒▒▀▀▀▀▄ |
| ░░░▐░░░░▐▒▒▒▒▒▒▒▒▀▀▄ |
| ░░░▐░░░░▐▄▒▒▒▒▒▒▒▒▒▒▀▄ |
| ░░░░▀▄░░░░▀▄▒▒▒▒▒▒▒▒▒▒▀▄ |
| ░░░░░░▀▄▄▄▄▄█▄▄▄▄▄▄▄▄▄▄▄▀ |
| ░░░░░░░░░░░▌▌░▌▌░░░░░ |
| ░░░░░░░░░░░▌▌░▌▌░░░░░ |
| ░░░░░░░░░▄▄▌▌▄▌▌░░░░░ |
+-------------------------------------------------+
Выполненные бонусы:
—При запуске с аргументом -color заболевшие с прививкой выводятся зелёным цветом, без прививки — красным
—В самом начале программы создаётся новая база данных, с только ДЕЙСТВИТЕЛЬНЫМИ датами. Программа работает с новой базой данных
Добавлено:
—Отключение и включение расцветки в меню
—Поиск больных по двум параметрам(третий метод поиска)
Исправить:
—Коментарии <<<ИСПРАВЛЕНО>>>
—Добавить help. ОБЯЗАТЕЛЬНО! <<<ИСПРАВЛЕНО>>>
—Исправить olor <<<ИСПРАВЛЕНО>>>
—Убрать тестовый файл при заливе...
—Переделать под словарь(картинка dictionary_for_animals.png)
—Сделать метод split(";") для строк 80-85
—Исправить действительность дат
—Поменять termcolor на Escape выражение '/033[32mТЕКСТЕКСТЕКСТ[0m' для цвета <<<ИСПРАВЛЕНО>>>
—
'''
import sys
import argparse
from information.help import get_help
from datetime import datetime
from termcolor import colored
time_now = datetime.now() # записываем, сколько сейчас времени
color = False
parser = argparse.ArgumentParser() #создаём образ парсера
parser.add_argument('-c', '--color', action="store_true") # добавляем необязательный аргумент
parser.add_argument('-gh', '--get_help', action="store_true") # добавляем необязательный аргумент
args = parser.parse_args()
if args.color == "--color": # проверяем аргумент
color = True
elif args.get_help == True:
get_help()
print("Идёт сканирование базы данных, это может занять некоторое время")
database = open("database.txt", "r") # здесь мы открываем файл с параметром чтения
array = [i for i in database.readlines()] # записываю генератор для записи всего из файла базы данных в массив
names = [] # здесь я создаю массив для имен
types = [] # здесь я создаю массив для типа
diseases = [] # здесь я создаю массив для симптома
vaccination = [] # здесь я создаю массив для вакцинирования
arrival_date = [] # здесь я создаю массив для даты поступления
exceptions = [] # здесь я создаю массив для исключений. Тут будут находится индексы тех больных, дата которых недействительная
positive_vaccination = ["имеют прививку", "с прививкой", # здесь я создаю массив для слов, вариаций которых может быть множество
"вакцинированы", "наличие прививки", "прививка"]
negative_vaccination = ["без прививки", "не имеют прививки", # здесь я создаю массив для слов, вариаций которых может быть множество
"не вакцинированы"]
variations_diseases = ["симптом", "болезнь", "диагноз"]
def insert():
for db in range(len(array)): # с помощью цикла, который повториться ровно столько раз, какова длина массива
find_name = array[db].find(";") # записываю в переменную индекс того места, до которого нам нужно брать информацию
find_type = array[db].find(";", find_name + 1) # записываю в переменную индекс того места, до которого нам нужно брать информацию и прибавляю один для того, что бы не брать точку с запятой
find_diseases = array[db].find(";", find_type + 1) # записываю в переменную индекс того места, до которого нам нужно брать информацию и прибавляю один для того, что бы не брать точку с запятой
find_vaccination = array[db].find(";", find_diseases + 1)#записываю в переменную индекс того места,до которого нам нужно брать информацию и прибавляю один для того, что бы не брать точку с запятой
find_day = array[db].find(".") # записываю в переменную индекс точки, которая разделяет дату поступления на дни, месяца и года. Берём первую точку
find_month = array[db].rfind(".") # записываю в переменную индекс точки, которая разделяет дату поступления на дни, месяца и года. Берём вторую точку
names.append(array[db][:find_name]) # добавляю в массив имен имена
types.append(array[db][find_name + 1:find_type]) # добляю в массив типа тип
diseases.append(array[db][find_type + 1:find_diseases]) # добавляю в массив симптома симптом
vaccination.append(array[db][find_diseases + 1:find_vaccination]) # добавляю в массив вакцинирования вакцинирован ли
try: # ищем февральские дни
data = datetime(int(array[db][find_month + 1:-1]), int(array[db][find_day + 1:find_month]), int(array[db][find_vaccination + 1:find_day])) # в переменную дата записываем дату больного
if data <= time_now: # если дата действительная
arrival_date.append(array[db][find_vaccination + 1:-1]) # добавляю в массив даты поступления дату поступления
elif data > time_now: # если дата недействительная
exceptions.append(db) # добавляем индекс в массив с исключениями
except ValueError: # Если нашли неверный дни в феврале
exceptions.append(db) # добавляем индекс в массив с исключениями
def color_set(index): # функция определения цвета
if args.color == True:
if vaccination[index].lower() == "да": # если в массиве в срезе содержится да
return 33 # то возвращаем зелёный цвет
else: # если в массиве в срезе содержится нет
return 31 # то возвращаем красный цвет
else: # если аргумент -color не указан
return 0 # то ставим белый цвет
def first(): # первый метод поиска
def print_value(conclusion, val):
if conclusion.lower() == "имя":
print(f"\033[{color_set(val)}m{names[val].capitalize()}\033[0m") # вывожу имена и сортирую их атрибутом end="\t"
elif conclusion.lower() == "класс":
print(f"\033[{color_set(val)}m{types[val].capitalize()}\033[0m") # то же самое, что и в прошлый раз
elif conclusion.lower() in variations_diseases:
print(f"\033[{color_set(val)}m{diseases[val].capitalize()}\033[0m") # то же самое, что и в прошлый раз
elif conclusion.lower() in positive_vaccination:
print(f"\033[{color_set(val)}m{vaccination[val].capitalize()}\033[0m") # то же самое, что и в прошлый раз
elif conclusion.lower() == "дата":
print(f"\033[{color_set(val)}m{arrival_date[val].capitalize()}\033[0m") # то же самое, что и в прошлый раз
elif conclusion.lower() == "другой": # проверка для выбора другого номера
ask_first() # вызываем функцию
elif conclusion.lower() == "стоп": # проверяем на остановку
sys.exit() # заканчиваем исполнение прогрммы
elif conclusion.lower() == "всё": # проверяем на вывод всех показателей
print(f"\033[{color_set(val)}m{array[val].replace(';', ', ').capitalize()}\033[0m") # выводим все показатели с заменой точки с зпаятой на запятую
elif conclusion.lower() == "методы": # вызываем стартовое меню для смены методы поиска
choice() # вызываем функцию
else:
print("Введён неверный параметр!\nПожалуйста введите параметр из существующих.\n'Имя', 'класс', 'симптом', 'наличие прививки', 'дата'\nВв" +
"едите слово 'стоп', для остановки\nДля смены больного введите 'Другой'\nТак же можете использовать комманду 'всё' для вывода " +
"всей информации о больном\nКоммманда 'методы' позволит сменить метод поиска больных ") # это сообщение об ошибке
def ask_first():
val = input("Введите номер больного: ")
if val != "":
if val[0] in "0123456789": #если номер больного это цифра
try: # начинаем ловить исключения
val = int(val) # переменной val ставим тип integer
while True: # бесконечный цикл
if 0 <= val <= (len(names) - 1): # если номер больного введён правильно
conclusion = input("Введите параметр: ") # то спрашиваем желаемый параметр у пользователя
print_value(conclusion, val) # вызываем функцию
else: # если значение введено неверно
print("В нашей базе данных нет больного с таким номером!\nВсего заболевших " + str(len(names) - 1)) # выводим максимум больных и сообщение об ошибке
ask_first() # вызываем функцию заново
except ValueError: # если поймали исключение о вводе строки
print("Введён неверный тип данных") # то выводим сообщение об ошибке
ask_first() # и вызываем функцию заново
elif val == "стоп": # однако если val равен стоп
sys.exit() # останавливаем программу
elif val.lower() == "методы":
choice()
else:
print("Вы ввели неверный тип данных, нужно ввести нумерацию больного!\n") # Выдаём сообщение об ошибке
ask_first() # заново вызываем функцию
else:
print("Это поле является обязательным для ввода!")
ask_first()
ask_first() # запускаем функцию
def second(): # второй метод поиска
def find_all_with_parametr(val): # функция вывода всех совпадающих значение
count = 0 # создаём переменную счёта
for i in range(len(array)): # циклом пробегаемся по массиву столько раз, какова длина массива
if val in array[i]: # если введенное значение имеется в строке массива
print(f"\033[{color_set(i)}m{str(i) + ') ' + array[i].replace(';', ', ').capitalize()}\033[0m") # вывести строку и заменить все точки с запятой на запятый
count += 1
print("Всего насчитано " + str(count) + " больных с такими показателями")
ask_second() # вызываем функцию снова
def ask_second():
val = str(input("Введите значение, по которому хотите найти больных: ")) # принимаем значение от пользователя
if (val in names) or (val in types) or (val in diseases) or (val in vaccination) or (val in arrival_date): # проверка на то, есть ли данное значение в массивах
if val.lower()[0] in "абвгдеёжзийклмнопрстуфхцчшщъыьэюя1234567890.": # если запрос не содержит лишние символы
find_all_with_parametr(val) # тогда вызываем функцию
else:
print("Введённые вами данные не существуют!\n") # Выдаём сообщение об ошибке
find_all_with_parametr(val) # заново вызываем функцию
elif val.lower() in positive_vaccination:
find_all_with_parametr("Да")
elif val.lower() in negative_vaccination:
find_all_with_parametr("Нет")
elif val.lower() == "стоп":
sys.exit()
elif val.lower() == "методы":
choice()
else: # если значения в массиве нет
print("Извините, введённое вами значение не найдено ни в одном списке\nВыберите другое значение") # выдаём сообщение об ошибке
ask_second() # заново вызываем функцию
ask_second() # запуск
def third():
def check_with_two_parametrs(first_arg, second_arg):
count = 0
for c in array:
if first_arg in c and second_arg in c:
print(f'\033[{color_set(array.index(c))}m{c.replace(";", ", ")}\033[0m')
count += 1
print("Всего насчитано " + str(count) + " больных с такими показателями")
ask_third()
def ask_third():
val = str(input("Введите значение, по которому хотите найти больных: ")) # принимаем значение от пользователя
separator = val.find(",") # задаём переменной разделитель
space = separator + 1 # задаём переменной значение разделителя плюс один, что бы не брать в учёт запятую
if val[separator + 1] == " ": # если после запятой стоит пробел
space = separator + 2 # мы задаём перменной значение разделителя плюс два, что бы не брать запятую с пробелом
first_arg, second_arg = val[:separator], val[space:] # тут в действие идёт магия питона
if (first_arg in names) or (first_arg in types) or (first_arg in diseases) or (first_arg in vaccination) or (first_arg in arrival_date): # проверка на то, есть ли первое значение в массивах
if (second_arg in names) or (second_arg in types) or (second_arg in diseases) or (second_arg in vaccination) or (second_arg in arrival_date): # проверка на то, есть ли первое значение в массивах
if val.lower()[0] in "абвгдеёжзийклмнопрстуфхцчшщъыьэюя1234567890.": # если запрос не содержит лишние символы
check_with_two_parametrs(first_arg, second_arg) # тогда вызываем функцию
elif first_arg == "" or second_arg == "": # если один из один из параметров пойска был пустой
print("Вы обязательно должны ввести оба значения") # выводим сообщение об ошибке
ask_third() # заново вызываем функцию вопросы
else:
print("Введённые вами данные не существуют!\n") # Выдаём сообщение об ошибке
ask_third() # заново вызываем функцию
elif second_arg.lower() in positive_vaccination: # тут мы облегчаем синтаксис
check_with_two_parametrs(first_arg, "Да") # тут мы облегчаем синтаксис
elif second_arg.lower() in negative_vaccination:# тут мы облегчаем синтаксис
check_with_two_parametrs(first_arg, "Нет")# тут мы облегчаем синтаксис
else:
print("Второе введённое значение не найдено!\nВыберите другое значение") # выводим сообщение об ошибке
ask_third() # вызываем функцию выбора снова
elif first_arg == "" or second_arg == "": # если один из аргументов пустой
print("Вы обязательно должны ввести оба значения") # выводим сообщение об ошибке
ask_third() # вызываем функцию выбора снова
elif first_arg.lower() in positive_vaccination: # тут мы облегчаем синтаксис
check_with_two_parametrs("Да", second_arg) # тут мы облегчаем синтаксис
elif first_arg.lower() in negative_vaccination: # тут мы облегчаем синтаксис
check_with_two_parametrs("Нет", second_arg) # тут мы облегчаем синтаксис
elif val.lower() == "стоп": # проверяем на остановку программы
sys.exit() # завершаем программу
elif val.lower() == "методы": # проверяем на смену метода поиска
choice() # вызываем функцию поиска
else: # если значения в массиве нет
print("Первое введённое значение не найдено\nВыберите другое значение") # выдаём сообщение об ошибке
ask_third() # заново вызываем функцию
ask_third()
def choice(): # функция выбора методов поиска
select = input("Какую функцию поиска запустить(первая, вторая, третья)?\n") # спрашиваем пользователя
if select.lower() == "первая": # если первый метод, то вызываем функцию первого метода
first()
elif select.lower() == "вторая": # то же самое
second()
elif select.lower() == "третья":
print("Введите два значения через запятую")
third()
elif select.lower() == "стоп": # то же самое
sys.exit()
elif select.lower() == "отключить расцветку":
global color
color = False
print("Расцветка выключена")
choice()
elif select.lower() == "включить расцветку":
color = True
print("Расцветка включена")
choice()
else:
print("Функции с такиим номером не существует!\nВведите 'первая', 'вторая' или 'стоп'.\nДля отключения расцветки пропишите 'отключить расцветку'\nДля включения подсветки" +
" пропишите 'включить расцветку'") # выввести сообщение об ошибке
choice() # заново вызываем функцию
insert()# запуск программы, входная точка
new_database = open("new_database.txt", "w") # открываем открываем файл новой базы данных на ЗАПИСЬ
counter = len(array)
names = [] # здесь я обнуляю массив для имен, для того, что бы заполнить файл новыми значениями
types = [] # здесь я обнуляю массив для типа, для того, что бы заполнить файл новыми значениями
diseases = [] # здесь я обнуляю массив для симптома, для того, что бы заполнить файл новыми значениями
vaccination = [] # здесь я обнуляю массив для вакцинирования, для того, что бы заполнить файл новыми значениями
arrival_date = [] # здесь я обнуляю массив для даты поступления, для того, что бы заполнить файл новыми значениями
for new in range(counter):
if new in exceptions: # проверяем нет ли индекса в списке с исключениями
pass # если есть, то просто игнорируем его
else: # если его нет в списке с исключениями
base_data = array[new] # создаём переменную
new_database.write(base_data) # записываем в файл
new_database.close() # закрываем новую базу данных
new_database = open("new_database.txt", "r") # открываем новую базу данных на ЧТЕНИЕ
array = [n for n in new_database.readlines()] # переназначаем список на новую базу данных
new_database.close() # закрываем новую базу данных
database.close() # закрываем базу данных
insert() # функция заполнения списков
choice() # функция выбора методов поиска | [
"busovrm4@gmail.com"
] | busovrm4@gmail.com |
daa468830747de562fd243d8d431e43330be2146 | 823538725626b8b3da48d9407f703f72f8ee591f | /t628nn/implementation/cl_layers/__init__.py | 663d55c131bb95b51c6fcb765842323a23edc2a6 | [] | no_license | Denise-Li/t628nn | 63cd2aac869e48f0fddbbab6ff7f9ce74593d6f4 | d8ecd82bc6a42022974f7d5069ab96c8cf00c67a | refs/heads/master | 2020-09-05T05:03:11.147912 | 2019-11-09T22:07:10 | 2019-11-09T22:07:10 | 219,990,357 | 0 | 0 | null | 2019-11-07T14:11:56 | 2019-11-06T12:21:22 | null | UTF-8 | Python | false | false | 143 | py | from .MaxPoolLayer import MaxPoolLayer
from .ConvolutionalLayer import ConvolutionalLayer
from .FullyConnectedLayer import FullyConnectedLayer
| [
"2387729l@student.gla.ac.uk"
] | 2387729l@student.gla.ac.uk |
651744644f01fcbc9afa85b5da4940fa6350128a | de46832c4fdafae716906e6b25bc1121646b7d03 | /Python_Code/helloworld.py | 7a5e6e25bd55d8de86eca7c087667da1ed7743f0 | [] | no_license | Tessu/HarjoitusRep | da63a1db07277eb85054870b5075c7d932d4a133 | 974427f68cd0ab338741d9edf4bc7d563c81b5b3 | refs/heads/master | 2020-08-03T16:55:04.150888 | 2017-04-28T12:25:32 | 2017-04-28T12:25:32 | 73,542,162 | 0 | 0 | null | 2016-11-17T10:49:43 | 2016-11-12T08:54:47 | Python | UTF-8 | Python | false | false | 62 | py | print "Hello World!"
hello_world = "Hello"
print hello_world | [
"henrithessler@gmail.com"
] | henrithessler@gmail.com |
8aa4fdca170152b6545b4c99a900518c6cd068a9 | b892b3141219973088a9642f830db5284c20471a | /vitaa/solver/__init__.py | 613306339f9cf287c9ff2b469fc0f8e0f7dd719c | [] | no_license | Galaxfy/ViTAA | 60f43c120db48d2824e7d8dbc3614e2e1c8f0cf2 | 0bd7638f07131035ba88c7fb7e115feebc57fd2f | refs/heads/master | 2023-03-24T08:39:41.482237 | 2020-08-27T15:18:14 | 2020-08-27T15:18:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from .build import make_optimizer
from .build import make_lr_scheduler
from .lr_scheduler import LRSchedulerWithWarmup
| [
"wangzhewz@buaa.edu.cn"
] | wangzhewz@buaa.edu.cn |
33ab37d36bf060e9503eccc0a0f4e85f69338e95 | 4b27d461266c52e8c8855728b572afa9b6db4be3 | /mysite/settings.py | 77398e8ce7a9a5829481a4faa4dde3b3e02c2d71 | [] | no_license | Netekss/Django-shop-manager | 88bfb7e5507b9ca2077abc12c216f2f337966806 | b35e6f37d7f9bd699ccb7a27af34e90f6d80860b | refs/heads/master | 2023-02-28T01:37:38.754717 | 2021-01-26T09:45:04 | 2021-01-26T09:45:04 | 315,080,285 | 1 | 2 | null | 2021-01-26T09:45:05 | 2020-11-22T16:22:32 | HTML | UTF-8 | Python | false | false | 3,115 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'i*vwr^pss#n06ljg8^&2ibetq)78+4@xz9^w$7qd-i7y)=x&oy'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'warehouse',
'order',
'owner',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Warsaw'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| [
"Jakubnetkowski29@gmail.com"
] | Jakubnetkowski29@gmail.com |
58bf69943a02a438a32fc8d8965ddc47fcd1eb4e | ad6ea02c93cea570d9487f1f8b99b00acabe6abb | /docs/architecture/build/architecture_one_shard.py | dc785906ba7cf0367b72e796c5d5494c753bbe98 | [] | permissive | talview/jitsi-deployment | 47942ca3910cfda236ea0eface49b24496057ade | a2ddd8639bca3e4c812ce283e82551d781952cbe | refs/heads/develop | 2023-04-29T03:23:19.251191 | 2020-06-15T12:29:23 | 2020-06-15T12:29:23 | 367,215,735 | 0 | 0 | MIT | 2021-05-14T01:18:38 | 2021-05-14T01:18:37 | null | UTF-8 | Python | false | false | 2,349 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from diagrams import Diagram, Cluster
from diagrams.custom import Custom
from diagrams.k8s.clusterconfig import HPA
from diagrams.k8s.compute import Deployment, Pod, StatefulSet
from diagrams.k8s.network import Ingress, Service
globe_img = "resources/globe.png"
graph_attr = {
"pad": "0.5"
}
with Diagram(filename="jitsi_meet_one_shard", direction='TB', show=False, outformat='png', graph_attr=graph_attr):
with Cluster("Conference 1"):
users_1 = [Custom("user", globe_img) for _ in range(3)]
with Cluster("Conference 2"):
users_2 = [Custom("user", globe_img) for _ in range(2)]
all_users = Custom("all users", globe_img)
with Cluster("Namespace 'jitsi'"):
n_shards = 1
n_haproxy = 2
haproxy_sts = StatefulSet("haproxy")
haproxy_pods = [Pod(f"haproxy-{j}") for j in range(n_haproxy)]
haproxy_sts >> haproxy_pods
web_service = Service("web")
ingress = Ingress("jitsi.messenger.schule")
ingress >> Service("haproxy") >> haproxy_pods >> web_service
for k in range(n_shards):
with Cluster(f"Shard-{k}"):
web_pod = Pod(f"shard-{k}-web")
prosody_pod = Pod(f"shard-{k}-prosody")
jicofo_pod = Pod(f"shard-{k}-jicofo")
Deployment(f"shard-{k}-prosody") >> prosody_pod
Deployment(f"shard-{k}-jicofo") >> jicofo_pod
web_service >> web_pod
prosody_service = Service(f"shard-{k}-prosody")
prosody_service >> prosody_pod
prosody_service << web_pod
prosody_service << jicofo_pod
n_jvbs = 3
with Cluster(f"Jitsi Videobridge Shard-{k}"):
jvb_pods = [Pod(f"shard-{k}-jvb-{i}") for i in range(n_jvbs)]
jvb_services = [Service(f"shard-{k}-jvb-{i}") for i in range(n_jvbs)]
[jvb_services[i] >> jvb_pods[i] >> prosody_service for i in range(n_jvbs)]
jvb_pods << StatefulSet(f"shard-{k}-jvb") << HPA(f"shard-{k}-hpa")
if k == 0:
users_1 >> jvb_services[0]
users_2 >> jvb_services[1]
all_users >> ingress
| [
"maximilian.kertel@woodmark.de"
] | maximilian.kertel@woodmark.de |
559142186ed45fbbaffffb507ad1b88430ecdd8f | 74b1cc170d107dc5fd48a8f01b90931f61c0c58a | /attrib.py | 444c1be006c0bb7dd53ef3bb75391d126d6648c6 | [] | no_license | narenaryan/Python-GIS | b980a7de044d9c34a2c2946576070ebc073969ac | b45be225a5e25b6fd2a4bb25f7527b3dab8e28c1 | refs/heads/master | 2020-08-27T05:57:32.362519 | 2014-12-07T13:28:28 | 2014-12-07T13:28:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 602 | py | #Get attributes of a state according to Feature no
from proj1 import layer_feature_name,shapefile
layer = shapefile.GetLayer(0)
maps = layer_feature_name(layer)
for i in maps.items():
print 'Feature ',i[0],': ',i[1]
choice = int(raw_input("Enter Feature No: "))
print '\nFeature %d has following attributes'%choice
for k,v in layer.GetFeature(choice).items().items():
s = "||||%s||||%s||||"%(k,v)
print '\n',s
print len(s)*'-'
feature = layer.GetFeature(choice)
geometry = feature.GetGeometryRef()
name = geometry.GetGeometryName()
print "\nFeature's geometry data consists of a %s" % name
| [
"narenarya@live.com"
] | narenarya@live.com |
1171adb7fde817ce921d1b7439dc217dbbc905c3 | 74992d8406107d0ebae68813660f8082dd50bf21 | /users/urls.py | 5426c0a8f31b2461d8628e86b8513ee97458fc10 | [] | no_license | Ethan009/python-Learning-Log | a36896c9ac66bcbe43e9f1e8d2c88c16853216e4 | 36e16f75c4990d6df2282d41daa333dd02dc979d | refs/heads/master | 2020-04-08T11:18:41.883959 | 2018-11-27T08:33:20 | 2018-11-27T08:33:20 | 159,301,023 | 1 | 1 | null | 2019-01-19T06:10:58 | 2018-11-27T08:26:09 | Python | UTF-8 | Python | false | false | 309 | py | from django.conf.urls import url
from django.contrib.auth.views import login
from . import views
urlpatterns = [
url(r'^login/$',login,{'template_name' : 'users/login.html'} , name='login'),
url(r'^logout/$',views.logout_view,name='logout'),
url(r'^register/$',views.register,name='register'),
] | [
"ethan.lv@feixitek.com"
] | ethan.lv@feixitek.com |
3a327dde0a2902cdb8b0486ca2a1b70db57f920e | 05e65c5057cc07a3abbbb4c5fe3bb1b9ee70ea0a | /main.py | f147d00de6cd6a15ed242d659ed9209e624a9ceb | [] | no_license | VProgramMist/sshunter | a216edbbbb7ee957fd580c2f91329b1c933f6879 | 89a699d4023817f9c2285ff9c5a2717d22a54524 | refs/heads/main | 2023-05-09T02:24:29.870130 | 2021-05-22T16:12:39 | 2021-05-22T16:12:39 | 369,852,090 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 350 | py | from flask_login import login_required, current_user
from flask import Blueprint, render_template, redirect
main = Blueprint('main', __name__)
@main.route('/')
def index():
return redirect('signup')
@main.route('/profile')
@login_required
def profile():
return render_template('auth/profile.html', name=current_user.name)
| [
"noreply@github.com"
] | noreply@github.com |
c4c3379cb706b2d7acd0e06406bc415238457867 | 8b08da919a615acf07f19ff835952aed6ad3b897 | /ml_hw2/bin/f2py | 64b827dd5ed4221a4ba4b2ff63e3e678696cd2d8 | [] | no_license | shailchokshi1992/NaiveBayes | ade7722050d8b2abb65f87fdcd6e4a174a9708a9 | 06bf338c64a8d0623a738c9e519f1cce970af31b | refs/heads/master | 2022-12-12T07:10:09.915762 | 2018-02-02T17:49:59 | 2018-02-02T17:49:59 | 108,792,943 | 0 | 1 | null | 2022-11-28T07:29:59 | 2017-10-30T02:39:17 | Python | UTF-8 | Python | false | false | 786 | #!/home/shail/Desktop/ML/HW2/ml_hw2/bin/python
# See http://cens.ioc.ee/projects/f2py2e/
from __future__ import division, print_function
import os
import sys
for mode in ["g3-numpy", "2e-numeric", "2e-numarray", "2e-numpy"]:
try:
i = sys.argv.index("--" + mode)
del sys.argv[i]
break
except ValueError:
pass
os.environ["NO_SCIPY_IMPORT"] = "f2py"
if mode == "g3-numpy":
sys.stderr.write("G3 f2py support is not implemented, yet.\\n")
sys.exit(1)
elif mode == "2e-numeric":
from f2py2e import main
elif mode == "2e-numarray":
sys.argv.append("-DNUMARRAY")
from f2py2e import main
elif mode == "2e-numpy":
from numpy.f2py import main
else:
sys.stderr.write("Unknown mode: " + repr(mode) + "\\n")
sys.exit(1)
main()
| [
"chokshishail@gmail.com"
] | chokshishail@gmail.com | |
9bf8c88b2d724bdddded87831f0850574c0aa119 | 81f777cd72ce7753d1a292a147c3fce694560524 | /fib.py | 0fe888e3a6ba8d916b3a177ae96c2c189ce4915c | [] | no_license | ajuse/python3_learn_note | 1a9d2e0aaa2b6a9eb4dcc2927bf7b5ee59f50e99 | cacc9500699efd45cedf8148f37a5e9d670d8c96 | refs/heads/master | 2020-04-15T12:33:44.078132 | 2019-06-10T11:53:33 | 2019-06-10T11:53:33 | 164,679,925 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 213 | py | # coding=utf-8
def fib(num):
result = []
a, b = 0, 1
while num > 0:
result.append(a)
a, b = b, a + b
num = num - 1
print(result)
if __name__ == "__main__":
fib(15)
| [
"luoyunfu.1218@gmail.com"
] | luoyunfu.1218@gmail.com |
5c7e955a24c163cb331b64dfbc66e5db51d8b25a | 3b334e9ac96ba3b975f427e84d2f981cacd884ff | /common/menu.py | 9fd040c06646885131581851131788ed0df8dec3 | [] | no_license | HminiL/flask-madird-titanic-docker | 38a4d797501a252dbe1579e9e53e6f70f3ffc0a9 | 3670792f41427d22f5f2e10e4f7acbe8fa86b845 | refs/heads/main | 2023-08-11T03:41:33.002213 | 2021-09-24T03:25:04 | 2021-09-24T03:25:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 123 | py | def print_menu(ls):
t = ''
for i, j in enumerate(ls):
t += str(i) + '-' + j + '\t'
return int(input(t)) | [
"hmin10580@gmail.com"
] | hmin10580@gmail.com |
fd0eeacd7c197199c2a694868d6d5ea34ded1d6a | 6b5709ef2b69047c4a2e3c58a1fed6845bd67111 | /leet_code/10_regular_expression_matching/solutions.py | ba77b7802836c7a4052afbaa4df50df0ac680e23 | [] | no_license | aasawaree2/leetcode_python | 4fd0184a7469ff598578431a82a493b2aeaa5ab2 | 28c2b0c824ac1c61c115544c12e796574a9b5c48 | refs/heads/master | 2020-04-19T17:59:47.281745 | 2019-07-22T21:15:18 | 2019-07-22T21:15:18 | 168,350,517 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 98 | py | class Solution:
pass
def main():
s = Solution()
if __name__ == '__main__':
main()
| [
"aasawaree.21090@gmail.com"
] | aasawaree.21090@gmail.com |
a5aede7a61d80ba0b1cb090a5132fab906576547 | 713a2579e8474d181e3c86c79e618accbd8fe75b | /UdemyPythonCertCourse/Machines/vehicle_stuff.py | a2889010325b802adb430f8546bba003bf09d8f4 | [] | no_license | shulme801/Python101 | a3efd4c8577a35697acd5328fa55528227351bcf | 3480773f2ab2d4f944e48bd669be2156890b5e28 | refs/heads/master | 2023-03-11T20:51:10.745992 | 2021-02-13T17:08:29 | 2021-02-13T17:08:29 | 198,089,908 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 613 | py | class Vehicle:
# this variable should not be changed outside of the class specification.
# this is a class attribut
vehicle_counter = 0
def __init__ (self, body_type, make):
self.vehicle_body = body_type
self.vehicle_make = make
Vehicle.vehicle_counter += 1
def get_vehicle_count(self):
return Vehicle.vehicle_counter
def drive(self):
print("Vehicle driving...")
class Truck(Vehicle):
def drive(self):
print("truck driving...")
class Motorcycle(Vehicle):
def drive(self):
print("Motorcycle driving very fast...")
| [
"shulme801@gmail.com"
] | shulme801@gmail.com |
fd8fd1f9cb3b2413c38a13369c6f285a47e1db9c | e63b09d912f15d753386426ef1467a665fb84276 | /Lab_Asmt_10/Source Code/RetrainInceptionFinalLayer/label_image.py | 84ebefc43ceae37ffcb09802c44b0e0a3c0debe5 | [] | no_license | Lavakumar90/BigDataApplications | 8d7e12c76798d071fbe8ffe56b4530fad522c0de | a884418c223ecfb1940a47d13c66a21aeba11b7b | refs/heads/master | 2020-04-05T12:12:37.266566 | 2017-09-26T01:57:38 | 2017-09-26T01:57:38 | 81,053,633 | 0 | 0 | null | 2017-07-28T15:01:49 | 2017-02-06T06:07:10 | null | UTF-8 | Python | false | false | 1,175 | py | import tensorflow as tf, sys
#image_path = sys.argv[1]
image_path = 'data/car_photos/sports/images (1).jpeg'
# image_path = '676728-bigthumbnail.jpg'
# Read in the image_data
image_data = tf.gfile.FastGFile(image_path, 'rb').read()
# Loads label file, strips off carriage return
label_lines = [line.rstrip() for line
in tf.gfile.GFile("data/output_labels.txt")]
# Unpersists graph from file
with tf.gfile.FastGFile("data/output_graph.pb", 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='')
with tf.Session() as sess:
# Feed the image_data as input to the graph and get first prediction
softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')
predictions = sess.run(softmax_tensor, \
{'DecodeJpeg/contents:0': image_data})
# Sort to show labels of first prediction in order of confidence
top_k = predictions[0].argsort()[-len(predictions[0]):][::-1]
for node_id in top_k:
human_string = label_lines[node_id]
score = predictions[0][node_id]
print('%s (score = %.5f)' % (human_string, score)) | [
"lavasurparaju@gmail.com"
] | lavasurparaju@gmail.com |
d22510d282ed3e0b33f8d3e501117b4b8527cca0 | 91438802ee114b2fb945aae4105a17993dd6953d | /build/learning_ros_noetic/Part_5/ur10_robot/ur_traj_client/catkin_generated/pkg.installspace.context.pc.py | 4807c4137df7658f74a42609d61315e95299f603 | [] | no_license | AlexLam616/Baxter-robot | 3a4cef31fe46da0fdb23c0e3b5808d84b412d037 | d10fdcd35f29427ca14bb75f14fa9c64af3b028c | refs/heads/master | 2023-05-12T01:25:56.454549 | 2021-05-25T02:02:09 | 2021-05-25T02:02:09 | 367,070,028 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 421 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;actionlib;trajectory_msgs;control_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "ur_traj_client"
PROJECT_SPACE_DIR = "/home/alex/workspace/install"
PROJECT_VERSION = "0.0.0"
| [
"1155135145@link.cuhk.edu.hk"
] | 1155135145@link.cuhk.edu.hk |
7e11aa95bc1e4a542925e169fb76e98a6c626988 | 35252c5ccf86022e4b08ea40dee1af2d68149687 | /inventory_management/urls.py | 776ebbfaf7b08bae038f93eae2074e2d2d44a4dd | [] | no_license | Coderknight439/inventory_management | e44ea1f091c9900c032c0a0df2ede6bf86759893 | f990c0af22b213b6517e34ea051c7db408f1c2be | refs/heads/master | 2023-07-29T01:49:49.112962 | 2021-09-17T22:29:41 | 2021-09-17T22:29:41 | 406,515,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,368 | py | """inventory_managemnt URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
# path('admin/', admin.site.urls),
path('', include('home.urls')),
path('inventories/', include('inventories.urls')),
path('products/', include('products.urls')),
path('vendors/', include('vendor.urls')),
path('accounts/', include('django.contrib.auth.urls')),
path('purchase_order/', include('purchase_orders.urls')),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
admin.site.site_header = "Cosmetic Shop Admin"
admin.site.site_title = "Cosmetic Shop Admin Site"
admin.site.index_title = "Cosmetic Shop Admin" | [
"mahadi.hasan@divine-it.net"
] | mahadi.hasan@divine-it.net |
e92860ed96f4a2b3dea5c060bfdeb1f66ed08a37 | 5f1083c23f5163ad274d7690a597cb995ca88ec3 | /application/api/stage_regularity_paint_api.py | bb85876bcc779fc2336eced26ebf55b05957e460 | [] | no_license | lvwanyou/Wisdom_Mattress | f7af4236a4afe4d42651ae4dc840779a8732fb62 | abf92d6ba2791e768d6b59d594c4af652d76fe10 | refs/heads/master | 2021-09-10T08:35:58.000379 | 2018-03-23T02:03:42 | 2018-03-23T02:03:42 | 116,142,977 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,542 | py | # -*- coding: utf-8 -*-
import falcon
import json
from application.analyser import Analyser
from application.analysis.stage_regularity_analyser import StageRegularityAnalyser
class StageRegularityPaintApi:
    """Falcon resource that serves sleep-stage regularity data for plotting."""

    def __init__(self, db):
        self.db = db
        self.analyser = Analyser(self.db)
        self.stage_regularity_analyser = StageRegularityAnalyser(self.db)

    def on_get(self, req, resp, user_id, date):
        """Return the translated sleep-regularity stages for one user/date.

        For now the regularity result is computed on every request rather
        than cached in the database.  (An earlier cached variant read
        ``self.db.sleep_phase`` and re-analysed when the algorithm version
        changed; it is intentionally disabled.)  The covered window runs
        from 6:00 to 6:00 of the next day.
        """
        stages = self.stage_regularity_analyser.calc_sleep_regularity(user_id, date)
        entries = self.stage_regularity_analyser.translate_sleep_stages(stages)
        # datetime objects are not JSON-serialisable; convert them in place.
        for entry in entries:
            entry['time'] = entry['time'].isoformat()
        resp.body = json.dumps({'result': entries})
        resp.status = falcon.HTTP_200
| [
"lvwanyou@163.com"
] | lvwanyou@163.com |
e7d94e462e7b2e3fde8bb0883b5a7eeb7330bbdd | 8ad9c8b4c7c888482c743bf05007a5611c49ad75 | /utils/augmentation.py | c2c9d36193dc5626133645f08b1dd064389be03e | [
"MIT"
] | permissive | Papyrus-Analysis/mcrnn-pytorch | 9083f13cc94466daadbfbf6bba5ca03a4a059f4b | 6624eebe128770df5fa71235fbf0c6677f2d3b51 | refs/heads/main | 2023-05-24T08:54:38.338493 | 2021-05-20T20:58:29 | 2021-05-20T20:58:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,884 | py | import cv2
import numpy as np
from imgaug import augmenters as iaa
from scipy.interpolate import griddata
import sys
INTERPOLATION = {
"linear": cv2.INTER_LINEAR,
"cubic": cv2.INTER_CUBIC
}
class GridDistortion:
def __init__(self, prob=0.3):
self.prob = prob
def __call__(self, img):
should_transform = np.random.choice(np.arange(0, 2), p=[1 - self.prob, self.prob])
img = np.array(img)
if should_transform:
return warp_image(img)
return img
def warp_image(img, random_state=None, **kwargs):
    """Elastically distort *img* by jittering a regular control-point mesh.

    A grid of control points is laid over the image, each point is
    perturbed with Gaussian noise, and the image is remapped so the
    perturbed grid maps back onto the regular one.

    Keyword options (all via **kwargs):
        w_mesh_interval, h_mesh_interval : nominal grid spacing (default 12)
        w_mesh_std, h_mesh_std           : std-dev of the jitter (default 1.5)
        interpolation                    : 'linear' or 'cubic' (default 'linear')
        fit_interval_to_image            : round spacing to divide the image
                                           evenly (default True)
        draw_grid_lines                  : debug overlay; NOTE this mutates
                                           *img* in place (default False)

    Returns the warped image (same shape as *img*).
    """
    if random_state is None:
        random_state = np.random.RandomState()

    w_mesh_interval = kwargs.get('w_mesh_interval', 12)
    w_mesh_std = kwargs.get('w_mesh_std', 1.5)

    h_mesh_interval = kwargs.get('h_mesh_interval', 12)
    h_mesh_std = kwargs.get('h_mesh_std', 1.5)

    interpolation_method = kwargs.get('interpolation', 'linear')

    h, w = img.shape[:2]

    if kwargs.get("fit_interval_to_image", True):
        # Change interval so it fits the image size: snap the spacing to an
        # integer number of cells per axis so the grid covers the image
        # exactly (at least one cell per axis).
        w_ratio = w / float(w_mesh_interval)
        h_ratio = h / float(h_mesh_interval)

        w_ratio = max(1, round(w_ratio))
        h_ratio = max(1, round(h_ratio))

        w_mesh_interval = w / w_ratio
        h_mesh_interval = h / h_ratio
    ############################################

    # Get control points: (row, col) coordinates of the regular grid,
    # flattened to an (N, 2) array.
    source = np.mgrid[0:h+h_mesh_interval:h_mesh_interval, 0:w+w_mesh_interval:w_mesh_interval]
    source = source.transpose(1,2,0).reshape(-1,2)

    if kwargs.get("draw_grid_lines", False):
        # Debug visualisation: paint the grid lines directly into *img*
        # (black for grayscale, red for colour).  Mutates the input.
        if len(img.shape) == 2:
            color = 0
        else:
            color = np.array([0,0,255])
        for s in source:
            img[int(s[0]):int(s[0])+1,:] = color
            img[:,int(s[1]):int(s[1])+1] = color

    # Perturb source control points with independent Gaussian noise per axis
    # (rows jittered by h_mesh_std, columns by w_mesh_std).
    destination = source.copy()
    source_shape = source.shape[:1]
    destination[:,0] = destination[:,0] + random_state.normal(0.0, h_mesh_std, size=source_shape)
    destination[:,1] = destination[:,1] + random_state.normal(0.0, w_mesh_std, size=source_shape)

    # Warp image: interpolate the perturbed->regular mapping over every
    # pixel, then let cv2.remap pull pixels through it.  Pixels that fall
    # outside the grid's convex hull are filled with white.
    grid_x, grid_y = np.mgrid[0:h, 0:w]
    grid_z = griddata(destination, source, (grid_x, grid_y), method=interpolation_method).astype(np.float32)
    map_x = grid_z[:,:,1]
    map_y = grid_z[:,:,0]
    warped = cv2.remap(img, map_x, map_y, INTERPOLATION[interpolation_method], borderValue=(255,255,255))

    return warped
class ImgAugTransform:
    """Random photometric augmentations applied via imgaug.

    Each image independently gets (with probability 0.35 each) a Gaussian
    blur and one of two dropout variants.
    """

    def __init__(self):
        steps = [
            iaa.Sometimes(0.35, iaa.GaussianBlur(sigma=(0, 1.5))),
            iaa.Sometimes(0.35, iaa.OneOf([
                iaa.Dropout(p=(0, 0.05)),
                iaa.CoarseDropout(0, size_percent=0.05),
            ])),
        ]
        self.aug = iaa.Sequential(steps)

    def __call__(self, img):
        # imgaug expects numpy arrays; convert PIL images on the way in.
        return self.aug.augment_image(np.array(img))
| [
"glmanhtu@gmail.com"
] | glmanhtu@gmail.com |
2f819d9b7131ebb5ab3ba5de2b16433c41ef6657 | da7a893f0dc9c130b5f8c29d4875e7c5d98ac64f | /code-slides/0019-fib-more-fast-examples.py | 8dbdcdff07628e4544477b3860838a7d9f952cf8 | [] | no_license | py-yyc/decorators | a489d89869582a9127a5272e9342b8131ad91fe3 | bd7c65b78b3f00cf8da216eab945f3ef26c1b2a8 | refs/heads/master | 2020-06-20T18:29:59.884497 | 2016-02-23T21:48:09 | 2016-02-23T21:48:09 | 52,392,195 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 906 | py | from __future__ import print_function # noslide
## <h1>how decorators work</h1>
from time import time # noslide
from contextlib import contextmanager # noslide
@contextmanager # noslide
def timer(): # noslide
    # Context manager: print the wall-clock duration of the with-block.  # noslide
    # (The trailing "noslide" markers appear to hide lines from the  # noslide
    # generated slide deck -- TODO confirm against the slide tooling.)  # noslide
    s = time() # noslide
    yield # noslide
    print("took {:.6f}s".format(time() - s)) # noslide
def memoize(fn): # noslide
    # Decorator: cache fn's results keyed by its positional arguments.  # noslide
    # Keyword arguments are not supported by this simple cache.  # noslide
    cache = {} # noslide
    def wrapper(*args): # noslide
        try: # noslide
            return cache[args] # noslide
        except KeyError: # noslide
            # First call with these args: compute, remember, return.  # noslide
            r = fn(*args) # noslide
            cache[args] = r # noslide
            return r # noslide
    return wrapper # noslide
@memoize # noslide
def fib(x): # noslide
    # 1-indexed Fibonacci; memoization turns the naive exponential  # noslide
    # recursion into linear time (inputs < 1 would recurse forever).  # noslide
    if x in [1, 2]: # noslide
        return 1 # noslide
    return fib(x - 1) + fib(x - 2) # noslide
# Demo: with memoization both calls complete almost instantly.  # noslide
with timer():
    print("fib(100) =", fib(100))
with timer():
    print("fib(200) =", fib(200))
## show-output
| [
"meejah@meejah.ca"
] | meejah@meejah.ca |
fef8619855d686a10de3b4cc6d72b631190df666 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_201/2282.py | f467a7480d13917624dc75ae91326fb1c6115b5b | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,362 | py | def rec_stall(n):
res = []
if n == 1: return stalls[1]
if n == 2: return stalls[2]
if n == 3: return stalls[3]
if n%2 == 0:
a = rec_stall(n/2)
b = rec_stall(n/2-1)
res.extend([[n/2-1,n/2]])
c = [list(x) for x in zip(a,b)]
c = [val for sublist in c for val in sublist]
res.extend(c)
res.extend([[0,0]])
return res
else:
a = rec_stall(n/2)
res.extend([[n/2,n/2]])
c = [list(x) for x in zip(a,a)]
c = [val for sublist in c for val in sublist]
res.extend(c)
res.extend([[0,0]])
return res
stalls = [0,0,0,0]
stalls[1] = [[0,0]]
stalls[2] = [[0,1],[0,0]]
stalls[3] = [[1,1],[0,0],[0,0]]
#stalls[4] = [[1,2],[0,1],[0,0],[0,0]]
#stalls[5] = [[2,2],[0,1],[0,1],[0,0],[0,0]]
#stalls[6] = [[2,3],[1,1],[0,1],[0,0],[0,0],[0,0]]
#print 1,rec_stall(1)
#print 2,rec_stall(2)
#print 3,rec_stall(3)
#print 4,rec_stall(4)
#print 5,rec_stall(5)
#print 6,rec_stall(6)
#print 7,rec_stall(7)
#print 8,rec_stall(8)
# Driver (Python 2 syntax): read T test cases of "N M" and print, for the
# M-th person entering N stalls, the max and min distance to the nearest
# occupied stall.
t = int(raw_input()) # read a line with a single integer
for i in xrange(1, t + 1):
    n, m = [int(s) for s in raw_input().split(" ")] # read a list of integers, 2 in this case
    # When every stall ends up taken (m == n), the last person has no gap
    # on either side, so the answer is trivially "0 0".
    if n == m:
        print "Case #{}: {} {}".format(i, 0, 0)
        continue
    s = rec_stall(n)
    #print "Case #{}: {} {}", i, s, n, m, max(s[m-1]), min(s[m-1])
    # s[m-1] holds [max_gap, min_gap] for the m-th person to enter.
    print "Case #{}: {} {}".format(i, max(s[m-1]), min(s[m-1]))
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
4cc34e361b07ef53d5e150374d1db0d50e01b2b9 | 553eadc801cfc8b3aacd0004be274f871cfd6c2b | /rango/admin.py | b3f1528fbe6ec5db485f83a1b26263ea6f7eace5 | [] | no_license | subhro101/tango_with_django_project | 1b16551c23e58e6a217a06f0e4b72da568afa1cb | e9c20e3e39d45ea20c47003b7827512a48f55974 | refs/heads/master | 2023-06-24T11:38:05.351457 | 2021-07-30T13:06:42 | 2021-07-30T13:06:42 | 389,550,619 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | from django.contrib import admin
from rango.models import Category, Page
from rango.models import UserProfile
class PageAdmin(admin.ModelAdmin):
    # Columns shown on the Page change-list view in the Django admin.
    list_display = ('title', 'category', 'url')
class CategoryAdmin(admin.ModelAdmin):
    # Auto-populate the slug from the name as it is typed in the admin form.
    prepopulated_fields = {'slug':('name',)}
# Expose the Rango models in the Django admin, attaching the customised
# ModelAdmin class where one exists.
for _model, _options in ((Page, PageAdmin), (Category, CategoryAdmin)):
    admin.site.register(_model, _options)
admin.site.register(UserProfile)
| [
"2601733P@student.gla.ac.uk"
] | 2601733P@student.gla.ac.uk |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.