seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
37750672638 | # coding=utf-8
from global_test_case import GlobalTestCase as TestCase
from ..models import Message, WriteItInstance, \
Moderation, Confirmation, OutboundMessage
from popit.models import Person
from django.core import mail
from subdomains.utils import reverse
import datetime
from mock import patch
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext as _
from django.conf import settings
from django.test.utils import override_settings
from django.utils.unittest import skip
class ModerationMessagesTestCase(TestCase):
    """Tests for the moderation workflow of private messages.

    Exercises creation of Moderation records, the moderation e-mail,
    the accept/reject URLs, and the resulting OutboundMessage status
    transitions ('new' -> 'needmodera' -> 'ready').
    """

    def setUp(self):
        super(ModerationMessagesTestCase, self).setUp()
        self.writeitinstance1 = WriteItInstance.objects.get(id=1)
        self.person1 = Person.objects.get(id=1)
        # A private (public=False) message: this is the kind that
        # requires moderation throughout these tests.
        self.private_message = Message.objects.create(
            content='Content 1',
            author_name='Felipe',
            author_email="falvarez@votainteligente.cl",
            subject='Subject 1',
            public=False,
            writeitinstance=self.writeitinstance1,
            persons=[self.person1],
        )
        self.confirmation = Confirmation.objects.create(message=self.private_message)
        # Instance owner credentials used by the login-protected tests.
        self.owner = self.writeitinstance1.owner
        self.owner.set_password('feroz')
        self.owner.save()

    def test_private_messages_confirmation_created_move_from_new_to_needs_moderation(self):
        """Confirming a private message moves its outbound message to 'needmodera'."""
        moderation, created = Moderation.objects.get_or_create(message=self.private_message)
        self.private_message.recently_confirmated()
        outbound_message_to_pedro = OutboundMessage.objects.get(message=self.private_message)
        self.assertEquals(outbound_message_to_pedro.status, 'needmodera')

    def test_private_message_is_not_accesible(self):
        """A confirmed but private message returns 404 on its public URL."""
        self.confirmation.confirmated_at = datetime.datetime.now()
        self.confirmation.save()
        self.private_message.confirmated = True
        self.private_message.save()
        url = self.private_message.get_absolute_url()
        response = self.client.get(url)
        self.assertEquals(response.status_code, 404)

    def test_outbound_messages_of_a_confirmed_message_are_waiting_for_moderation(self):
        """Hitting the confirmation URL leaves outbound messages in 'needmodera'."""
        # I need to do a get to the confirmation url
        moderation, created = Moderation.objects.get_or_create(message=self.private_message)
        url = reverse(
            'confirm',
            subdomain=self.private_message.writeitinstance.slug,
            kwargs={
                'slug': self.confirmation.key
            },
        )
        self.client.get(url)
        # this works proven somewhere else
        outbound_message_to_pedro = OutboundMessage.objects.get(message=self.private_message)
        self.assertEquals(outbound_message_to_pedro.status, 'needmodera')

    def test_message_send_moderation_message(self):
        """The moderation mail is sent from <slug>@DEFAULT_FROM_DOMAIN."""
        # Let's have some longer message content so we can keep an eye on the text wrapping.
        self.private_message.content = u'''A gaf fi dynnu sylw'r Prif Weinidog at y sefyllfa yn Ysbyty Penrhos Stanley yng Nghaergybi, lle mae un o'r ddwy ward wedi bod ar gau ers dros bythefnos erbyn hyn, oherwydd absenoldeb staff a diffyg staff wrth gefn, ac ni fydd y ward yn agor am bythefnos arall, tan 13 Ebrill—bron i dair wythnos a dweud y gwir?
A gaf i dynnu sylw'r Prif Weinidog hefyd at y sefyllfa yn Ysbyty Gwynedd yn ddiweddar, lle cadwyd etholwr i mi mewn ystafell storio dros nos wrth wella ar ôl llawdriniaeth, â’i declyn drip yn hongian oddi ar beg ar y wal, ac y rhoddwyd cloch bres Fictoraidd iddo i dynnu sylw’r nyrs. Mae'r nyrs yn gwneud gwaith gwych dan amgylchiadau anodd. A yw hynny'n swnio i'r Prif Weinidog fel GIG sydd ag adnoddau da ac yn cael ei reoli'n dda? Er bod gwleidyddiaeth cyni cyllidol yn gyfrifol am lawer o'r diffyg adnoddau, nid yw'n esgusodi camreolaeth y GIG gan Lywodraeth Cymru.'''
        self.private_message.save()
        moderation, created = Moderation.objects.get_or_create(message=self.private_message)
        self.private_message.send_moderation_mail()
        self.assertEquals(len(mail.outbox), 2)
        moderation_mail = mail.outbox[1]
        self.assertModerationMailSent(self.private_message, moderation_mail)
        expected_from_email = self.private_message.writeitinstance.slug + "@" + settings.DEFAULT_FROM_DOMAIN
        self.assertEquals(moderation_mail.from_email, expected_from_email)

    def test_send_moderation_message_from_custom_connection(self):
        '''If given a custom smtp config for its instance then
        it sends the moderation mail with this custom config '''
        config = self.private_message.writeitinstance.config
        config.custom_from_domain = "custom.domain.cl"
        config.email_host = 'cuttlefish.au.org'
        config.email_host_password = 'f13r4'
        config.email_host_user = 'fiera'
        config.email_port = 25
        config.email_use_tls = True
        config.save()
        moderation, created = Moderation.objects.get_or_create(message=self.private_message)
        self.private_message.send_moderation_mail()
        self.assertEquals(len(mail.outbox), 2)
        moderation_mail = mail.outbox[1]
        self.assertModerationMailSent(self.private_message, moderation_mail)
        expected_from_email = self.private_message.writeitinstance.slug + "@" + config.custom_from_domain
        self.assertEquals(moderation_mail.from_email, expected_from_email)
        # The mail must have been sent through the custom SMTP connection.
        connection = moderation_mail.connection
        self.assertEquals(connection.host, config.email_host)
        self.assertEquals(connection.password, config.email_host_password)
        self.assertEquals(connection.username, config.email_host_user)
        self.assertEquals(connection.port, config.email_port)
        self.assertEquals(connection.use_tls, config.email_use_tls)

    def test_not_using_any_custom_config(self):
        '''If not using any custom config the moderation
        mail does not use that connection'''
        moderation, created = Moderation.objects.get_or_create(message=self.private_message)
        self.private_message.send_moderation_mail()
        self.assertEquals(len(mail.outbox), 2)
        moderation_mail = mail.outbox[1]
        connection = moderation_mail.connection
        # The default (locmem test) connection has no SMTP attributes.
        self.assertFalse(hasattr(connection, 'host'))
        self.assertFalse(hasattr(connection, 'password'))
        self.assertFalse(hasattr(connection, 'username'))
        self.assertFalse(hasattr(connection, 'port'))
        self.assertFalse(hasattr(connection, 'use_tls'))

    @override_settings(SEND_ALL_EMAILS_FROM_DEFAULT_FROM_EMAIL=True)
    def test_moderation_sent_from_default_from_email(self):
        '''Moderation is sent from default from email if specified'''
        moderation, created = Moderation.objects.get_or_create(message=self.private_message)
        self.private_message.send_moderation_mail()
        moderation_mail = mail.outbox[1]
        expected_from_email = settings.DEFAULT_FROM_EMAIL
        self.assertEquals(moderation_mail.from_email, expected_from_email)

    def test_create_a_moderation(self):
        """Creating a private message also creates a Moderation with a uuid1 key."""
        #I make sure that uuid.uuid1 is called and I get a sort of random key
        with patch('uuid.uuid1') as string:
            string.return_value.hex = 'oliwi'
            message = Message.objects.create(
                content='Content 1',
                author_name='Felipe',
                author_email="falvarez@votainteligente.cl",
                subject='Fiera es una perra feroz',
                public=False,
                writeitinstance=self.writeitinstance1,
                persons=[self.person1],
            )
            self.assertFalse(message.moderation is None)
            self.assertEquals(message.moderation.key, 'oliwi')
            string.assert_called()

    # issue 114 found at https://github.com/ciudadanointeligente/write-it/issues/114
    def test_send_mails_only_once(self):
        """The moderation mail is sent exactly once per confirmation."""
        with patch('nuntium.models.Message.send_moderation_mail') as send_moderation_mail:
            self.writeitinstance1.config.moderation_needed_in_all_messages = True
            self.writeitinstance1.config.save()
            send_moderation_mail.return_value = None
            message = Message.objects.create(
                content='Content 1',
                author_name='Felipe',
                author_email="falvarez@votainteligente.cl",
                subject='Fiera es una perra feroz',
                public=False,
                writeitinstance=self.writeitinstance1,
                persons=[self.person1],
            )
            message.recently_confirmated()
            # number_of_moderations = Moderation.objects.filter(message=message).count()
            send_moderation_mail.assert_called_once_with()

    def test_message_has_a_method_for_moderate(self):
        """moderate() marks the message moderated and its outbound messages 'ready'."""
        self.confirmation.confirmated_at = datetime.datetime.now()
        self.confirmation.save()
        self.private_message.confirmated = True
        self.private_message.save()
        self.private_message.moderate()
        outbound_message_to_pedro = OutboundMessage.objects.get(message=self.private_message)
        self.assertTrue(self.private_message.moderated)
        self.assertEquals(outbound_message_to_pedro.status, 'ready')

    def test_message_that_has_not_been_confirmed_cannot_be_moderated(self):
        """moderate() on an unconfirmed message raises ValidationError."""
        # this message has not been confirmed
        # and is private therefore requires moderation
        message = Message.objects.create(
            content='Content 1',
            author_name='Felipe',
            author_email="falvarez@votainteligente.cl",
            subject='Fiera es una perra feroz',
            public=False,
            writeitinstance=self.writeitinstance1,
            persons=[self.person1],
        )
        with self.assertRaises(ValidationError):
            # this was taken from here
            # http://stackoverflow.com/questions/8215653/using-a-context-manager-with-python-assertraises#8215739
            try:
                message.moderate()
            except ValidationError as e:
                self.assertEqual(e.message,
                                 _('The message needs to be confirmated first',))
                raise
        self.assertFalse(message.moderated)
        outbound_message_to_pedro = OutboundMessage.objects.get(message=message)
        self.assertEquals(outbound_message_to_pedro.status, 'new')

    def test_there_is_a_moderation_url_that_sets_the_message_to_ready(self):
        """GET on the accept URL (logged in) moderates the message."""
        self.client.login(username=self.owner.username, password='feroz')
        self.confirmation.confirmated_at = datetime.datetime.now()
        self.confirmation.save()
        self.private_message.confirmated = True
        self.private_message.save()
        url = reverse('moderation_accept',
                      subdomain=self.private_message.writeitinstance.slug,
                      kwargs={
                          'slug': self.private_message.moderation.key
                      })
        response = self.client.get(url)
        self.assertEquals(response.status_code, 200)
        self.assertTemplateUsed(response, 'nuntium/moderation_accepted.html')
        #private_message = Message.objects.get(id=self.private_message.id)
        outbound_message_to_pedro = OutboundMessage.objects.get(message=self.private_message.id)
        self.assertEquals(outbound_message_to_pedro.status, 'ready')
        private_message = Message.objects.get(id=self.private_message.id)
        self.assertTrue(private_message.moderated)

    def test_moderation_get_success_url(self):
        """get_success_url() points at the moderation_accept view."""
        # NOTE(review): the subdomain is passed positionally here but by
        # keyword everywhere else in this file -- presumably equivalent;
        # confirm against subdomains.utils.reverse's signature.
        expected_url = reverse('moderation_accept',
                               self.private_message.writeitinstance.slug,
                               kwargs={
                                   'slug': self.private_message.moderation.key
                               })
        self.assertEquals(self.private_message.moderation.get_success_url(), expected_url)

    def test_moderation_get_reject_url(self):
        """get_reject_url() points at the moderation_rejected view."""
        expected_url = reverse('moderation_rejected',
                               subdomain=self.private_message.writeitinstance.slug,
                               kwargs={
                                   'slug': self.private_message.moderation.key
                               })
        self.assertEquals(self.private_message.moderation.get_reject_url(), expected_url)

    def test_there_is_a_reject_moderation_url_that_hides_the_message(self):
        '''
        This is the case when you proud owner of a writeitInstance
        think that the private message should not go anywhere
        and it should be hidden
        '''
        self.client.login(username=self.owner.username, password='feroz')
        # Ok I'm going to make the message public
        public_message = self.private_message
        public_message.public = True
        public_message.save()
        url = reverse(
            'moderation_rejected',
            subdomain=public_message.writeitinstance.slug,
            kwargs={
                'slug': public_message.moderation.key
            })
        response = self.client.get(url)
        self.assertEquals(response.status_code, 200)
        self.assertTemplateUsed(response, 'nuntium/moderation_rejected.html')
        # If someone knows how to do the DoesNotExist or where to extend from
        # I could do a self.assertRaises but I'm not taking any more time in this
        message = Message.objects.get(id=public_message.id)
        self.assertFalse(message.public)
        self.assertTrue(message.moderated)

    def test_when_moderation_needed_a_mail_for_its_owner_is_sent(self):
        """Confirming sends the owner a mail containing the message and both moderation links."""
        self.private_message.recently_confirmated()
        # There should be two
        # One is created for confirmation
        # The other one is created for the moderation thing
        self.assertEquals(len(mail.outbox), 2)
        moderation_mail = mail.outbox[1]
        # it is sent to the owner of the instance
        self.assertEquals(moderation_mail.to[0], self.private_message.writeitinstance.owner.email)
        self.assertTrue(self.private_message.content in moderation_mail.body)
        self.assertTrue(self.private_message.subject in moderation_mail.body)
        self.assertTrue(self.private_message.author_name in moderation_mail.body)
        self.assertTrue(self.private_message.author_email in moderation_mail.body)
        url_rejected = (reverse('moderation_rejected',
                                subdomain=self.private_message.writeitinstance.slug,
                                kwargs={'slug': self.private_message.moderation.key})
                        )
        url_accept = (reverse('moderation_accept',
                              subdomain=self.private_message.writeitinstance.slug,
                              kwargs={'slug': self.private_message.moderation.key})
                      )
        self.assertIn(url_rejected, moderation_mail.body)
        self.assertIn(url_accept, moderation_mail.body)

    def test_creates_automatically_a_moderation_when_a_private_message_is_created(self):
        """Private messages get a Moderation without asking for one."""
        message = Message.objects.create(
            content='Content 1',
            author_name='Felipe',
            author_email="falvarez@votainteligente.cl",
            subject='Fiera es una perra feroz',
            public=False,
            writeitinstance=self.writeitinstance1,
            persons=[self.person1],
        )
        self.assertFalse(message.moderation is None)

    def test_a_moderation_does_not_change_its_key_on_save(self):
        '''
        I found that everytime I did resave a moderation
        it key was regenerated
        '''
        previous_key = self.private_message.moderation.key
        self.private_message.moderation.save()
        moderation = Moderation.objects.get(message=self.private_message)
        post_key = moderation.key
        self.assertEquals(previous_key, post_key)

    def test_moderates_method(self):
        """Moderation.success() marks the related message as moderated."""
        moderation = Moderation.objects.get(message=self.private_message)
        moderation.success()
        message = Message.objects.get(moderation=moderation)
        self.assertTrue(message.moderated)

    # this test is for the issue https://github.com/ciudadanointeligente/write-it/issues/186
    @skip('Message creation is no longer in the instance detail view')
    def test_confirmated_but_not_moderated_message_in_a_moderable_instance_is_in_needs_moderation_status(self):
        self.writeitinstance1.config.moderation_needed_in_all_messages = True
        self.writeitinstance1.config.save()
        data = {
            'author_email': u'falvarez@votainteligente.cl',
            'author_name': u'feli',
            'public': True,
            'subject': u'Fiera no está',
            'content': u'¿Dónde está Fiera Feroz? en la playa?',
            'persons': [self.person1.id],
        }
        url = self.writeitinstance1.get_absolute_url()
        self.client.post(url, data, follow=True)
        message = Message.objects.get(
            author_name="feli",
            author_email="falvarez@votainteligente.cl",
            subject="Fiera no está",
            content='¿Dónde está Fiera Feroz? en la playa?')
        confirmation = Confirmation.objects.get(message=message)
        self.client.get(confirmation.get_absolute_url())
        # one message to Pedro
        outbound_message = OutboundMessage.objects.get(message=message)
        # Here I have the bug!!!!!
        self.assertEquals(outbound_message.status, 'needmodera')
        # This one is the bug!!\

    def test_non_authenticated_users_cant_accept_messages(self):
        """Moderation accept links require users to be logged in"""
        self.confirmation.confirmated_at = datetime.datetime.now()
        self.confirmation.save()
        self.private_message.confirmated = True
        self.private_message.save()
        url = reverse('moderation_accept',
                      subdomain=self.private_message.writeitinstance.slug,
                      kwargs={
                          'slug': self.private_message.moderation.key
                      })
        response = self.client.get(url)
        # Anonymous users are redirected (302) and nothing changes state.
        self.assertEquals(response.status_code, 302)
        outbound_message_to_pedro = OutboundMessage.objects.get(message=self.private_message.id)
        self.assertEquals(outbound_message_to_pedro.status, 'new')
        private_message = Message.objects.get(id=self.private_message.id)
        self.assertFalse(private_message.moderated)

    def test_non_authenticated_users_cant_reject_messages(self):
        """Moderation reject links require users to be logged in"""
        self.confirmation.confirmated_at = datetime.datetime.now()
        self.confirmation.save()
        self.private_message.confirmated = True
        self.private_message.save()
        url = reverse('moderation_rejected',
                      subdomain=self.private_message.writeitinstance.slug,
                      kwargs={
                          'slug': self.private_message.moderation.key
                      })
        response = self.client.get(url)
        self.assertEquals(response.status_code, 302)
        outbound_message_to_pedro = OutboundMessage.objects.get(message=self.private_message.id)
        self.assertEquals(outbound_message_to_pedro.status, 'new')
        private_message = Message.objects.get(id=self.private_message.id)
        self.assertFalse(private_message.moderated)
| ciudadanointeligente/write-it | nuntium/tests/moderation_messages_test.py | moderation_messages_test.py | py | 19,167 | python | en | code | 38 | github-code | 36 |
''' This module contains the TextForWeights class, a text widget
used for displaying and editing weight restrictions.

Attributes:
    TEXT_WIDTH_VAL (int): width of the Text widget.
    TEXT_HEIGHT_VAL (int): height of the Text widget.
'''
from tkinter import Text, NONE, N, W, E, S, HORIZONTAL, END, TclError
from tkinter import StringVar, LEFT
from tkinter.ttk import Frame, Label, Scrollbar
from pyDEA.core.utils.dea_utils import create_bounds, contraint_is_price_ratio_type
# Dimensions of the weight-restrictions Text widget (characters x lines).
TEXT_WIDTH_VAL = 50
TEXT_HEIGHT_VAL = 10
class TextForWeights(Frame):
    ''' Implements text widget used for displaying and editing weight
        restrictions.

        Attributes:
            parent (Tk object): parent of this widget.
            weight_name (str): text describing type of weight restrictions,
                e.g. absolute, virtual, etc.
            examples (str): string with an example of usage of this
                type of weight restrictions, e.g. I1 <= 2.
            current_categories (list of str): list of current categories.
            params (Parameters): parameters.
            param_name (str): parameter name that corresponds to this type
                of weight restrictions, e.g. ABS_WEIGHT_RESTRICTIONS.
            text (Text): text widget used for displaying weight restrictions.
            error_tag_exists (bool): if set to True, means that there are
                invalid weight restrictions, False otherwise.
            errors_strvar (StringVar): is used for storing and displaying
                error messages for invalid weight restrictions.
            is_price_ratio_constraint (bool): if True price ratio constraints
                are expected to be entered, if False other constraint types
                are expected to be entered. Defaults to False.

        Args:
            parent (Tk object): parent of this widget.
            weight_name (str): text describing type of weight restrictions,
                e.g. absolute, virtual, etc.
            examples (str): string with an example of usage of this
                type of weight restrictions, e.g. I1 <= 2.
            current_categories (list of str): list of current categories.
            params (Parameters): parameters.
            param_name (str): parameter name that corresponds to this type
                of weight restrictions, e.g. ABS_WEIGHT_RESTRICTIONS.
            is_price_ratio_constraint (bool): if True price ratio constraints
                are expected to be entered, if False other constraint types
                are expected to be entered. Defaults to False.
    '''
    def __init__(self, parent, weight_name, examples,
                 current_categories, params, param_name,
                 is_price_ratio_constraint=False, *args, **kw):
        Frame.__init__(self, parent, *args, **kw)
        self.parent = parent
        self.examples = examples
        self.current_categories = current_categories
        self.params = params
        self.param_name = param_name
        self.text = None
        self.error_tag_exists = False
        self.errors_strvar = StringVar()
        self.weight_name = weight_name
        self.is_price_ratio_constraint = is_price_ratio_constraint
        self.create_widgets(weight_name)

    def create_widgets(self, weight_name):
        ''' Creates all widgets: labels, scrollbars and the Text widget
            used for entering weight restrictions.
        '''
        constraints_lbl = Label(
            self, text='Enter {0} weight restrictions:'.format(
                weight_name))
        constraints_lbl.grid(padx=10, pady=2, sticky=N+W)
        examples_lbl = Label(self, text='e.g. {0}'.format(self.examples))
        examples_lbl.grid(row=1, column=0, padx=10, pady=5, sticky=N+W)
        # Red label next to the Text widget showing validation errors.
        errors_lbl = Label(self, textvariable=self.errors_strvar,
                           foreground='red', anchor=W, justify=LEFT,
                           wraplength=80)
        errors_lbl.grid(row=2, column=2, sticky=N+W, padx=5, pady=5)
        self.grid_rowconfigure(2, weight=1)
        self.grid_columnconfigure(0, weight=1)
        xscrollbar = Scrollbar(self, orient=HORIZONTAL)
        xscrollbar.grid(row=3, column=0, sticky=E+W)
        yscrollbar = Scrollbar(self)
        yscrollbar.grid(row=2, column=1, sticky=N+S)
        self.text = Text(self, wrap=NONE, width=TEXT_WIDTH_VAL,
                         height=TEXT_HEIGHT_VAL,
                         xscrollcommand=xscrollbar.set,
                         yscrollcommand=yscrollbar.set)
        self.text.grid(row=2, column=0, sticky=N+S+E+W)
        xscrollbar.config(command=self.text.xview)
        yscrollbar.config(command=self.text.yview)

    def delete_weights(self):
        ''' Removes all weight restrictions.
        '''
        self.text.delete(1.0, END)
        self.error_tag_exists = False

    def insert_weight(self, weight):
        ''' Add a given weight restriction in the end of the
            text widget.

            Args:
                weight (str): string that describes a given weight
                    restriction.
        '''
        # we assume that inserted weight is always correct
        self.text.insert(END, weight + '\n')

    def validate_weights(self):
        ''' Checks if all weight restrictions are valid.

            Invalid lines are highlighted in red and their error messages
            collected into errors_strvar; valid lines are joined with ';'
            and stored into params under param_name.

            Returns:
                bool: True if all weight restrictions are valid,
                    False otherwise.
        '''
        self.text.config(foreground='black')
        if self.error_tag_exists:
            try:
                self.text.tag_remove('error', 'error.first', 'error.last')
            except TclError:
                # there is no error tag
                pass
            self.error_tag_exists = False
        self.errors_strvar.set('')
        errors = []
        all_constraints = []
        for count, line in enumerate(self.text.get('1.0', 'end-1c').splitlines()):
            # Iterate lines
            if line:
                weight_as_list = []
                weight_as_list.append(line)
                try:
                    bounds = create_bounds(weight_as_list, self.current_categories)
                    assert len(bounds) == 1
                    key, value = bounds.popitem()
                    is_price_ratio = contraint_is_price_ratio_type(key)
                    # Reject constraints whose kind does not match what this
                    # widget is configured to accept.
                    if self.is_price_ratio_constraint and not is_price_ratio:
                        raise ValueError('Constraint {0} is not a price ratio constraint'.
                                         format(line))
                    if not self.is_price_ratio_constraint and is_price_ratio:
                        raise ValueError('Constraint {0} is a price ratio constraint.'
                                         ' Use {1} weight restriction type constraint instead.'.
                                         format(line, self.weight_name))
                except ValueError as err:
                    # Highlight the offending line in red.
                    self.text.tag_add('error', '%d.0' % (count + 1),
                                      '%d.end' % (count + 1))
                    self.error_tag_exists = True
                    errors.append('* ' + str(err) + '\n')
                else:
                    # combine correct lines for parameters
                    all_constraints.append(line)
        errors.append('\n')
        if self.error_tag_exists:
            self.text.tag_config('error', foreground='red')
        constraints = '; '.join(all_constraints)
        self.params.update_parameter(self.param_name, constraints)
        if self.error_tag_exists:
            self.errors_strvar.set(self.get_all_errors(errors))
        if constraints:
            return True
        return False

    def get_all_errors(self, error_list):
        ''' Returns given list of errors as one string.

            Args:
                error_list (list of str): list of strings to concatenate.

            Returns:
                str: concatenated string.
        '''
        if len(error_list) == 0:
            return ''
        return ''.join(error_list)
| araith/pyDEA | pyDEA/core/gui_modules/text_for_weights_gui.py | text_for_weights_gui.py | py | 7,933 | python | en | code | 38 | github-code | 36 |
37862194723 | from scipy.spatial.distance import cosine
from sentence_transformers import SentenceTransformer
# Pre-trained patent phrase-similarity encoder, loaded once at import time
# (may download the model weights on first use).
model = SentenceTransformer("AI-Growth-Lab/PatentSBERTa")
def get_sim(anchor: str, target: str) -> float:
    """Return the cosine similarity between two phrases' sentence embeddings.

    Args:
        anchor: first phrase.
        target: second phrase.

    Returns:
        Similarity in [-1, 1]; 1.0 means identical embeddings.
    """
    # Encode both phrases in a single batched call instead of two separate
    # forward passes; the embeddings are identical, only the cost halves.
    anchor_embed, target_embed = model.encode([anchor, target])
    return float(1 - cosine(anchor_embed, target_embed))
# Sample phrase pairs of decreasing relatedness, from near-synonyms
# down to unrelated phrases.
examples = [
    ["renewable power", "renewable energy"],
    ["previously captured image", "image captured previously"],
    ["labeled ligand", "container labelling"],
    ["gold alloy", "platinum"],
    ["dissolve in glycol", "family gathering"],
]

if __name__ == '__main__':
    # NOTE(review): the similarity score is computed but discarded --
    # presumably a smoke test; a print() around the call was likely intended.
    get_sim("renewable power", "renewable energy")
| vquilon/kaggle-competitions | patent-phrase-to-phrase-matching/models/patent_sbert_a.py | patent_sbert_a.py | py | 681 | python | en | code | 0 | github-code | 36 |
569522296 | from qtsalome import QSqlQuery
from Base.tableDeBase import TableDeBase
class TableGroupes(TableDeBase):
    """Database table storing, per (group, mesh, version, entity type),
    the number of entities in a mesh group."""

    def __init__(self):
        TableDeBase.__init__(self, "Groupes")
        self.setField(('Groupe', 'Maillage', 'Version', 'Entite', 'NbEntite'))
        self.setTypeField(('str', 'int', 'int', 'str', 'int'), ('nomGroupe', 'idMaillage', 'idVersion', 'Entite'))

    def createSqlTable(self):
        """Create the Groupes table with its composite primary key and
        foreign keys to Maillages and Versions."""
        query = QSqlQuery()
        texteQuery = "create table Groupes( nomGroupe varchar(40),idMaillage int, idVersion int,"
        texteQuery += "Entite var(40), NbEntite int,"
        texteQuery += "foreign key (idMaillage) references Maillages(id),"
        texteQuery += "foreign key (idVersion) references Versions(id),"
        texteQuery += "primary key (nomGroupe,idMaillage,idVersion,Entite));"
        print("Creation de TableGroupes : ", query.exec_(texteQuery))

    def getVal(self, nomGroupe, idMaillage, idVersion, typeMaille):
        """Return NbEntite for the given key, or 0 when no record exists.

        Prints a warning when the query unexpectedly matches several
        records (should be impossible given the primary key).
        """
        val = 0  # value returned when the record does not exist
        query = QSqlQuery()
        # NOTE(review): the query is built by string concatenation; fine for
        # trusted internal names, but not safe against quoting in nomGroupe.
        texteQuery = 'select NbEntite from Groupes where nomGroupe ="' + nomGroupe + '"'
        texteQuery += ' and idMaillage=' + str(idMaillage)
        texteQuery += ' and idVersion = ' + str(idVersion)
        texteQuery += ' and Entite ="' + str(typeMaille) + '";'
        query.exec_(texteQuery)
        # Bug fix: the original used two consecutive ``while query.next()``
        # loops -- the first consumed every row, so the duplicate-record
        # warning in the second loop could never fire.
        if query.next():
            val = query.value(0).toInt()[0]
            while query.next():
                print("plusieurs enregistrements dans groupe pour ", nomGroupe, " ", str(idMaillage), " ", str(idVersion), "\n")
        return val

    def getAllEntity(self):
        """Return the list of distinct entity types present in the table."""
        query = QSqlQuery()
        texteQuery = "select distinct Entite from Groupes;"
        query.exec_(texteQuery)
        maListe = []
        while (query.next()):
            maListe.append(str(query.value(0).toString()))
        return maListe
| luzpaz/occ-smesh | src/Tools/Verima/Base/tableGroupes.py | tableGroupes.py | py | 1,938 | python | en | code | 2 | github-code | 36 |
74446495144 | from typing import Dict, List
from aiplayground.api.bots import Bot
from aiplayground.api.tournaments.models import Participant, Tournament, Match, PlayerQueueEntry, MatchState
from collections import defaultdict
import operator
from aiplayground.exceptions import AlreadyInTournament
from aiplayground.logging import logger
from aiplayground.types import PlayerSID
def add_player(bot: Bot, tournament: Tournament) -> Participant:
    """Register *bot* in *tournament* and schedule its pending matches.

    Raises AlreadyInTournament when the bot is already a participant.
    Returns the newly created Participant.
    """
    logger.debug("Getting tournament lock: %s", tournament.id)
    with tournament.lock():
        logger.debug("Got lock for tournament: %s", tournament.id)
        existing = tournament.participants
        if any(entry.bot.id == bot.id for entry in existing):
            raise AlreadyInTournament
        # Indices start at 1; the newcomer takes the next free one.
        next_index = 1 + max((entry.index for entry in existing), default=0)
        newcomer = Participant.create(index=next_index, bot=bot, tournament=tournament)
        # One pending match against every opponent still in the running.
        for rival in (entry for entry in existing if not entry.disqualified):
            Match.create(
                index=100000 * next_index + rival.index,
                tournament=tournament,
                players=[newcomer, rival],
                state=MatchState.pending,
            )
        return newcomer
def pick_match(tournament: Tournament) -> Match:
    """Pick the lowest-index pending match whose players are all online,
    mark it running and return it.

    Raises IndexError when no pending match has every player online.
    """
    with tournament.lock():
        queued_players = PlayerQueueEntry.list(tournament_id=tournament.id)
        participants_by_id = {participant.id: participant for participant in tournament.participants}
        # Bug fix: ``defaultdict(default_factory=list)`` does NOT set the
        # factory -- keyword arguments initialize the mapping's *contents*,
        # leaving default_factory None so the append below raised KeyError.
        participant_sids: Dict[Participant, List[PlayerSID]] = defaultdict(list)
        for player in queued_players:
            participant = participants_by_id[player.participant_id]
            participant_sids[participant].append(player.sid)
        online_participants = set(participant_sids)
        # Playable matches: pending and no player missing from the online set.
        matches = [
            match
            for match in tournament.matches
            if match.state == MatchState.pending and not set(match.players) - online_participants
        ]
        sorted_matches = sorted(matches, key=operator.attrgetter("index"))
        match = sorted_matches[0]
        match.state = MatchState.running
        match.save()
        return match
| jackadamson/AI-Playground | aiplayground/api/tournaments/tournaments.py | tournaments.py | py | 2,268 | python | en | code | 0 | github-code | 36 |
43297592584 | import sys
from rpython.rlib.rarithmetic import r_uint, r_singlefloat, r_longlong, r_ulonglong
from rpython.rlib.libffi import IS_32_BIT
from pypy.module._rawffi.alt.interp_ffitype import app_types, descr_new_pointer
from pypy.module._rawffi.alt.type_converter import FromAppLevelConverter, ToAppLevelConverter
class DummyFromAppLevelConverter(FromAppLevelConverter):
    """Test double that records the interp-level value produced by the
    converter instead of passing it on, so tests can inspect it."""

    def handle_all(self, w_ffitype, w_obj, val, lgt=None):
        self.lastval = val

    # Every type-specific handler just records the value.
    handle_signed = handle_all
    handle_unsigned = handle_all
    handle_pointer = handle_all
    handle_char = handle_all
    handle_unichar = handle_all
    handle_longlong = handle_all
    handle_char_p = handle_all
    handle_unichar_p = handle_all
    handle_float = handle_all
    handle_singlefloat = handle_all

    def handle_struct(self, w_ffitype, w_structinstance):
        # Structs keep the wrapped instance itself.
        self.lastval = w_structinstance

    def convert(self, w_ffitype, w_obj):
        """Unwrap w_obj as w_ffitype and return the recorded interp-level value."""
        self.unwrap_and_do(w_ffitype, w_obj)
        return self.lastval
class TestFromAppLevel(object):
    """Checks unwrapping of app-level objects into interp-level ffi values."""
    spaceconfig = dict(usemodules=('_rawffi',))

    def setup_class(cls):
        converter = DummyFromAppLevelConverter(cls.space)
        cls.from_app_level = staticmethod(converter.convert)

    def check(self, w_ffitype, w_obj, expected):
        # Both the value and its exact interp-level type must match.
        v = self.from_app_level(w_ffitype, w_obj)
        assert v == expected
        assert type(v) is type(expected)

    def test_int(self):
        self.check(app_types.sint, self.space.wrap(42), 42)
        # Out-of-range values wrap around like a C signed long.
        self.check(app_types.sint, self.space.wrap(sys.maxint+1), -sys.maxint-1)
        self.check(app_types.sint, self.space.wrap(sys.maxint*2), -2)

    def test_unsigned(self):
        space = self.space
        self.check(app_types.uint, space.wrap(42), r_uint(42))
        self.check(app_types.uint, space.wrap(-1), r_uint(sys.maxint*2 +1))
        self.check(app_types.uint, space.wrap(sys.maxint*3),
                   r_uint(sys.maxint - 2))
        self.check(app_types.ulong, space.wrap(sys.maxint+12),
                   r_uint(sys.maxint+12))
        self.check(app_types.ulong, space.wrap(sys.maxint*2+3), r_uint(1))

    def test_char(self):
        space = self.space
        # Chars are converted to their ordinal value.
        self.check(app_types.char, space.wrap('a'), ord('a'))
        self.check(app_types.unichar, space.wrap(u'\u1234'), 0x1234)

    def test_signed_longlong(self):
        space = self.space
        maxint32 = 2147483647  # we cannot really go above maxint on 64 bits
                               # (and we would not test anything, as there long
                               # is the same as long long)
        expected = maxint32+1
        if IS_32_BIT:
            expected = r_longlong(expected)
        self.check(app_types.slonglong, space.wrap(maxint32+1), expected)

    def test_unsigned_longlong(self):
        space = self.space
        maxint64 = 9223372036854775807  # maxint64+1 does not fit into a
                                        # longlong, but it does into a
                                        # ulonglong
        if IS_32_BIT:
            # internally, the type converter always casts to signed longlongs
            expected = r_longlong(-maxint64-1)
        else:
            # on 64 bit, ulonglong == uint (i.e., unsigned long in C terms)
            expected = r_uint(maxint64+1)
        self.check(app_types.ulonglong, space.wrap(maxint64+1), expected)

    def test_float_and_double(self):
        space = self.space
        self.check(app_types.float, space.wrap(12.34), r_singlefloat(12.34))
        self.check(app_types.double, space.wrap(12.34), 12.34)

    def test_pointer(self):
        # pointers are "unsigned" at applevel, but signed at interp-level (for
        # no good reason, at interp-level Signed or Unsigned makes no
        # difference for passing bits around)
        space = self.space
        self.check(app_types.void_p, space.wrap(42), 42)
        self.check(app_types.void_p, space.wrap(sys.maxint+1), -sys.maxint-1)
        #
        # typed pointers
        w_ptr_sint = descr_new_pointer(space, None, app_types.sint)
        self.check(w_ptr_sint, space.wrap(sys.maxint+1), -sys.maxint-1)

    def test__as_ffi_pointer_(self):
        # Objects exposing _as_ffi_pointer_ are converted through that hook.
        space = self.space
        w_MyPointerWrapper = space.appexec([], """():
            from _rawffi.alt import types
            class MyPointerWrapper(object):
                def __init__(self, value):
                    self.value = value
                def _as_ffi_pointer_(self, ffitype):
                    assert ffitype is types.void_p
                    return self.value
            return MyPointerWrapper
        """)
        w_obj = space.call_function(w_MyPointerWrapper, space.wrap(42))
        self.check(app_types.void_p, w_obj, 42)

    def test_strings(self):
        # first, try automatic conversion from applevel
        self.check(app_types.char_p, self.space.newbytes('foo'), 'foo')
        self.check(app_types.unichar_p, self.space.wrap(u'foo\u1234'), u'foo\u1234'.encode('utf8'))
        self.check(app_types.unichar_p, self.space.wrap('foo'), 'foo')
        # then, try to pass explicit pointers
        self.check(app_types.char_p, self.space.wrap(42), 42)
        self.check(app_types.unichar_p, self.space.wrap(42), 42)
class DummyToAppLevelConverter(ToAppLevelConverter):
    """Test double that feeds a preset interp-level value into the
    wrapping logic instead of reading it from a real ffi call."""

    def get_all(self, w_ffitype):
        return self.val

    # Every type-specific getter returns the stored value.
    get_signed = get_all
    get_unsigned = get_all
    get_pointer = get_all
    get_char = get_all
    get_unichar = get_all
    get_longlong = get_all
    get_char_p = get_all
    get_unichar_p = get_all
    get_float = get_all
    get_singlefloat = get_all
    get_unsigned_which_fits_into_a_signed = get_all

    def convert(self, w_ffitype, val):
        """Wrap the interp-level *val* as w_ffitype and return the app-level result."""
        self.val = val
        return self.do_and_wrap(w_ffitype)
class TestToAppLevel(object):
    """Checks interp-level values wrap back to the expected applevel objects."""
    spaceconfig = dict(usemodules=('_rawffi',))
    def setup_class(cls):
        # share one converter; convert(w_ffitype, val) wraps val per w_ffitype
        converter = DummyToAppLevelConverter(cls.space)
        cls.from_app_level = staticmethod(converter.convert)
    def check(self, w_ffitype, val, w_expected):
        """Assert wrapping *val* as *w_ffitype* equals *w_expected*."""
        w_v = self.from_app_level(w_ffitype, val)
        assert self.space.eq_w(w_v, w_expected)
    def test_int(self):
        self.check(app_types.sint, 42, self.space.wrap(42))
        self.check(app_types.sint, -sys.maxint-1, self.space.wrap(-sys.maxint-1))
    def test_uint(self):
        self.check(app_types.uint, 42, self.space.wrap(42))
        # r_uint carries the unsigned value sys.maxint+1 through unchanged
        self.check(app_types.uint, r_uint(sys.maxint+1), self.space.wrap(sys.maxint+1))
| mozillazg/pypy | pypy/module/_rawffi/alt/test/test_type_converter.py | test_type_converter.py | py | 6,464 | python | en | code | 430 | github-code | 36 |
29914164702 | #_*_ coding:utf-8 _*_
# import datetime
# time1=datetime.date(2019,1,29)
# time2=datetime.date(2019,7,12)
# days=(time2-time1).days
# seconds=(time2-time1).total_seconds()
# time3=time1+datetime.timedelta(30)
# print(time3)
'''
假设你有无限数量的邮票,面值分别为6角,7角,8角,请问你最大的不可支付邮资是多少元?
'''
# Stamp face values (jiao) and the per-denomination count bound.
a=6
b=7
c=8
t=50
# Enumerate payable amounts exactly as before: one entry per i,
# one per (i, j) and one per (i, j, k) -- keeps len(s) unchanged.
s=[]
for i in range(t+1):
    s1=a*i
    s.append(s1)
    for j in range(t+1):
        s2=a*i+b*j
        s.append(s2)
        for k in range(t+1):
            s3=a*i+b*j+c*k
            s.append(s3)
print(len(s))
s.sort()
# Deduplicate via a set (O(n log n)); the old loop scanned the growing
# 'news' list for every one of the ~135k entries (quadratic).
news = sorted(set(s))
print('组合生成的最大数%s'%news[-1])
# O(1) membership tests instead of an O(len(news)) list scan per number.
payable = set(news)
r = [i for i in range(6*t) if i not in payable]
print('组合不能生成的数字是%s'%r)
print(('不能生成的最大数字是%s'%r[-1]))
'''
冒泡排序
'''
def bubbleSort(li):
    """Sort ``li`` in place in ascending order and return it.

    Bubble sort with an early-exit flag: when a full pass performs no
    swap the list is already sorted and the remaining passes are skipped.
    """
    for i in range(len(li)-1):
        swapped = False
        # after pass i, the i largest items are already in place at the tail
        for j in range(len(li)-i-1):
            if li[j]>li[j+1]:
                li[j],li[j+1]=li[j+1],li[j]
                swapped = True
        if not swapped:
            break
    return li
li=[1,2,3,14,25,2,0,1,7]
print(bubbleSort(li))
| haitest/617_repository | runAll.py | runAll.py | py | 1,132 | python | en | code | 0 | github-code | 36 |
1253707592 | import time
from selenium import webdriver
# browser = webdriver.Firefox()
# browser.get('https://www.jd.com/')
# browser.execute_script('window.scrollTo(0, document.body.scrollHeight)')
# browser.execute_script('alert("123")')
# browser.close()
#
#
#
# print("=================================================")
bro = webdriver.Firefox()
bro.get('https://www.taobao.com')
# locate the search box by id (find_element_* family)
# NOTE(review): find_element_by_* was removed in Selenium 4 -- confirm the
# pinned selenium version still provides it.
input_text = bro.find_element_by_id('q')
# interact with the node: type the query ("苹果" = apple)
input_text.send_keys('苹果')
time.sleep(2)
# run a JS snippet (JS injection): scroll to the bottom of the page
bro.execute_script('window.scrollTo(0,document.body.scrollHeight)')
time.sleep(2)
btn = bro.find_element_by_css_selector('.btn-search')
btn.click()
time.sleep(3)
bro.quit()
18068483494 | # -*- coding:utf8 -*-
import tweepy
import os
import sys
import json
import time
import urllib2
import requests
"""
ref. http://kslee7746.tistory.com/entry/python-tweepy-%EC%82%AC%EC%9A%A9%ED%95%9C-%ED%8A%B8%EC%9C%84%ED%84%B0-%ED%81%AC%EB%A1%A4%EB%A7%81crawling
ref. https://proinlab.com/archives/1562
ref. http://kyeoneee.tistory.com/9
"""
reload(sys)
sys.setdefaultencoding('utf-8')
# fetch the current hot topics from the server; these drive the stream filter
hotTopics = json.loads(urllib2.urlopen("http://polatics.news/get_hot_topics").read())
track_list = [topic["topic"] for topic in hotTopics]
# request API authentication (consumer key/secret)
# NOTE(review): credentials are blank here -- presumably filled in before
# deployment; confirm they are not expected from the environment instead.
consumer_token = ""
consumer_secret = ""
auth = tweepy.OAuthHandler(consumer_token, consumer_secret)
# request the access token
access_token = ""
access_token_secret = ""
auth.set_access_token(access_token, access_token_secret)
# local append-mode log file, one per hour (see strftime format)
f = open("twitter_%s.txt"%(time.strftime("%H-%d-%m-%Y")), "a");
print (time.strftime("%H:%d:%m:%Y"))
# create the API client
api = tweepy.API(auth)
# batch state shared with StreamListener: count and buffered tweets
data_len = 0
buf = []
class StreamListener(tweepy.StreamListener):
    """Collects tweets that mention a hot topic and ships them to the
    server in batches of 1000 (uses module-level ``buf``/``data_len``/``f``)."""

    def on_data(self, data):
        """Handle one raw tweet (JSON string) from the stream.

        Extracts the fields of interest, buffers the tweet when it matches a
        tracked topic, and flushes the buffer once 1000 tweets are collected.
        Bug fix: the previous version returned *before* processing the tweet
        that triggered the flush, silently dropping it.
        """
        global data_len
        global track_list
        global buf
        json_data = json.loads(data)
        ret = {}
        ret["created_at"] = json_data["created_at"]
        ret["text"] = json_data["text"]
        ret["name"] = json_data["user"]["name"]
        ret["screen_name"] = json_data["user"]["screen_name"]
        # keep only the hot topics actually mentioned in the tweet text
        ret["topic"] = [ht for ht in track_list if ht in json_data["text"]]
        if len(ret["topic"]) > 0:
            buf.append(ret)
            f.write(data.encode("utf-8"))
            data_len = data_len + 1
        # flush a full batch to the server, then reset the buffer
        if data_len >= 1000:
            json_results = json.dumps(buf)
            post_data = {'twitter': json_results}
            res = requests.post("http://polatics.news/add_twitter", data=post_data)
            buf = []
            data_len = 0
            print("전송 " + res.text)

    def on_error(self, status_code):
        """Disconnect (return False) on HTTP 420 rate limiting."""
        if status_code == 420:
            return False
location = "%s,%s,%s" % ("35.95", "128.25", "1000km") # center of South Korea (lat, lon) and search radius
if __name__ == "__main__":
    # wrap the listener in a Stream and filter on the hot-topic keywords
    strmr = StreamListener()
    strmr = tweepy.Stream(auth=api.auth, listener=strmr)
    strmr.filter(track=track_list)
"""
keyword = "박근혜 OR 문재인" # 검색어
wfile = open(os.getcwd()+"/twitter.txt", mode='w')
cursor = tweepy.Cursor(api.search,
q=keyword,
since='2017-10-01', # 2015-01-01 이후에 작성된 트윗들로 가져옴
count=100, # 페이지당 반환할 트위터 수 최대 100
geocode=location,
include_entities=True)
for i, tweet in enumerate(cursor.items()):
print("{}: {}".format(i, tweet.text))
wfile.write(tweet.text + '\n')
wfile.close()
"""
| songjein/polatics | twitter_client.py | twitter_client.py | py | 2,878 | python | en | code | 7 | github-code | 36 |
def dijkstra(graph, start):
    """Compute shortest-path distances from ``start`` to every vertex.

    ``graph`` is an adjacency dict ``{vertex: {neighbor: edge_weight}}``.
    Returns ``{vertex: distance}``; unreachable vertices keep ``inf``.

    Bug fix: the previous version "marked" a processed vertex by overwriting
    its distance with infinity, which both destroyed the final results and
    let neighbors relax the vertex back to a finite value, so the loop never
    terminated.  A separate ``visited`` set fixes both problems.
    """
    # Initialize tentative distances
    distances = {vertex: float('inf') for vertex in graph}
    distances[start] = 0
    visited = set()
    while True:
        # Pick the unvisited vertex with the smallest tentative distance
        min_distance = float('inf')
        min_vertex = None
        for vertex, distance in distances.items():
            if vertex not in visited and distance < min_distance:
                min_distance = distance
                min_vertex = vertex
        # Every reachable vertex has been finalized
        if min_vertex is None:
            break
        visited.add(min_vertex)
        # Relax the edges leaving the chosen vertex
        for neighbor, weight in graph[min_vertex].items():
            distance = min_distance + weight
            if distance < distances[neighbor]:
                distances[neighbor] = distance
    return distances
# Пример графа в виде словаря смежности
graph = {
'A': {'B': 1, 'C': 4},
'B': {'A': 1, 'C': 2, 'D': 5},
'C': {'A': 4, 'B': 2, 'D': 1},
'D': {'B': 5, 'C': 1}
}
# Начальная вершина
start_vertex = 'A'
# Вызов функции Дейкстры
result = dijkstra(graph, start_vertex)
# Вывод результатов
print("Кратчайшие расстояния от вершины", start_vertex, "до всех остальных вершин:")
for vertex, distance in result.items():
print(f"{vertex}: {distance}")
| TatsianaPoto/yandex | Algorithm_complexity/search/dijkstra_arr.py | dijkstra_arr.py | py | 1,751 | python | ru | code | 0 | github-code | 36 |
17363789570 | '''
Задача 3. Клетки
В научной лаборатории выводят и тестируют новые виды клеток. Есть список из N этих клеток,
где элемент списка — это показатель эффективности, а индекс списка — это ранг клетки.
Учёные отбирают клетки по следующему принципу: если эффективность клетки меньше её ранга,
то эта клетка не подходит. Напишите программу, которая выводит на экран те элементы списка,
значения которых меньше их индекса.
Пример:
Кол-во клеток: 5
Эффективность 1 клетки: 3
Эффективность 2 клетки: 0
Эффективность 3 клетки: 6
Эффективность 4 клетки: 2
Эффективность 5 клетки: 10
Неподходящие значения: 0 2
'''
def user_information():
    """Prompt the user for the cell count and each cell's effectiveness.

    Returns a tuple ``(calls_list, number_of_calls)``: one int per cell,
    where list index corresponds to rank - 1.  (Prompts are in Russian.)
    """
    number_of_calls = int(input('Кол-во клеток: '))
    calls_list = []
    for index_call in range(1, number_of_calls + 1):
        calls_list.append(int(input(f'Эффективность {index_call} клетки: ')))
    return (calls_list, number_of_calls)
def handler(calls_list, number_of_calls):
    """Return the effectiveness values that are strictly smaller than their index."""
    ranked = enumerate(calls_list[:number_of_calls])
    return [value for position, value in ranked if value < position]
if __name__ == '__main__':
    # collect input, filter, and print the failing values ("unsuitable values:")
    print('Неподходящие значения: ', *handler(*user_information()))
| Pasha-lt/Skillbox-security | Python_basic/lesson_15/hw_15_03.py | hw_15_03.py | py | 2,093 | python | ru | code | 0 | github-code | 36 |
37635311990 | # Given an array of integers nums sorted in ascending order, find the starting and ending position of a given target value.
# If target is not found in the array, return [-1, -1].
# You must write an algorithm with O(log n) runtime complexity.
# Example 1:
# Input: nums = [5,7,7,8,8,10], target = 8
# Output: [3,4]
# Example 2:
# Input: nums = [5,7,7,8,8,10], target = 6
# Output: [-1,-1]
# Example 3:
# Input: nums = [], target = 0
# Output: [-1,-1]
# Constraints:
# 0 <= nums.length <= 105
# -109 <= nums[i] <= 109
# nums is a non-decreasing array.
# -109 <= target <= 109
class Solution:
    def searchRange(self, nums: List[int], target: int) -> List[int]:
        """Return ``[first, last]`` indices of ``target`` in sorted ``nums``,
        or ``[-1, -1]`` if absent.

        Uses the stdlib ``bisect`` binary searches (O(log n)) instead of two
        hand-rolled loops: ``bisect_left`` is the first index with
        ``nums[i] >= target`` and ``bisect_right - 1`` the last index with
        ``nums[i] <= target``; the range is non-empty iff target occurs.
        """
        from bisect import bisect_left, bisect_right
        left = bisect_left(nums, target)
        right = bisect_right(nums, target) - 1
        if left <= right:
            return [left, right]
        return [-1, -1]
29398421861 | import setuptools
from setuptools import find_namespace_packages
# the README becomes the PyPI long description
with open("README.md", "r") as fh:
    long_description = fh.read()
setuptools.setup(
    name="JarvisAI",
    version="3.9",
    author="Dipesh",
    author_email="dipeshpal17@gmail.com",
    description="JarvisAI is python library to build your own AI virtual assistant with natural language processing.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/Dipeshpal/Jarvis_AI",
    include_package_data=True,
    packages=find_namespace_packages(include=['JarvisAI.*', 'JarvisAI']),
    # NOTE(review): 'googlesearch-python' appears twice below (unpinned and
    # ==1.0.1) -- confirm which pin is intended.
    install_requires=['numpy', 'gtts==2.2.1', 'playsound==1.2.2',
                      'SpeechRecognition==3.8.1', 'pipwin==0.5.0', 'lxml==4.6.1', 'pyjokes',
                      'beautifulsoup4==4.9.3', 'wikipedia==1.4.0', 'auto_face_recognition', 'transformers==4.3.2',
                      'lazyme==0.0.23', 'librosa==0.8.0', "torch==1.7.1", "requests", "opencv-contrib-python==4.5.2.52",
                      "opencv-python==4.5.2.52", "cvzone==1.1.1", "pyttsx3", "googlesearch-python", "spacy",
                      "mediapipe==0.8.8", "googlesearch-python==1.0.1", "youtube-search-python==1.5.3", "shutup==0.1.3"],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.6',
    project_urls={
        'Documentation': 'https://github.com/Dipeshpal/Jarvis_AI',
        'Donate': 'https://www.buymeacoffee.com/dipeshpal',
        'Say Thanks!': 'https://youtube.com/techportofficial',
        'Source': 'https://github.com/Dipeshpal/Jarvis_AI',
    },
)
| MrVanHendrix/Beth.Ai | BETH_Ai/BETH_Ai/setup.py | setup.py | py | 1,722 | python | en | code | 0 | github-code | 36 |
31379873011 | __author__ = 'Vincent'
from scrapy.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors import LinkExtractor
from scrapy.selector import HtmlXPathSelector
from mediascrap.items import NewsItem
import datetime
from pymongo import MongoClient
class ChokomagSpider(CrawlSpider):
    """
    A spider crawls domains (in accordance with some rules we will define
    to collect all the pages that we wish to extract our LemondeItems instances
    from. Most of this crawling logic is provided by Scrapy in the CrawlSpider class, so we
    can extend this class when writing our first spider.
    """
    name = "chokomag"
    allowed_domains =["chokomag.com"]
    # category/tag listing pages used as crawl seeds
    start_urls =[
        "http://chokomag.com/beaute/cheveux/",
        "http://chokomag.com/tag/make-up/",
        "http://chokomag.com/beaute/box/",
        "http://chokomag.com/beaute/soins/",
        "http://chokomag.com/beaute/parfums/",
        "http://chokomag.com/mode/",
        "http://chokomag.com/tag/concours/",
        "http://chokomag.com/non-classe/"
    ]
    rules = [
        #site which should be saved
        Rule(
            LinkExtractor(
                allow = ['/(\d+|\w+)']),
            'parse_page')]
    def parse_page(self,response):
        """Extract the article text into a NewsItem; pages with no text yield nothing."""
        # NOTE(review): HtmlXPathSelector/.select() are long-deprecated in
        # Scrapy -- confirm the pinned scrapy version still supports them.
        hxs = HtmlXPathSelector(response)
        body = ''.join(hxs.select('//div[@class="entry-content clearfix"]/p//text()').extract()).strip()
        item = NewsItem()
        if len(body)> 0 :
            item['body'] = body
            item['url'] = response.url
            item['timeOfScrap'] = datetime.datetime.now()
            return item
        else :
            pass
| fisheatfish/mediascrap | mediascrap/spiders/chokomag.py | chokomag.py | py | 1,620 | python | en | code | 0 | github-code | 36 |
23966959472 | """
Data readers for remote sensing devices
=======================================
Written by Eliot Quon (eliot.quon@nrel.gov)
This is a collection of readers to be used with the NWTC datatools.wfip2
module for processing WFIP2 data downloaded from the A2e Data Archive
and Portal (DAP). No effort is made to standardize the names of the
dataframe columns and the original data headers are retained wherever
possible.
"""
import numpy as np
import pandas as pd
import codecs # for utf-8 handling
#
# Lidar data readers
#
def windcube_v1(fname,
        return_header=False,
        default_columns=None,
        default_altitudes=None,
        ):
    """WindCube v1
    Users: CU Boulder, ...
    The default columns and altitudes are used when there is no header
    in the file. Can optionally return a dictionary of lidar operating
    parameters parsed from the header.

    Returns a long-format dataframe with columns
    ['date_time', 'height', 'speed', 'direction'] (and the header dict
    when return_header=True).
    """
    scan_info = dict()
    # gives "UnicodeDecodeError: 'utf-8' codec can't decode byte ... in
    # position ...: invalid start byte" error
    #with open(fname,'r') as f:
    with open(fname,'r',encoding='utf-8',errors='ignore') as f:
        firstline = f.readline()
        if '=' in firstline:
            # we have a header
            Nheader = int(firstline.split('=')[-1])
            for _ in range(Nheader):
                line = f.readline()
                if '=' in line:
                    # store each "key=value" pair, coercing to int, then
                    # float, falling back to the raw string
                    param_value = line.strip().split('=')
                    try:
                        ival = int(param_value[1])
                        scan_info[param_value[0]] = ival # integer
                    except ValueError:
                        try:
                            fval = float(param_value[1])
                            scan_info[param_value[0]] = fval # float
                        except ValueError:
                            scan_info[param_value[0]] = param_value[1] # string
            # assume first column is "Date" which actuatlly corresponds to two
            # separate date and time columns
            columns = ['date','time'] + f.readline().split()[1:]
            altitudes = np.array([ float(h)
                for h in scan_info['Altitudes(m)'].strip().split('\t') ])
        else:
            # some files have no header, let's hope for the best...
            f.seek(0)
            columns = default_columns
            altitudes = default_altitudes
        df = pd.read_table(f,
                           delim_whitespace=True,
                           names=columns,
                           parse_dates=[['date', 'time']],
                           dayfirst=True)
    # unpivot the columns: one (date_time, height) row per um*/vm* pair
    um_vars = [ 'um'+str(i) for i in range(1,len(altitudes)+1) ]
    vm_vars = [ 'vm'+str(i) for i in range(1,len(altitudes)+1) ]
    um = pd.melt(df, id_vars=['date_time'], var_name='um_var', value_name='um', value_vars=um_vars)
    vm = pd.melt(df, id_vars=['date_time'], var_name='vm_var', value_name='vm', value_vars=vm_vars)
    um['height'] = um['um_var'].map(dict(zip(um_vars, altitudes)))
    vm['height'] = vm['vm_var'].map(dict(zip(vm_vars, altitudes)))
    newdf = pd.merge(um, vm, on=['date_time','height'])
    # calculate wind speed and direction (meteorological convention)
    newdf['speed'] = np.sqrt(newdf['um']**2 + newdf['vm']**2)
    newdf['direction'] = 270.0 - 180.0/np.pi*np.arctan2(newdf['vm'],newdf['um'])
    newdf.loc[newdf['direction'] > 360.0,'direction'] -= 360.0
    # return calculated columns only
    newdf = newdf[['date_time','height','speed','direction']]
    if return_header:
        return newdf, scan_info
    else:
        return newdf
#
# Radar data readers
#
def read_profiler_data_block(f,datatypes=['WINDS','RASS']):
    """Dependency for wind_profiler radar

    Read one consensus data block from an already-open file object *f* and
    return it as a dataframe (one 'date_time' column added from line 5).
    The default *datatypes* list is never mutated.
    """
    # Line 1 (may not be present for subsequent blocks within the same file
    if f.readline().strip() == '':
        f.readline() # Line 2: station name
    assert(f.readline().split()[0] in datatypes) # Line 3: WINDS, version
    f.readline() # Line 4: lat (N), long (W), elevation (m)
    Y,m,d,H,M,S,_ = f.readline().split() # Line 5: date
    date_time = pd.to_datetime('20{}{}{} {}{}{}'.format(Y,m,d,H,M,S))
    f.readline() # Line 6: consensus averaging time
    f.readline() # Line 7: beam info
    f.readline() # Line 8: beam info
    f.readline() # Line 9: beam info
    f.readline() # Line 10: beam info
    header = f.readline().split()
    # de-duplicate repeated column names as NAME.0, NAME.1, ...
    header = [ col + '.' + str(header[:i].count(col))
               if header.count(col) > 1
               else col
               for i,col in enumerate(header) ]
    block = []
    line = f.readline()
    # data rows run until the '$' terminator line (or EOF)
    while not line.strip()=='$' and not line=='':
        block.append(line.split())
        line = f.readline()
    df = pd.DataFrame(data=block,columns=header,dtype=float)
    df['date_time'] = date_time
    return df
def radar_profiler(fname,
                   modes=2,
                   check_na=['SPD','DIR'],
                   na_values=999999):
    """Wind Profiler radar with RASS
    Users: Earth Sciences Research Laboratory (ESRL)
    Assumed data format for consensus data format rev 5.1 based on
    provided reference for rev 4.1 from:
    https://a2e.energy.gov/data/wfip2/attach/915mhz-cns-winds-data-format.txt
    Set 'modes' to None to read all blocks in the file
    Additional data format reference:
    https://www.esrl.noaa.gov/psd/data/obs/formats/

    Values equal to *na_values* in the *check_na* columns (exact name, or
    'NAME.*' suffixed variants) are replaced with NaN.
    """
    dataframes = []
    with open(fname,'r') as f:
        if modes is not None:
            # read exactly 'modes' blocks
            for _ in range(modes):
                dataframes.append(read_profiler_data_block(f))
        else:
            # read blocks until the parser runs off the end of the file
            while True:
                try:
                    dataframes.append(read_profiler_data_block(f))
                except (IOError,IndexError):
                    break
    df = pd.concat(dataframes)
    if na_values is not None:
        # expand each requested column to its suffixed variants (SPD.0, ...)
        nalist = []
        for col in check_na:
            if col in df.columns:
                matches = [col]
            else:
                matches = [ c for c in df.columns if c.startswith(col+'.') ]
            if len(matches) > 0:
                nalist += matches
            else:
                print('Note: column '+col+'* not found')
        check_na = nalist
        # allow a scalar or an iterable of sentinel values
        if not hasattr(na_values,'__iter__'):
            na_values = [na_values]
        #print('Checking',check_na,'for',na_values)
        for val in na_values:
            #df.loc[df['SPD']==val,'SPD'] = np.nan # flag bad values
            #df.loc[df['DIR']==val,'DIR'] = np.nan # flag bad values
            for col in check_na:
                df.loc[df[col]==val,col] = np.nan # flag bad values
    return df
# aliases, for backward compatibility
ESRL_wind_profiler = radar_profiler
# NOTE: the bare string below is informational only (documents the WFIP 2
# instrument configuration); it is not attached to any object.
"""ESRL profiler configuration for WFIP 2 experiment:
* 'WINDS' output has 2 sets of returns (configuration modes) per file
* 'RASS' has only 1
WINDS(1) WINDS(2) RASS
consensus averaging time [min] 24.0 24.0 3.0
beams 3.0 3.0 1.0
range gates 44.0 61.0 25.0
coherant integrations 160.0 76.0 10.0
spectral averages 50.0 50.0 28.0
pulse width [ns] 417.0 708.0 417.0
inner pulse period [ms] 25.0 53.0 2.0
full-scale Doppler value [m/s] 20.5 20.3 409.6
delay to first gate [ns] 3792.0 4958.0 4000.0
"""
TTU_radar_profiler = radar_profiler
#
# Sodar data readers
#
# PCSodar data block format description: https://a2e.energy.gov/data/wfip2/attach/variables-in-datafile.pdf
# Column names for one PCSodar data row: 4 bulk fields + 8 fields for each
# of the w/v/u beams.
PCSodar_header = [
    'height_m','windspeed_ms','winddirection_deg','reliability',
    'w_speed_ms','w_reliability','w_count','w_stdev_ms','w_amplitude','w_noise','w_SNR','w_valid_count',
    'v_speed_ms','v_reliability','v_count','v_stdev_ms','v_amplitude','v_noise','v_SNR','v_valid_count',
    'u_speed_ms','u_reliability','u_count','u_stdev_ms','u_amplitude','u_noise','u_SNR','u_valid_count',
]
def ARL_wind_profiler(fname,
                      bad_speed_value=-99.9,
                      bad_direction_value=999,
                      range_gates=None,
                      columns=None):
    """ARL Wind Profiler
    Users: Air Resources Laboratory (ARL), ...

    Read each block within a file (in PCSodar format) as a separate
    dataframe, and then return a concatenated dataframe.

    Parameters
    ----------
    fname : path to the PCSodar-format file
    bad_speed_value, bad_direction_value : sentinel values flagged as NaN
    range_gates : list of expected gate heights [m]; one data row is read per
        gate and the heights are cross-checked against the file.  (This was
        previously an undefined module global, making the function crash with
        NameError; it is now an explicit, required argument.)
    columns : column names for each data row; defaults to the module-level
        PCSodar_header (the previous code referenced an undefined 'header').
    """
    if range_gates is None:
        raise ValueError("range_gates (list of expected gate heights) is required")
    if columns is None:
        columns = PCSodar_header
    dataframes = []
    Nh = len(range_gates)
    with open(fname,'r') as f:
        firstline = f.readline()
        while firstline != '':
            # header line: '"...",YYYY,MM,DD,"HH:MM...",...' (quotes stripped)
            _,year,month,day,time,_ = firstline.replace('"','').split(',')
            date_time = pd.to_datetime('{}{}{} {}'.format(year,month,day,time[:5])) # time format is "HH:MM"
            f.readline() # ignore sodar operating parameters
            block = []
            for _ in range(Nh):
                block.append(f.readline().strip().split(','))
            df = pd.DataFrame(data=block,columns=columns,dtype=float)
            # make sure we're always reading the number of rows we think we are
            assert(np.all(df['height_m'].values==range_gates))
            df['date_time'] = date_time
            df.loc[df['windspeed_ms']==bad_speed_value,'windspeed_ms'] = np.nan # flag bad values
            df.loc[df['winddirection_deg']==bad_direction_value,'winddirection_deg'] = np.nan # flag bad values
            dataframes.append(df)
            firstline = f.readline()
    return pd.concat(dataframes)
def scintec_profiler(fname):
    """Scintec MFAS Flat Array Sodar
    Reads files in the APRun file format:
    https://a2e.energy.gov/data/wfip2/attach/sodar-aprun-software-manual-1-27.pdf (p.20)
    Returned timestamps correspond to the BEGINNING of each measurement
    interval

    Returns a long dataframe (one row per height per interval) with a
    'datetime' column; sentinel values from the variable definitions are
    replaced with NaN.
    """
    dflist = []
    with open(fname,'r') as f:
        f.readline() # FORMAT-1
        dateline = f.readline() # YYYY-MM-DD HH:MM:SS file_count
        datetime0 = pd.to_datetime(' '.join(dateline.split()[:2]))
        f.readline() # type of instrument
        number_of = f.readline().split() # comment lines, variables, height levels
        Ncomments,Nvar,Nz = [ int(val) for val in number_of ]
        f.readline() # blank
        for _ in range(3): f.readline() # file information section
        for _ in range(Ncomments): f.readline()
        for _ in range(3): f.readline() # file type section
        assert(f.readline().strip() == 'Main Data')
        for _ in range(3): f.readline() # variable defintions section
        columns = []
        na_values = {}
        # one definition line per variable (+1 for the height column);
        # the last '#'-separated field is that variable's NA sentinel
        for _ in range(Nvar+1):
            defn = f.readline().strip().split('#') # e.g. "wind speed # speed # m/s # G1 # 0 # 99.99"
            col = defn[0].strip()
            columns.append(col)
            try:
                na_values[col] = float(defn[-1])
            except ValueError: pass
        for _ in range(3): f.readline() # beginning of data block
        firstread = True
        f.readline()
        # read profiles
        while True:
            timedata = f.readline().split() # [YYYY-MM-DD, day] [HH:MM:SS, end time of interval] [HH:MM:SS, interval duration]
            if len(timedata) < 3: break
            datetime_end = pd.to_datetime(timedata[0]+' '+timedata[1])
            duration = pd.to_timedelta(timedata[2])
            # shift the file's end-of-interval stamp back to the interval start
            datetime_start = datetime_end - duration
            if firstread:
                # sanity check: first profile should match the file header time
                assert(datetime_end == datetime0)
                firstread = False
            data = []
            f.readline() # skip column names
            for _ in range(Nz): data.append(f.readline().split())
            df = pd.DataFrame(data=data,columns=columns)
            df['datetime'] = datetime_start
            dflist.append(df)
            f.readline()
    df = pd.concat(dflist)
    df['height'] = pd.to_numeric(df['height'])
    for col,nan in na_values.items():
        df.loc[df[col]==nan,col] = np.nan # flag bad values
    return df
#
# Microwave radiometer data readers
#
def ESRL_radiometrics_mwr(fname,verbose=True):
    """NOAA/PSD Microwave Radiometer level 2 files
    https://a2e.energy.gov/data/wfip2/attach/level2-files-record-types.pdf
    Additional formatting are inferred...

    Returns a dict mapping record id (or, when a record-101 name is present,
    the record name) to a dataframe of that record type.
    """
    # first pass: collect the header ('Record') lines that open the file
    records = dict()
    with open(fname,'r') as f:
        for line in f:
            line = line.strip().split(',')
            if not line[0] == 'Record': break
            rec_id = int(line[2])
            records[rec_id] = line[3:]
    Nrecords = len(records.keys())
    if verbose: print(Nrecords, 'records', records.keys(), 'read')
    def record_header(record_id):
        # a record's column header is the header record of its decade
        # (e.g. records 41..49 use header record 40)
        header_id = record_id - record_id%10
        assert(header_id in records.keys())
        return ['datetime','id'] + records[header_id]
    # read entire file at once
    with open(fname,'r') as f:
        for _ in range(Nrecords): f.readline()
        #rawdata = [ line.strip().split(',')[1:] for line in f.readlines() ]
        rawdata = [ line.strip().rstrip(',').split(',')[1:] for line in f.readlines() ]
    if verbose: print(len(rawdata),'lines read')
    # sort data by record type (can't read all at once because each line
    # has a different length)
    data = dict()
    datanames = dict()
    for linesplit in rawdata:
        # split line format: datetime, record_number, record_data
        rec = int(linesplit[1])
        if rec == 99:
            # record 99 lines are free-form messages
            if verbose: print('[99] ',' '.join(linesplit[2:]))
        elif rec == 101:
            # record 101 maps a record id to a human-readable name
            datanames[int(linesplit[2])] = linesplit[3]
        else:
            try:
                data[rec].append(linesplit)
            except KeyError:
                data[rec] = [linesplit]
    if verbose: print(len(data.keys()), 'data sets', data.keys(), 'read')
    if verbose: print('data names:',datanames)
    for record_id in data.keys():
        if verbose: print('Processing',record_id,record_header(record_id))
        df = pd.DataFrame(data=data[record_id],
                          columns=record_header(record_id),
                          dtype=float)
        df['datetime'] = pd.to_datetime(df['datetime'])
        data[record_id] = df
    # replace numeric keys with the names announced by record 101
    for record_id, record_name in datanames.items():
        if verbose: print('Renaming record',record_id,' --> ',record_name)
        data[record_name] = data.pop(record_id)
    return data
#
# Ceilometer
#
def Vaisala_CL31(fname,verbose=True,zcol=8,unpack=True,
        status_col='Status',
        cloud_cols=['Height1','Height2','Height3']):
    """Vaisala CL31 ceilometer XLSX output processed with CL-VIEW software
    Assume we want heights in meters
    https://a2e.energy.gov/data/wfip2/attach/datafilexlsx-example.pdf

    With unpack=True returns (backscatter, clouds, status); otherwise the
    combined dataframe indexed by 'date_time'.  zcol is the 1-based column
    where the backscatter height columns begin.
    """
    if verbose: print('Loading '+fname+'...')
    xlsx = pd.read_excel(fname)
    # rows 2 and 3 of the sheet carry the two header rows
    header = xlsx.iloc[2].values
    header2 = xlsx.iloc[3].values
    header[0] = 'Date'
    header[1] = 'Time'
    if verbose:
        # note: first row of excel spreadsheet gets put into the header # (skipped row)
        print(xlsx.iloc[0,0]) # skipped row
        print(xlsx.iloc[1,0]) # skipped row
        print('Cloud height units:',header2[3:6])
        print('Backscatter height units:',header2[zcol-1])
        print(xlsx.iloc[-1,0]) # skipped row
    # from zcol on, the second header row holds the backscatter heights
    header[zcol-1:] = header2[zcol-1:]
    # now create a new dataframe without extra header information
    df = pd.DataFrame(data=xlsx.iloc[4:-1].values, columns=header)
    df = df.replace('/////', np.nan)
    # create timestamps
    # NOTE(review): pd.datetime was removed in pandas 1.x -- confirm the
    # pinned pandas version, or switch to datetime.datetime.combine.
    df['date_time'] = df[['Date','Time']].apply(lambda x: pd.datetime.combine(x[0].date(),x[1]), axis=1)
    df = df.set_index('date_time')
    df = df.drop(['Date','Time','Sig. Sum','Meters'],axis=1)
    # split up dataframe
    if unpack:
        status = df[status_col]
        clouds = df[cloud_cols]
        backscatter = df.drop([status_col]+cloud_cols, axis=1)
        return backscatter, clouds, status
    else:
        return df
| NWTC/datatools | remote_sensing.py | remote_sensing.py | py | 15,912 | python | en | code | 2 | github-code | 36 |
19031635136 | from Data import Data
class Delete:
    """Interactively delete a record after a y/n confirmation prompt."""
    def __init__(self):
        self.Data = Data()
    def run(self, arr):
        """Delete the record identified by ``arr[1]``.

        ``arr[1][0] == '@'`` means the record is addressed by name and must
        be resolved to an id; otherwise ``arr[1][1]`` already holds the id.
        """
        print(arr[1][1])
        if arr[1][0] == '@':
            num = Data.find_name_or_id_by_id_or_name(arr[1][1], 1)
        else:
            num = '#' + str(arr[1][1])
        z = self.Data.find(arr[1])
        print(z)
        print("Do you really want to delete " + str(z[1]) + ": " + str(z[2]) + " ?")
        print("Please confirm by 'y' or 'Y', or cancel by 'n' or 'N'.")
        value = input("> confirm >>>")
        if value == "y" or value == "Y":
            print(num)
            self.Data.delete(num)
        elif value not in ("n", "N"):
            # Bug fix: the old test (value != "n" or value != "N") was always
            # true, so this error printed even on a valid 'n'/'N' cancel.
            print("You have typed an invalid response. Please either confirm by 'y'/'Y', or cancel by 'n'/'N'.")
5618318828 | import cv2
import os
import argparse
def image_folder_to_video(folder_path, output_path):
    """Stitch every ``.jpg`` in *folder_path* into an mp4 at *output_path*.

    Images appear in sorted-filename order; each is held for 15 frames at
    30 fps, i.e. 0.5 s per image.  All images are assumed to have the
    dimensions of the first one -- TODO confirm.
    """
    # Get the list of image filenames
    filenames = [os.path.join(folder_path, f) for f in os.listdir(folder_path) if f.lower().endswith('.jpg')]
    filenames.sort() # Sort the filenames
    # Get the dimensions of the first image
    image = cv2.imread(filenames[0])
    height, width, _ = image.shape
    # Define the codec and create a video writer object
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter(output_path, fourcc, 30.0, (width, height))
    # Hold each image for 15 frames (0.5 s at 30 fps).
    # NOTE(review): an earlier comment claimed 0.2 s; the code writes 15
    # frames, which is 0.5 s -- confirm which duration is intended.
    for filename in filenames:
        image = cv2.imread(filename)
        for i in range(15):
            out.write(image)
    # Release the video writer object
    out.release()
def main():
    """CLI entry point: parse the input folder / output path and convert."""
    parser = argparse.ArgumentParser(description='Convert a folder of images to a video.')
    parser.add_argument('input_folder', metavar='input_folder', type=str,
                        help='The path to the folder containing the input images.')
    parser.add_argument('output_video', metavar='output_video', type=str,
                        help='The path to the output video file.')
    args = parser.parse_args()
    input_folder = args.input_folder
    output_video = args.output_video
    image_folder_to_video(input_folder, output_video)
if __name__ == "__main__":
    main()
| danfinlay/face-lapse | src/picstitch.py | picstitch.py | py | 1,418 | python | en | code | 0 | github-code | 36 |
11410582849 | #David Crespo, 2017
class Web:
    """Parse a raw HTTP request and resolve it to a local static file.

    Method names are Spanish: partir=split, peticion=request,
    direccion=path, tiene_extension=has-extension, comandos=commands
    (the request body).
    """
    def __init__(self, datos):
        # datos: raw HTTP request bytes as received from the socket
        self.frm = self.partir(datos)      # request split into CRLF lines
        self.pttn = self.get_peticion()    # e.g. ['GET', '/x', 'HTTP/1.1']
        self.mtd = self.pttn[0]            # HTTP method
        self.webfile = self.pttn[1]        # requested URL path
        self.direccion = None              # resolved local file path
        self.tiene_extension = False       # set by extension()
        self.cmds = self.get_comandos()    # request body (line after blank line)
        self.get_direccion()
    def partir(self, datos):
        """Decode the request bytes and split them into CRLF-separated lines."""
        datos = datos.decode('utf-8')
        return datos.split('\r\n')
    def get_peticion(self):
        """Return the request-line tokens (method, path, protocol)."""
        datos = self.frm[0]
        return datos.split(' ')
    def get_metodo(self):
        """Return the HTTP method of the request."""
        return self.mtd
    def not_found(self):
        """Point the resolved path at the 404 error page."""
        self.direccion = 'sistema/404.html'
    def get_direccion(self):
        """Resolve the requested path to a local file path (sets self.direccion)."""
        if self.webfile == '/':
            self.direccion = 'webs/index.html'
        else:
            try:
                ext = self.extension(self.webfile)
                if self.tiene_extension == False:
                    # no extension in the URL: assume an .html page
                    self.direccion = self.get_directorio()+self.webfile+'.'+ext
                else:
                    self.direccion = self.get_directorio()+self.webfile
                return self.direccion
            except:
                # unknown extensions make get_directorio() return None and the
                # concatenation raise TypeError, which lands here -> 404.
                # NOTE(review): bare except also hides unrelated errors.
                self.not_found()
                return self.direccion
    def get_directorio(self):
        """Map the requested file's extension to the directory serving it.

        Returns None for unknown extensions (callers treat that as 404).
        """
        ext = self.extension(self.webfile)
        sel_directorio = {
            'htm': 'webs',
            'html': 'webs',
            'css': 'css',
            'ico': 'imgs',
            'jpg': 'imgs',
            'jpeg': 'imgs',
            'pdf': 'docs',
            'js': 'scripts',
            'avi': 'media',
            'mp3': 'media',
            'zip': 'varios',
            'ogg': 'media',
            'ttf': 'fonts',
            'mp4': 'media',
            'mkv': 'media',
            'png': 'imgs',
            'rar': 'varios'}
        return sel_directorio.get(ext)
    def comandos(self):
        """Split the raw body into 'key=value' chunks separated by '&'."""
        return self.cmds.split('&')
    def ordenes(self):
        """Return the body as a list of [key, value] pairs."""
        lista = []
        com = []
        for i in self.comandos():
            com = i.split('=')
            lista += [com]
        return lista
    def get_comandos(self):
        """Return the line right after the blank line (the request body).

        Returns None when the request has no blank separator line.
        """
        a=0
        for i in self.frm:
            if i == '':
                #print(self.frm[a+1])
                return str(self.frm[a+1])
                break
            else:
                a = a + 1
    def metodo_post(self):
        """Debug helper: print the parsed POST key/value pairs."""
        print(self.ordenes())
    def es_texto(self):
        """True when the requested file's extension denotes text content."""
        ext = self.extension(self.webfile)
        sel_contenido = {
            'htm': True,
            'html': True,
            'css': True,
            'ico': False,
            'jpg': False,
            'jpeg': False,
            'pdf': False,
            'js': True,
            'avi': False,
            'mp3': False,
            'zip': False,
            'ogg': False,
            'ttf': False,
            'mp4': False,
            'mkv': False,
            'png': False,
            'rar': False}
        return sel_contenido.get(ext)
    def tipos_mime(self):
        """Return the MIME type for the resolved file's extension."""
        ext = self.extension(self.direccion)
        sel_tipo = {
            'htm': 'text/html',
            'html': 'text/html',
            'css': 'text/css',
            'ico': 'image/x-icon',
            'jpg': 'image/jpeg',
            'jpeg': 'image/jpeg',
            'pdf': 'application/pdf',
            'js': 'application/javascript',
            'avi': 'video/avi',
            'mp3': 'audio/mpeg3',
            'zip': 'application/zip',
            'ogg': 'application/ogg',
            'ttf': 'application/x-font-ttf',
            'mp4': 'video/mp4',
            'mkv': 'video/x-matroska',
            'png': 'image/png',
            'rar': 'application/x-rar-compressed'}
        return sel_tipo.get(ext)
    def extension(self, nombre):
        """Return *nombre*'s extension; default to 'html' when there is none.

        Side effect: sets self.tiene_extension.
        NOTE(review): 'a.tar.gz' yields 'tar'; rsplit('.', 1) would take the
        last segment -- confirm intended behavior for multi-dot names.
        """
        try:
            ext = nombre.split('.')
            self.tiene_extension = True
            return ext[1]
        except:
            # no '.' present: ext[1] raised IndexError
            self.tiene_extension = False
            return 'html'
    def web_head(self):
        """Build the HTTP 200 response header for the resolved file."""
        return 'HTTP/1.1 200 OK\r\nContent-Type: %s\r\n\r\n'%(self.tipos_mime())
| maskr/pyserver | htmlsrv.py | htmlsrv.py | py | 3,190 | python | es | code | 0 | github-code | 36 |
4551962390 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "stdrickforce" # Tengyuan Fan
# Email: <stdrickforce@gmail.com> <tfan@xingin.com>
# Definition for binary tree with next pointer.
# class TreeLinkNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# self.next = None
class Solution(object):
    def connect(self, root):
        """
        Populate each node's ``next`` pointer to its right neighbor on the
        same level of a *perfect* binary tree (LeetCode 116); rightmost
        nodes keep ``next = None``.  Uses O(1) extra space by walking each
        level through the ``next`` pointers built on the level above.
        (The previous version was an empty stub.)

        :type root: TreeLinkNode
        :rtype: nothing
        """
        leftmost = root
        # in a perfect tree, a node has a left child iff it has both children
        while leftmost is not None and leftmost.left is not None:
            head = leftmost
            while head is not None:
                # connect the two children of this node...
                head.left.next = head.right
                # ...and bridge to the next parent's left child, if any
                if head.next is not None:
                    head.right.next = head.next.left
                head = head.next
            leftmost = leftmost.left


if __name__ == '__main__':
    # Smoke-run with an empty tree (the old call passed no argument at all,
    # which raised a TypeError).
    Solution().connect(None)
| terencefan/leetcode | python/116.py | 116.py | py | 546 | python | en | code | 1 | github-code | 36 |
3337416724 | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 11 14:09:58 2019
@author: Neel Tiruviluamala
Description: More efficient way to merge datasets. Note: This code was written
by Dr. Tiruviluamala of the USC Math department.
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import time
# Load the raw match data and the per-match statistics exported from SQL.
df1 = pd.read_csv('Data.csv', encoding = "ISO-8859-1", low_memory=False)
df2 = pd.read_csv('Data_stats.csv', encoding = "ISO-8859-1", low_memory=False)
#Create a base date so that we can recast dates in df1 and df2 as integers
#that represent the number of days since the base date
base_date = pd.to_datetime('1-1-2000')
#Create the elapsed days (from the base date) field in df1 and df2
df1.Date = pd.to_datetime(df1.Date, format = '%d/%m/%Y')
df1['el_date'] = (df1.Date - base_date).dt.days
df2['el_date'] = (pd.to_datetime(df2.Changed_Date, format = '%m/%d/%Y') - base_date).dt.days
#Create a winner/loser pair key in both df1 and df2.
#Vectorised string concatenation replaces the original row-wise
#df.apply(lambda x: str(x.Winner)+str(x.Loser), axis=1) calls: same output,
#one pass in C instead of one Python call per row.
df1['Winner_Loser'] = df1.Winner.astype(str) + df1.Loser.astype(str)
df2['Winner_Loser'] = df2.wchangedname.astype(str) + df2.lchangedname.astype(str)
A = set(df1.Winner_Loser.unique())
B = set(df2.Winner_Loser.unique())
winner_loser_pairs = list(A.union(B))
#Create a dictionary mapping between winner_loser_pairs and multiples of 100,000
#This is done because there are ~7000 integers in the 'el_date' fields and we want
#to place the winner_loser_pairs at different orders of magnitude
stp = 100000
dict_vals = np.arange(0, stp*len(winner_loser_pairs),stp)
wl_dict = dict(zip(winner_loser_pairs, dict_vals))
#Map each row's pair to its integer key. Series.map is the vectorised
#equivalent of apply(lambda x: wl_dict[x.Winner_Loser], axis=1); every pair
#is guaranteed to be present in wl_dict because it was built from the union
#of both frames above.
df1['wl_number'] = df1.Winner_Loser.map(wl_dict)
df2['wl_number'] = df2.Winner_Loser.map(wl_dict)
#Create a merge number in df1 and df2 that adds the wl_number to the el_date number
#Note that the only way a merge number in df1 will be "close" to a merge number in df2 is if
#they correspond to the same row (and should thus be merged together)
df1['merge_number'] = df1.wl_number + df1.el_date
df2['merge_number'] = df2.wl_number + df2.el_date
df1 = df1.sort_values('merge_number')
df2 = df2.sort_values('merge_number')
#pd.merge_asof will merge two data frames based on "close" values. We have done
#all the work to create this close value (merge_number). The CSV round-trip
#below is kept from the original workflow; merge_asof is then run on the
#re-read frames with a tolerance of 25.
df1.to_csv('df1.csv')
df2.to_csv('df2.csv')
left = pd.read_csv('df1.csv', encoding = "ISO-8859-1", low_memory=False)
right = pd.read_csv('df2.csv', encoding = "ISO-8859-1", low_memory=False)
df_merged = pd.merge_asof(left, right, on = "merge_number", tolerance = 25)
#Keep only rows that actually matched a stats record, then drop the
#bookkeeping/duplicate columns before exporting.
df3 = df_merged.dropna(subset = ['wchangedname'])
df3 = df3.drop(columns = ['Unnamed: 0_x', 'el_date_x', 'Unnamed: 0_y', 'Source.Name', 'surface',
                          'Unnamed: 13', 'Unnamed: 14', 'wlastname', 'wchangedname', 'winner_name',
                          'llastname', 'lmiddlename', 'lfirstname', 'lchangedname', 'loser_name',
                          'best_of', 'Winner_Loser_y'])
df3.to_csv('Final Merged.csv')
| rajdua22/tennis_betting | Cleaning_Merging/Merge_Datasets.py | Merge_Datasets.py | py | 3,429 | python | en | code | 3 | github-code | 36 |
21252216686 |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class bgp(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module brocade-system-capabilities - based on the path /capabilities/bgp. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.
  """
  # Fixed attribute set: pyangbind bookkeeping slots plus the two
  # name-mangled leaf attributes ('__next_hop_mpls', '__redistribute_isis').
  __slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__next_hop_mpls','__redistribute_isis',)
  _yang_name = 'bgp'
  _rest_name = 'bgp'
  _pybind_generated_by = 'container'
  def __init__(self, *args, **kwargs):
    # Resolve the XPath helper: an explicit keyword argument wins, otherwise
    # inherit from the parent container, otherwise disable.
    path_helper_ = kwargs.pop("path_helper", None)
    if path_helper_ is False:
      self._path_helper = False
    elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
      self._path_helper = path_helper_
    elif hasattr(self, "_parent"):
      path_helper_ = getattr(self._parent, "_path_helper", False)
      self._path_helper = path_helper_
    else:
      self._path_helper = False
    # Resolve extension methods with the same precedence as the path helper.
    extmethods = kwargs.pop("extmethods", None)
    if extmethods is False:
      self._extmethods = False
    elif extmethods is not None and isinstance(extmethods, dict):
      self._extmethods = extmethods
    elif hasattr(self, "_parent"):
      extmethods = getattr(self._parent, "_extmethods", None)
      self._extmethods = extmethods
    else:
      self._extmethods = False
    # Instantiate the two boolean leaves (is_config=False) with their full
    # YANG metadata.
    self.__redistribute_isis = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="redistribute-isis", rest_name="redistribute-isis", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-system-capabilities', defining_module='brocade-system-capabilities', yang_type='boolean', is_config=False)
    self.__next_hop_mpls = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="next-hop-mpls", rest_name="next-hop-mpls", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-system-capabilities', defining_module='brocade-system-capabilities', yang_type='boolean', is_config=False)
    load = kwargs.pop("load", None)
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      # Copy-construction: the single positional argument must expose every
      # element of this container; only changed elements are copied over via
      # the corresponding _set_* methods.
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)
  def _path(self):
    # YANG data-tree path of this container, built from the parent chain.
    if hasattr(self, "_parent"):
      return self._parent._path()+[self._yang_name]
    else:
      return [u'capabilities', u'bgp']
  def _rest_path(self):
    # REST path of this container; an empty rest-name contributes no segment.
    if hasattr(self, "_parent"):
      if self._rest_name:
        return self._parent._rest_path()+[self._rest_name]
      else:
        return self._parent._rest_path()
    else:
      return [u'capabilities', u'bgp']
  def _get_next_hop_mpls(self):
    """
    Getter method for next_hop_mpls, mapped from YANG variable /capabilities/bgp/next_hop_mpls (boolean)
    """
    return self.__next_hop_mpls
  def _set_next_hop_mpls(self, v, load=False):
    """
    Setter method for next_hop_mpls, mapped from YANG variable /capabilities/bgp/next_hop_mpls (boolean)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_next_hop_mpls is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_next_hop_mpls() directly.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Coerce/validate the supplied value into the leaf's YANG type.
      t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="next-hop-mpls", rest_name="next-hop-mpls", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-system-capabilities', defining_module='brocade-system-capabilities', yang_type='boolean', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """next_hop_mpls must be of a type compatible with boolean""",
          'defined-type': "boolean",
          'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="next-hop-mpls", rest_name="next-hop-mpls", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-system-capabilities', defining_module='brocade-system-capabilities', yang_type='boolean', is_config=False)""",
        })
    self.__next_hop_mpls = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_next_hop_mpls(self):
    # Restore the leaf to its default (unset) state.
    self.__next_hop_mpls = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="next-hop-mpls", rest_name="next-hop-mpls", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-system-capabilities', defining_module='brocade-system-capabilities', yang_type='boolean', is_config=False)
  def _get_redistribute_isis(self):
    """
    Getter method for redistribute_isis, mapped from YANG variable /capabilities/bgp/redistribute_isis (boolean)
    """
    return self.__redistribute_isis
  def _set_redistribute_isis(self, v, load=False):
    """
    Setter method for redistribute_isis, mapped from YANG variable /capabilities/bgp/redistribute_isis (boolean)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_redistribute_isis is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_redistribute_isis() directly.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Coerce/validate the supplied value into the leaf's YANG type.
      t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="redistribute-isis", rest_name="redistribute-isis", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-system-capabilities', defining_module='brocade-system-capabilities', yang_type='boolean', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """redistribute_isis must be of a type compatible with boolean""",
          'defined-type': "boolean",
          'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="redistribute-isis", rest_name="redistribute-isis", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-system-capabilities', defining_module='brocade-system-capabilities', yang_type='boolean', is_config=False)""",
        })
    self.__redistribute_isis = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_redistribute_isis(self):
    # Restore the leaf to its default (unset) state.
    self.__redistribute_isis = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="redistribute-isis", rest_name="redistribute-isis", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-system-capabilities', defining_module='brocade-system-capabilities', yang_type='boolean', is_config=False)
  # The leaves are is_config=False, so only read-only properties are exposed
  # (no public setters are bound).
  next_hop_mpls = __builtin__.property(_get_next_hop_mpls)
  redistribute_isis = __builtin__.property(_get_redistribute_isis)
  _pyangbind_elements = {'next_hop_mpls': next_hop_mpls, 'redistribute_isis': redistribute_isis, }
| extremenetworks/pybind | pybind/slxos/v17s_1_02/capabilities/bgp/__init__.py | __init__.py | py | 8,050 | python | en | code | 0 | github-code | 36 |
37706438436 | import time
from tadek.core import utils
from tadek.engine.testresult import TestCaseResult
from tadek.engine.channels import register, TestResultChannel
from tadek.engine.testexec import *
__all__ = ["SummaryChannel", "COUNTER_N_TESTS", "COUNTER_TESTS_RUN",
           "COUNTER_CORE_DUMPS", "COUNTER_RUN_TIME"]
# Counters names
# Keys used in the internal counter dict and in the getSummary() output.
COUNTER_N_TESTS = "ntests"
COUNTER_TESTS_RUN = "testsRun"
COUNTER_CORE_DUMPS = "coreDumps"
COUNTER_RUN_TIME = "runTime"
# A list of status counters
# (status constants come from the testexec star import above; STATUS_NO_RUN
# is deliberately first -- getSummary() skips it by slicing [1:]).
STATUS_COUNTERS = (
    STATUS_NO_RUN,
    STATUS_NOT_COMPLETED,
    STATUS_PASSED,
    STATUS_FAILED,
    STATUS_ERROR
)
# Time stamp counters:
# internal-only keys holding time.time() wall-clock stamps.
_START_STAMP = "startStamp"
_STOP_STAMP = "stopStamp"
def _countCaseResults(result):
    '''
    Returns the total number of TestCaseResult nodes contained in the
    given test result tree (the node itself or any of its descendants).
    '''
    if isinstance(result, TestCaseResult):
        return 1
    return sum(_countCaseResults(child) for child in result.children)
class SummaryChannel(TestResultChannel):
    '''
    Channel used to gain summary of tests.
    '''

    def __init__(self, name, enabled=True, verbose=False, **params):
        TestResultChannel.__init__(self, name, enabled, verbose, **params)
        # Counter dict; (re)populated in start() and updated per test.
        self._counters = {}

    def start(self, result):
        '''
        Initializes all the channel counters.
        '''
        TestResultChannel.start(self, result)
        now = time.time()
        # Zero every status bucket, then add the run-wide bookkeeping keys.
        self._counters = dict.fromkeys(STATUS_COUNTERS, 0)
        self._counters.update({
            _START_STAMP: now,
            _STOP_STAMP: now,
            COUNTER_N_TESTS: _countCaseResults(result),
            COUNTER_TESTS_RUN: 0,
            COUNTER_CORE_DUMPS: 0,
        })

    def startTest(self, result, device):
        '''
        Processes a test start execution for the summary channel.
        '''
        TestResultChannel.startTest(self, result, device)
        if not isinstance(result, TestCaseResult):
            return
        # A freshly started case counts as not-completed until stopTest().
        self._counters[STATUS_NOT_COMPLETED] += 1
        self._counters[COUNTER_TESTS_RUN] += 1

    def stopTest(self, result, device):
        '''
        Processes a test stop execution for the summary channel.
        '''
        TestResultChannel.stopTest(self, result, device)
        if not isinstance(result, TestCaseResult):
            return
        # Move the case from not-completed into its final status bucket.
        self._counters[STATUS_NOT_COMPLETED] -= 1
        self._counters[device.status] += 1
        self._counters[_STOP_STAMP] = time.time()
        if device.cores:
            self._counters[COUNTER_CORE_DUMPS] += len(device.cores)

    def getSummary(self):
        '''
        Gets a summary of test results.
        '''
        elapsed = self._counters[_STOP_STAMP] - self._counters[_START_STAMP]
        # Copy all status counters except STATUS_NO_RUN (index 0).
        summary = {status: self._counters[status] for status in STATUS_COUNTERS[1:]}
        summary[COUNTER_RUN_TIME] = utils.runTimeToString(elapsed)
        summary[COUNTER_N_TESTS] = self._counters[COUNTER_N_TESTS]
        summary[COUNTER_TESTS_RUN] = self._counters[COUNTER_TESTS_RUN]
        summary[COUNTER_CORE_DUMPS] = self._counters[COUNTER_CORE_DUMPS]
        return summary
# Register this channel implementation with the channels module.
register(SummaryChannel)
| tadek-project/tadek-common | tadek/engine/channels/summarychannel.py | summarychannel.py | py | 3,192 | python | en | code | 2 | github-code | 36 |
8194426823 | import ast
import os, glob
# Module-level verbosity switch; set from RunFormat(_bVerbose=...) and read
# by the formatting helpers.
_bBeVerbose = False
def _InsertLine(sFilename: str, index: int, sContent: str):
"""Insert string content at line index into source file f
Args:
sFilename (str): File to be modified
index (int): Line number to be inserted at
sContent (str): Line to be inserted
"""
with open(sFilename, "r") as f:
lsContents = f.readlines()
# EndWith
lsContents.insert(index, sContent)
with open(sFilename, "w") as f:
lsContents = "".join(lsContents)
f.write(lsContents)
# EndWith
# EndDef
def _CountLeadingBlanks(s: str):
"""Calculate number of leading blanks
Args:
s (str): Input string
Returns:
int: Number of leading blanks
"""
return (len(s) - len(s.lstrip())) if s != "\n" else 0
# EndDef
def _PermutateBlockEnd(_sBlockId):
sBlockID_lower = f"{_sBlockId[0:1]}".lower() + _sBlockId[1:]
sBlockID_upper = f"{_sBlockId[0:1]}".upper() + _sBlockId[1:]
sComment = f"# End{sBlockID_upper}"
lsAlternatives = [
f"# end{sBlockID_lower}",
f"# end{sBlockID_upper}",
f"# end {sBlockID_lower}",
f"# end {sBlockID_upper}",
f"# End{sBlockID_lower}",
f"# End{sBlockID_upper}",
f"# End {sBlockID_lower}",
f"# End {sBlockID_upper}",
]
return {"_sComment": sComment, "_lsAlternatives": lsAlternatives}
# EndDef
class CFormatCodeBlocksAnalyzer(ast.NodeVisitor):
    """Code analyzer class derived from ast.Nodevisitor checking for
    Catharsys Coding Guidelines infractions
    """

    def __init__(self, _sFilename):
        with open(_sFilename, "r") as source:
            # Raw file lines, kept for the indentation look-ahead scan
            # performed in _VisitGeneric.
            self.lsCode = source.readlines()
        # EndWith
        self.sFilename = _sFilename
        # break switch to stop traversing the AST, since adding a line requires reloading
        self.bContinue = True
    # EndDef

    """ Per default the visitor functions for the nodes are ``'visit_'`` +
    class name of the node. So a `TryFinally` node visit function would
    be `visit_TryFinally`. This behavior can be changed by overriding
    the `visit` method. If no visitor function exists for a node
    (return value `None`) the `generic_visit` visitor is used instead.
    see https://docs.python.org/3/library/ast.html#node-classes
    """

    # One visitor per block-forming statement; each checks for (and, if
    # missing, inserts) the matching "# End..." comment, then keeps
    # traversing into the node's children.
    def visit_If(self, _xNode):
        self._VisitGeneric(_xNode, **_PermutateBlockEnd("if"))
        self.generic_visit(_xNode)
    # EndDef

    def visit_For(self, _xNode):
        self._VisitGeneric(_xNode, **_PermutateBlockEnd("for"))
        self.generic_visit(_xNode)
    # EndDef

    def visit_ClassDef(self, _xNode):
        self._VisitGeneric(_xNode, **_PermutateBlockEnd("class"))
        self.generic_visit(_xNode)
    # EndDef

    def visit_FunctionDef(self, _xNode):
        self._VisitGeneric(_xNode, **_PermutateBlockEnd("def"))
        self.generic_visit(_xNode)
    # EndDef

    def visit_While(self, _xNode):
        self._VisitGeneric(_xNode, **_PermutateBlockEnd("while"))
        self.generic_visit(_xNode)
    # EndDef

    def visit_Try(self, _xNode):
        self._VisitGeneric(_xNode, **_PermutateBlockEnd("try"))
        self.generic_visit(_xNode)
    # EndDef

    def visit_With(self, _xNode):
        self._VisitGeneric(_xNode, **_PermutateBlockEnd("with"))
        self.generic_visit(_xNode)
    # EndDef

    def _VisitGeneric(self, _xNode, _sComment: str, _lsAlternatives=None):
        """Generic visit functionality
        Args:
            node (ast.node): Node being visited
            commentstring (str): String that is expected to show up at the end of
                the code belonging to the node.
            altstrings (List of strings): Alternatives to commentstring that are also acceptable.
                Defaults to None.
        """
        if _lsAlternatives is None:
            lsTargets = [_sComment]
        else:
            # NOTE: appends in place, so the caller's list gains _sComment.
            _lsAlternatives.append(_sComment)
            lsTargets = _lsAlternatives
        # EndIf
        if self.bContinue:
            # skip over potential # Endwhatever comments, till the correct indentation level is reached or EOF
            # _xNode.end_lineno is 1-based, so used as an index into lsCode it
            # already refers to the first line AFTER the node's body.
            iCurrentInvestionLineNo = _xNode.end_lineno
            # Fallback insertion point in case the scan runs to EOF.
            iEndLineNo4Insertion = iCurrentInvestionLineNo
            iMaxLineCnt = len(self.lsCode)
            bExpectedStartFound = False
            while iCurrentInvestionLineNo < iMaxLineCnt:
                sCurrentLine = self.lsCode[iCurrentInvestionLineNo]
                iLeadingBlanks = _CountLeadingBlanks(sCurrentLine)
                if len(sCurrentLine.strip()) > 0 and iLeadingBlanks <= _xNode.col_offset:
                    # First non-blank line at (or left of) the node's own
                    # indentation: the end comment must appear exactly here.
                    if len(sCurrentLine) > _xNode.col_offset:
                        bExpectedStartFound = any(
                            [sCurrentLine[_xNode.col_offset :].startswith(sComment) for sComment in lsTargets]
                        )
                    # EndIf
                    break
                # EndIf
                iCurrentInvestionLineNo += 1
                if iLeadingBlanks > _xNode.col_offset:
                    # Deeper-indented line: keep moving the insertion point down.
                    iEndLineNo4Insertion = iCurrentInvestionLineNo
                # EndIf
            # EndWhile
            if not bExpectedStartFound:
                global _bBeVerbose
                if _bBeVerbose:
                    # Endwhatever not found at the correct indent level
                    if hasattr(_xNode, "name"):
                        print(f"{_sComment} in line {_xNode.end_lineno} missing for {_xNode.name}")
                    else:
                        print(f"{_sComment} in line {_xNode.end_lineno} missing")
                    # endif
                # EndIf
                # Insert the canonical comment at the node's indentation and
                # stop this traversal: the file changed on disk, so it must be
                # re-read and re-parsed before any further fixes.
                sInsertion = " " * _xNode.col_offset + f"{_sComment}\n"
                _InsertLine(self.sFilename, iEndLineNo4Insertion, sInsertion)
                self.bContinue = False
            # EndIf
        # EndIf
    # EndDef
# EndClass
def _FormatCodeBlocks(_sFilename: str):
    """Insert missing '# End...' block comments into a Python file in place.

    Repeatedly parses *_sFilename* and lets CFormatCodeBlocksAnalyzer fix at
    most one missing block-end comment per pass: an insertion shifts all
    later line numbers, so the file must be re-read and re-parsed after each
    fix. Terminates once a full pass finds nothing left to change.

    Args:
        _sFilename (str): Path of the Python source file to format.

    Raises:
        SyntaxError: If the file is not valid Python.
    """
    global _bBeVerbose
    if _bBeVerbose:
        print(f"opening {_sFilename} to analyse FormatCodeBlocks")
    # EndIf
    # The original implementation also read and parsed the file once before
    # the loop and discarded the result; that redundant pass was removed.
    bClean = False
    while not bClean:
        # Re-read and re-parse on every pass, since the previous visit may
        # have modified the file on disk.
        with open(_sFilename, "r") as source:
            xTree = ast.parse(source.read())
        # EndWith
        xAnalyzer = CFormatCodeBlocksAnalyzer(_sFilename)
        xAnalyzer.visit(xTree)
        # bContinue stays True when the pass completed without inserting
        # anything, i.e. the file is fully annotated.
        bClean = xAnalyzer.bContinue
    # EndWhile
# EndDef
####################################################################
def RunFormat(
    *,
    _bCodeBlocks=False,
    _bVerbose=False,
    _lsFilename=None,
    _lsFolder=None,
    _bFolderRecursive=False,
):
    """Collect Python files from the given names/folders and apply the
    requested formatting actions to each.

    Args:
        _bCodeBlocks (bool): Run the block-end-comment formatter; at least
            one action flag must be set.
        _bVerbose (bool): Enable diagnostic printing (also sets the
            module-level _bBeVerbose flag).
        _lsFilename (list|None): Explicit file names; entries that are not
            existing files are silently skipped.
        _lsFolder (list|None): Folders whose '*.py' files are processed.
        _bFolderRecursive (bool): Walk folders recursively.

    Raises:
        RuntimeError: If no action flag is set.
        ValueError: If _lsFilename or _lsFolder is given but is not a list.
    """
    if not _bCodeBlocks:
        raise RuntimeError("RunFormat expects at least one action [--code-blocks, ]")
    # EndIf
    global _bBeVerbose
    _bBeVerbose = _bVerbose
    lsResultingFiles = []
    if _lsFilename is not None:
        if not isinstance(_lsFilename, list):
            raise ValueError("RunFormat expected a list of filenames as input")
        # EndIf
        lsResultingFiles.extend(
            os.path.realpath(sName) for sName in _lsFilename if os.path.isfile(sName)
        )
    # EndIf
    if _lsFolder is not None:
        if not isinstance(_lsFolder, list):
            raise ValueError("RunFormat expected a list of folder as input")
        # EndIf
        for sFolder in _lsFolder:
            if _bFolderRecursive:
                lsDirFiles = [
                    os.path.realpath(os.path.join(sRoot, sName))
                    for sRoot, lsDirs, lsNames in os.walk(sFolder)
                    for sName in lsNames
                    if sName.endswith(".py")
                ]
            else:
                lsDirFiles = [os.path.realpath(sGlobbed) for sGlobbed in glob.glob(sFolder + "/*.py")]
            # EndIf
            lsResultingFiles.extend(lsDirFiles)
        # EndFor
    # EndIf
    if _bVerbose:
        print("filenames:")
        for sName in lsResultingFiles:
            print(sName)
        # EndFor
    # EndIf
    for sName in lsResultingFiles:
        if _bCodeBlocks:
            _FormatCodeBlocks(sName)
        # EndIf
    # EndFor
# enddef
# ------------------------------------------------------------------------------------------------------------
if __name__ == "__main__":
    import sys
    # filepath = "D:/TOOLS/common/catharsys/image-render-setup-develop/src/catharsys/setup/cmd/devl_code_format.py"
    # CLI usage: python code_format_impl.py <file.py>
    # Silently does nothing when no argument is given or the path is not a file.
    if len(sys.argv) >= 2:
        filepath = sys.argv[1]
        if os.path.isfile(filepath):
            _FormatCodeBlocks(filepath)
        # EndIf
    # EndIf
# EndIf
| boschresearch/image-render-setup | src/catharsys/setup/cmd/code_format_impl.py | code_format_impl.py | py | 9,488 | python | en | code | 4 | github-code | 36 |
75096299625 | import requests
from bs4 import BeautifulSoup
import re
def loastone_login():
    """Print the URL of the Lodestone account login page."""
    sLoginUrl = 'http://na.finalfantasyxiv.com/lodestone/account/login/'
    print(sLoginUrl)
#Get a page from the Loadstone
# returns a BeautifulSoup object
def get_loadstone_page(url,session_id):
    """Download a Lodestone page using the session cookie and parse it.

    Args:
        url (str): Absolute Lodestone URL to fetch.
        session_id (str): Value of the ``ldst_sess`` session cookie.

    Returns:
        BeautifulSoup: Parsed document (html.parser backend).

    Raises:
        Exception: If the HTTP response status is not 200.
    """
    #Time format used for cookies
    #import time
    #time.strftime('%a, %d-%b-%Y %H:%M:%S %Z')
    #ldst_is_support_browser=1, ldst_touchstone=1, ldst_bypass_browser=1", expires=session_expiration
    # NOTE(review): requests treats every key of a plain cookies dict as a
    # cookie NAME, so "domain" and "path" are sent as literal cookies here
    # rather than as attributes of ldst_sess -- confirm this is intended.
    cookies = dict(ldst_sess=session_id,domain='finalfantasyxiv.com', path='/')
    raw_page = requests.get(url, cookies=cookies)
    if(raw_page.status_code != 200):
        raise Exception("Unable to download web page!")
    return BeautifulSoup(raw_page.text,'html.parser')
#Each item has a separate detail page that must be loaded to determine if it's HQ or not
def is_item_hq(raw_item,session_id):
    """Return True if the given item row refers to an HQ item.

    Each item has a separate detail (tooltip) page that must be loaded to
    determine whether it is HQ, so this issues one extra HTTP request.

    Args:
        raw_item: BeautifulSoup tag for the item's table row.
        session_id (str): ``ldst_sess`` cookie value.

    Returns:
        bool: True when the tooltip page contains the HQ icon image.
    """
    # BUG FIX: the original referenced the undefined name `item` here (the
    # parameter is `raw_item`), which raised NameError on every call.
    tooltip_url = 'http://na.finalfantasyxiv.com/' + raw_item.find('div', attrs={"class": 'item_txt'})['data-lazy_load_url']
    tooltip_page = get_loadstone_page(tooltip_url,session_id)
    return bool(tooltip_page.find("img", src = re.compile('http://img\.finalfantasyxiv\.com/lds/pc/global/images/common/ic/hq.png.*')))
#Debug function to write some data to 'test.html'
def write_data(data):
    """Debug helper: dump str(data) into 'test.html' in the working dir."""
    with open('test.html', 'w') as out_file:
        out_file.write(str(data))
#Debug function to write a pretty parsed version of a Loadstone page
def write_loadstone_page(url, session_id):
    """Debug helper: fetch a Lodestone page and dump its prettified
    UTF-8-encoded HTML via write_data."""
    write_data(get_loadstone_page(url, session_id).prettify().encode('utf8'))
#Use this to convert the provided items into something useful
def list_items_table(items):
    """Render a list of item dicts as an HTML table.

    Args:
        items: Iterable of dicts, each providing the keys ``image``,
            ``name``, ``quantity``, ``location`` and ``sub_location``.

    Returns:
        str: HTML ``<table>`` markup with one row per item.
    """
    item_row_format='<tr><td><img src="{image}"></img></td><td>{name}</td><td>{quantity}</td><td>{location}</td><td>{sub_location}</td></tr>\n'
    # Build the rows as a list and join once: the original concatenated with
    # += in a loop, which is quadratic in the number of items.
    rows = [item_row_format.format(**i) for i in items]
    return '<table>\n' + ''.join(rows) + '</table>\n'
#Get all items in the Free company chest (does not get number of crystals or gil)
#Does not handle HQ Items yet
def get_fc_items(fc_id,session_id):
    """Scrape every item stored in a Free Company chest.

    Does not report crystal or gil counts, and does not flag HQ items.

    Args:
        fc_id: Lodestone ID of the free company.
        session_id (str): ``ldst_sess`` cookie value for an account with
            access to the chest.

    Returns:
        list[dict]: One dict per item with keys ``name``, ``quantity``,
        ``image``, ``location`` and ``sub_location``.
    """
    url = 'http://na.finalfantasyxiv.com/lodestone/freecompany/'+str(fc_id)+'/chest/'
    soup_page = get_loadstone_page(url,session_id)
    #Get all items
    raw_items=soup_page.find_all("tr", attrs={"data-default_sort": True})
    #Parse the items
    items=[]
    for item in raw_items:
        tmp = {}
        tmp['name'] = item.find("h2", attrs={"class": 'db-tooltip__item__name'}).text.strip()
        tmp['quantity'] = int(item['data-stack'])
        tmp['image'] = item.find("img")['src']
        tmp['location'] = 'Company Chest'
        # sub_location is the id attribute of the enclosing <tbody> element.
        tmp['sub_location'] = item.find_parent('tbody')['id']
        items.append(tmp)
    return items
#Get all items in a retainers inventory (does not get number of crystals or gil)
#Does not handle HQ Items yet
def get_retainer_items(char_id,retainer_id,session_id):
    """Scrape a retainer's inventory (baggage) from the Lodestone.

    Does not report crystal or gil counts, and does not flag HQ items.

    Args:
        char_id: Lodestone ID of the owning character.
        retainer_id (str): Lodestone ID of the retainer.
        session_id (str): ``ldst_sess`` cookie value.

    Returns:
        list[dict]: One dict per item with keys ``name``, ``quantity``,
        ``image``, ``location`` (``'Retainer: <name>'``) and
        ``sub_location`` (always ``'Inventory'``).
    """
    url = 'http://na.finalfantasyxiv.com/lodestone/character/'+str(char_id)+'/retainer/'+retainer_id+'/baggage/'
    soup_page = get_loadstone_page(url,session_id)
    #Get retainers name
    retainer_name = soup_page.find("div", attrs={"class": 'retainer--name'}).p.text.strip()
    #Get all items
    raw_items=soup_page.find_all("tr", attrs={"data-default_sort": True})
    #Parse the items
    items=[]
    for item in raw_items:
        #if(is_item_hq(item,session_id)):
        #print("HQ")
        tmp = {}
        tmp['name'] = item.find("a", attrs={"class": 'highlight'}).text.strip()
        tmp['quantity'] = int(item['data-stack'])
        tmp['image'] = item.find("img")['src']
        tmp['location'] = 'Retainer: ' + retainer_name
        tmp['sub_location'] = 'Inventory'
        items.append(tmp)
    return items
#Get all items a retainer is selling (does not get number of crystals or gil)
#HQ Item handling is suspect
#Note: This may return already sold items:
# sale_inventory is supposed to filter those out, but I din't think it's working correctly
def get_retainer_selling(char_id,retainer_id,session_id):
    """Scrape the items a retainer currently has listed on the market.

    Does not report crystal or gil counts. HQ status is derived from the
    presence of the HQ icon in the row markup. May include already-sold
    items (the ``active`` filter below is not reliable).

    Args:
        char_id: Lodestone ID of the owning character.
        retainer_id (str): Lodestone ID of the retainer.
        session_id (str): ``ldst_sess`` cookie value.

    Returns:
        list[dict]: One dict per listing with keys ``name``, ``quantity``,
        ``image``, ``location``, ``sub_location`` (always ``'Selling'``)
        and ``is_hq``.
    """
    url = 'http://na.finalfantasyxiv.com/lodestone/character/'+str(char_id)+'/retainer/'+retainer_id+'/market/'
    soup_page = get_loadstone_page(url,session_id)
    #Get retainers name
    retainer_name = soup_page.find("div", attrs={"class": 'retainer--name'}).p.text.strip()
    #Get all items
    sale_inventory=soup_page.find("div", attrs={"class": 'active'}).find('tbody')
    #If no items, just return an empty set
    if not sale_inventory:
        return []
    raw_items=sale_inventory.find_all("tr")
    #Parse the items
    items=[]
    for item in raw_items:
        tmp = {}
        tmp['name'] = item.find("a", attrs={"class": 'highlight'}).text.strip()
        tmp['quantity'] = int(item.find("td", attrs={"class": 'even'}).text.strip())
        tmp['image'] = item.find("img")['src']
        tmp['location'] = 'Retainer: ' + retainer_name
        tmp['sub_location'] = 'Selling'
        # HQ if the row contains the HQ icon image.
        tmp['is_hq'] = bool(item.find("img", src = re.compile('http://img\.finalfantasyxiv\.com/lds/pc/global/images/common/ic/hq.png.*')))
        items.append(tmp)
    return items
| EmperorArthur/Loadstone_Parser | parse_loadstone.py | parse_loadstone.py | py | 5,272 | python | en | code | 0 | github-code | 36 |
11484721845 | from kubecepodvs.sumo.mapmessage.trafficmap import TrafficMap
class SumotrInputHandle:
def __init__(self, filename, map_: TrafficMap):
self._map = map_
# 这里去掉了traffic log相关的属性
with open(str(filename), 'r') as file:
time = 0.0
time_step_flag = True
# edge
edge_id = ''
# lane
lane_id = ''
# vehicle id
vehicle_id = ''
# pos
pos = 0.0
# speed
speed = 0.0
while True:
sline = str(file.readline(1024))
if not sline:
break
if sline.find('timestep') != -1 and time_step_flag:
vec = sline.split('"')
time = float(vec[1])
time_step_flag = False
elif sline.find('timestep') != -1:
time_step_flag = True
elif sline.find('edge id') != -1:
vec = sline.split('"')
edge_id = vec[1]
elif sline.find('lane id') != -1:
vec = sline.split('"')
lane_id = vec[1]
elif sline.find('vehicle id') != -1:
vec = sline.split('"')
vehicle_id = vec[1]
pos = float(vec[3])
speed = float(vec[5])
# 这里去掉了vehicle status和traffic log相关的代码
| LeyNmania/kubecepodvs | kubecepodvs/sumo/io/sumotrinputhandle.py | sumotrinputhandle.py | py | 1,540 | python | en | code | 0 | github-code | 36 |
71673331943 | import pandas as pd
import datetime as dt
from kucoincli.client import Client
def test_lending_liquidity(quote='USDT'):
"""Obtain max point-in-time liquidity for lending markets in USDT terms"""
client = Client()
l = client.symbols(marginable=True).baseCurrency
liq = {}
for curr in l:
try:
df = client.lending_rate(curr)
stats = client.get_stats(curr + '-' + quote)
max_borrow = (((stats.buy + stats.sell) / 2) * df['size'].sum())
liq[curr] = max_borrow
except:
pass
return pd.Series(liq).sort_values(ascending=False)
def test_trading_liquidity(lookback=90, interval='1day'):
"""Calculate mean turnover for marginable currencies in `interval` granularity over `lookback` days"""
client = Client()
l = client.symbols(marginable=True).index
liq = {}
start = dt.datetime.now() - dt.timedelta(days=lookback)
for curr in l:
mean_vol = client.ohlcv(
tickers=curr,
interval=interval,
start=start
).turnover.mean()
liq[curr] = mean_vol
return pd.Series(liq).sort_values(ascending=False)
def get_csv_data(path):
"""Reads in CSV file exported from SQL db"""
df = pd.read_csv(path, index_col="time")
df.index = pd.to_datetime(df.index)
df = df.astype(float)
return df.sort_index(ascending=True)
| jaythequant/VBToptimizers | research/utils.py | utils.py | py | 1,411 | python | en | code | 2 | github-code | 36 |
72076380264 | # SPDX-License-Identifier: LGPL-3.0-only
"""Package for the doorstop.core tests."""
import logging
import os
from typing import List
from unittest.mock import MagicMock, Mock, patch
from doorstop.core.base import BaseFileObject
from doorstop.core.document import Document
from doorstop.core.item import Item
from doorstop.core.validators.item_validator import ItemValidator
from doorstop.core.vcs.mockvcs import WorkingCopy
ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", ".."))
TESTS_ROOT = os.path.dirname(__file__)
FILES = os.path.join(os.path.dirname(__file__), "files")
FILES_MD = os.path.join(os.path.dirname(__file__), "files_md")
SYS = os.path.join(FILES, "parent")
TST = os.path.join(FILES, "child")
EMPTY = os.path.join(FILES, "empty") # an empty directory
EXTERNAL = os.path.join(FILES, "external") # external files to reference
NEW = os.path.join(FILES, "new") # new document with no items
ENV = "TEST_INTEGRATION" # environment variable to enable integration tests
REASON = "'{0}' variable not set".format(ENV)
if not os.path.exists(EMPTY):
os.makedirs(EMPTY)
class DocumentNoSkip(Document):
"""Document class that is never skipped."""
SKIP = "__disabled__" # never skip test Documents
class MockFileObject(BaseFileObject): # pylint: disable=W0223,R0902
"""Mock FileObject class with stubbed file IO."""
def __init__(self, *args, **kwargs):
self._file = kwargs.pop("_file", "") # mock file system contents
with patch("os.path.isfile", Mock(return_value=True)):
super().__init__(*args, **kwargs) # type: ignore
self._read = Mock(side_effect=self._mock_read)
self._write = Mock(side_effect=self._mock_write)
_create = Mock()
def _mock_read(self, path):
"""Mock read method."""
logging.debug("mock read path: {}".format(path))
text = self._file
logging.debug("mock read text: {}".format(repr(text)))
return text
def _mock_write(self, text, path):
"""Mock write method."""
logging.debug("mock write text: {}".format(repr(text)))
logging.debug("mock write path: {}".format(path))
self._file = text
def __bool__(self):
return True
class MockItem(MockFileObject, Item): # pylint: disable=W0223,R0902
"""Mock Item class with stubbed file IO."""
class MockItemValidator(ItemValidator): # pylint: disable=W0223,R0902
"""Mock Item class with stubbed file IO."""
def _no_get_issues_document(self, item, document, skip): # pylint: disable=W0613
return
yield # pylint: disable=W0101
def disable_get_issues_document(self):
self._get_issues_document = self._no_get_issues_document
class MockDocument(MockFileObject, Document): # pylint: disable=W0223,R0902
"""Mock Document class with stubbed file IO."""
class MockSimpleDocument:
"""Mock Document class with basic default members."""
def __init__(self):
self.parent = None
self.prefix = "RQ"
self.itemformat = "yaml"
self._items: List[Item] = []
self.extended_reviewed: List[str] = []
def __iter__(self):
yield from self._items
def set_items(self, items):
self._items = items
class MockDocumentSkip(MockDocument): # pylint: disable=W0223,R0902
"""Mock Document class that is always skipped in tree placement."""
skip = True
class MockDocumentNoSkip(MockDocumentSkip): # pylint: disable=W0223,R0902
"""Mock Document class that is never skipped in tree placement."""
SKIP = "__disabled__" # never skip mock Documents
class MockItemAndVCS(MockItem): # pylint: disable=W0223,R0902
"""Mock item class with stubbed IO and a mock VCS reference."""
def __init__(self, *args, **kwargs):
super().__init__(None, *args, **kwargs)
self.tree = Mock()
self.tree.vcs = WorkingCopy(None)
class MockDataMixIn: # pylint: disable=R0903
"""Data for test cases requiring mock items and documents."""
# purely mock objects
mock_document = MagicMock()
mock_document.prefix = "MOCK"
mock_document.items = []
mock_document.assets = None
mock_document.template = None
mock_tree = MagicMock()
mock_tree.documents = [mock_document]
# mock objects that behave like the real thing
item = MockItemAndVCS(
"path/to/req3.yml",
_file=(
"links: [sys3]" + "\n"
"text: 'Heading'" + "\n"
"level: 1.1.0" + "\n"
"normative: false"
),
)
item2 = MockItemAndVCS(
"path/to/req3.yml",
_file=("links: [sys3]\ntext: '" + ("Hello, world! " * 10) + "'\nlevel: 1.2"),
)
_mock_item = Mock()
_mock_item.uid = "sys3"
_mock_item.document.prefix = "sys"
item2.tree = Mock()
item2.tree.find_item = Mock(return_value=_mock_item)
_mock_item2 = Mock()
_mock_item2.uid = "tst1"
_mock_item2.document.prefix = "tst"
# pylint: disable=undefined-variable
item2.find_child_links = lambda: [MockDataMixIn._mock_item2.uid] # type: ignore
item2.find_child_items = lambda: [MockDataMixIn._mock_item2] # type: ignore
document = MagicMock(spec=["items"])
document.items = [
item,
item2,
MockItemAndVCS(
"path/to/req1.yml", _file="links: []\ntext: 'abc\n123'\nlevel: 1.1"
),
MockItemAndVCS("path/to/req2.yml", _file="links: []\ntext: ''\nlevel: 2"),
MockItemAndVCS(
"path/to/req4.yml",
_file="links: []\nref: 'CHECK_PUBLISHED_CONTENT'\n" "level: 2.1.1",
),
MockItemAndVCS(
"path/to/req2.yml",
_file="links: [sys1]\ntext: 'Heading 2'\nlevel: 2.1.0\n" "normative: false",
),
]
document.copy_assets = Mock()
document.assets = None
document.template = None
item3 = MockItem(
None,
"path/to/req4.yml",
_file=(
"links: [sys4]" + "\n"
"text: 'This shall...'" + "\n"
"ref: Doorstop.sublime-project" + "\n"
"level: 1.2" + "\n"
"normative: true"
),
)
_mock_item3 = Mock()
_mock_item3.uid = "sys4"
_mock_item3.document.prefix = "sys"
item3.tree = Mock()
item3.tree.find_item = Mock(return_value=_mock_item3)
item3.tree.vcs.paths = [
(
"Doorstop.sublime-project",
"Doorstop.sublime-project",
"Doorstop.sublime-project",
)
]
item4 = MockItemAndVCS(
"path/to/req3.yml",
_file=(
"links: [sys3]" + "\n"
"text: 'Heading'" + "\n"
"long: " + ('"' + "0" * 66 + '"') + "\n"
"level: 1.1.0" + "\n"
"normative: false"
),
)
item5 = MockItemAndVCS(
"path/to/req3.yml",
_file=(
"links: [sys3]" + "\n"
"text: 'Heading'" + "\n"
"level: 2.1.2" + "\n"
"normative: false" + "\n"
"ref: 'abc123'"
),
)
item6 = MockItemAndVCS(
"path/to/req3.yml",
_file=(
"links: [sys3]" + "\n"
"text: 'Heading'" + "\n"
"level: 2.1.2" + "\n"
"normative: false" + "\n"
"references:" + "\n"
" - path: abc1" + "\n"
" type: file" + "\n"
" - path: abc2" + "\n"
" type: file" + "\n"
),
)
| doorstop-dev/doorstop | doorstop/core/tests/__init__.py | __init__.py | py | 7,484 | python | en | code | 424 | github-code | 36 |
18317520839 | # ----------------------------------------------------------------------------#
# Imports
# ----------------------------------------------------------------------------#
import random
from flask import Flask, abort, jsonify, request
from flask_cors import CORS
from models import setup_db, Category, Question
# ----------------------------------------------------------------------------#
# App Config.
# ----------------------------------------------------------------------------#
app = Flask(__name__)
setup_db(app)
cors = CORS(app, resources={r"/api/*": {"origins": "*"}})
# ----------------------------------------------------------------------------#
# PAGINATION LOGIC
# ----------------------------------------------------------------------------#
QUES_PER_PAGE = 10


def paginate_questions(request, selection):
    """Return the slice of formatted questions for the requested page.

    Reads the 1-based ``page`` query parameter (default 1) from *request*
    and returns at most QUES_PER_PAGE formatted questions.
    """
    page_number = request.args.get('page', 1, type=int)
    first = (page_number - 1) * QUES_PER_PAGE
    formatted = [question.format() for question in selection]
    return formatted[first:first + QUES_PER_PAGE]
# ----------------------------------------------------------------------------#
# AFTER REQUEST
# ----------------------------------------------------------------------------#
@app.after_request
def after_request(response):
    """Attach CORS headers to every response before it leaves the server."""
    # Allow the auth/content headers and all verbs this API exposes.
    response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization,true')
    response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE,OPTIONS')
    return response
# ----------------------------------------------------------------------------#
# GET CATEGORIES ENDPOINT
# ----------------------------------------------------------------------------#
@app.route('/categories', methods=['GET'])
def retrieve_categories():
    """Return all trivia categories keyed by id.

    Responds 404 when no categories exist.
    """
    categories = Category.query.order_by(Category.type).all()
    if not categories:
        abort(404)
    # {id: type} is the shape the frontend expects.
    return jsonify({
        'success': True,
        'categories': {category.id: category.type for category in categories}
    })
# ----------------------------------------------------------------------------#
# GET QUESTIONS ENDPOINT
# ----------------------------------------------------------------------------#
@app.route('/questions')
def get_questions():
    """Return one page of questions plus the full category map.

    Responds 404 for an out-of-range page and 400 on unexpected errors.
    """
    all_questions = Question.query.order_by(Question.id).all()
    paginated_questions = paginate_questions(request, all_questions)
    if not paginated_questions:
        abort(404)
    try:
        categories = Category.query.all()
        categories_dict = {category.id: category.type for category in categories}
        return jsonify({
            'success': True,
            'questions': paginated_questions,
            'total_questions': len(all_questions),
            'categories': categories_dict
        })
    except Exception as e:
        # Defensive catch-all preserved from the original behaviour.
        print(e)
        abort(400)
# ----------------------------------------------------------------------------#
# DELETE QUESTIONS ENDPOINT
# ----------------------------------------------------------------------------#
@app.route('/questions/<int:id>', methods=['DELETE'])
def delete_questions(id):
    """Delete the question with the given id.

    Responds 404 when the question does not exist and 400 when the
    delete itself fails.
    """
    question = Question.query.filter_by(id=id).one_or_none()
    if question is None:
        # BUGFIX: abort(404) used to sit inside the try block, so the
        # HTTPException it raises was caught by ``except Exception`` and
        # silently rewritten as a 400.  Checking outside keeps the 404.
        abort(404)
    try:
        question.delete()
        return jsonify({
            'success': True,
            'deleted': str(id)
        })
    except Exception as e:
        print(e)
        abort(400)
# ----------------------------------------------------------------------------#
# POST QUESTIONS ENDPOINT
# ----------------------------------------------------------------------------#
@app.route("/questions", methods=['POST'])
def add_question():
body = request.get_json()
if body is None:
abort(400)
new_question = body.get('question', None)
new_answer = body.get('answer', None)
new_category = body.get('category', None)
new_difficulty = body.get('difficulty', None)
if new_question is None or new_answer is None or new_category is None or new_difficulty is None:
abort(400)
else:
try:
added_question = Question(question=new_question, answer=new_answer, category=new_category,
difficulty=new_difficulty)
added_question.insert()
all_questions = Question.query.order_by(Question.id).all()
current_questions = paginate_questions(request, all_questions)
return jsonify({
'success': True,
'created': added_question.id,
'questions': current_questions,
'total_questions': len(all_questions)
})
except Exception as e:
print(e)
abort(422)
# ----------------------------------------------------------------------------#
# SEARCH QUESTIONS ENDPOINT
# ----------------------------------------------------------------------------#
@app.route("/questions/search", methods=['POST'])
def search_question():
body = request.get_json()
search_ques = body.get('searchTerm', None)
if search_ques:
searched_question = Question.query.filter(Question.question.ilike(f'%{search_ques}%')).all()
return jsonify({
'success': True,
'questions': [question.format() for question in searched_question],
'total_questions': len(searched_question),
'current_category': None
})
else:
abort(404)
# ----------------------------------------------------------------------------#
# GET QUESTIONS BY CATEGORY ENDPOINT
# ----------------------------------------------------------------------------#
@app.route("/categories/<int:id>/questions")
def questions_by_category(id):
searched_category = Category.query.filter_by(id=id).one_or_none()
if searched_category:
questions_in_category = Question.query.filter_by(category=str(id)).all()
current_questions = paginate_questions(request, questions_in_category)
return jsonify({
'success': True,
'questions': current_questions,
'total_questions': len(questions_in_category),
'current_category': searched_category.type
})
else:
abort(404)
# ----------------------------------------------------------------------------#
# POST QUIZ ENDPOINT
# ----------------------------------------------------------------------------#
@app.route('/quizzes', methods=['POST'])
def get_quiz():
    """Return a random question not yet asked in the current quiz.

    Expects a JSON body with ``quiz_category`` and ``previous_questions``.
    Returns ``question: None`` once every question in the category has
    been used, which tells the frontend to end the quiz.
    """
    body = request.get_json()
    if body is None:
        abort(422)
    quiz_category = body.get('quiz_category')
    previous_questions = body.get('previous_questions') or []
    if quiz_category is None:
        abort(422)
    try:
        if quiz_category['id'] == 0:
            # Category id 0 means "all categories".
            questions_query = Question.query.all()
        else:
            questions_query = Question.query.filter_by(category=quiz_category['id']).all()
        # BUGFIX: the old code drew one random index *before* the while
        # loop and its condition was inverted, so it either hung forever
        # or repeated questions.  Filter the candidates up front instead.
        candidates = [q for q in questions_query if q.id not in previous_questions]
        if not candidates:
            return jsonify({
                'success': True,
                'question': None,
                'previousQuestion': previous_questions
            })
        next_question = random.choice(candidates)
        return jsonify({
            'success': True,
            'question': {
                "id": next_question.id,
                "question": next_question.question,
                "answer": next_question.answer,
                "difficulty": next_question.difficulty,
                "category": next_question.category
            },
            'previousQuestion': previous_questions
        })
    except Exception as e:
        print(e)
        abort(404)
# ----------------------------------------------------------------------------#
# ERROR HANDLERS FOR HTTP CODES
# ----------------------------------------------------------------------------#
def _error_response(code, message):
    """Build the standard JSON error payload shared by all handlers."""
    return jsonify({
        "success": False,
        "error": code,
        "message": message
    }), code
@app.errorhandler(404)
def not_found(error):
    """Handler for 404 Resource not found."""
    return _error_response(404, "Resource not found")
@app.errorhandler(422)
def unprocessable_entity(error):
    """Handler for 422 Unprocessable entity."""
    return _error_response(422, "Unprocessable entity")
@app.errorhandler(400)
def bad_request(error):
    """Handler for 400 Bad request."""
    return _error_response(400, "Bad request")
# ----------------------------------------------------------------------------#
# Launch.
# ----------------------------------------------------------------------------#
# Default port:
if __name__ == '__main__':
app.run()
def create_app():
    """Return the module-level Flask app (factory hook for tests/servers)."""
    # NOTE(review): the app is fully configured at import time above, so
    # this "factory" always returns the same instance — confirm callers
    # do not expect a fresh app per call.
    return app
| RaghavGoel13/trivia-solution | backend/flaskr/app.py | app.py | py | 8,877 | python | en | code | 0 | github-code | 36 |
73172240425 | import cv2
import numpy as np
class VideoCamera(object):
    """Webcam wrapper that detects a green ball in the current frame."""

    # HSV range treated as "green" (hue 45-75 with strong saturation).
    LOWER_GREEN = np.array([45, 140, 50])
    UPPER_GREEN = np.array([75, 255, 255])

    def __init__(self):
        # Device 0 is the default system camera.
        self.video = cv2.VideoCapture(0)

    def __del__(self):
        # Release the capture handle when the wrapper is garbage-collected.
        self.video.release()

    def get_frame(self):
        """Grab one frame and return ``(jpeg_bytes, label)``.

        label is "Green Ball" when a green circular blob is found via a
        Hough circle transform on the green-thresholded HSV image,
        otherwise "Not Found".  (The old ``while True`` loop always
        returned on its first iteration, so it has been removed.)
        """
        label = "Not Found"
        _, frame = self.video.read()
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        hsv = cv2.medianBlur(hsv, 5)
        green_mask = cv2.inRange(hsv, self.LOWER_GREEN, self.UPPER_GREEN)
        circles = cv2.HoughCircles(green_mask, cv2.HOUGH_GRADIENT, 1, 20,
                                   param1=50, param2=30, minRadius=0, maxRadius=0)
        if circles is not None:
            label = "Green Ball"
        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes(), label
VideoCamera().get_frame()
| ishivanshgoel/Technocrats-T1 | camera.py | camera.py | py | 936 | python | en | code | 0 | github-code | 36 |
38833898049 | from django.urls import path
from rest_framework.routers import DefaultRouter
from src.dates_api.views import DateViewSet, PopularMonthListView
app_name = "dates_api"
router = DefaultRouter()
router.register("dates", DateViewSet, basename="dates")
urlpatterns = [
path("popular/", PopularMonthListView.as_view(), name="popular_month"),
]
urlpatterns += router.urls
| danielkosytorz/dates-DRF-app | backend/src/dates_api/urls.py | urls.py | py | 373 | python | en | code | 0 | github-code | 36 |
22317892633 | from django.urls import path
from django.conf.urls import include
from django.contrib import admin
from app.accounts.api.v1.views import (
UserCreatView,
UserUpdateView,
GetAuthToken,
AvatarAPIView,
ClubAPIView,
)
app_name = 'accounts'
urlpatterns = [
path('login/', GetAuthToken.as_view(), name='user-login'),
path('create/', UserCreatView.as_view(), name='user-create'),
path('update/<int:pk>/', UserUpdateView.as_view(), name='user-update'),
path('avatar/<int:pk>/', AvatarAPIView.as_view(), name='user-avatar'),
path('club/', ClubAPIView.as_view(), name='club-details')
]
| AndresGomesIglesias/LanTool-Backend | app/accounts/api/v1/urls.py | urls.py | py | 664 | python | en | code | 0 | github-code | 36 |
16232453581 | from faker import Faker
from faker.providers import person, job, company, internet, phone_number
class BaseCard:
    """A contact card with a name, e-mail address and private phone number."""

    def __init__(self, name, family_name, e_mail, priv_phone):
        self.name = name
        self.family_name = family_name
        self.e_mail = e_mail
        self.priv_phone = priv_phone
        # Cached combined length of the name parts (no separator counted).
        self._length = len(self.name) + len(self.family_name)

    def __str__(self):
        # f-string render; output is byte-identical to the old concatenation.
        return (f'Name:{self.name}; Family Name:{self.family_name}; '
                f'E-mail:{self.e_mail}; Phone:{self.priv_phone}')

    def contacts(self):
        """Print whom to contact and on which (private) number."""
        print(f'Please contact with: {self.name} {self.family_name} private phone:{self.priv_phone}')

    @property
    def length(self):
        """Combined length of the first and family name."""
        return self._length

    @length.setter
    def length(self, value):
        # BUGFIX: the old check demanded len(name) + len(family_name) + 1,
        # contradicting how _length is computed in __init__, so the only
        # "valid" value it accepted was actually an inconsistent one.
        if value != len(self.name) + len(self.family_name):
            raise ValueError(f' Value {value} not eq to len of name and family name')
        self._length = value


class BusinessCard(BaseCard):
    """A BaseCard extended with job position, company and work phone."""

    def __init__(self, position, company, business_phone, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.position = position
        self.company = company
        self.business_phone = business_phone

    def contacts(self):
        """Print whom to contact on the corporate number."""
        print(f'Please contact with: {self.name} {self.family_name} corporate phone:{self.business_phone}')

    def __str__(self):
        # The legacy separators are inconsistent (' ;') but kept verbatim
        # so the rendered output does not change.
        return (super().__str__() + '; Position:' + self.position + ' ;Company:' + self.company +
                ' ;Buss Phone:' + self.business_phone + ' ;LEN:' + str(self._length))
def create_contacts(type='Base', quantity=1):
    """Generate *quantity* fake contact cards.

    type: 'Base' for BaseCard objects, 'Business' for BusinessCard
    objects.  Raises ValueError for any other value.
    """
    fake = Faker()
    fake.add_provider(person)
    fake.add_provider(job)
    fake.add_provider(internet)
    list_of_cards = []
    if type == 'Base':
        for _ in range(quantity):
            card = BaseCard(name=fake.first_name_male(), family_name=fake.last_name_male(),
                            e_mail=fake.company_email(), priv_phone=fake.phone_number())
            list_of_cards.append(card)
    elif type == 'Business':
        fake.add_provider(company)
        for _ in range(quantity):
            card = BusinessCard(name=fake.first_name_male(), family_name=fake.last_name_male(),
                                e_mail=fake.company_email(), priv_phone=fake.phone_number(), position=fake.job(),
                                company=fake.company(), business_phone=fake.phone_number())
            list_of_cards.append(card)
    else:
        # BUGFIX: the ValueError was constructed but never raised, so bad
        # types silently returned an empty list; the message also carried
        # a stray 'f' left over from a dropped f-string prefix.
        raise ValueError('Wrong card type provided - Base or Business')
    return list_of_cards
cards = create_contacts(type='Business', quantity=10)
for card in cards:
print(card)
| szczesnym/Kodilla-Python | Chapter7/AddressBook.py | AddressBook.py | py | 2,662 | python | en | code | 0 | github-code | 36 |
25445515185 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2022/5/6 20:48
# @Author : nujaijey
# @File : 字符串.py
# @Desc :
# s1 = ""
# s2 = " "
# print("s1的长度:", len(s1))
# print("s2的长度:", len(s2))
# s3 = '1234567'
# res3 = s3[0:7:3]
# print(res3)
# s4 = ' python '
# print(s4)
# res = s4.strip()
# print(res)
s5 = '***python***'
print(s5)
res = s5.strip('*')
print(res)
# Given a string s consisting of words separated by spaces, return the
# length of the last word in the string.
# A "word" is a maximal substring consisting only of letters and
# containing no space characters.
# Constraints:
# 1 <= s.length <= 10**4
# s consists only of English letters and spaces ' '
# s contains at least one word
def lengthOfLastWord(s):
    """Print and return the length of the last word in ``s``.

    :type s: str
    :rtype: int

    BUGFIX: the docstring promised an int but the function returned
    None; it now returns the length as well as printing it, which is
    backward-compatible with the callers below (they ignore the result).
    """
    # split() already discards leading/trailing whitespace, so no strip
    # is needed; an all-whitespace string yields an empty list.
    words = s.split()
    length = len(words[-1]) if words else 0
    print(length)
    return length
lengthOfLastWord(' ')
lengthOfLastWord('Hello World')
lengthOfLastWord(' fly me to the moon ')
| nujaijey/python_study | basics/字符串.py | 字符串.py | py | 1,319 | python | zh | code | 0 | github-code | 36 |
20115420162 | import logging
import concurrent.futures
import pandas as pd
import random
import time
import requests
import os
import sys
from datetime import datetime
from utils.vars import *
from utils.common import *
from requests.structures import CaseInsensitiveDict
# Output the logs to the stdout
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logger = logging.getLogger(__file__)
class AzureAuthScaleAutomation:
def __init__(self,
microsoft_groups_token: str,
microsoft_create_apps_token: str,
windows_consent_app_token: str,
redirect_urls: list):
self.microsoft_groups_headers = CaseInsensitiveDict()
self.microsoft_groups_headers["Accept"] = "application/json"
self.microsoft_groups_headers["Content-Type"] = "application/json"
self.microsoft_groups_headers["Authorization"] = microsoft_groups_token
self.microsoft_create_apps_headers = CaseInsensitiveDict()
self.microsoft_create_apps_headers["Accept"] = "application/json"
self.microsoft_create_apps_headers["Content-Type"] = "application/json"
self.microsoft_create_apps_headers["Authorization"] = microsoft_create_apps_token
self.windows_consent_app_headers = CaseInsensitiveDict()
self.windows_consent_app_headers["Accept"] = "application/json"
self.windows_consent_app_headers["Content-Type"] = "application/json"
self.windows_consent_app_headers["Authorization"] = windows_consent_app_token
self.redirect_urls = redirect_urls
self.groups_size = None
self.groups_df = None
def __create_group_scale(self, scale_group_index: int):
"""
Creates an azure group
This function simply creates a group in an azure directory
Parameters
----------
scale_group_index : int
This is a just a number to add at the end of the group name
"""
index_str = str(scale_group_index)
json_body = {
"displayName": f"scale_group{index_str}",
"mailEnabled": True,
"securityEnabled": True,
"groupTypes": [
"Unified"
],
"description": f"scale_group{index_str}",
"mailNickname": f"scale_group{index_str}",
"visibility": "private"
}
requests.post(url=GROUPS_AZURE_LINK, headers=self.microsoft_groups_headers, json=json_body)
def create_group_scale_threading(self, start_index: int, number_of_groups: int):
"""
Creates multiple azure groups
This function creates azure groups using multi-threading approach
Parameters
----------
start_index : int
The sequential number that will be the suffix of the credential name
number_of_groups : int
The number of accounts that will be created
"""
with concurrent.futures.ThreadPoolExecutor() as executor:
executor.map(self.__create_group_scale, range(start_index, start_index + number_of_groups))
def create_azure_app_registration(self, app_name: str):
"""
Creates a web app in azure
This function simply creates a web app in azure with a specific name which is requested as a parameter
Parameters
----------
app_name : str
Returns
-------
Response
"""
create_app_body = {
"displayName": app_name,
"spa": {
"redirectUris": []
},
"publicClient": {
"redirectUris": []
},
"web": {
"redirectUris": []
},
"signInAudience": "AzureADMyOrg",
"requiredResourceAccess": [
{
"resourceAppId": "00000003-0000-0000-c000-000000000000",
"resourceAccess": [
{
"id": "e1fe6dd8-ba31-4d61-89e7-88639da4683d",
"type": "Scope"
}
]
}
]
}
return requests.post(APPS_AZURE_LINK, headers=self.microsoft_create_apps_headers, json=create_app_body)
def __assign_groups_to_members(self, user_id):
"""
Assigns 1 to 6 random groups
This function is called from assign_groups_to_members_threading, because it uses a multi-threading
approach to call this function. This function assigns randomly from 1 to 6 groups to this user.
Parameters
----------
user_id : str
The id of the user
"""
json_body = {
"requests": []
}
num_of_groups = random.randint(1, 6)
random_groups = random.sample(range(0, self.groups_size), num_of_groups)
for group_index in random_groups:
group_id = self.groups_df.iloc[group_index]
tmp_request = {
"id": f"member_{group_id}_{user_id}",
"method": "POST",
"url": f"/groups/{group_id}/members/$ref",
"headers": {
"Content-Type": "application/json"
},
"body": {
"@odata.id": f"https://graph.microsoft.com/beta/directoryObjects/{user_id}"
}
}
json_body["requests"].append(tmp_request)
requests.post(ASSIGN_GROUPS_AZURE_LINK, headers=self.microsoft_groups_headers, json=json_body)
def assign_groups_to_members_threading(self, users_csv_path: str, groups_csv_path: str):
"""
Assigns randomly assigns 1 to 6 groups to users in azure
This function loads a csv with users info and groups info, then selects 1000 to 1000 users
to call a function that assigns a user with groups. After that, it needs an sleep time because
azure rejects many requests at the same, maybe it's something about DDOS protection. Be aware that the
execution may be interrupted or something, that's because you selected too many users. If that's
the case, then you should wait the program to end and watch manually who was the last users with
groups assigned.
Parameters
----------
users_csv_path : str
The path for the csv of the users you want to assign to groups
groups_csv_path : str
The path for the csv file of the groups you want to be assign
"""
users_df = pd.read_csv(users_csv_path)
groups_df = pd.read_csv(groups_csv_path)
self.groups_df = groups_df["id"]
users_df = users_df["id"]
users_size = users_df.size
self.groups_size = self.groups_df.size
start_index = end_index = 0
end_index = clamp(end_index+1000, 0, users_size-1)
while start_index < users_size:
tmp_users_df = users_df.loc[start_index:end_index]
with concurrent.futures.ThreadPoolExecutor() as executor:
executor.map(self.__assign_groups_to_members, tmp_users_df)
logger.info(f"Execution finished from {start_index} to {end_index}, waiting to run 1000 more...")
time.sleep(130)
start_index = end_index+1
end_index = clamp(end_index+1000, 0, users_size-1)
logger.info(f"start index is: {start_index} and last index is: {end_index}")
def grant_read_permissions(self, app_object_id: str):
"""
Grants the basic permissions to an app
This function grants Read permissions to User, Directory and Group to the app. This only receives
the app_object_id of the app to work.
Parameters
----------
app_object_id : str
"""
grant_read_permissions = {
"requiredResourceAccess": [
{
"resourceAppId": "00000003-0000-0000-c000-000000000000",
"resourceAccess": [
{
"id": "e1fe6dd8-ba31-4d61-89e7-88639da4683d",
"type": "Scope"
},
{
"id": "df021288-bdef-4463-88db-98f22de89214",
"type": "Role"
},
{
"id": "5b567255-7703-4780-807c-7be8301ae99b",
"type": "Role"
},
{
"id": "7ab1d382-f21e-4acd-a863-ba3e13f7da61",
"type": "Role"
}
]
}
]
}
requests.patch(f"{APPS_AZURE_LINK}/{app_object_id}",
headers=self.microsoft_create_apps_headers, json=grant_read_permissions)
def consent_admin_permissions(self, app_client_id: str):
"""
Consents the permissions of an app
This function gives admin consent of all the permissions in the app. By default, the app
has only Read permissions.
Parameters
----------
app_client_id: str
"""
admin_consent_body = {
"clientAppId": f"{app_client_id}",
"onBehalfOfAll": True,
"checkOnly": False,
"tags": [],
"constrainToRra": True,
"dynamicPermissions": [
{
"appIdentifier": "00000003-0000-0000-c000-000000000000",
"appRoles": [
"User.Read.All",
"Group.Read.All",
"Directory.Read.All"
],
"scopes": [
"User.Read"
]
}
]
}
# Gives the consent to the app permissions as an admin
requests.post(ADMIN_CONSENT_FOR_APP_URL, headers=self.windows_consent_app_headers,
json=admin_consent_body)
def __create_secret_client(self, app_object_id: str, years_to_expire: int = 3):
"""
Creates api permission credentials to the app
This function generates an api key and client secret to access the app. This function has to be
private because you need to catch the client secret that is only shown once.
Parameters
----------
app_object_id : str
years_to_expire : int
Output
------
json
"""
start_date_time = datetime.now()
end_date_time = start_date_time.replace(year=start_date_time.year + years_to_expire)
parsed_start_dt = get_datetime_to_ISO_format(start_date_time)
parsed_end_dt = get_datetime_to_ISO_format(end_date_time)
app_secret_client_body = {
"passwordCredential": {
"displayName": "test-description",
"endDateTime": f"{parsed_end_dt}",
"startDateTime": f"{parsed_start_dt}"
}
}
return requests.post(f"{APPS_AZURE_LINK}/{app_object_id}/addPassword",
headers=self.microsoft_create_apps_headers, json=app_secret_client_body)
def modify_redirect_urls_of_app(self, app_object_id: str):
"""
Updates the redirect_urls of a web app
This function sends a patch request to modify the redirect urls of a specific web app. It is important to
know that this functions fully updates the redirect urls, so make sure to initialize this class
with all urls you want your app to have.
Parameters
----------
app_object_id : str
"""
redirect_url_body = {
"spa": {
"redirectUris": []
},
"publicClient": {
"redirectUris": []
},
"web": {
"redirectUris": self.redirect_urls
}
}
requests.patch(f"{APPS_AZURE_LINK}/{app_object_id}", headers=self.microsoft_create_apps_headers,
json=redirect_url_body)
def modify_redirect_urls_of_app_threading(self, object_id_csv_path: str):
"""
Modifies multiple redirect urls from a csv
This function reads a csv containing the object ids of the azure apps, then executes
the funcion modify_redirect_urls_of_app using multi-threading
Parameters
----------
object_id_csv_path : str
The path of the parameter of the csv
"""
df = pd.read_csv(object_id_csv_path)
df = df['object_id']
with concurrent.futures.ThreadPoolExecutor() as executor:
executor.map(self.modify_redirect_urls_of_app, df)
def create_azure_app_registrations_apis(self, tenant_id: str, start: int, number_of_apps: int):
"""
Registers, configures an azure app and returns a csv with API credentials
This function calls several functions to fully configure an azure app. The first one is for the
creation of the app, then another to grant User.Read.All, Directory.Read.All, Group.Read.All. Then,
you it calls a function that works from another API to give admin consent the previous permissions
of the app. Furthermore, creates an API key and saves the secret key. After that, adds the
redirect urls in the app and at last, creates two csv with the credentials of the app.
Parameters
----------
start : int
The start index for the app name that will be created
number_of_apps: int
The number of apps that will be created starting from the ``start`` index
tenant_id: str
The id of the azure active directory
Returns
-------
None
Doesn't return anything but creates two csv files
Examples
--------
>>> create_azure_app_registrations_apis("89f8652e-c99e-43a0-ab1f-9273081e5aaa", 10, 5)
>>> create_azure_app_registrations_apis("89f8652e-c99e-43a0-ab1f-9273081e5aaa", 5, 1)
"""
tenant_id_list = list()
client_id_list = list()
client_secret_list = list()
object_id_list = list()
for app_index in range(start, start + number_of_apps):
try:
create_request_response = self.create_azure_app_registration(f"aruba-cloudauth-cred-scale-{app_index}")
app_object_id = create_request_response.json()["id"]
app_client_id = create_request_response.json()["appId"]
self.grant_read_permissions(app_object_id)
self.consent_admin_permissions(app_client_id)
app_secret_client_request = self.__create_secret_client(app_object_id)
secret_client_content = app_secret_client_request.json()
client_secret = secret_client_content["secretText"]
self.modify_redirect_urls_of_app(app_object_id)
tenant_id_list.append(tenant_id)
client_id_list.append(app_client_id)
client_secret_list.append(client_secret)
object_id_list.append(app_object_id)
except Exception:
logger.error(Exception("The script didn't finished as expected! Saving the results in the csv"))
break
df = pd.DataFrame({'tenant_id': tenant_id_list,
'client_id': client_id_list,
'client_secret': client_secret_list})
does_file_exists = os.path.isfile(r"azure_app_credentials.csv")
df.to_csv(r"azure_app_credentials.csv", index=False, header=(not does_file_exists), mode='a')
df_object_id = pd.DataFrame({'object_id': object_id_list})
does_file_exists_object_id = os.path.isfile(r"azure_app_object_id.csv")
df_object_id.to_csv(r"azure_app_object_id.csv", index=False, header=(not does_file_exists_object_id), mode='a')
def __delete_active_application(self, app_object_id: str):
"""
Deletes permanently an application
This function deletes temporary an active azure application, then deletes it permanently from that
temporary directory.
Parameters
----------
app_object_id: str
The object id of the application
"""
requests.delete(f"{APPS_AZURE_LINK}/{app_object_id}", headers=self.microsoft_create_apps_headers)
requests.delete(f"{APP_AZURE_DELETE_PERMANENTLY_LINK}/{app_object_id}", headers=self.microsoft_create_apps_headers)
def delete_active_application_threading(self, object_id_csv_path):
"""
Deletes multiple applications
This function reads a csv with the object ids of the application that will be deleted, then deletes
each of them twice; the first one is temporary and the second one is permanently.
Parameters
----------
object_id_csv_path: csv
The csv containing one or more objects id for applications
"""
df = pd.read_csv(object_id_csv_path)
df = df['object_id']
with concurrent.futures.ThreadPoolExecutor() as executor:
executor.map(self.__delete_active_application, df)
| jotozhun/azure-pre-scale | azure_auth_scale.py | azure_auth_scale.py | py | 17,441 | python | en | code | 0 | github-code | 36 |
6994509380 | from lib.cuckoo.common.abstracts import Signature
class DropBox(Signature):
    """Flags samples that look up Dropbox cloud-service domains."""

    name = "cloud_dropbox"
    description = "Looks up the Dropbox cloud service"
    severity = 2
    categories = ["cloud"]
    authors = ["RedSocks"]
    minimum = "2.0"

    # Known Dropbox service hostnames to match against observed DNS.
    domains = [
        "dropbox.com",
        "www.dropbox.com",
        "dl.dropboxusercontent.com",
        "dl.dropbox.com",
        "dl-balancer.x.dropbox.com",
        "www.v.dropbox.com",
        "duc-balancer.x.dropbox.com",
    ]

    def on_complete(self):
        # Mark the first Dropbox domain the sample contacted, if any.
        for candidate in self.domains:
            if not self.check_domain(pattern=candidate):
                continue
            self.mark_ioc("domain", candidate)
            return True
| cuckoosandbox/community | modules/signatures/windows/cloud_dropbox.py | cloud_dropbox.py | py | 689 | python | en | code | 312 | github-code | 36 |
74736328744 | import pyautogui
# Returns two integers, the width and height of the screen. (The primary monitor, in multi-monitor setups.)
screenWidth, screenHeight = pyautogui.size()
# Returns two integers, the x and y of the mouse cursor's current position.
currentMouseX, currentMouseY = pyautogui.position()
print(screenWidth, screenHeight, currentMouseX, currentMouseY)
def moveMouse(start, finish):
    """Press the left button at *start* and drag to *finish*.

    Both points are dicts with "x" and "y" keys.  Any held button is
    released first so the drag starts from a clean state.
    (Removed a leftover commented-out moveTo call.)
    """
    pyautogui.mouseUp(button="left")
    pyautogui.moveTo(start["x"], start["y"])
    pyautogui.mouseDown()
    # dragTo moves with the button held over 1 second.
    pyautogui.dragTo(finish["x"], finish["y"], 1, button='left')
    pyautogui.mouseUp(button="left")
def mouseTo(point):
    """Move the cursor to *point* (a dict with "x" and "y" keys)."""
    pyautogui.moveTo(point["x"], point["y"])
def withinGameBox(screen):
    """Return True when the cursor lies inside the game's bounding box.

    *screen* is a dict with corners x1, y1 (top-left) and x2, y2
    (bottom-right); boundary positions count as inside, matching the
    original strict-inequality rejection logic.
    """
    x, y = pyautogui.position()
    # Chained comparisons replace the four-way reject-then-accept branch.
    return screen['x1'] <= x <= screen['x2'] and screen['y1'] <= y <= screen['y2']
| davidyu37/fruit-ninja-cv | mouse.py | mouse.py | py | 1,028 | python | en | code | 0 | github-code | 36 |
417569576 | from abc import ABC,abstractmethod
class Shape(ABC):
    """Abstract two-dimensional shape described by two dimensions."""

    def __init__(self, dim1, dim2):
        self.dim1 = dim1
        self.dim2 = dim2

    @abstractmethod
    def area(self):
        """Report the shape's area; every concrete subclass must override this."""
class Triangle(Shape):
    """Concrete Shape that reports a triangle's area."""
    # Subclasses of Shape must override the abstract area() method,
    # otherwise instantiating them raises TypeError.
    def area(self):
        """Print the triangle's area (0.5 * base * height)."""
        area=0.5* self.dim1*self.dim2
        print("Area of Triangle is :",area)
class Rectangle(Shape):
    """Concrete Shape that reports a rectangle's area."""
    def area(self):
        """Print the rectangle's area (width * height)."""
        area=self.dim1*self.dim2
        print("Area of Rectangle is :",area)
# we can't create abstract class object
# s1= Shape(10,20)
# s1.area()
t1=Triangle(20,30)
t1.area()
r1=Rectangle(20,30)
r1.area() | Rakibuz/Robotics_HCI | OOP_Python/Abstraction.py | Abstraction.py | py | 792 | python | en | code | 0 | github-code | 36 |
# Definition for singly-linked list.
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next


class Solution:
    def deleteDuplicates(self, head: ListNode) -> ListNode:
        """Remove consecutive duplicates from a sorted linked list, in place.

        Returns the original head (or None for an empty list).

        Bug fix: the previous implementation only cleared the terminating
        link when a *new* value appeared at the last node, so trailing
        duplicates survived (e.g. 1->2->2 was returned unchanged).
        """
        node = head
        while node and node.next:
            if node.next.val == node.val:
                # Unlink the duplicate; stay on `node` in case more follow.
                node.next = node.next.next
            else:
                node = node.next
        return head
| dzaytsev91/leetcode-algorithms | easy/83_remove_duplicates_from_sorted_list.py | 83_remove_duplicates_from_sorted_list.py | py | 678 | python | en | code | 2 | github-code | 36 |
7595557598 | #!/usr/bin/env python3
import argparse
import sys
import os
from pathlib import Path
import json
import io
import re
import tempfile
import shutil
import copy
# Regexes shared by the extractors below (all are used with .match, so they
# are anchored at the start of the string).
re_has_whitespace = re.compile(r"\s+")  # leading whitespace run
re_has_indent = re.compile(r"\s{4}\s+")  # indented deeper than one 4-space level
re_empty_line = re.compile(r"^\s*$")  # blank / whitespace-only line
def parse_args(args):
    """Parse the given command-line argument list into a Namespace."""
    argument_parser = argparse.ArgumentParser()
    argument_parser.add_argument(
        "-p", "--package", required=False,
        help="the namespace of the package to process")
    argument_parser.add_argument(
        "-b", "--build", required=True,
        help="the path of the build folder")
    argument_parser.add_argument(
        "-f", "--force", action="store_true", required=False,
        help="force generation, as opposed to generating only if more recent")
    return argument_parser.parse_args(args)
def error(msg):
    # Emit an error line on stderr with a uniform "[ERROR]" prefix.
    print("[ERROR] " + msg, file=sys.stderr)
def progress(msg):
    # Emit a status line on stdout with a uniform "[PROGRESS]" prefix.
    print("[PROGRESS] " + msg)
def get_package_filename(build, package):
    """Return the path string of <build>/packages/<package>.json."""
    descriptor = Path(build) / "packages" / "{}.json".format(package)
    return str(descriptor)
def get_source_filename(build, source):
    """Return the path of the .yapl file for revision *source* under *build*.

    Prefers the pristine copy in <build>/files; falls back to the copy in
    <build>/files/modified_locally when the pristine one does not exist.
    """
    files_dir = Path(build) / "files"
    leaf = source + ".yapl"
    pristine = files_dir / leaf
    if pristine.exists():
        return str(pristine)
    return str(files_dir / "modified_locally" / leaf)
def validate_parsed_args(parsed_args):
    """Sanity-check the parsed CLI options; return 0 when valid, 1 otherwise."""
    if not os.path.isdir(parsed_args.build):
        error("The build parameter must specify a directory file")
        return 1
    package = parsed_args.package
    if package is not None and not os.path.isfile(get_package_filename(parsed_args.build, package)):
        error("The parameters must specify a package file")
        return 1
    return 0
def main(parsed_args):
    """Drive extraction: prepare <build>/modules, then process either the
    single package named on the command line or every descriptor found
    under <build>/packages.  Returns the process exit code (always 0).
    """
    progress("Starting")
    progress("Preparing output directories")
    modules_path = Path(parsed_args.build) / "modules"
    modules_path.mkdir(parents=True, exist_ok=True)
    if parsed_args.force:
        # --force: wipe previously generated modules and start clean.
        shutil.rmtree(modules_path, ignore_errors=False, onerror=None)
        modules_path.mkdir(parents=True, exist_ok=True)
    if parsed_args.package:
        process_package(parsed_args.build, parsed_args.package)
    else:
        packages_path = Path(parsed_args.build) / "packages"
        packages = list(packages_path.glob("*.json"))
        n = 1
        for package in packages:
            progress("Processing package {} ({} of {})".format(package, n, len(packages)))
            n = n + 1
            # Path.stem drops the ".json" suffix, leaving the package name.
            process_package(parsed_args.build, Path(package).stem)
    progress("Finished")
    return 0
def load_package(build, package):
    """Read and return the JSON package descriptor for *package*."""
    with io.open(get_package_filename(build, package)) as json_file:
        return json.load(json_file)
def load_sourcefile(build, hash):
    """Return the full text of the .yapl source identified by *hash*."""
    with io.open(get_source_filename(build, hash), "r") as yapl_file:
        return yapl_file.read()
def dedent(l):
    """Strip one indentation level (a tab or four spaces) from *l*.

    Blank and whitespace-only lines dedent to the empty string.  Any other
    line not starting with a full indentation unit is a structural error in
    the input and raises AssertionError.
    """
    if l == "":
        return l
    if l[0] == "\t":
        return l[1:]
    if l[:4] == "    ":
        return l[4:]
    if l.strip() == "":
        # Whitespace-only line shallower than one indent unit.
        return ""
    # Bug fix: the original used `assert False, ...` followed by an
    # unreachable `return l`; under `python -O` the assert vanished and the
    # line was silently returned unchanged.  Raise explicitly instead
    # (same exception type as before).
    raise AssertionError("expected leading whitespace, not '{}'".format(l))
def process_package(build, package):
    """Iterate the source files listed in *package*'s descriptor.

    NOTE(review): as written, the loop body only skips already-processed
    revisions and never does any work; the `save_modules` function defined
    below (with its trailing self-call) looks like it was originally nested
    inside this loop and lost its indentation.  Verify against upstream.
    """
    progress("Processing package " + package)
    package_descriptor = load_package(build, package)
    source_files = list(package_descriptor["source_files"].items())
    modules_path = Path(build) / "modules"
    n = 1
    for source_filename, sourcefile_info in source_files:
        progress("Processing sourcefile {} ({} of {})".format(source_filename, n, len(source_files)))
        n = n + 1
        revision = sourcefile_info["revision"]
        # Skip revisions whose module directory already exists.
        if os.path.isdir(modules_path / revision):
            # TODO: this can lead to inconsistency
            continue
def save_modules(build, package_descriptor, source_filename, sourcefile_info):
    """Split one .yapl source revision into module chunks and save them.

    Feeds each line to the active ModuleExtractor, starting a new one
    whenever a module header matches and saving it when it closes.

    NOTE(review): the unconditional self-call on the last line makes this
    function infinitely recursive if ever invoked; together with the empty
    loop body in process_package above, this strongly suggests the logic
    originally lived inside process_package's loop and the indentation was
    mangled.  Confirm against the upstream source before relying on it.
    """
    revision = sourcefile_info["revision"]
    source_file = load_sourcefile(build, revision)
    lines = source_file.split("\n")
    extractor = None
    parent_metadata = {
        "package":package_descriptor["identifier"],
        "identifier":package_descriptor["identifier"],
        "source": {
            "filename": source_filename,
            "revision": revision,
        },
        "symbols": copy.deepcopy(package_descriptor["symbols"])
    }
    for line in lines:
        if line.startswith("#!"):
            # ignore hash-bangs
            pass
        elif extractor is None and re_has_indent.match(line):
            # Indented line outside any module: nothing to attach it to.
            pass
        elif extractor is not None:
            # Feed the open module; a False return means it just closed.
            if not extractor.process_line(line):
                extractor.save(build)
                extractor = None
        elif ModuleExtractor.matches(line):
            extractor = ModuleExtractor(parent_metadata, line)
    save_modules(build, package_descriptor, source_filename, sourcefile_info)
class Extractor:
    """Base class that pulls one declaration (module, function, variable,
    class, type) out of a .yapl source and writes it out with metadata.

    NOTE(review): the statements between strip_empty_lines() and the
    @classmethod below sit at class level yet reference `self` and `build`;
    the module would raise NameError on import as written.  They almost
    certainly belong inside save() — the indentation appears to have been
    mangled.  Verify against the upstream source.
    """
    def __init__(self, parent_metadata, category):
        self._parent_metadata = parent_metadata
        # Per-declaration metadata; "identifier" is filled by subclasses.
        self._metadata = {
            "identifier":None, # to be filled by subclasses
            "category":category,
            "source":parent_metadata["source"],
        }
        # Record which container this declaration lives in.
        if "module" in parent_metadata:
            self._metadata["module"] = parent_metadata["module"]
        elif "package" in parent_metadata:
            self._metadata["package"] = parent_metadata["package"]
        self._lines = []
    def process_line(self, line):
        # Accumulate dedented body lines; a leading "}" closes the block.
        if line.startswith("}"):
            return False
        self._lines.append(dedent(line))
        return True
    def get_path(self, build):
        # Directory that will hold this declaration's output files.
        _filename = self.get_filename(build)
        _path = Path(_filename).parents[0]
        _path.mkdir(parents=True, exist_ok=True)
        return str(_path)
    def get_filename(self, build):
        # Abstract: subclasses must supply the output file stem.
        assert False
    def save(self, build):
        _path = self.get_path(build)
        Path(_path).mkdir(parents=True, exist_ok=True)
    def strip_empty_lines(lines):
        # this is a brute force evil algorithm, fix this one day.
        while lines and re_empty_line.match(lines[0]):
            lines.pop(0)
        while lines and re_empty_line.match(lines[-1]):
            lines.pop(-1)
        lines.append("")
        return lines
    _lines = strip_empty_lines(self._lines)
    _filename = self.get_filename(build)
    with io.open(_filename + ".yapl", "w") as yapl_file:
        yapl_file.write("\n".join(strip_empty_lines(_lines)))
    with io.open(_filename + ".json", 'w') as metadata_file:
        json.dump(self._metadata, metadata_file, sort_keys=True, indent=4)
    @classmethod
    def matches(cls, line):
        # True when `line` opens a declaration this extractor understands.
        m = cls._expression.match(line)
        return m is not None and m and True
class FunctionExtractor(Extractor):
    """Extracts a function-like declaration (function/constructor/destructor/
    method/generator/closure, possibly hyphen-combined, optionally private).

    Note: process_line() discards body lines and save() is a no-op, so this
    extractor only records metadata via __init__.
    """
    # Matches the declaration keyword(s); the last group is the remainder.
    _expression = re.compile(r"^(private\s+){0,1}\s*("
r"((function)|(constructor)|(destructor)|(method)|(generator)|(closure))"
r"(\-((function)|(constructor)|(destructor)|(method)|(generator)|(closure)))*"
r")\s+(.+)")
    # Splits the remainder into the name (up to the first "(") and the rest.
    _identifier = re.compile(r"\s*([^\(]+)(.*)")
    def __init__(self, parent_metadata, line):
        super().__init__(parent_metadata, "functions")
        m = FunctionExtractor._expression.match(line)
        _private = m.group(1) is not None and m.group(1).startswith("private")
        # The last group holds everything after the declaration keyword(s).
        remainder = m.group(len(m.groups()))
        m = FunctionExtractor._identifier.match(remainder)
        _name = m.group(1)
        remainder = m.group(len(m.groups()))
        self._metadata["identifier"] = parent_metadata["identifier"] + "." + _name
        self._metadata["function"] = {
            "name":_name,
            "private":_private,
        }
    def process_line(self, line):
        # Consume body lines without recording them; "}" closes the function.
        if line.startswith("}"):
            return False
        return True
    def save(self, build):
        # Function bodies are not persisted individually.
        return
class ModuleFunctionExtractor(FunctionExtractor):
    # A function declared directly inside a module: registers itself in the
    # parent module's symbol table on construction.
    def __init__(self, parent, line):
        super().__init__(parent._metadata, line)
        parent.declare_symbol("functions", self._metadata["function"]["name"], self._metadata["identifier"])
class VariableExtractor(Extractor):
    """Extracts a variable declaration of the form `name : type (=|:=) value`.

    ":=" marks a mutable binding, "=" an immutable one.
    """
    _expression = re.compile(r"^(private\s+){0,1}\s*("
r"(\w+)\s*:\s*"
r"(\w+)\s*"
r"((:{0,1}=)\s*(.*))"
r")$"
)
    def __init__(self, parent_metadata, line):
        super().__init__(parent_metadata, "variables")
        m = VariableExtractor._expression.match(line)
        _private = m.group(1) is not None and m.group(1).startswith("private")
        _name = m.group(3)
        _type = m.group(4)
        _op = m.group(6)
        _mutable = m.group(6) is not None and _op == ":="
        _remainder = m.group(len(m.groups()))
        # TODO: extends
        self._metadata["identifier"] = parent_metadata["identifier"] + "." + _name
        self._metadata["scope"] = parent_metadata["identifier"]
        self._metadata["variable"] = {
            "type":_type,
            "name":_name,
            "private":_private,
            "mutable":_mutable
        }
        # Re-emit the declaration in normalised "name:type op value" form.
        self._lines.append("{}:{} {} {}".format(_name, _type, _op, _remainder))
    def process_line(self, line):
        # Indented continuation lines belong to the initialiser expression.
        if re_has_indent.match(line):
            self._lines.append(dedent(line))
            return True
        return False
    def save(self, build):
        # Variable declarations are not persisted individually.
        return
class ClassExtractor(Extractor):
    """Extracts a class-like declaration (abstract class / class / interface /
    structure, optionally private)."""
    _expression = re.compile(r"^(private\s+){0,1}\s*("
r"((abstract\s+class)|(class)|(interface)|(structure))"
r")\s+(.+)")
    # The name is the first identifier of the remainder.
    _identifier = re.compile(r"(\w+).*")
    def __init__(self, parent_metadata, line):
        super().__init__(parent_metadata, "classes")
        m = ClassExtractor._expression.match(line)
        _private = m.group(1) is not None and m.group(1).startswith("private")
        remainder = m.group(len(m.groups()))
        _name = ClassExtractor._identifier.match(remainder).group(1)
        self._metadata["identifier"] = parent_metadata["identifier"] + "." + _name
        self._metadata["class"] = {
            "name":_name,
            "private":_private,
        }
    def save(self, build):
        # Class bodies are not persisted individually.
        return
class ModuleClassExtractor(ClassExtractor):
    # A class declared directly inside a module: registers itself in the
    # parent module's symbol table on construction.
    def __init__(self, parent, line):
        super().__init__(parent._metadata, line)
        self._parent = parent
        parent.declare_symbol("classes", self._metadata["class"]["name"], self._metadata["identifier"])
class ModuleVariableExtractor(VariableExtractor):
    # A variable declared directly inside a module: registers itself in the
    # parent module's symbol table on construction.
    def __init__(self, parent, line):
        super().__init__(parent._metadata, line)
        parent.declare_symbol("variables", self._metadata["variable"]["name"], self._metadata["identifier"])
class TypeExtractor(Extractor):
    """Extracts a "type" or "alias" declaration (optionally private)."""
    _expression = re.compile(r"^(private\s+){0,1}\s*("
r"(type)|(alias)"
r")\s+(.+)$"
)
    # The name runs up to the first ":", "{", "(", "=" or whitespace.
    _identifier = re.compile(r"\s*([^:\{\(\s=]+)(.*)")
    def __init__(self, parent_metadata, line):
        super().__init__(parent_metadata, "types")
        m = TypeExtractor._expression.match(line)
        _private = m.group(1) is not None and m.group(1).startswith("private")
        remainder = m.group(len(m.groups()))
        m = TypeExtractor._identifier.match(remainder)
        _name = m.group(1)
        self._metadata["identifier"] = parent_metadata["identifier"] + "." + _name
        self._metadata["type"] = {
            "name":_name,
            "private":_private
        }
    def process_line(self, line):
        # A type declaration is a single line: record it, then signal done.
        super().process_line(line)
        return False
    def save(self, build):
        # Types are not persisted individually.
        return
class ModuleTypeExtractor(TypeExtractor):
    # A type declared directly inside a module: registers itself in the
    # parent module's symbol table on construction.
    def __init__(self, parent, line):
        super().__init__(parent._metadata, line)
        parent.declare_symbol("types", self._metadata["type"]["name"], self._metadata["identifier"])
class ModuleExtractor(Extractor):
    """Extracts a whole module/service/process/restservice block.

    Tracks imports into the module's symbol table, then save() re-scans the
    collected body lines with the declaration extractors above.
    """
    _expression = re.compile(r'^((module)|(service)|(process)|(restservice))\s+(\w+)\s*\{\s*$')
    _import = re.compile(r'^\s{4}import\s+(.+)')
    _import_from = re.compile(r'^\s{4}import\s+(.*)\sfrom\s+(.+)')
    def __init__(self, parent_metadata, line):
        super().__init__(parent_metadata, "modules")
        m = ModuleExtractor._expression.match(line)
        _name = m.group(len(m.groups()))
        # Invariant: the module must be named after its source file.
        assert _name == str(Path(parent_metadata["source"]["filename"]).stem)
        self._metadata["identifier"] = parent_metadata["identifier"] + "." + _name
        self._metadata["symbols"] = copy.deepcopy(parent_metadata["symbols"])
    def declare_symbol(self, symbol_category, symbol_name, symbol_identifier):
        # NOTE: symbol_category is currently ignored; all symbols share one map.
        self._metadata["symbols"][symbol_name] = symbol_identifier
    def get_filename(self, build):
        # Output stem: <build>/modules/<revision>/module(.yapl/.json)
        return str(Path(build) / "modules" / self._metadata["source"]["revision"] / "module")
    def process_line(self, line):
        # Import lines update the symbol table; everything else is body text.
        if ModuleExtractor._import.match(line):
            m = ModuleExtractor._import.match(line)
            import_from = None
            if ModuleExtractor._import_from.match(line):
                x = ModuleExtractor._import_from.match(line)
                without_from = x.group(1)
                import_from = x.group(2)
            else:
                without_from = m.group(1)
            for symbol in without_from.split(","):
                symbol = symbol.strip()
                if re_has_whitespace.match(symbol):
                    symbol = re.split(r"\s+", symbol)[0]
                if import_from is None:
                    # NOTE(review): this assigns None to `symbol` and then
                    # registers a None->None mapping for plain `import X`
                    # lines; `import_from = symbol` was probably intended.
                    # Verify against upstream.
                    symbol = import_from
                    if import_from in self._metadata["symbols"]:
                        import_from = self._metadata["symbols"][import_from]
                    self.declare_symbol("imports", symbol, import_from)
                else:
                    if symbol == "*":
                        if import_from in self._metadata["symbols"]:
                            import_from = self._metadata["symbols"][import_from]
                        self.declare_symbol("imports", "*." + import_from, import_from + ".*")
                    else:
                        if import_from in self._metadata["symbols"]:
                            import_from = self._metadata["symbols"][import_from]
                        import_from = import_from + "." + symbol
                        self.declare_symbol("imports", symbol, import_from)
            return True
        return super().process_line(line)
    def save(self, build):
        # Re-scan the collected module body for nested declarations, then
        # write the module file itself via the base-class save().
        extractor = None
        for line in self._lines:
            if extractor is None and re_has_indent.match(line):
                continue
            if extractor is not None:
                if not extractor.process_line(line):
                    extractor.save(build)
                    extractor = None
            elif ModuleVariableExtractor.matches(line):
                extractor = ModuleVariableExtractor(self, line)
            elif ModuleFunctionExtractor.matches(line):
                extractor = ModuleFunctionExtractor(self, line)
            elif ModuleClassExtractor.matches(line):
                extractor = ModuleClassExtractor(self, line)
            elif ModuleTypeExtractor.matches(line):
                extractor = ModuleTypeExtractor(self, line)
        if extractor is not None:
            # Flush a declaration still open at end-of-module.
            more = extractor.process_line("")
            assert (not more), "expected the extractor to complete"
            extractor.save(build)
        super().save(build)
if __name__ == "__main__":
    # CLI entry point: parse arguments, validate them, then run extraction;
    # the resulting code becomes the process exit status.
    parsed_args = parse_args(sys.argv[1:])
    exit_code = validate_parsed_args(parsed_args)
    if exit_code == 0:
        exit_code = main(parsed_args)
    sys.exit(exit_code)
| padresmurfa/yapl | v1/2_modules_from_package/cli.py | cli.py | py | 15,896 | python | en | code | 0 | github-code | 36 |
13459025825 | import argparse
class HackAssembler:
    """Assembler for the nand2tetris Hack machine language.

    translate() maps raw .asm source lines to 16-bit binary instruction
    strings, resolving labels and variable symbols along the way.
    """
    def __init__(self):
        # comp mnemonic -> 7-bit "a cccccc" field of a C-instruction.
        self.__comp_code = {
            "0": "0101010",
            "1": "0111111",
            "-1": "0111010",
            "D": "0001100",
            "A": "0110000",
            "!D": "0001101",
            "!A": "0110001",
            "-D": "0001111",
            "-A": "0110011",
            "D+1": "0011111",
            "A+1": "0110111",
            "D-1": "0001110",
            "A-1": "0110010",
            "D+A": "0000010",
            "D-A": "0010011",
            "A-D": "0000111",
            "D&A": "0000000",
            "D|A": "0010101",
            "M": "1110000",
            "!M": "1110001",
            "-M": "1110011",
            "M+1": "1110111",
            "M-1": "1110010",
            "D+M": "1000010",
            "D-M": "1010011",
            "M-D": "1000111",
            "D&M": "1000000",
            "D|M": "1010101",
        }
        # Jump mnemonics: the list index is the 3-bit jjj value.
        self.__jump_code = ["", "JGT", "JEQ", "JGE", "JLT", "JNE", "JLE", "JMP"]
        # Predefined symbols: virtual registers, pointers and memory-mapped I/O.
        self.__defined_symbols = {
            "SP": 0,
            "LCL": 1,
            "ARG": 2,
            "THIS": 3,
            "THAT": 4,
            "R0": 0,
            "R1": 1,
            "R2": 2,
            "R3": 3,
            "R4": 4,
            "R5": 5,
            "R6": 6,
            "R7": 7,
            "R8": 8,
            "R9": 9,
            "R10": 10,
            "R11": 11,
            "R12": 12,
            "R13": 13,
            "R14": 14,
            "R15": 15,
            "SCREEN": 0x4000,
            "KBD": 0x6000,
        }
    def translate(self, lines: list[str]) -> list[str]:
        """Assemble raw source lines into binary instruction strings."""
        return self.__handle_instructions(
            self.__handle_symbols(self.__handle_spaces(self.__handle_comments(lines)))
        )
    def __handle_symbols(self, lines: list[str]) -> list[str]:
        """Resolve "(LABEL)" definitions and symbolic @-references.

        Pass 1 removes label lines, recording each label's ROM address;
        pass 2 allocates RAM addresses (16 upward) for remaining symbols
        and rewrites every symbolic A-instruction numerically.
        """
        symbols = self.__defined_symbols.copy()
        results: list[str] = []
        for line in lines:
            if line[0] == "(" and line[-1] == ")":
                symbols[line[1:-1]] = len(results)
            else:
                results.append(line)
        counter = 16
        for (idx, line) in enumerate(results):
            if self.__is_a_instruction(line):
                value: str = line[1:]
                if value.isdigit():
                    continue
                if value not in symbols:
                    symbols[value] = counter
                    counter += 1
                results[idx] = line[0] + str(symbols[value])
        return results
    def __translate_dest(self, line: str) -> str:
        """Return the 3-bit dest field (bits for A, D, M targets of '=')."""
        result = 0
        if "=" in line:
            dest = line.split("=")[0]
            if "M" in dest:
                result |= 1
            if "D" in dest:
                result |= 1 << 1
            if "A" in dest:
                result |= 1 << 2
        return format(result, "03b")
    def __translate_comp(self, line: str) -> str:
        """Return the 7-bit comp field (the text between '=' and ';').

        NOTE(review): dict.get returns None for an unknown mnemonic, which
        makes the caller fail with TypeError instead of a clear error.
        """
        st = line.index("=") + 1 if "=" in line else 0
        nd = line.index(";") if ";" in line else len(line)
        comp = line[st:nd]
        return self.__comp_code.get(comp)
    def __translate_jump(self, line: str) -> str:
        """Return the 3-bit jump field from the mnemonic after ';'."""
        jump = None
        if ";" in line:
            jump = line.split(";")[1]
        result = self.__jump_code.index(jump or "")
        return format(result, "03b")
    def __is_a_instruction(self, line: str) -> bool:
        # A-instructions start with '@'; everything else is a C-instruction.
        return line[0] == "@"
    def __translate_a_instruction(self, line: str) -> str:
        # "0" opcode followed by the value as a 15-bit binary string.
        return "0" + format(int(line[1:]), "015b")[-15:]
    def __translate_c_instruction(self, line: str) -> str:
        # "111" opcode + comp + dest + jump fields.
        return (
            "111"
            + self.__translate_comp(line)
            + self.__translate_dest(line)
            + self.__translate_jump(line)
        )
    def __handle_instructions(self, lines: list[str]) -> list[str]:
        """Translate each cleaned, symbol-free line to its binary form."""
        result: list[str] = []
        for line in lines:
            if self.__is_a_instruction(line):
                result.append(self.__translate_a_instruction(line))
            else:
                result.append(self.__translate_c_instruction(line))
        return result
    def __handle_spaces(self, lines: list[str]) -> list[str]:
        # Drop blank lines and remove every whitespace character.
        return ["".join(line.split()) for line in lines if line.strip()]
    def __handle_comments(self, lines: list[str]) -> list[str]:
        # Strip "//" comments (keeps only the text before the marker).
        return [line.split("//")[0] for line in lines]
if __name__ == "__main__":
    # CLI: read <file>.asm, assemble it, and write <file>.hack next to it.
    parser = argparse.ArgumentParser(
        description="Translate Hack assembly code to Hack binary code"
    )
    parser.add_argument("asm", help="filepath of Hack assembly code")
    args = parser.parse_args()
    filepath: str = args.asm
    assert filepath.endswith(".asm"), f"{filepath} doesn't end with .asm"
    # Bug fix: str.rstrip(".asm") strips any trailing '.', 'a', 's', 'm'
    # characters (e.g. "mass.asm" -> ""), not the ".asm" suffix.  Use
    # removesuffix (3.9+, already required by the list[str] annotations)
    # to drop exactly the extension.
    output: str = filepath.removesuffix(".asm") + ".hack"
    assembler: HackAssembler = HackAssembler()
    with open(filepath, "r") as input_file:
        code: list[str] = input_file.read().splitlines()
    with open(output, "w") as output_file:
        output_file.write("\n".join(assembler.translate(code)))
output_file.write("\n".join(assembler.translate(code)))
| zhixiangli/nand2tetris | projects/06/hack_assembler.py | hack_assembler.py | py | 5,017 | python | en | code | 1 | github-code | 36 |
16779750056 | from kelimeler import sozluk
"""
eksik yada yanlış yazılan kelimeyi tespit ederek sözlükteki en yakın kelimelerin listesini döner.
"""
def duzelten(cumle):
    """Check each word of *cumle* against the dictionary (sozluk).

    Words matching a dictionary entry (exactly, or as a shortened prefix)
    are kept as-is; for unknown words the suggestion list produced by
    istatistik() is appended instead.  Everything is finally flattened
    into one space-separated string.

    NOTE(review): the flattening loop iterates duzelenCumle[i][j] — for a
    plain string entry this walks the word character by character, putting
    a space after every character.  Confirm this is intended.
    """
    duzelenCumle=[]  # corrected words / suggestion lists (was: Turkish comment)
    donenCumle=""
    cumle=cumle.split()
    for i in range(len(cumle)):
        tek=0    # flag: this word already accepted
        sayac=0  # number of dictionary matches for this word
        for j in range(len(sozluk)):
            if(tek==0):
                for k in range(len(cumle[i])):
                    # k == 0 tests an exact match; k > 0 drops k trailing chars.
                    if(k==0 and cumle[i]==sozluk[j][0]):
                        duzelenCumle.append(cumle[i])
                        tek=1
                        sayac=sayac+1
                    elif cumle[i][:-k]==sozluk[j][0]:
                        duzelenCumle.append(cumle[i])
                        tek=1
                        sayac=sayac+1
        if sayac==0:
            # No dictionary hit at all: gather suggestions instead.
            donen=istatistik(cumle[i])
            duzelenCumle.append(donen)
    for i in range(len(duzelenCumle)):
        for j in range(len(duzelenCumle[i])):
            donenCumle+=duzelenCumle[i][j]+" "
    return donenCumle
def istatistik(kelime):
    """Find the dictionary words closest to the incomplete/misspelled word.

    Scores each dictionary entry by the share of character positions that
    agree with *kelime*, keeping entries scoring above 40% (and below
    100%, i.e. not identical) that are at least as long as *kelime*.
    Returns a token list formatted for display by duzelten().
    """
    sayac=[]
    duzelenler=[]  # collects the suggested replacement words.
    count=0
    icerik=[]
    for i in range(len(kelime)):
        sayac+=kelime[i]
    for j in range(len(sozluk)):
        for k in range(min(len(sozluk[j][0]),len(sayac))):
            for t in range(min(len(sozluk[j][0]),len(sayac))):
                if t==k :
                    if sayac[t]==sozluk[j][0][k]:
                        count+=1
        count=(count/min(len(sozluk[j][0]),len(sayac)))*100  # similarity as a percentage
        if (count>40 and len(sozluk[j][0])>=len(kelime)):  # keep entries over 40% positional similarity
            if (count<100):
                duzelenler.append(sozluk[j][0])
        count=0
    icerik.append("]")
    icerik.append("!*")
    icerik.append(kelime)
    icerik.append("için onerilenler;")
    for i in range(len(duzelenler)):
        icerik.append(duzelenler[i])
        icerik.append("-")
    icerik.append("]")
    return icerik
| FerhatKartal/text_correction | duzelten.py | duzelten.py | py | 2,353 | python | tr | code | 0 | github-code | 36 |
15646853934 | import json
import os
import string
import requests
from selenium import webdriver
from selenium.webdriver.common.by import By
import urllib.request
# Global Chrome options shared by every webdriver.Chrome() launch below:
# hide the automation banner/extension and disable password-manager popups.
options = webdriver.ChromeOptions()
options.add_experimental_option("excludeSwitches", ["enable-automation"])
options.add_experimental_option('useAutomationExtension', False)
options.add_experimental_option("prefs", {
    "profile.password_manager_enabled": False, "credentials_enable_service": False, 'profile.default_content_settings.popups': 0})
def initCheckPoint() -> None:
    """Reset every flag in checkPoint.json to False."""
    with open('checkPoint.json', 'r') as fp:
        flags = json.load(fp)
    cleared = {key: False for key in flags}
    with open('checkPoint.json', 'w') as fp:
        json.dump(cleared, fp)
    return None
def loadCheckPoint() -> dict:
    """Read and return the checkpoint flags from checkPoint.json."""
    with open('checkPoint.json', 'r') as fp:
        return json.load(fp)
def saveCheckPoint(checkPoint: dict) -> None:
    """Persist the checkpoint flags dict to checkPoint.json."""
    with open('checkPoint.json', 'w') as fp:
        json.dump(checkPoint, fp)
    return None
def main():
    """Scrape Judge Girl: collect the problem-set index, per-set problem
    lists, and then each problem's description, images and test data.

    Progress is persisted in checkPoint.json so interrupted runs resume.
    NOTE(review): `id = problem[0:problem.find(' ') - 1]` drops the
    character just before the first space — this looks like an off-by-one;
    confirm against the actual format of the Description.txt lines.
    """
    # initCheckPoint()
    url = 'https://judgegirl.csie.org/problems/domain/0'
    # load dict of checkPoint in json file
    checkPoint = loadCheckPoint()
    # put r in a text file
    if not checkPoint['get problemSet']:
        with open('text.txt', 'w', encoding='UTF-8') as f:
            r = requests.get(url)
            # encode r into string
            f.write(r.text)
        # read text file
        # save all text between <li class="pure-menu-item" and </li> into a another text file
        with open('text.txt', 'r', encoding='UTF-8') as f:
            with open('problemSet.txt', 'w', encoding='UTF-8') as f2:
                for line in f:
                    if '<li class="pure-menu-item" data=' in line:
                        while '</li>' not in line:
                            f2.write(line)
                            line = f.readline()
                        f2.write(line)
        # delete text file
        os.remove('text.txt')
        # read problemSet.txt and save all problemSet id into a another text file
        with open('problemSet.txt', 'r', encoding='UTF-8') as f:
            with open('problemSetId.txt', 'w', encoding='UTF-8') as f2:
                for line in f:
                    if 'data="' in line:
                        f2.write(line[line.find('#'):-3])
                        f2.write(' ')
                        continue
                    if '</a></li>' in line:
                        # fine the first alpha index
                        start = 0
                        for i in range(len(line)):
                            if line[i].isalpha():
                                start = i
                                break
                        # replace all '/' with ''
                        target = line[start:-10]
                        target = target.replace('/', '')
                        if target.find(' ') > 4:
                            continue
                        f2.write(target)
                        f2.write('\n')
        # delete problemSet.txt
        os.remove('problemSet.txt')
        checkPoint['get problemSet'] = True
        saveCheckPoint(checkPoint)
    # read problemSetId.txt
    if not checkPoint['get problemId']:
        with open('problemSetId.txt', 'r', encoding='UTF-8') as f:
            # create a folder named the text after ' ' of each line
            for line in f:
                # test if the folder already exists
                folderName = line[line.find(' ')+2:-1]
                if not os.path.exists(folderName):
                    os.mkdir(folderName)
                    driver = webdriver.Chrome(
                        'chromedriver.exe', chrome_options=options)
                    driver.minimize_window()
                    # create a file named Description.txt in each folder
                    with open(folderName+'/Description.txt', 'w', encoding='UTF-8') as f2:
                        # goto the problemSet page
                        url = 'https://judgegirl.csie.org/problems/domain/0' + \
                            line[0:line.find(' ')]
                        driver.get(url)
                        # wait for the page to load
                        # driver.implicitly_wait(3)
                        elements = driver.find_elements(
                            By.CLASS_NAME, 'problem-item')
                        # write element into Description.txt
                        for element in elements:
                            print(element.text)
                            f2.write(element.text)
                            f2.write('\n')
                    driver.close()
                    driver.quit()
        checkPoint['get problemId'] = True
        saveCheckPoint(checkPoint)
    url = 'https://judgegirl.csie.org/problem/0'
    if not checkPoint['get problems']:
        # go to each problemSet folder in problemSetId.txt
        folderNames = list()
        with open('problemSetId.txt', 'r', encoding='UTF-8') as f:
            for line in f:
                folderNames.append(line[line.find(' ')+2:-1])
        problemSets = dict()
        for folderName in folderNames:
            with open(folderName + '/Description.txt', 'r', encoding='UTF-8') as f:
                problemSets[folderName] = list()
                for line in f:
                    problemSets[folderName].append(line[0:-1])
        for key in problemSets:
            for problem in problemSets[key]:
                title = problem
                id = problem[0:problem.find(' ') - 1]
                # if the last character of title is a punctuation, delete it
                if title[-1] in string.punctuation:
                    title = title[:-1]
                # replace all '/' with ' '
                title = title.replace('/', ' ')
                if title in checkPoint and checkPoint[title]:
                    continue
                print('Now downloading ' + title)
                driver = webdriver.Chrome(
                    'chromedriver.exe', chrome_options=options)
                driver.minimize_window()
                # goto the problem page
                url = 'https://judgegirl.csie.org/problem/0/' + id
                driver.get(url)
                # wait for the page to load
                # driver.implicitly_wait(3)
                if not os.path.exists(key+'/'+title):
                    os.mkdir(key+'/'+title)
                with open(key+'/'+title+'/Description.txt', 'w', encoding='UTF-8') as f:
                    f.write(driver.find_element(By.CLASS_NAME, 'content').text)
                # open the file with read and write permission
                with open(key+'/'+title+'/Description.txt', 'r', encoding='UTF-8') as f:
                    start = end = 0
                    for index, line in enumerate(f):
                        if 'Task Description' in line:
                            start = index
                        if 'Submit' in line:
                            end = index
                            break
                    # goto th top of the file
                    f.seek(0)
                    # save the line between start and end into newFile
                    newFile = f.readlines()[start:end]
                with open(key+'/'+title+'/Description.txt', 'w', encoding='UTF-8') as f2:
                    # clear the file
                    f2.truncate()
                    # write the new file
                    for line in newFile:
                        f2.write(line)
                # find the photo and save it
                photos = driver.find_elements(
                    By.CLASS_NAME, 'pure-img-responsive')
                if len(photos) != 0:
                    for index, photo in enumerate(photos):
                        link = photo.get_attribute('src')
                        # save the phot
                        if 'https://judgegirl.csie.org/images/problems/' in link:
                            urllib.request.urlretrieve(link, key+'/'+title+'/'+link[44:])
                if not os.path.exists(key+'/'+title+'/testCases'):
                    os.mkdir(key+'/'+title+'/testCases')
                # find the elements that inside text is 'Download Testdata' by XPATH
                link = 'https://judgegirl.csie.org/testdata/download/' + id
                print(title)
                print(url)
                print(link)
                # goto the testCase page
                driver.get(link)
                # wait for the page to load
                # driver.implicitly_wait(3)
                content = driver.find_element(By.CLASS_NAME, 'content')
                menu = content.find_element(By.CLASS_NAME, 'pure-g')
                cases = menu.find_elements(By.CLASS_NAME, 'pure-menu-link')
                # download each testCase
                for case in cases:
                    url = case.get_attribute('href')
                    file = requests.get(url)
                    with open(key+'/'+title+'/testCases/'+case.text, 'wb') as f:
                        f.write(file.content)
                driver.close()
                driver.quit()
                checkPoint[title] = True
                saveCheckPoint(checkPoint)
        checkPoint['get problems'] = True
        saveCheckPoint(checkPoint)
    # check if all checkPoint is True
    for key in checkPoint:
        if not checkPoint[key]:
            print(key+' is not finished')
            return None
    print('all checkPoint is finished')
    return None
if __name__ == '__main__':
main() | fatbrother/crawler-test | main.py | main.py | py | 9,656 | python | en | code | 0 | github-code | 36 |
9512661257 | w, w_unit = input("Weight : ").split()
h, h_unit = input("Height : ").split()
w, h = float(w), float(h)
# w Convert
if w_unit == "lbs":
# Convert lbs to kg
w /= 2.205
# h Convert
if h_unit == "ft":
# Convert ft to m
h /= 3.2808399
elif h_unit == "cm":
# Convert cm to m
h /= 100
# Calculate BMI
bmi = w / (h ** 2)
if bmi < 18.5:
print("ผอม")
elif bmi < 23.0:
print("รุปร่างปกติ")
elif bmi < 25.0:
print("รูปร่างอ้วน")
elif bmi < 30.0:
print("อ้วนระดับ 1")
else:
print("อ้วนระดับ 2") | ratchanonp/comproglab | 64-1LAB4/6434480323LAB4P2.py | 6434480323LAB4P2.py | py | 621 | python | th | code | 0 | github-code | 36 |
6790532061 | from django.db import models
from django.conf import settings
from ..querysets.resource import ResourceQuerySet
class ResourceManager(models.Manager):
    """Manager exposing the custom ResourceQuerySet plus creation helpers."""
    # Queryset class handed out by get_queryset(); overridable in subclasses.
    queryset_class = ResourceQuerySet
    def get_queryset(self):
        # Always return the custom queryset bound to the current database.
        return self.queryset_class(self.model, using=self._db)
    def filter_by_project(self, project):
        """Resources attached to the given project."""
        return self.get_queryset().filter_by_project(project)
    def filter_by_assignment(self, assignment):
        """Resources attached to the given assignment (generic-object filter)."""
        return self.get_queryset().filter_by_object(assignment)
    def create_resource(
        self, user_from, root_name, name, extension, content_type, *args, **kwargs
    ):
        """Create and return a resource node owned by *user_from*.

        Optional kwargs: link, description, file_size.
        """
        new_node = self.create(
            created_by=user_from,
            name=name,
            extension=extension,
            mimetype=content_type,
            _filename=root_name,
            link=kwargs.get('link'),
            description=kwargs.get('description'),
            file_size=kwargs.get('file_size', None),
        )
        return new_node
    def create_user_resource(self, user_from, team, related, **kwargs):
        """Create a user-tagged resource and attach it to *related*.

        *related* must expose add_user_resource(user, team, resource).
        """
        new_resource = self.model(
            created_by=user_from,
            **kwargs
        )
        new_resource.save(tag_original=settings.FILES_USER_TAG)
        related.add_user_resource(user_from, team, new_resource)
        return new_resource
| tomasgarzon/exo-services | service-exo-core/files/managers/resource.py | resource.py | py | 1,318 | python | en | code | 0 | github-code | 36 |
39916510161 | import os
import re
import xlwt
from tkinter import *
from tkinter.filedialog import askdirectory
from xlwt import Workbook
# Pick a directory via a file dialog, scan each file's "Summary" section for
# "<words> ... <float>" rows, and dump the matches to output_data.xls.
root = Tk()
root.withdraw()
path = askdirectory()
print(path)
file_names = os.listdir(str(path))
output = []
for file in file_names:
    file_path = path + '/' + file
    # Fix: use a context manager so every handle is closed (the original
    # leaked one open file handle per scanned file).
    with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
        data = f.read()
    data = data.strip()
    # Keep only the text after "Summary", when present.
    mhs = re.search(r'Summary((.|\s)*)', data)
    if mhs:
        data = mhs.group(1)
    for row in data.split('\n'):
        if row.strip() and row.strip() != '|':
            # Capture a run of words (group 1) and the first decimal (group 3).
            mhs = re.search(r'(([a-zA-Z]+\s)+)\s+.*?(\d+\.\d+)', row)
            if mhs:
                output.append([file, mhs.group(1), mhs.group(3)])
            else:
                output.append([file, 'N/A', 'N/A'])
book = Workbook(encoding='utf-8')
sht1 = book.add_sheet('sheet1')
# Fix: iterate each row's own length; the original indexed output[1], which
# raises IndexError when fewer than two rows were collected.
for i, record in enumerate(output):
    for j, value in enumerate(record):
        sht1.write(i, j, value)
book.save('./output_data.xls')
print("over")
| nigo81/python_spider_learn | TXT处理/readtxt.py | readtxt.py | py | 1,005 | python | en | code | 3 | github-code | 36 |
31829455558 | # -*- coding: utf-8 -*-
def validate(f):
    # Check that the four expected masters exist in the font and print the
    # contour count of each glyph plus the point count of every contour.
    # (f is presumably a RoboFont font object — see CurrentFont() below.)
    names = ['narrow-bold', 'wide-bold', 'narrow-thin', 'wide-thin']
    if f is None:
        return
    for n in names:
        g = f[n]
        print(g.name, len(g.contours))
        for c in g.contours:
            print(g.name, len(c))
if __name__ == "__main__":
    # Run inside RoboFont: validate the currently open font.
    f = CurrentFont()
validate(f) | LettError/responsiveLettering | ResponsiveLettering.roboFontExt/lib/mathShape/cmd_validateMathShape.py | cmd_validateMathShape.py | py | 357 | python | en | code | 152 | github-code | 36 |
class Solution:
    def lengthOfLongestSubstring(self, s: str) -> int:
        """Length of the longest substring of *s* without repeated characters.

        Sliding-window scan: `left` marks the window start and `last_seen`
        maps each character to its most recent index, so the window can jump
        past an earlier duplicate in O(1).  O(len(s)) time overall.
        """
        best = 0
        left = 0
        last_seen = {}
        for right, ch in enumerate(s):
            previous = last_seen.get(ch)
            if previous is not None and previous + 1 > left:
                left = previous + 1
            last_seen[ch] = right
            window = right - left + 1
            if window > best:
                best = window
        return best
| blhwong/algos_py | leet/length_of_longest_substring/main.py | main.py | py | 351 | python | en | code | 0 | github-code | 36 |
31524102898 | """
Test the lookup_specs_chain
NOTE: this just makes sure the chain executes properly but DOES NOT assess the quality of the agent's analysis. That is done in the ipython notebooks in the evals/ folder
"""
import pytest
import json
from meche_copilot.get_equipment_results import get_spec_lookup_data, get_spec_page_validations
from meche_copilot.chains.analyze_specs_chain import AnalyzeSpecsChain
from meche_copilot.chains.helpers.specs_retriever import SpecsRetriever
from meche_copilot.schemas import *
from meche_copilot.utils.chunk_dataframe import chunk_dataframe
from meche_copilot.utils.config import load_config, find_config
# Used for verbose output of langchain prompts and responses
from langchain.callbacks import StdOutCallbackHandler
@pytest.mark.skip(reason="TODO - desiding if I want to use analyze specs chain or lookup specs chain...probably analyze specs chain")
def test_lookup_specs_chain(session:Session):
    """Exercise AnalyzeSpecsChain end-to-end for one equipment's source A.

    Runs the chain but makes no quality assertions (see the module
    docstring); currently skipped while the chain choice is decided.
    """
    sess = session
    config = session.config
    # a piece of equipment (idx 2: fan eq)
    eq = sess.equipments[2]
    # empty_eq_str_srcA, empty_eq_str_srcB = get_spec_lookup_data(eq)
    empty_eq_df = eq.instances_to(ScopedEquipment.IOFormats.df)
    spec_defs_df = eq.spec_defs_to(ScopedEquipment.IOFormats.df)
    # Spec definitions (transposed) stacked above the empty instance rows.
    concated = pd.concat([spec_defs_df.T, empty_eq_df])
    retriever = SpecsRetriever(doc_retriever=config.doc_retriever, source=eq.sourceA)
    relavent_docs = retriever.get_relevant_documents(query="")
    relavent_ref_docs_as_dicts = [doc.dict() for doc in relavent_docs]
    relavent_ref_docs_as_string = json.dumps(relavent_ref_docs_as_dicts) # Convert to JSON string
    # lookup specs for source A
    lookup_chain = AnalyzeSpecsChain(
        doc_retriever=sess.config.doc_retriever,
        spec_reader=sess.config.spec_reader,
        callbacks=[StdOutCallbackHandler()]
    )
    result_sourceA = lookup_chain.run({
        "source": eq.sourceA,
        # "refresh_source_docs": False
        # "spec_def_df": spec_defs_df,
        "spec_res_df": concated,
    })
    from langchain.schema import AIMessage, HumanMessage, SystemMessage
    messages = [
        SystemMessage(
            content=f"For each key in results_json, find the corresponding spec in the Context using the definition and replace 'None' with correct value. Context: {relavent_ref_docs_as_string}"
        ),
        HumanMessage(
            content=f"results_json={concated.iloc[:, 0:5].to_json()}"
        ),
    ]
    lookup_chain.chat(messages)
    # NOTE(review): bare expression below has no effect; leftover from a
    # notebook session?
    result_sourceA
    result_sourceA_validated = get_spec_page_validations(val_pg=result_sourceA, ref_docs=eq.sourceA.ref_docs)
27193740899 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import glob
import logging
import argparse
from ngsmetavirus.config import *
from ngsmetavirus.common import check_path, mkdir, read_tsv
from dagflow import DAG, Task, do_dag
LOG = logging.getLogger(__name__)
__version__ = "1.0.0"
__author__ = ("Xingguo Zhang",)
__email__ = "invicoun@foxmail.com"
__all__ = []
def create_merge_data_task(prefix, read1, read2, work_dir, job_type="local"):
    """Build a dagflow Task that stages the raw reads of one sample.

    With a single lane the fastq files are symlinked into *work_dir* as
    ``<prefix>.raw.R[12].fq.gz``; with several lanes they are concatenated.

    :param prefix: sample name, used for the task id and output file names
    :param read1: list of R1 fastq paths
    :param read2: list of R2 fastq paths (same length/order as read1)
    :param work_dir: directory the merged/linked files end up in
    :param job_type: dagflow execution backend ("local" or "sge")
    :return: (task, merged_read1_path, merged_read2_path)
    """
    if len(read1)==1:
        # single lane: a symlink avoids copying the data
        comm1 = """\
ln -s {read1} {prefix}.raw.R1.fq.gz
ln -s {read2} {prefix}.raw.R2.fq.gz
""".format(read1=read1[0], read2=read2[0], prefix=prefix)
    else:
        # multiple lanes: concatenating gzip members yields a valid gzip file
        comm1 = """\
cat {read1} >{prefix}.raw.R1.fq.gz
cat {read2} >{prefix}.raw.R2.fq.gz
""".format(read1=" ".join(read1), read2=" ".join(read2), prefix=prefix)
    task = Task(
        id="merge_data_%s" % prefix,
        work_dir=work_dir,
        type=job_type,
        option="-pe smp 1",
        script="""
{comm1}
""".format(comm1=comm1)
    )
    # rebind read1/read2 to the paths of the merged files for the caller
    read1 = os.path.join(work_dir, "%s.raw.R1.fq.gz" % prefix)
    read2 = os.path.join(work_dir, "%s.raw.R2.fq.gz" % prefix)
    return task, read1, read2
def create_mngs_task(prefix, read1, read2, reference, nohost, dtype, atype,
                    job_type, work_dir, out_dir, trim=0, project="", id=""):
    """Build a dagflow Task running the full single-sample mNGS pipeline.

    Wraps ``ngsmetavirus.py all`` with the supplied options; *reference* and
    *nohost* are translated into optional command-line flags (empty strings
    when unused).
    """
    if nohost:
        nohost = "--nohost"
    else:
        nohost = ""
    if reference:
        rx = "--reference %s" % reference
    else:
        rx = ""
    task = Task(
        id="mngs_%s" % prefix,
        work_dir=work_dir,
        type="local",
        option="-pe smp 1",
        script="""
{root}/ngsmetavirus.py all \\
  --prefix {prefix} --read1 {read1} --read2 {read2} \\
  --dtype {dtype} --atype {atype} {rx} \\
  --project {project} --id {id} \\
  --trim {trim} --thread 6 --job_type {job_type} {nohost} \\
  --work_dir {work} --out_dir {out}
""".format(root=ROOT,
           prefix=prefix,
           read1=read1,
           read2=read2,
           rx=rx,
           nohost=nohost,
           dtype=dtype,
           atype=atype,
           trim=trim,
           project=project,
           id=id,
           job_type=job_type,
           work=work_dir,
           out=out_dir
           )
    )
    return task
def run_mngs_multi(input, reference, nohost, dtype, atype, trim, job_type,
                   concurrent, refresh, work_dir, out_dir, project="", id=""):
    """Run the mNGS pipeline for every sample listed in *input*.

    *input* is a TSV with one ``name  R1  R2`` record per line; lanes sharing
    a name are merged first.  Builds a DAG with one merge task and one
    pipeline task per sample and executes it.

    :param reference: host reference fasta, or the basename of a pre-built
        hisat2 index (``<reference>.3.ht2``); empty string disables it
    :return: 0 on completion
    """
    input = check_path(input)
    work_dir = mkdir(work_dir)
    out_dir = mkdir(out_dir)
    if reference:
        # Accept either the reference file itself or the basename of a
        # hisat2 index.  The original passed the function object to itself
        # (check_path(check_path)), always failed, and raised on *success*
        # because of a try/except/else inversion.
        try:
            reference = check_path(reference)
        except Exception:
            try:
                index = check_path("%s.3.ht2" % reference)
                # str.strip(".3.ht2") removes a character set, not a suffix;
                # slice off the suffix instead.
                reference = index[:-len(".3.ht2")]
            except Exception:
                raise Exception("Reference genome %s does not exist" % reference)
    # Group the fastq paths by sample name: name -> [[R1...], [R2...]]
    data = {}
    for line in read_tsv(input):
        if line[0] not in data:
            data[line[0]] = [[], []]
        data[line[0]][0].append(check_path(line[1]))
        data[line[0]][1].append(check_path(line[2]))
    dag = DAG("mngs_multi")
    for prefix in data:
        prefix_work = mkdir(os.path.join(work_dir, prefix))
        data_task, read1, read2 = create_merge_data_task(
            prefix=prefix,
            read1=data[prefix][0],
            read2=data[prefix][1],
            work_dir=prefix_work
        )
        dag.add_task(data_task)
        task = create_mngs_task(
            prefix=prefix,
            read1=read1,
            read2=read2,
            reference=reference,
            nohost=nohost,
            dtype=dtype,
            atype=atype,
            trim=trim,
            job_type=job_type,
            project=project,
            id=id,
            work_dir=prefix_work,
            out_dir=mkdir(os.path.join(out_dir, prefix))
        )
        dag.add_task(task)
        # the pipeline may only start once the reads are merged
        task.set_upstream(data_task)
    do_dag(dag, concurrent, refresh)
    return 0
def mngs_multi(args):
    """CLI adapter: forward the parsed argparse namespace to run_mngs_multi."""
    options = dict(
        input=args.input,
        reference=args.reference,
        nohost=args.nohost,
        dtype=args.dtype,
        atype=args.atype,
        trim=args.trim,
        work_dir=args.work_dir,
        out_dir=args.out_dir,
        concurrent=args.concurrent,
        refresh=args.refresh,
        job_type=args.job_type,
        project=args.project,
        id=args.id,
    )
    run_mngs_multi(**options)
def add_mngs_multi_args(parser):
    """Attach the multi-sample pipeline options to *parser* and return it.

    Only the reads list plus --project and --id are mandatory; everything
    else has a default.
    """
    parser.add_argument("input", metavar='FILE', type=str,
        help="Input the reads list.")
    parser.add_argument("-ref", "--reference", metavar="FILE", type=str, default="",
        help="Input the host's reference database.")
    parser.add_argument('--nohost', action='store_true',
        help='Input the reference database is not the host.')
    parser.add_argument("-dt", "--dtype", metavar='STR', type=str,
        choices=["mgi", "illumina", "other"], default="illumina",
        help="Set up the sequencing platform of the data, default=illumina.")
    # help text fixed: the valid choice is "metaviral", not "metavirus"
    parser.add_argument("-at", "--atype", metavar='STR', type=str,
        choices=["metagenome", "metaviral", "rnaviral"], default="metagenome",
        help="""Set the type of analysis(metagenome, metaviral, rnaviral),\
default=metagenome.""")
    parser.add_argument("--trim", metavar="INT", type=int, default=5,
        help="Set trim length, default=5")
    parser.add_argument("--project", metavar="STR", type=str, required=True,
        help="Input project name.")
    parser.add_argument("--id", metavar="STR", type=str, required=True,
        help="Input project id.")
    parser.add_argument("--concurrent", metavar="INT", type=int, default=10,
        help="Maximum number of jobs concurrent  (default: 10).")
    parser.add_argument("--refresh", metavar="INT", type=int, default=30,
        help="Refresh time of log in seconds  (default: 30).")
    parser.add_argument("--job_type", choices=["sge", "local"], default="local",
        help="Jobs run on [sge, local]  (default: local).")
    parser.add_argument("--work_dir", metavar="DIR", type=str, default=".",
        help="Work directory (default: current directory).")
    parser.add_argument("--out_dir", metavar="DIR", type=str, default=".",
        help="Output directory (default: current directory).")
    return parser
def main():
    """Command-line entry point: configure logging, parse the arguments and
    launch the multi-sample pipeline."""
    logging.basicConfig(
        stream=sys.stderr,
        level=logging.INFO,
        format="[%(levelname)s] %(message)s"
    )
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description="""
attention:
    ngsmetavirus.py multi input.list
File format:
name R1 R2
version: %s
contact:  %s <%s>\
""" % (__version__, " ".join(__author__), __email__))
    # usage line fixed: the original showed a doubled extension
    # ("ngsmetavirus.py.py multi input.list")
    parser = add_mngs_multi_args(parser)
    args = parser.parse_args()
    mngs_multi(args)


if __name__ == "__main__":
    main()
| zxgsy520/metavirus | ngsmetavirus/mngs_multi.py | mngs_multi.py | py | 6,833 | python | en | code | 1 | github-code | 36 |
11883950060 | # lcp Core
# iMagineLab - Living Character Program
from lcp.core.module_loader import ModuleLoader
from lcp.core.lcp_system_configurator import SystemConfigurator
import time
class LCPCore(object):
    """Bootstrapper for the Living Character Program.

    Loads the system configuration, wires up the module loader and, on
    :meth:`start`, boots every module and then idles forever so the modules
    can keep running.
    """

    __version = "0.1"  # name-mangled class constant printed at start-up

    def __init__(self):
        print("LCP Core - Version", self.__version)
        print(">> Initialising system...")
        self.__SystemConfig = SystemConfigurator()
        # NOTE(review): relative Windows-style path — assumes the process is
        # started from src/lcp/core; confirm before running from elsewhere.
        self.__SystemConfig.load_config("..\\..\\..\\resources\\config.ini")
        print(">> Loading character configuration...")
        # lcp character configuration
        print(">> Start module loader...")
        self.__MLoader = ModuleLoader(self.__SystemConfig.get_module_config('ModuleLoader'))
        self.__MLoader.load_modules(self.__SystemConfig)

    def start(self):
        """Boot all loaded modules, then block forever keeping the process alive."""
        print(">> Booting core...")
        print(">> Booting modules...")
        self.__MLoader.start_modules()
        time.sleep(1)
        print(">> System ready")
        # keep the main thread alive; the modules do their work elsewhere
        while 1:
            time.sleep(1)
if __name__ == "__main__":
    # Use a distinct variable name: the original rebound ``LCPCore`` to the
    # instance, shadowing the class and making further instantiation impossible.
    core = LCPCore()
    core.start()
| huybthomas/LCP-Core-Old | src/lcp/core/lcp_core.py | lcp_core.py | py | 1,069 | python | en | code | 0 | github-code | 36 |
74958840742 |
class Solution:
    def isPalindrome(self, src: int) -> bool:
        """Return True if the integer *src* reads the same forwards and
        backwards, without converting it to a string.

        Negative numbers are never palindromes.  Otherwise the digits are
        peeled off with divmod, collected least-significant first, and the
        digit sequence is compared with its reverse.
        """
        if src < 0:
            return False
        digits = []
        remaining = src
        while remaining:
            remaining, digit = divmod(remaining, 10)
            digits.append(digit)
        return digits == digits[::-1]
| kokokuo/oh-leetcode | 9-PalindromeNumber.py | 9-PalindromeNumber.py | py | 650 | python | zh | code | 1 | github-code | 36 |
def main():
    """Read the city records, sort them by percentage population growth
    (2000 -> 2010, descending) and write name/growth pairs to a new file."""
    cities = placeRecordsIntoList("Cities.txt")
    # Sort list by percentage population growth.
    cities.sort(key=lambda city: (city[3] - city[2])/city[2], reverse=True)
    createNewFile(cities)   # Create file of cities and their % growth.
def placeRecordsIntoList(fileName):
    """Read comma-separated city records from *fileName*.

    Each line has the form ``name,state,pop2000,pop2010``.  Returns a list of
    records with the two population fields converted to numbers.
    """
    # with-block guarantees the file is closed even on error
    with open(fileName, 'r') as infile:
        listOfRecords = [line.rstrip().split(',') for line in infile]
    for record in listOfRecords:
        # eval() on file contents is unsafe; a plain numeric conversion
        # handles the same integer/decimal data.
        record[2] = float(record[2])  # population in 2000
        record[3] = float(record[3])  # population in 2010
    return listOfRecords
def createNewFile(cities):
    """Write each city's percentage population growth (rounded to one
    decimal) to OrderedCities.txt, one ``name,growth`` pair per line."""
    with open("OrderedCities.txt", 'w') as outfile:
        for city in cities:
            growth = round(100 * ((city[3] - city[2]) / city[2]), 1)
            outfile.write("{0},{1}\n".format(city[0], growth))
main()
| guoweifeng216/python | python_design/pythonprogram_design/Ch5/5-PP-7.py | 5-PP-7.py | py | 929 | python | en | code | 0 | github-code | 36 |
71300192743 | import itertools
import tqdm
import subprocess
def add_to_seq(seq, feature, label):
    """Append *feature* to the running word list *seq* according to the
    first character of *label*: 'B' and 'S' open a new word, 'I' extends the
    last word (or opens one when the list is empty); anything else is ignored.
    """
    tag = label[0]
    if tag in ('B', 'S'):
        seq.append(feature)
    elif tag == 'I':
        if seq:
            seq[-1] += feature
        else:
            seq.append(feature)
def cal(predict_seq, right_seq):
    r"""
    Score one sentence's predicted segmentation against the gold one.

    :return: (number of predicted words found in the gold sequence,
              number of predicted words not found,
              total number of gold words,
              1 if the whole sentence was segmented correctly else 0)
    """
    # the sentence can only be fully correct when the word counts agree
    all_correct = 1 if len(predict_seq) == len(right_seq) else 0
    hits = 0
    for word in predict_seq:
        if word in right_seq:
            hits += 1
        else:
            all_correct = 0
    return hits, len(predict_seq) - hits, len(right_seq), all_correct
def evaluate(model, dataset_loader, idx2feature, idx2label, device, log_file):
    r"""
    Compute word-segmentation precision / recall / F1 over *dataset_loader*
    by decoding every batch with *model* and comparing the predicted word
    sequence with the gold one.

    :param model: sequence model; ``model(features, masks)`` returns
        (scores, decoded label paths)
    :param dataset_loader: iterable of batch loaders (chained together)
    :param idx2feature: index -> character lookup
    :param idx2label: index -> segmentation tag lookup (B/I/S-style)
    :param device: torch device the batches are moved to
    :param log_file: path the metrics are appended to
    :return: (P, R, F1, ER)
    """
    model.eval()
    num_sentence = 0
    num_right_sentence = 0
    num_all_word = 0
    num_right_word = 0
    num_error_word = 0
    for _, batch in tqdm.tqdm(enumerate(itertools.chain.from_iterable(dataset_loader))):
        batch = tuple(t.to(device) for t in batch)
        features, labels, masks = batch
        scores, paths = model(features, masks)
        num_sentence += features.shape[0]
        length = features.shape[1]
        for i, (sentence, label) in enumerate(zip(features, labels)):
            predict_seq = []
            right_seq = []
            for j, tensor_feat in enumerate(sentence):
                if j == length or masks[i][j] == 0:
                    # reached padding / the trailing <eof> marker:
                    # score the words accumulated for this sentence
                    nums = cal(predict_seq, right_seq)
                    num_right_word += nums[0]
                    num_error_word += nums[1]
                    num_all_word += nums[2]
                    num_right_sentence += nums[3]
                    break
                else:
                    feature = idx2feature[tensor_feat.item()]
                    predict_label = idx2label[paths[i][j].item()]
                    right_label = idx2label[label[j].item()]
                    add_to_seq(predict_seq, feature, predict_label)
                    add_to_seq(right_seq, feature, right_label)
    P = num_right_word / (num_error_word + num_right_word)
    R = num_right_word / (num_all_word)
    F1 = (2 * P * R) / (P + R)
    ER = num_error_word / num_all_word
    print(
        '标准词数:%d个,词数正确率:%f个,词数错误率:%f' % (num_all_word, num_right_word / num_all_word, num_error_word / num_all_word))
    print('标准行数:%d,行数正确率:%f' % (num_sentence, num_right_sentence / num_sentence))
    print('Recall: %f' % (R))
    print('Precision: %f' % (P))
    print('F1 MEASURE: %f' % (F1))
    print('ERR RATE: %f' % (ER))
    with open(log_file, 'a') as f:
        f.write(
            '标准词数:%d个,词数正确率:%f个,词数错误率:%f\n' % (
                num_all_word, num_right_word / num_all_word, num_error_word / num_all_word))
        f.write('标准行数:%d,行数正确率:%f\n' % (num_sentence, num_right_sentence / num_sentence))
        f.write('Recall: %f\n' % (R))
        f.write('Precision: %f\n' % (P))
        f.write('F1 MEASURE: %f\n' % (F1))
        f.write('ERR RATE: %f\n\n\n' % (ER))
    return P, R, F1, ER
def evaluate_with_perl(gold_file, predict_file, log=None, epoch=0, loss=None, dev=True):
    r"""
    Score a segmentation with the SIGHAN perl scorer (much faster than the
    pure-python evaluation) and return its recall / precision / F1.

    NOTE(review): the scorer script and the training word list are
    hard-coded absolute paths below — this only works on the author's
    machine; parameterize before reuse.

    :param gold_file: path of the gold segmentation
    :param predict_file: path of the predicted segmentation
    :param log: optional log file the scorer report is appended to
    :param epoch: epoch number written to the log
    :param loss: optional epoch loss written to the log
    :param dev: True to label the log section "Dev", else "Train"
    :return: (R, P, F1) parsed from the scorer report
    """
    perl_path = r'/home/yxu/Seg_ner_pos/icwb2-data/scripts/score'
    word_list = r'/home/yxu/Seg_ner_pos/icwb2-data/gold/pku_training_words.utf8'
    p = subprocess.Popen(['perl', perl_path, word_list, gold_file, predict_file], stdout=subprocess.PIPE)
    output = p.stdout.read()
    output = output.decode(encoding='utf8')
    outputs = output.split('\n')
    p.kill()
    # the summary lines sit in the last 15 lines of the report
    res = outputs[-15:]
    dev_R, dev_P, dev_F1 = float(res[-8].split('\t')[-1]), float(res[-7].split('\t')[-1]), float(
        res[-6].split('\t')[-1])
    if log is not None:
        with open(log, 'a') as f:
            f.write('EPOCH : %d\n' % epoch)
            if dev:
                f.write('Dev\n')
            else:
                f.write('Train\n')
            if loss is not None:
                f.write('Epoch loss : %f\n' % loss)
            for j in res:
                print(j)
                f.write(j + '\n')
    else:
        for j in res:
            print(j)
    return dev_R, dev_P, dev_F1
def predict_write(model, dataset_loader, idx2feature, idx2label, device, tmp_file='./tmp', origin_texts=None):
    r"""
    Decode every batch with *model* and write the segmented sentences to a
    temporary file, one sentence per line, words separated by single spaces.

    :param model: sequence model exposing ``predict(features, masks)``
    :param dataset_loader: iterable of batch loaders (chained together)
    :param idx2feature: index -> character lookup
    :param idx2label: index -> segmentation tag lookup
    :param device: torch device the batches are moved to
    :param tmp_file: output path of the prediction file
    :param origin_texts: optional raw sentences; when given the characters
        are taken from them so <unk> tokens never appear in the output
    :return: the path that was written (*tmp_file*)
    """
    model.eval()
    with open(tmp_file, 'w') as f:
        for idx, batch in tqdm.tqdm(enumerate(itertools.chain.from_iterable(dataset_loader))):
            batch = tuple(t.to(device) for t in batch)
            features, labels, masks = batch
            # predict() expects time-major tensors, hence the transposes
            features_v, labels_v, masks_v = features.transpose(0, 1), labels.transpose(0, 1), masks.transpose(0, 1)
            scores, predict_labels = model.predict(features_v, masks_v)
            batch_size = labels.shape[0]
            # raw text slice for this batch, so the output never shows <unk>
            if origin_texts:
                origin_text = origin_texts[idx * batch_size:(idx + 1) * batch_size]
            for j in range(batch_size):
                if origin_texts:
                    origin_line = origin_text[j]
                feature, predict_label, mask = features[j], predict_labels[j], masks[j]
                line = ''
                length = feature.shape[0]
                for k in range(length):
                    if k + 1 == length or mask[k + 1].item() == 0:
                        # next position is padding/<eof>: sentence finished
                        break
                    else:
                        if origin_texts:
                            content = origin_line[k]
                        else:
                            content = idx2feature[feature[k].item()]
                        # 'B' and 'S' open a new word -> prepend a space
                        if idx2label[predict_label[k].item()][0] in ('B', 'S') and k != 0:
                            line += ' ' + content
                        else:
                            line += content
                f.write(line + '\n')
    return tmp_file
def read_line(f):
    """Read one line from the file object *f* and strip the surrounding
    whitespace, including the trailing newline."""
    return f.readline().strip()
def evaluate_by_file(real_text_file, pred_text_file, prf_file, epoch):
    """Compare a gold-standard segmentation file with a predicted one and
    report word-level precision / recall / F1.

    Matching lines count all their words as correct; for differing lines a
    predicted word is correct iff its (start, end) character span equals a
    gold word's span.  Results are printed and appended to *prf_file*.

    :param real_text_file: path of the gold segmentation
    :param pred_text_file: path of the predicted segmentation
    :param prf_file: log file the metrics are appended to
    :param epoch: epoch number recorded in the log
    :return: (precision, recall, F1)
    """
    N_count = 0       # total number of gold words
    e_count = 0       # predicted words matching no gold word
    c_count = 0       # predicted words matching a gold word
    e_line_count = 0  # lines with at least one difference
    c_line_count = 0  # lines segmented exactly right
    # Context managers so both inputs are always closed (the original
    # leaked every file handle it opened).
    with open(real_text_file, 'r', encoding='utf8') as file_gold, \
            open(pred_text_file, 'r', encoding='utf8') as file_tag:
        line1 = file_gold.readline().strip()
        while line1:
            line2 = file_tag.readline().strip()
            list1 = line1.split(' ')
            list2 = line2.split(' ')
            N_count += len(list1)
            if line1 == line2:
                c_line_count += 1
                c_count += len(list1)
            else:
                e_line_count += 1
                # compare (start, end) character spans; a set makes the
                # membership test O(1) instead of scanning a list
                gold_spans = set()
                pos = 0
                for w in list1:
                    gold_spans.add((pos, pos + len(w)))
                    pos += len(w)
                pos = 0
                for w in list2:
                    span = (pos, pos + len(w))
                    pos += len(w)
                    if span in gold_spans:
                        c_count += 1
                    else:
                        e_count += 1
            line1 = file_gold.readline().strip()
    R = float(c_count) / N_count
    P = float(c_count) / (c_count + e_count)
    F = 2. * P * R / (P + R)
    ER = 1. * e_count / N_count
    print("result:")
    print('标准词数:%d个,词数正确率:%f个,词数错误率:%f' % (N_count, c_count / N_count, e_count / N_count))
    print('标准行数:%d,行数正确率:%f,行数错误率:%f' % (c_line_count + e_line_count, c_line_count / (c_line_count + e_line_count),
                                       e_line_count / (c_line_count + e_line_count)))
    print('Recall: %f' % (R))
    print('Precision: %f' % (P))
    print('F MEASURE: %f' % (F))
    print('ERR RATE: %f' % (ER))
    with open(prf_file, 'a', encoding='utf-8') as f:
        f.write('result-(epoch:%s):\n' % epoch)
        f.write('标准词数:%d,词数正确率:%f,词数错误率:%f \n' % (N_count, c_count / N_count, e_count / N_count))
        f.write('标准行数:%d,行数正确率:%f,行数错误率:%f \n' % (c_line_count + e_line_count, c_line_count / (c_line_count + e_line_count),
                                           e_line_count / (c_line_count + e_line_count)))
        f.write('Recall: %f\n' % (R))
        f.write('Precision: %f\n' % (P))
        f.write('F MEASURE: %f\n' % (F))
        f.write('ERR RATE: %f\n' % (ER))
        f.write('====================================\n')
    return P, R, F
| YaooXu/Chinese_seg_ner_pos | evaluate.py | evaluate.py | py | 9,381 | python | en | code | 5 | github-code | 36 |
# CCC 2014 J3-style duel: both fighters start at 100 points; each round the
# fighter with the higher attack value deals that damage to the other.
times = int(input())
counta = 100
countd = 100
for _ in range(times):
    ap, dp = input().split(" ")
    a_hit = int(ap)
    d_hit = int(dp)
    if a_hit > d_hit:
        countd -= a_hit
    elif a_hit < d_hit:
        counta -= d_hit
    # a tie leaves both totals unchanged; the original had a duplicated,
    # unreachable `elif a < d` branch here and accumulated unused lists
print(counta)
print(countd)
| Aanjneya/CCC-UWaterloo-Solutions | 2014/Junior/2014 - J3.py | 2014 - J3.py | py | 464 | python | en | code | 0 | github-code | 36 |
14783455392 | '''
ME 598 CUDA
Homework 2
Author: Hien (Ryan) Nguyen
Last modified: 01/28/2018
'''
import numpy as np # import scientific computing library
import matplotlib.pyplot as plt # import plotting library
from numba import cuda
import math
import time
from mpl_toolkits import mplot3d
''' Question 2 functions '''
def f2D(x, y):
    """Scalar test function sin(pi*x) * sinh(pi*y) / sinh(pi)."""
    numerator = math.sin(np.pi * x) * math.sinh(np.pi * y)
    return numerator / math.sinh(np.pi)
def fArray2D(x, y):
    """Evaluate f2D on the tensor grid x × y.

    Returns a float32 array of shape (len(x), len(y)) with
    ``f[i, j] == f2D(x[i], y[j])``, filled by a plain double loop on the host.
    """
    result = np.empty((x.size, y.size), dtype=np.float32)
    for i, xi in enumerate(x):
        for j, yj in enumerate(y):
            result[i, j] = f2D(xi, yj)
    return result
@cuda.jit ( device = True )
def pf2D (x, y):
    # device-side twin of f2D: sin(pi*x) * sinh(pi*y) / sinh(pi)
    return math.sin ( np.pi *x)* math.sinh ( np.pi *y)/ math.sinh ( np.pi )
@cuda.jit ('void (f4 [:] , f4 [:] , f4 [: ,:])')
def pfKernel2D (d_x , d_y , d_f):
    # one thread per output element; the bounds check guards the threads
    # of partially-filled edge blocks
    i , j = cuda.grid (2)
    nx , ny = d_f.shape
    if i < nx and j < ny:
        d_f[i,j] = pf2D (d_x[i], d_y[j])
def pfArray2D (x, y, TPBX, TPBY):
    """Evaluate f on the x × y grid on the GPU.

    Copies the 1-D coordinate arrays to the device, launches pfKernel2D on a
    2-D grid of TPBX x TPBY thread blocks, prints the kernel-call time and
    returns the result copied back to the host.
    """
    nx = x.size
    ny = y.size
    d_x = cuda.to_device(x)
    d_y = cuda.to_device(y)
    d_f = cuda.device_array((nx, ny), dtype=np.float32)
    # ceiling division so every element gets a thread
    gridDims = ((nx + TPBX - 1) // TPBX,
            (ny + TPBY - 1) // TPBY)
    blockDims = (TPBX, TPBY)
    startTime = time.time()
    pfKernel2D[gridDims, blockDims](d_x, d_y, d_f)
    kTime = (time.time() - startTime) * 1000
    # NOTE(review): kernel launches are asynchronous — without a
    # cuda.synchronize() this measures launch overhead, not execution time.
    print ("Kernal call time is: ", kTime)
    return d_f.copy_to_host()
def question2():
    """Compare serial (fArray2D) vs GPU (pfArray2D) evaluation over square
    grids of growing size, then plot the runtimes and the speed-up."""
    print ()
    print ("---------- Question 2 ----------")
    TPBX = 8
    TPBY = 32
    NX = np.linspace(100, 1000, 10)
    NY = np.linspace(100, 1000, 10)
    sTime = [0]*len(NX)
    pTime = [0]*len(NX)
    accel = [0]*len(NX)
    for i in range(len(NX)):
        print ("Array size: ", NX[i])
        x = np.linspace (0,1,NX[i] , dtype = np.float32)
        y = np.linspace (0,1,NY[i] , dtype = np.float32)
        # serial timing
        startTime = time.time()
        fs = fArray2D (x, y)
        sTime[i] = (time.time() - startTime) * 1000
        print ("Series processing time: ", sTime[i])
        # parallel timing (includes transfers and JIT on the first call)
        startTime = time.time()
        fp = pfArray2D(x, y, TPBX, TPBY)
        pTime[i] = (time.time() - startTime) * 1000
        print ("Parallel processing time: ", pTime[i])
        accel[i] = sTime[i]/pTime[i]
        print ("Accel is: ", accel[i])
    plt.figure(1)
    plt.subplot(211)
    plt.plot(NX, sTime, 'r--', label='series runtime')
    plt.plot(NX, pTime, 'g^', label='parallel_runtime')
    plt.legend()
    plt.title("Series and Parallel Runtime vs Array Size")
    plt.subplot(212)
    plt.plot(NX, accel)
    plt.title("Acceleration vs Array Size")
    plt.show()
''' Question 3 functions '''
def question3():
    """Demonstrate the 32x32 threads-per-block launch limit on a 255x255 grid."""
    print ()
    print ("---------- Question 3 ----------")
    TPBX = 32
    TPBY = 32
    NX = 255
    NY = 255
    x = np.linspace(0, 1, NX, dtype=np.float32)
    y = np.linspace(0, 1, NY, dtype=np.float32)
    fp = pfArray2D(x, y, TPBX, TPBY)
    print ("32 is the largest number of thread a block can have."
        " Anything larger than that will produce the following error:"
        " numba.cuda.cudadrv.driver.CudaAPIError: [1] Call to"
        " cuLaunchKernel results in CUDA_ERROR_INVALID_VALUE")
''' Question 4 functions '''
def question4():
    """Print the (qualitative) observation for question 4: block aspect
    ratio has little effect on kernel timings."""
    print()
    print("---------- Question 4 ----------")
    message = ("Change in aspect ratio has very little affect on the kernel"
               " execution time or kernal call")
    print(message)
''' Question 5 functions '''
def question5():
    """Build sin·sin test data on the GPU, plot it, and show that the
    entrywise p-norm approaches the infinity norm as p grows."""
    print ()
    print ("---------- Question 5 ----------")
    arrayDimX = 255
    arrayDimY = 255
    # NOTE(review): this initial array is dead — it is overwritten by
    # make_matrix(x, y) a few lines below.
    array = [[0]*arrayDimX] * arrayDimY
    x = np.linspace(0, 2*math.pi, arrayDimX)
    y = np.linspace(0, 2*math.pi, arrayDimY)
    array = make_matrix(x, y)
    X, Y = np.meshgrid(x, y)
    plt.contourf(X, Y, array)
    plt.show()
    print ("Compute L2:")
    res = pnorm(array, 2)
    print ("Result is: ", res)
    print ("Compute L4:")
    res = pnorm(array, 4)
    print ("Result is: ", res)
    print ("Compute L6:")
    res = pnorm(array, 6)
    print ("Result is: ", res)
    print ("Compute L1000:")
    res = pnorm(array, 1000)
    print ("Result is: ", res)
    print ("The value of norm approaches 1 which is norm infinity as p increases")
def make_matrix(x, y):
    """Compute sin(2*pi*x_i) * sin(2*pi*y_j) for every grid point on the GPU
    and return the (len(x), len(y)) result on the host."""
    TPBX = 8
    TPBY = 8
    nx = np.array(x).shape[0]
    ny = np.array(y).shape[0]
    d_x = cuda.to_device(np.array(x))
    d_y = cuda.to_device(np.array(y))
    d_out = cuda.device_array((nx, ny))
    # ceiling division so partially filled edge blocks are launched too
    gridDims = ((nx + TPBX - 1) // TPBX,
            (ny + TPBY - 1) // TPBY)
    blockDims = (TPBX, TPBY)
    make_matrix_kerneld[gridDims, blockDims](d_x, d_y, d_out)
    return d_out.copy_to_host()
@cuda.jit
def make_matrix_kerneld(d_x, d_y, d_out):
    # one thread per (i, j) output element; bounds check guards edge blocks
    i , j = cuda.grid (2)
    nx = d_x.shape[0]
    ny = d_y.shape[0]
    if i < nx and j < ny:
        d_out[i, j] = math.sin(2*math.pi*d_x[i])*math.sin(2*math.pi*d_y[j])
@cuda.jit
def norm_kernel(d_array, p):
    # raise every element to the p-th power, in place
    i , j = cuda.grid (2)
    nx , ny = d_array.shape
    if i < nx and j < ny:
        d_array[i,j] = (d_array[i,j] ** p)
def pnorm(array, p):
    """Return the entrywise p-norm of *array*: (sum a_ij^p)^(1/p).

    NOTE(review): the kernel raises elements to the p-th power without
    taking the absolute value first, so this is only a true norm for even p.
    """
    TPBX = 8
    TPBY = 8
    nx, ny = np.array(array).shape
    d_array = cuda.to_device(np.array(array))
    gridDims = ((nx + TPBX - 1) // TPBX,
            (ny + TPBY - 1) // TPBY)
    blockDims = (TPBX, TPBY)
    # powers computed on the device (in place), sum accumulated on the host
    norm_kernel[gridDims, blockDims](d_array, p)
    res = 0
    d_arrayFlat = d_array.copy_to_host().flatten()
    for i in range(d_arrayFlat.shape[0]):
        res += d_arrayFlat[i]
    return res ** (1/p)
''' Question 6 '''
def question6():
    """Explain why IVPs cannot be parallelized over time but can be over
    initial conditions, then run subproblems 6c/6d/6e on a grid of initial
    conditions and surface-plot the final/initial radius ratio."""
    print ()
    print ("---------- Question 6 ----------")
    print ("For IVPs problems, there is no direct way to parallelize "
        "the computation over a grid of time intervals because current "
        "value depends on previous values of each states and thus to get "
        "value at time k, we need to already compute value of all states "
        "at time k-1")
    print ("For IVPs problems, there is a way to parallelize over a "
        "grid of initial conditions because the iteration process for "
        "each group of initial conditions are independent")
    nt = 1000
    t = np.linspace(0, 10, nt)
    dt = 1/nt
    # 50 x 50 grid of initial positions and velocities in [-3, 3]
    x_i = np.linspace(-3, 3, 50)
    v_i = np.linspace(-3, 3, 50)
    X,V = np.meshgrid(x_i, v_i)
    for subprob in ["6c", "6d", "6e"]:
        print ("Subproblem ", subprob)
        r = iterate(x_i, v_i, dt, nt, subprob)
        fig = plt.figure(6)
        ax = plt.axes(projection='3d')
        ax.scatter3D(X, V, r)
        ax.set_xlabel('x')
        ax.set_ylabel('y')
        ax.set_zlabel('r')
        plt.show()
# shared GPU driver for subproblems 6c / 6d / 6e
def iterate(x_i, v_i, dt, nt, prob):
    """Integrate the oscillator selected by *prob* from every (x0, v0) pair
    on the GPU and return the ratio of final to initial phase-space radius
    as a (len(x_i), len(v_i)) host array."""
    TPBX = 16
    TPBV = 16
    nx = np.array(x_i).shape[0]
    nv = np.array(v_i).shape[0]
    d_xi = cuda.to_device(np.array(x_i))
    d_vi = cuda.to_device(np.array(v_i))
    # full (x, v) trajectories are stored per initial-condition pair
    d_x = cuda.device_array((nx, nv, nt))
    d_v = cuda.device_array((nx, nv, nt))
    d_r = cuda.device_array((nx, nv))
    gridDims = ((nx + TPBX - 1) // TPBX,
            (nv + TPBV - 1) // TPBV)
    blockDims = (TPBX, TPBV)
    if prob == "6c":
        iterate_kernel_6c[gridDims, blockDims](d_xi, d_vi, d_x, d_v, dt, nt, d_r)
    elif prob == "6d":
        iterate_kernel_6d[gridDims, blockDims](d_xi, d_vi, d_x, d_v, dt, nt, d_r)
    elif prob == "6e":
        iterate_kernel_6e[gridDims, blockDims](d_xi, d_vi, d_x, d_v, dt, nt, d_r)
    return d_r.copy_to_host()
# 6d: damped oscillator, v' = -x - 0.1 v (forward-Euler)
@cuda.jit
def iterate_kernel_6d(d_xi, d_vi, d_x, d_v, dt, nt, d_r):
    i, j = cuda.grid(2)
    nx = d_xi.shape[0]
    nv = d_vi.shape[0]
    if i < nx and j < nv:
        d_x[i, j, 0] = d_xi[i]
        d_v[i, j, 0] = d_vi[j]
        for k in range(nt-1):
            d_v[i, j, k+1] = d_v[i, j, k] + (- d_x[i, j, k] - 0.1 * d_v[i, j, k]) * dt
            d_x[i, j, k+1] = d_x[i, j, k] + d_v[i, j, k] * dt
        # ratio of final to initial phase-space radius
        d_r[i,j] = (d_v[i, j, nt-1] ** 2 + d_x[i, j, nt-1] **2) ** 0.5 /((d_xi[i]**2 + d_vi[j]**2)**0.5)
# 6c: undamped oscillator, v' = -x (forward-Euler)
@cuda.jit
def iterate_kernel_6c(d_xi, d_vi, d_x, d_v, dt, nt, d_r):
    i, j = cuda.grid(2)
    nx = d_xi.shape[0]
    nv = d_vi.shape[0]
    if i < nx and j < nv:
        d_x[i, j, 0] = d_xi[i]
        d_v[i, j, 0] = d_vi[j]
        for k in range(nt-1):
            d_v[i, j, k+1] = d_v[i, j, k] - d_x[i, j, k] * dt
            d_x[i, j, k+1] = d_x[i, j, k] + d_v[i, j, k] * dt
        # ratio of final to initial phase-space radius
        d_r[i,j] = (d_v[i, j, nt-1] ** 2 + d_x[i, j, nt-1] **2) ** 0.5 /((d_xi[i]**2 + d_vi[j]**2)**0.5)
# 6e: Van der Pol-style oscillator, v' = -x + 0.1 (1 - x^2) v (forward-Euler)
@cuda.jit
def iterate_kernel_6e(d_xi, d_vi, d_x, d_v, dt, nt, d_r):
    i, j = cuda.grid(2)
    nx = d_xi.shape[0]
    nv = d_vi.shape[0]
    if i < nx and j < nv:
        d_x[i, j, 0] = d_xi[i]
        d_v[i, j, 0] = d_vi[j]
        for k in range(nt-1):
            d_v[i, j, k+1] = d_v[i, j, k] + (- d_x[i, j, k] + 0.1*(1-d_x[i, j, k]**2) * d_v[i, j, k]) * dt
            d_x[i, j, k+1] = d_x[i, j, k] + d_v[i, j, k] * dt
        # ratio of final to initial phase-space radius
        d_r[i,j] = (d_v[i, j, nt-1] ** 2 + d_x[i, j, nt-1] **2) ** 0.5 /((d_xi[i]**2 + d_vi[j]**2)**0.5)
def main():
    """Run all homework questions in order."""
    question2()
    question3()
    question4()
    question5()
    question6()

# call to execute main
if __name__ == '__main__':
    main()
127603653 | # encoding=utf8
import requests
from lxml import etree
class cityAreaCode():
    """Scrape Chinese administrative area codes from www.ip33.com.

    The page nests province (h4) -> city/district (h5) -> area (li);
    :meth:`get` walks that hierarchy and prints one labelled line per area.
    """

    def __init__(self):
        self.url = "http://www.ip33.com/area/2019.html"

    def get(self):
        """Fetch the page and print province / city / area name-code lines."""
        page = requests.get(self.url)
        page.encoding = 'utf-8'
        _element = etree.HTML(page.text)
        # first "ip" div skipped — presumably not part of the area table;
        # confirm against the live page layout
        divs = _element.xpath('//div[@class="ip"]')[1:]
        for div in divs:
            # re-serialise each div so it can be parsed as its own document
            divtext = etree.tostring(div, encoding="utf-8", pretty_print=False).decode("utf-8")
            _element1 = etree.HTML(divtext)
            h4s = _element1.xpath('//div/h4')
            lis = _element1.xpath('//div/ul/li')
            for li in lis:
                litext = etree.tostring(li, encoding="utf-8", pretty_print=False).decode("utf-8")
                _element2 = etree.HTML(litext)
                h5s = _element2.xpath('//li/h5')
                lilis = _element2.xpath('//li/ul/li')
                for lili in lilis:
                    print('(省名称)',h4s[0].text.replace(' ','(省代码)'), '(市/区名称)', h5s[0].text.replace(' ','(市/区代码)'), "(地区名称)", lili.text.replace(' ','(地区代码)'))
if __name__ == '__main__':
    # Use a distinct variable name: the original rebound ``cityAreaCode`` to
    # the instance, shadowing the class it had just instantiated.
    scraper = cityAreaCode()
    scraper.get()
| lazyting/climbworm | python/CityAreaCode.py | CityAreaCode.py | py | 1,228 | python | en | code | 1 | github-code | 36 |
74678159463 | '''
Created on 07/02/2018
@author: Carolina
'''
import unittest
from shapes import Shape, Rectangle
class Test(unittest.TestCase):
    """Unit tests for Shape and Rectangle."""

    def test_shape(self):
        shape = Shape((0, 0, 0), 'wood')
        # the original called assertEqual with a single argument, which
        # raises TypeError before any comparison happens
        self.assertEqual('Color: (0,0,0) Material: wood Max_Temp: 20',
                         str(shape))
        shape2 = Shape((0, 0, 0), 'wood')
        self.assertEqual(shape, shape2)
        shape3 = Shape((0, 23, 0), 'woods')
        self.assertNotEqual(shape, shape3)

    def test_rectangle(self):
        rectangle = Rectangle(10, 20, (0, 0, 0), 'wood')
        # the original closed the parenthesis early, turning the string
        # comparison into a no-op tuple expression
        self.assertEqual(
            'Rectangle -> Comp. 10 lag. 20 Color: (0,0,0) Material: wood Max_Temp: 20',
            str(rectangle))
        self.assertEqual(200, rectangle.get_area())
        rectangle2 = Rectangle(10, 30, (0, 0, 0), 'wood')
        # the original asserted against `rectangle` (area 200) instead of
        # the new 10x30 rectangle whose area is 300
        self.assertEqual(300, rectangle2.get_area())
        self.assertNotEqual(rectangle2, rectangle)
        rectangle3 = Rectangle(10, 20, (0, 0, 0), 'wood')
        self.assertEqual(rectangle3, rectangle)


if __name__ == "__main__":
    #import sys;sys.argv = ['', 'Test.testName']
    unittest.main()
34415438640 | from timeit import timeit
def runLengthEncoding(string):
    """Run-length encode *string*, capping each run at 9 so every count is
    a single digit (e.g. 12 'A's become '9A3A')."""
    encoded = []
    prev = None
    run = 0
    for ch in string:
        if ch == prev and run < 9:
            run += 1
        else:
            if run:
                encoded.append(str(run) + prev)
            prev = ch
            run = 1
    if run:
        encoded.append(str(run) + prev)
    return "".join(encoded)
def runLengthEncoding_list(string):
    """List-based variant of run-length encoding (runs capped at 9),
    kept for the benchmark comparison below."""
    chars = list(string)
    total = len(chars)
    pieces = []
    start = 0
    while start < total:
        end = start
        # extend the run while the character repeats, at most 9 long
        while end < total and end - start < 9 and chars[end] == chars[start]:
            end += 1
        pieces.append(f"{end - start}{chars[start]}")
        start = end
    return "".join(pieces)
# print(runLengthEncoding("AAAAAABBCCCDDDDDDDDRRRRADS"))
# Benchmark fixture: a long repetitive string exercises both encoders.
string = ("AAAAAABBCCCDDDDDDDDRRRRADSAAAAAABBCCCDDDDDDDDRRRRADSAAAA" +
          "AABBCCCDDDDDDDDRRRRADSAAAAAABBCCCDDDDDDDDRRRRADSAAAAAABB" +
          "CCCDDDDDDDDRRRRADSAAAAAABBCCCDDDDDDDDRRRRADSAAAAAABBCCCD" +
          "DDDDDDDRRRRADSAAAAAABBCCCDDDDDDDDRRRRADSAAAAAABBCCCDDDDD" +
          "DDDRRRRADSAAAAAABBCCCDDDDDDDDRRRRADSDDDDDDDRRRRADSDDDDDD")
itr = 10_000
# Time the string-indexing and the list-based implementation head to head.
for foo in [runLengthEncoding, runLengthEncoding_list]:
    t = timeit(stmt="foo(string)", number=itr, globals=globals())
    print(f"{foo.__name__} runed in {t:.6f} seconds")
| serb00/AlgoExpert | Strings/Easy/012_run_lenght_encoding.py | 012_run_lenght_encoding.py | py | 1,432 | python | en | code | 0 | github-code | 36 |
21788142764 | import numpy as np
from scipy.constants import G
from scipy.interpolate import interp1d
from astropy.constants import kpc
import general as ge
class zhao(object):
""" Class for generating a potential for a spherical dark matter halo,
using the data generated by the model of Zhao (2009).
Attributes:
fName: The name of the file containing the data generated by the
model of Zhao. The data should be generated using the code
provided at: http://202.127.29.4/dhzhao/mandc.html
data: The loaded data from the text file; the data is stored in
a 2D numpy array. For more information on which data is
stored, see the README file generated using the code.
h0: Little h, hubble parameter at z=0 divided by 100, unitless.
Value from the Planck collaboration (2020).
red: The redshift values at which the properties of the dark
matter halo are computed.
mass: The virial mass of the dark matter halo in units of solar
masses, computed at the redshifts given by red.
conc: The concentration of the halo (r_vir / r_s) at the given
redshifts.
virR: The virial radius at the redshift values of the dark
matter halo in meters.
rhoS: The density at the scale radius in kg/m^3.
rS: The scale radius of the dark matter halo in meters.
time: The lookback time corresponding to the redshift values in
years.
"""
def __init__(self, fName):
""" Initializing the potential according to the model of Zhao
Input:
fName (string)
Returns:
zhao (object)
"""
self.fName = fName # File name
data = self.load_data()
h0 = 0.6766
t0 = data[:,-1][0] / h0
# Unpacking data
self.red = data[:,0] # Redshift
self.mass = data[:,1] / h0 # Virial mass
self.conc = data[:,2] # Concentration
self.virR = 1e3 * kpc.value * data[:,4] / h0 # Virial radius
self.rhoS = ge.conv_inv_dens(data[:,7]/1e18) * h0 * h0 # rho_s
self.rS = 1e3 * kpc.value * data[:,8] / h0 # r_s
self.time = t0 - data[:,-1] / h0 # Age of Universe
def load_data(self):
""" Loading data from a generated data file
Input:
-
Returns:
data: array containing the properties of the dark matter
halo (2D numpy array).
"""
with open(self.fName) as f:
data = np.loadtxt((x.replace(' ', ' ') for x in f), skiprows=1)
return data
def find_z_ind(self, zV):
""" Input a redshift and find the indices corresponding to the closest
redshift value(s) of the generated data. If zV is an array then
the closest indices for all the redshift values are determined and
returned.
Input:
zV: The redshift value(s) for which the closest index has
to be found (float or numpy array).
Returns:
The indices corresponding to the closest redshift values
(integer or numpy array).
"""
if type(zV) != np.ndarray and type(zV) != list and type(zV) != tuple:
return ge.find_closest(self.red, zV)[0]
return np.asarray([ge.find_closest(self.red, z)[0] for z in zV])
def rs_rhos_at_z(self, zV):
""" Find the scale radius (r_s) and the density at the scale radius
(rho_s) for a given redshift value. This is done by finding the
closest redshift value to the input redshift value(s), NOT by
interpolating.
Input:
zV: redshift(s) at which r_s and rho_s will be determined
(float or numpy array).
Returns:
Value of r_s at zV (float or numpy array).
Value of rho_s at zV (float or numpy array).
"""
zInd = self.find_z_ind(zV) # Correct z index
return self.rS[zInd], self.rhoS[zInd]
def mass_at_r(self, zV, r):
""" The mass of the dark matter halo as function of distance from the
center of the dark matter halo at a given redshift.
Input:
zV: redshift(s) at which the mass as function of radius
is determined (float or numpy array).
r: the distances from the center of the halo at which
the mass will be calculated (float or numpy array).
Returns:
mass as function of r and z (float or numpy array (1D or 2D))
"""
rS, rhoS = self.rs_rhos_at_z(zV) # r_s and rho_s
mass = [rhoS[i] * rS[i] * (np.log(1+r/rS[i]) - r / (rS[i] + r))
for i in range(len(rS))]
return 16 * np.pi * np.asarray(mass)
def simp_profile(self, x):
""" The NFW profile density profile for the dark matter halo. This
function gives rho/rho_s as function of r/r_s. Therefore you do
not need to specify the parameters r_s and rho_s. Moreover, this
profile is time independent.
Input:
x: r/r_s values, dimensionless (numpy array).
Returns:
rho/rho_s for the given x values (numpy array).
"""
return 4 / (x * np.power(1+x, 2))
def nfw_profile(self, zV, r):
""" A time dependent NFW density profile. With the input of the
desired redshift value(s), a time dependent density profile
as function of radius is output. r_s and rho_s are determined
using the model of van den Bosch.
Input:
zV: the redshift values at which the density profile
is computed (float or numpy array).
r: distance from the center of the halo (float or
numpy array).
Returns:
time dependent NFW profile (float or numpy array(1D or 2D))
"""
zInd = self.find_z_ind(zV) # Selecting z ind
rS = self.rS # r_s
rhoS = self.rhoS # rho_s
if type(zInd) != np.ndarray: # Single z value
denom = r * np.power(1 + r/rS[zInd], 2) / rS[zInd]
return 4 * rhoS[zInd] / denom
selrS, selrhoS = rS[zInd], rhoS[zInd] # Slicing lists
# Multiple z values
frac = [selrhoS[ind] / (r * np.power(1 + r/selrS[ind], 2) / selrS[ind])
for ind in range(len(selrS))]
return 4 * np.asarray(frac)
    def pot_nfw(self, zV, r):
        """ The gravitational potential corresponding to the NFW
            density profile. This is obtained by solving the Poisson
            equation. For the NFW profile there exists an analytical
            solution.

            Input:
                zV: the redshift values at which the potential is
                    computed (float or numpy array).
                r:  distance from the center of the halo (float or
                    numpy array).

            Returns:
                gravitational potential (float or numpy array(1D or 2D))
        """
        rhoS = self.rhoS                        # rho_s
        rS = self.rS                            # r_s
        zInd = self.find_z_ind(zV)              # Finding correct z
        if type(zInd) != np.ndarray:            # Single z value
            # Need to interpolate because the tabulated z grid is coarse.
            interpRS = interp1d(self.red, self.rS)      # Creating r_s int. object
            selrS = interpRS(zV)                        # Interpolating r_s
            interpRhoS = interp1d(self.red, self.rhoS)  # Creating rho_s int. object
            selrhoS = interpRhoS(zV)                    # Interpolating rho_s
            part1 = -16 * np.pi * G * selrhoS * selrS * selrS
            part2 = np.log(1 + r/selrS) / (r/selrS)
            return part1 * part2
        # Multiple z values: use the tabulated grid values directly.
        selrS, selrhoS = rS[zInd], rhoS[zInd]   # Slicing lists
        part1 = -16 * np.pi * G * selrhoS * selrS * selrS   # First part
        part2 = [np.log(1 + r/rsV) / (r/rsV) for rsV in selrS]   # Second part
        phi = [part1[ind] * part2[ind] for ind in range(len(zInd))]   # Potential
        return np.asarray(phi)
| Evd-V/Bachelor-thesis | zhao.py | zhao.py | py | 9,478 | python | en | code | 0 | github-code | 36 |
9710179885 | import random
from random import randint
import pygame
from essais.essai_dijkstra_damier import title
from lib_dijkstra import DijkstraManager, Point, pyrect_to_point, point_to_pyrect
verbose = False
class Entity(pygame.sprite.Sprite):
    """A movable sprite with four-direction animation frames.

    Frames are cut from ../sprites/<name>.png (32x32 tiles, one row per
    facing direction).  A narrow ``feet`` rect at the bottom of the sprite
    is kept separately for ground/wall collision tests.
    """

    def __init__(self, name, x, y, screen=None):
        super().__init__()
        self.name = name
        self.sprite_sheet = pygame.image.load(f'../sprites/{name}.png')
        self.image = self.get_image(0, 0)
        self.image.set_colorkey([0, 0, 0])  # black is rendered as transparent
        self.rect = self.image.get_rect()  # on-screen position of the sprite
        self.position = [x, y]
        self.screen = screen if screen else None
        # Each frame is 32 * 32; rows are down / left / right / up.
        self.images = {
            'down': self.get_image(0, 0),
            'left': self.get_image(0, 32),
            'right': self.get_image(0, 64),
            'up': self.get_image(0, 96)
        }
        # Collision rect covering only the bottom half-width of the sprite.
        self.feet = pygame.Rect(0, 0, self.rect.width * 0.5, 16)
        self.old_position = self.position.copy()
        self.speed = 2

    def save_location(self):
        """Remember the current position so move_back() can restore it."""
        self.old_position = self.position.copy()

    def change_animation(self, attitude):
        """Switch the displayed frame; attitude is one of
        ('up', 'down', 'left', 'right')."""
        self.image = self.images[attitude]
        self.image.set_colorkey((0, 0, 0))

    def move_right(self):
        self.position[0] += self.speed

    def move_left(self):
        self.position[0] -= self.speed

    def move_up(self):
        self.position[1] -= self.speed

    def move_down(self):
        self.position[1] += self.speed

    def update(self):
        """Sync the drawing rect and the feet rect with the logical position."""
        self.rect.topleft = self.position
        self.feet.midbottom = self.rect.midbottom

    def move_back(self):
        """Cancel the last move (used after a collision)."""
        self.position = self.old_position
        self.rect.topleft = self.position
        self.feet.midbottom = self.rect.midbottom

    def get_image(self, x, y):
        """Extract the 32x32 frame located at (x, y) in the sprite sheet."""
        image = pygame.Surface([32, 32])
        image.blit(self.sprite_sheet, (0, 0), (x, y, 32, 32))
        return image
class Player(Entity):
    """The user-controlled entity, using the 'player' sprite sheet."""

    def __init__(self, x, y):
        super().__init__('player', x, y)
class NPC(Entity):
    """An autonomous entity that walks between target areas of the map.

    The areas come from the tmx map (rects with names like ``robin_path1``);
    each area is a pygame.Rect.  Between two areas the NPC follows a path
    found either by a simple probabilistic heuristic (move_classical) or by
    a Dijkstra search on a simplified grid of the map (move_dij).

    Remember to call the start-up methods after creating an NPC,
    e.g. define_first_target().
    """

    def __init__(self, name, map_manager, map_name, screen=None):
        super().__init__(name, 500, 550, screen)
        self.name = name
        self.change_animation("left")
        self.map_manager = map_manager
        self.map_name = map_name
        self.debug_count = 0
        self.move_direction = None
        self.areas = []  # the target areas: list of pygame.Rect
        self.areas_nb = None
        self.current_area_idx = None  # index of the current area
        self.next_area_idx = None
        # Choose the path-finding strategy used by move().
        self.use_dijkstra = True
        # Dijkstra solver working on the simplified map.
        # BUGFIX: this attribute was initialised as ``self.djik`` while every
        # other method reads/writes ``self.dijk``, so move_dij() could hit an
        # AttributeError before calculate_dijkstra() ran.
        self.dijk = None
        # The points below guide the movement during a walk.
        self.prev_point = None  # the Point we come from; set by calculate_dijkstra
        self.next_point = None  # the Point we are heading to
        self.next_point_rect: pygame.Rect = None  # same point as a pygame.Rect
        self.next_dir = None

    def calculate_next_area_idx(self):
        """Pick a random target area different from the current one."""
        while True:
            rnd = randint(0, self.areas_nb - 1)
            if rnd != self.current_area_idx:
                self.next_area_idx = rnd
                break

    def modify_speed(self):
        """Randomly nudge the speed by -1/0/+1, clamped to the range 1..3."""
        self.speed = self.speed + randint(-1, 1)
        if self.speed == 0:
            self.speed = 1
        elif self.speed == 4:
            self.speed = 3

    def calculate_move_direction(self):
        """Very basic algorithm: derive the general compass direction
        (NE/NW/SE/SW) from the feet towards the target area's center."""
        target_point = self.areas[self.next_area_idx].center
        feet_point = self.feet.center
        rect = pygame.Rect(feet_point[0], feet_point[1],
                           target_point[0] - feet_point[0], target_point[1] - feet_point[1])
        x, y, w, h = rect
        if w > 0:
            if h > 0:
                self.move_direction = 'SE'
            else:
                self.move_direction = 'NE'
        else:  # w is negative
            if h > 0:
                self.move_direction = 'SW'
            else:
                self.move_direction = 'NW'
        print(f"Nouvelle cible : {self.next_area_idx}, direction : {self.move_direction}")

    def calculate_dijkstra(self, verbose=False):
        """Compute a Dijkstra path on the simplified map.

        The algorithm works on a reduced version of the map (the reduction
        factor is one or two tile sizes).  Each area (pygame.Rect) is
        converted to a Point of the simplified map before the search.
        """
        map = self.map_manager.maps[self.map_name].simple_map
        self.dijk = DijkstraManager(map)
        start_area = self.areas[self.current_area_idx]
        start_point = pyrect_to_point(self.map_manager.maps[self.map_name], start_area, 32)
        next_area = self.areas[self.next_area_idx]
        map_name = self.map_manager.maps[self.map_name]
        end_point = pyrect_to_point(map_name, next_area, 32)
        if verbose:
            print(f"Il faut aller du point {start_point} au point {end_point}")
        self.dijk.dijkstra(start_point, verbose=0)
        self.dijk.format_path(start_point, end_point, verbose=True)
        self.prev_point = start_point
        self.dijk.give_next_instruction()  # IMPORTANT: discard the last value
        self.next_point, self.next_dir = self.dijk.give_next_instruction()
        self.next_point_rect = pygame.Rect(point_to_pyrect(map_name, self.next_point))
        print("Fin de calcul du Dijkstra")
        print(f"{self.next_dir} point_actuel: {self.rect} next_point: {self.next_point} ; next_point_rect : {self.next_point_rect}")

    def define_first_target(self):
        """Set the starting area and pick the first target."""
        self.current_area_idx = 0  # area index
        # For a normal run, use these lines
        self.calculate_next_area_idx()
        # self.calculate_move_direction()
        # For debugging, use these lines instead
        # self.next_pyrect_idx = 2
        # self.move_direction = 'SE'

    def teleport_npc(self):
        """Place the NPC at the top-left corner of its current area."""
        first_area = self.areas[self.current_area_idx]
        self.position[0] = first_area.x
        self.position[1] = first_area.y
        self.save_location()

    def move(self):
        """Advance one step using the selected path-finding strategy."""
        self.save_location()  # attempt to work around a BIG bug
        self.debug_count += 1
        if self.use_dijkstra:
            self.move_dij()
        else:
            self.move_classical()

    def move_dij(self):
        """Automatic movement following the precomputed Dijkstra instructions."""
        sens = self.next_dir
        if sens == 'R':
            self.move_right()
        elif sens == 'L':
            self.move_left()
        elif sens == 'B':
            self.move_down()
        elif sens == 'T':
            self.move_up()
        elif sens is None:
            pass
        else:
            raise ValueError(f"{sens} : error code letter")
        if self.rect.colliderect(self.next_point_rect):
            print(" *************** COLISION **************")
            self.prev_point = self.next_point  # currently unused
            self.next_point, self.next_dir = self.dijk.give_next_instruction()
            if self.next_point:
                # BUGFIX: point_to_pyrect expects the map object, not the
                # map-name string (cf. calculate_dijkstra above).
                map_obj = self.map_manager.maps[self.map_name]
                self.next_point_rect = pygame.Rect(point_to_pyrect(map_obj, self.next_point))
            else:
                print("********** Arrivé ! ************")
                # Find a new target for the NPC
                self.current_area_idx = self.next_area_idx
                self.calculate_next_area_idx()
                self.calculate_dijkstra()
            print(f"{self.debug_count}, {sens} actuel : point_actuel: {self.prev_point} rect: {self.rect} next_point: {self.next_point} ; next_point_rect : {self.next_point_rect}")
            print(f"next_dir devient {self.next_dir}")

    def move_classical(self):
        """Automatic movement with the basic (probabilistic) algorithm."""
        feet_rect = self.feet
        target_rect = self.areas[self.next_area_idx]
        feet_to_target_rect = pygame.Rect(feet_rect.x, feet_rect.y,
                                          target_rect.x - feet_rect.x, target_rect.y - feet_rect.y)
        move_vert = None
        move_horz = None
        if self.move_direction == 'SE':
            move_horz = self.move_right
            move_vert = self.move_down
            self.change_animation("right")
        elif self.move_direction == 'NW':
            move_horz = self.move_left
            move_vert = self.move_up
            self.change_animation("left")
        elif self.move_direction == 'SW':
            move_horz = self.move_left
            move_vert = self.move_down
            self.change_animation("left")
        elif self.move_direction == 'NE':
            move_horz = self.move_right
            move_vert = self.move_up
            self.change_animation("right")
        if feet_to_target_rect.height == 0:
            feet_to_target_rect.height = 5
            move_vert()
        else:
            # Probability of moving horizontally, proportional to the share
            # of the remaining distance that is horizontal.
            try:
                odd_horiz_mvt = feet_to_target_rect.width / (feet_to_target_rect.height + feet_to_target_rect.width)
            except ZeroDivisionError:
                odd_horiz_mvt = 0.95
            if verbose:
                print(f"{feet_to_target_rect}, {self.name} Odd ratio : {odd_horiz_mvt}")
            if odd_horiz_mvt == 0:
                move_vert()
            else:
                rnd = random.random()
                if rnd > odd_horiz_mvt:
                    move_vert()
                else:
                    move_horz()
        if self.rect.colliderect(target_rect):
            self.current_area_idx = self.next_area_idx
            self.calculate_next_area_idx()
            self.calculate_move_direction()
| bermau/PW_19_pygamon | src/player.py | player.py | py | 10,618 | python | fr | code | 0 | github-code | 36 |
73379175144 | from functools import wraps
from flask import url_for, redirect, session
# Login-required decorator.
# Wraps a view so anonymous visitors are redirected to the login page
# instead of seeing the protected content.
def login_required(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        if not session.get('user_id'):
            return redirect(url_for('login'))
        return func(*args, **kwargs)
    return wrapper
| BobXGY/bobqa | decorators.py | decorators.py | py | 419 | python | zh | code | 0 | github-code | 36 |
def repeatedStringMatch(A: str, B: str) -> int:
    """LeetCode 686: minimum number of times A must be repeated so that B
    is a substring of the repetition, or -1 if no such count exists.

    It suffices to check q, q+1 and q+2 repetitions where
    q = len(B) // len(A): fewer copies are too short to contain B, and if
    B does not fit with that many copies, more copies cannot help.
    """
    # (An earlier hand-rolled search was left here commented out; removed
    # as dead code.)
    base = len(B) // len(A)
    for count in range(base, base + 3):
        if B in count * A:
            return count
    return -1
# Quick manual check; the literal below is the string 'a*10...' (with a '*'),
# not a repeated string.  The second call is kept commented out on purpose.
print(repeatedStringMatch('a*1000000000000000000','aaaaaaaaaaaaaaaaaaaaaaaa'))
#repeatedStringMatch('abaabaa','abaababaab')
25106892699 | import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.layers.experimental import preprocessing
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import tensorflowjs as tfjs
# Download the UCI Auto MPG dataset; '?' marks missing values.
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data'
column_names = ['MPG', 'Cylinders', 'Displacement', 'Horsepower', 'Weight',
                'Acceleration', 'Model Year', 'Origin']

raw_dataset = pd.read_csv(url, names=column_names,
                          na_values='?', comment='\t',
                          sep=' ', skipinitialspace=True)

dataset = raw_dataset.copy()
dataset.tail()

# Drop rows with missing values rather than imputing.
dataset.isna().sum()
dataset = dataset.dropna()

# One-hot encode the categorical 'Origin' column.
dataset['Origin'] = dataset['Origin'].map({1: 'USA', 2: 'Europe', 3: 'Japan'})
dataset = pd.get_dummies(dataset, prefix='', prefix_sep='')
dataset.tail()

# 80/20 train/test split with a fixed seed for reproducibility.
train_dataset = dataset.sample(frac=0.8, random_state=0)
test_dataset = dataset.drop(train_dataset.index)

train_features = train_dataset.copy()
test_features = test_dataset.copy()

# The regression target is miles per gallon.
train_labels = train_features.pop('MPG')
test_labels = test_features.pop('MPG')

# Normalization layer adapted to the training feature statistics.
normalizer = preprocessing.Normalization()
normalizer.adapt(np.array(train_features))

first = np.array(train_features[:1])

with np.printoptions(precision=2, suppress=True):
    print('First example:', first)
    print()
    print('Normalized:', normalizer(first).numpy())

# Single-input model on Horsepower: a normalization layer plus a small
# dense head with one hidden layer.
horsepower_model = keras.Sequential([
    tf.keras.layers.LayerNormalization(
        axis=-1, dtype='float64', epsilon=0.001, input_shape=[1]
    ),
    layers.Dense(2, activation='relu'),
    layers.Dense(1)
])

horsepower_model.compile(
    optimizer=tf.optimizers.Adam(learning_rate=0.1),
    loss='mean_absolute_error')

history = horsepower_model.fit(
    train_features['Horsepower'], train_labels,
    epochs=100,
    # suppress logging
    verbose=1,
    # Calculate validation results on 20% of the training data
    validation_split = 0.2)
| RashmitShrestha/mlModels | nn.py | nn.py | py | 1,993 | python | en | code | 0 | github-code | 36 |
74309241063 | import time, os, boto3, json, decimal
from boto3.dynamodb.conditions import Key
from helpers import send_to_datastream
from helpers import _get_body
from helpers import _get_response
from helpers import DecimalEncoder
# Initialise the DynamoDB resource at import time; failures are printed but
# not fatal so the module can still be imported (e.g. for local testing).
try:
    dynamodb = boto3.resource('dynamodb')
    phase_status_table = dynamodb.Table(os.getenv('PHASE_STATUS_TABLE'))
except Exception as e:
    print(e)
def post_phase_status(event, context):
    """Broadcast a site phase-status message and persist it for one day.

    Expects a JSON body with 'site' and 'message'.  The payload is sent to
    the 'phase_status' datastream topic and stored in DynamoDB with a 24 h
    TTL.  Returns a 400 response when required arguments are missing.
    """
    body = _get_body(event)
    try:
        site = body['site']
        message = body['message']
    # KeyError: missing field; TypeError: body was not parsed into a dict.
    except (KeyError, TypeError):
        return _get_response(400, 'Unable to parse all required arguments. ')

    timestamp = time.time()

    payload = {
        "site": site,
        "timestamp": timestamp,
        "message": message,
    }

    # Send to datastream
    topic = "phase_status"
    send_to_datastream(site, payload, topic)

    # Save in the database.  Floats must be converted to Decimal because
    # DynamoDB does not accept Python floats.
    payload["ttl"] = timestamp + 86400  # ttl = one day
    dynamodb_entry = json.loads(json.dumps(payload, cls=DecimalEncoder), parse_float=decimal.Decimal)
    phase_status_table.put_item(Item=dynamodb_entry)

    return _get_response(200, 'Phase status broadcasted to sites successfully.')
def get_phase_status(event, context):
    """Return up to 3 recent phase-status entries for a site.

    Reads the site from the path parameters and an optional
    'max_age_seconds' query parameter (default one hour); only entries
    newer than that cutoff are returned, most recent first.
    """
    try:
        site = event['pathParameters']['site']
    except Exception:
        return _get_response(400, 'Missing path parameter site')

    # API Gateway sends queryStringParameters as None (not a missing key)
    # when there is no query string, so `.get(key, {})` alone would crash
    # with AttributeError — guard against None explicitly.
    query_params = event.get('queryStringParameters') or {}
    max_age_seconds = query_params.get('max_age_seconds', 3600)  # default max age is 1 hour
    timestamp_cutoff = int(time.time() - int(max_age_seconds))

    phase_status = phase_status_table.query(
        Limit=3,
        ScanIndexForward=False,  # sort by most recent first
        KeyConditionExpression=Key('site').eq(site) & Key('timestamp').gt(timestamp_cutoff)
    )
    return _get_response(200, phase_status['Items'])
if __name__=="__main__":
    # Manual smoke test against the dev table.
    phase_status_table = dynamodb.Table('phase-status-dev')
    payload = json.dumps({
        "site": "tst",
        "message": "a phase message 2",
    })
    #post_phase_status({"body": payload}, {})
    event = {
        "pathParameters": {
            "site": "tst"
        },
        "queryStringParameters": {
            "max_age_seconds": "3600"
        }
    }
    context = {}
    ps = get_phase_status(event, context)
    print(ps['body'])
| LCOGT/photonranch-status | phase_status.py | phase_status.py | py | 2,369 | python | en | code | 0 | github-code | 36 |
15695220227 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy.stats
import wbgapi as wb
import seaborn as sns
def world(ind, code, years):
    '''
    Fetch World Bank data for the given indicator(s): once restricted to
    the selected countries (plus its transpose) and once for all countries.

    Parameters
    ----------
    ind : index (indicator id(s))
    code : country code(s)
    years : total number of years (most recent values)

    Returns
    -------
    data : original data for the selected countries
    data_t : transposed data
    worlddata : complete world data
    '''
    country_data = wb.data.DataFrame(ind, code, mrv=years)
    world_data = wb.data.DataFrame(ind, mrv=years)
    return country_data, country_data.T, world_data
def modify(data):
    '''
    Clean up yearly country data into per-year means in tidy form.

    Parameters
    ----------
    data : DataFrame whose columns are averaged

    Returns
    -------
    data_mod1 : per-column mean (pandas Series)
    data_mod3 : two-column DataFrame with 'year' and 'mean' columns
    '''
    mean_series = data.mean()
    tidy = pd.DataFrame(mean_series)
    tidy.reset_index(level=0, inplace=True)
    tidy = tidy.rename(columns={"index": "year", 0: "mean"})
    return mean_series, tidy
def box(x, y):
    '''
    Box plot comparing CO2 emissions of INDIA, UNITED KINGDOM and CHINA
    with the world average.

    Parameters
    ----------
    x : list of value series, one per box
    y : list of tick labels, one per box

    Returns
    -------
    None.
    '''
    fig = plt.figure(figsize=(4, 3))
    ax = fig.add_axes([0, 0, 1, 1])
    cc = ax.boxplot(x)
    ax.set_xlabel("countries")
    ax.set_ylabel("CO2 EMISIONS(% change)")
    ax.set_title("CO2 EMMISIONS COMPARISIONS")
    # One tick per box, labelled with the country names.
    ax.set_xticks([1, 2, 3, 4])
    ax.set_xticklabels(y)
    plt.show()
    return
country_codes = ["PAK", "GBR", "CHN", "NAC", "IND"]  # country codes
wb.series.info('EN.ATM.GHGT.KT.CE')  # getting info from world bank api
indicator_id = {"EN.ATM.GHGT.KT.CE", "EN.ATM.CO2E.KT",
                "AG.LND.ARBL.ZS", "AG.LND.AGRI.ZS"}  # indicators to access data

# Dictionaries mapping each indicator id to a human-readable label.
AG = {"AG.LND.AGRI.ZS": "AGRICULTURAL LAND(%)"}
ABL = {"AG.LND.ARBL.ZS": "ARABLE LAND (%)"}
CO2 = {"EN.ATM.CO2E.KT": "CO2 EMISSIONS(KT)"}
GHG = {"EN.ATM.GHGT.KT.CE": "TOTAL GREENHOUSE GAS EMISSIONS(KT)"}
wb.series.info(indicator_id)

# accessing data by calling "world" function (30 most recent years)
AG, AG_T, AG_world = world(AG.keys(), country_codes, 30)
AG_T.describe()
# accessing data by calling "world" function
Co2, CO2_T, CO2_world = world(CO2.keys(), country_codes, 30)
CO2_T.describe()
# accessing data by calling "world" function
ABL, ABL_T, ABL_world = world(ABL.keys(), country_codes, 30)
ABL_T.describe()
# accessing data by calling "world" function
GHG, GHG_T, GHG_world = world(GHG.keys(), country_codes, 30)
GHG_T.describe()

# Yearly world means for each indicator (via modify()).
co2_mod, co2_W_mod = modify(CO2_world)
Ghg_mod, Ghg_W_mod = modify(GHG_world)
ag_mod, ag_W_mod = modify(AG_world)
abl_mod, abl_W_mod = modify(ABL_world)
abl_W_mod.describe()

# Re-label the transposed frames so the index is called "year".
c = CO2_T.rename(columns={"index": "year", 0: "mean"})
co2_t = c.rename_axis("year")
a = AG_T.rename(columns={"index": "year", 0: "mean"})
ag_t = a.rename_axis("year")
ag = ABL_T.rename(columns={"index": "year", 0: "mean"})
agl_t = ag.rename_axis("year")
g = GHG_T.rename(columns={"index": "year", 0: "mean"})
ghg_t = g.rename_axis("year")

# generate line plot of arable land and agricultural land for whole world
fig, ax = plt.subplots(figsize=[7, 3])
ax.plot(abl_W_mod["year"], abl_W_mod["mean"], marker="*")
ax.set_ylabel("Arable land (% of land area)", fontsize=7)
ax.set_xlabel("Year", fontsize=16)
plt.xticks(rotation=90)
# Second y-axis sharing the same x-axis for the agricultural-land series.
ax1 = ax.twinx()
ax1.plot(ag_W_mod["year"], ag_W_mod["mean"], color="RED", marker="o")
ax1.set_ylabel("Agricultural land (% of land area)",
               fontsize=7)
plt.title("Time series plot of ARABLE LAND and AGRICULTURAL LAND (% of total land)")
plt.show()

# generate box plot
data = [CO2_T["IND"], CO2_T["NAC"], CO2_T["CHN"], co2_W_mod["mean"]]
coun = ["INDIA", "UNITED KINGDOM", "CHINA", "world"]
box(data, coun)

# violin plots of greenhouse gas emission for INDIA, UK and CHINA
fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3)
ax1.violinplot(GHG_T["IND"], showmedians=True, points=10)
ax1.set_xticks([1])
ax1.set_ylabel("GREEN HOUSE GAS EMISSION")
ax1.set_xticklabels(["INDIA"])
ax2.violinplot(GHG_T["GBR"], showmedians=True, points=100)
ax2.set_xticks([1])
ax2.set_xticklabels(["UK"])
ax3.violinplot(GHG_T["CHN"], showmedians=True, points=500)
ax3.set_xticks([1])
ax3.set_xticklabels(["CHINA"])
plt.show()

# Heat map of the greenhouse-gas correlation matrix.
rs = np.random.RandomState(0)
FORW = pd.DataFrame(rs.rand(8, 8))
corr = GHG_T.corr()
plt.figure(figsize=(6, 7))
sns.heatmap(corr, annot=True)
plt.show()
| sunithasomasundaran/ads1_statistics_and_trends_sunitha | assignment2.py | assignment2.py | py | 4,686 | python | en | code | 0 | github-code | 36 |
34212052955 | # https://www.acmicpc.net/problem/1260
# solution
# 1) 주어진 입력을 인접행렬로 저장한다
# 2) 현재 정점 그때그때 출력하는 dfs를 돌린다
# 3) bfs 하고 path를 출력한다
# TIL
# adj_mat = [([0,] * n),]*n -> 이런식으로 초기화 하면안됨. copy라 원소의 id값 다 같아지는 문제
# 파이썬 입력으로 input()은 굉장히 느리다. sys.stdin.readline() 사용
# dfs의 경우 최단거리 찾는 문제(탐색 여러번 반복)와 단순히 탐색하는 문제(한번만 탐색) 구분해서 풀자
## 최단거리 찾는 경우만 여러번 반복해야해서 visited 리셋필요
# 반면 bfs의 path는 그 자체로 최단거리(자체적으로 여러개 path로 탐색)
# bfs 제대로 이해하자
## 1) bfs는 재귀없이 반복만
## 2) 방문과 발견 시점의 분리
## 3) 큐에 중복된 정점 안들어가게 조심
## 4) 정점 방문 표시는 반드시 큐에 넣을때 해야함
from collections import deque
import sys
def dfs(v):
    """Recursive depth-first search over the global adjacency matrix;
    prints each vertex (1-based) as it is visited."""
    global adj_mat, visited
    print(v+1, end=' ')
    for nxt in range(len(adj_mat[v])):
        if nxt == v:  # never follow a self-loop
            continue
        if adj_mat[v][nxt] == 1 and visited[nxt] == 0:
            visited[nxt] = 1  # mark before recursing
            dfs(nxt)
def bfs(start):
    """Breadth-first search from ``start``; prints the visit order (1-based).

    Vertices are marked visited when enqueued (not when dequeued) so no
    vertex ever enters the queue twice.
    """
    global adj_mat
    visited = [0] * n
    order = deque([])
    queue = deque([start])
    visited[start] = 1  # mark on enqueue
    while queue:
        cur = queue.popleft()   # "visit" = pop from the queue
        order.append(cur)
        # "discover" = push unseen neighbours onto the queue
        for nxt, connected in enumerate(adj_mat[cur]):
            if nxt == cur:
                continue
            if connected == 1 and visited[nxt] == 0:
                queue.append(nxt)
                visited[nxt] = 1
    for vertex in order:
        print(vertex+1, end=' ')
if __name__ == "__main__":
    # n: vertices, m: edges, v: start vertex (1-based in the input)
    n,m,v = tuple(map(int,sys.stdin.readline().split()))
    v = v-1 # shift the start vertex to a 0-based index (input starts at 1)

    # Initialise and fill the (symmetric) adjacency matrix.
    adj_mat = [[0 for _ in range(n)] for _ in range(n)]
    for _ in range(m):
        r,c = tuple(map(int,sys.stdin.readline().split()))
        adj_mat[r-1][c-1] = 1
        adj_mat[c-1][r-1] = 1

    # dfs
    visited = [0 for _ in range(n)]
    visited[v] = 1
    dfs(v)
    print('\n',end='')

    # bfs
    bfs(v)
42243182620 | import matplotlib.pyplot as plt
import numpy as np
import dill as pickle
# Sensitivity files to plot, with one legend label per file.
sens_to_plot = ['20180314_grav_noshield_cant-0mV_allharm.npy', \
                '20180314_grav_shieldin-nofield_cant-0mV_allharm.npy', \
                '20180314_grav_shieldin-1V-1300Hz_cant-0mV_allharm.npy', \
                '20180314_grav_shieldin-2V-2200Hz_cant-0mV_allharm.npy']

labs = ['No Shield', 'Shield', 'Shield - 1300Hz', 'Shield - 2200Hz']

plot_just_current = False

# Load (lambda, alpha) sensitivity curves for each configuration; only the
# non-diagonalised alphas are kept for plotting.
sens_dat = []
for sens_file in sens_to_plot:
    lambdas, alphas, diagalphas = np.load('/sensitivities/' + sens_file)
    sens_dat.append(alphas)

alpha_plot_lims = (1000, 10**13)
lambda_plot_lims = (10**(-7), 10**(-4))

# Published Decca limit curves for comparison (CSV columns: lambda, alpha).
limitdata_path = '/sensitivities/decca1_limits.txt'
limitdata = np.loadtxt(limitdata_path, delimiter=',')
limitlab = 'No Decca 2'

limitdata_path2 = '/sensitivities/decca2_limits.txt'
limitdata2 = np.loadtxt(limitdata_path2, delimiter=',')
limitlab2 = 'With Decca 2'

fig, ax = plt.subplots(1,1,sharex='all',sharey='all',figsize=(5,5),dpi=150)

if not plot_just_current:
    for i, sens in enumerate(sens_dat):
        ax.loglog(lambdas, sens, linewidth=2, label=labs[i])

ax.loglog(limitdata[:,0], limitdata[:,1], '--', \
          label=limitlab, linewidth=3, color='r')
ax.loglog(limitdata2[:,0], limitdata2[:,1], '--', \
          label=limitlab2, linewidth=3, color='k')

ax.grid()
ax.set_xlim(lambda_plot_lims[0], lambda_plot_lims[1])
ax.set_ylim(alpha_plot_lims[0], alpha_plot_lims[1])
ax.set_xlabel('$\lambda$ [m]')
ax.set_ylabel('|$\\alpha$|')
ax.legend(numpoints=1, fontsize=9)
#ax.set_title(figtitle)
plt.tight_layout()
plt.show()
| charlesblakemore/opt_lev_analysis | scripts/mod_grav/plot_sensitivity.py | plot_sensitivity.py | py | 1,634 | python | en | code | 1 | github-code | 36 |
44480283459 | import Collatz
import one
import twonums
import drag
import infinite
import biology
def func88(which):
    """Dispatch to the calculator module selected from the menu.

    Parameters
    ----------
    which : str
        The menu option ('1'..'6') typed by the user; anything else
        prints an error message.
    """
    # Compare against plain string literals; the original `which == str(1)`
    # was a roundabout way of spelling '1'.
    if which == '1':
        num1 = int(input('insert the first number: '))
        one.grat(num1).use()
    elif which == '2':
        num1 = int(input('insert the first number: '))
        num2 = int(input('insert the second number: '))
        Collatz.grat(num1, num2).use()
    elif which == '3':
        num1 = int(input('insert the first number: '))
        num2 = int(input('insert the second number '))
        num3 = int(input('insert the third number '))
        drag.grat(num1, num2, num3).use()
    elif which == '4':
        num1 = int(input('insert the first number: '))
        num2 = int(input('insert the number to reach '))
        twonums.grat(num1, num2).use()
    elif which == '5':
        num1 = input('insert an infinite amount of numbers: ')
        infinite.grat(num1).use()
    elif which == '6':
        subject = str(input('Choose a subject\n1:equations\n2:others\n3:science\n4:vocabulary\n5:search all directories\nnumber:'))
        num1 = input('insert a question: ')
        biology.show(num1).result(subject)
    else:
        print('WRONG NUMBER ENTERED')

func88(input('''1:FIND COMMON FACTORS OF ONE NUMBERS\n2:FIND FACTORS OF TWO NUMBERS\n3:FIND THE COMMON FACTOR OF THREE NUMBERS\n4:FIND TWO NUMBERS THAT MULTIPLY TO A NUMBER AND ADD TO A SECOND NUMBER\n5:FIND A COMMON FACTOR OF AN INFINATE AMOUNT OF NUMBERS\n6:ANSWRS DATABASE\nINSERT A NUMBER: '''))
| Tanticion/Importent-Projects | School_Proj/mathsolver.py | mathsolver.py | py | 1,528 | python | en | code | 1 | github-code | 36 |
5154312569 | from sklearn.datasets import make_circles
from sklearn.datasets import make_blobs
from sklearn.datasets import make_moons
from sklearn.model_selection import train_test_split
class Circles(object):
    """Two concentric noisy circles, pre-split into train/test sets."""

    def __init__(self):
        features, targets = make_circles(n_samples=300, noise=0.1, random_state=5622, factor=0.6)
        self.X = features
        self.labels = targets
        split = train_test_split(features, targets, test_size=0.3, random_state=5622)
        self.X_train, self.X_test, self.y_train, self.y_test = split
class DataBlobs:
    """Gaussian blobs with configurable centers and spread, pre-split
    into train/test sets."""

    def __init__(self, centers, std=1.75):
        features, targets = make_blobs(n_samples=300, n_features=2, cluster_std=std, centers=centers,
                                       shuffle=False, random_state=5622)
        self.X = features
        self.labels = targets
        split = train_test_split(features, targets, test_size=0.3, random_state=5622)
        self.X_train, self.X_test, self.y_train, self.y_test = split
class DataMoons(object):
    """Two interleaving half-moons, pre-split into train/test sets."""

    def __init__(self):
        features, targets = make_moons(n_samples=300, noise=0.05, shuffle=False, random_state=5622)
        self.X = features
        self.labels = targets
        split = train_test_split(features, targets, test_size=0.3, random_state=5622)
        self.X_train, self.X_test, self.y_train, self.y_test = split
import os
import pickle
import numpy as np
import pandas as pd
import json
import random
from sklearn.model_selection import train_test_split
current_folder = os.path.dirname(os.path.abspath(__file__))
class Concrete(object):
    """Concrete compressive-strength regression dataset.

    The last CSV column is the regression target; the remaining columns
    are the features.  Data is split 80/20 into train/test sets.
    """

    def __init__(self):
        # Resolve the CSV relative to this module, like the other datasets
        # in this file, so loading does not depend on the caller's working
        # directory.  (Assumes the CSV lives next to mnist.npz — verify.)
        csv_path = os.path.join(current_folder, 'Concrete_Data.csv')
        rawdata = pd.read_csv(csv_path).to_numpy()
        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(rawdata[:, :-1], rawdata[:, -1],
                                                                                test_size=0.2, random_state=5622)
class Digits(object):
    """MNIST digits loaded from the bundled mnist.npz archive.

    Images are flattened to 784-dimensional vectors and split sequentially
    into 1000 train / 500 validation / 500 test examples.
    """

    def __init__(self):
        loaded = np.load(os.path.join(current_folder, "mnist.npz"))
        self.images = images = loaded["images"].reshape(-1, 28 * 28)
        self.labels = labels = loaded["labels"]
        train_size = 1000
        valid_size = 500
        test_size = 500
        # Sequential (non-shuffled) split: train, then validation, then test.
        self.X_train, self.y_train = images[:train_size], labels[:train_size]
        self.X_valid, self.y_valid = images[train_size: train_size + valid_size], labels[
                                                                                  train_size: train_size + valid_size]
        self.X_test, self.y_test = (images[train_size + valid_size:train_size + valid_size + test_size],
                                    labels[train_size + valid_size: train_size + valid_size + test_size])
class BinaryDigits:
    """
    MNIST data with binary parity labels: each label is mapped to
    label % 2, i.e. 0 for even digits and 1 for odd digits.
    Split sequentially into 1000 train / 500 validation / 500 test examples.
    """

    def __init__(self):
        loaded = np.load(os.path.join(current_folder, "mnist.npz"))
        images = loaded["images"].reshape(-1, 28 * 28)
        labels = loaded["labels"]
        # Reduce the ten digit classes to two parity classes.
        labels = labels % 2
        train_size = 1000
        valid_size = 500
        test_size = 500
        self.X_train, self.y_train = images[:train_size], labels[:train_size]
        self.X_valid, self.y_valid = images[train_size: train_size + valid_size], labels[
                                                                                  train_size: train_size + valid_size]
        self.X_test, self.y_test = (images[train_size + valid_size:train_size + valid_size + test_size],
                                    labels[train_size + valid_size: train_size + valid_size + test_size])
class IMDB:
    """
    IMDB movie-review sentiment dataset loaded from movie_review_data.json.
    Review texts and labels are split 70/30 into train/test with a fixed
    seed for reproducibility.
    """

    def __init__(self):
        with open(os.path.join(current_folder, "movie_review_data.json")) as f:
            self.data = data = json.load(f)
        # Each entry in data['data'] provides a review 'text' and a 'label'.
        X = [d['text'] for d in data['data']]
        y = [d['label'] for d in data['data']]
        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y, test_size=0.3, shuffle=True,
                                                                                random_state=42)
| peterrrock2/ML_coursework | Homework/Hw4/data/__init__.py | __init__.py | py | 4,114 | python | en | code | 0 | github-code | 36 |
74586144105 | from collections import defaultdict
def solution(id_list, report, k):
    """Programmers 92334: count suspension mails sent to each reporter.

    Parameters
    ----------
    id_list : list of user ids, fixing the order of the answer
    report : list of "reporter reported" strings; duplicates count once
    k : suspension threshold (number of distinct reporters)

    Returns
    -------
    list of int: for each user in id_list, how many of the users they
    reported ended up suspended (i.e. reported by at least k distinct
    users).
    """
    report_counts = defaultdict(int)  # reported user -> distinct reporters
    answer = [0] * len(id_list)

    # Deduplicate once; the original rebuilt set(report) in both loops and
    # mixed .split() with .split(' ').
    unique_reports = set(report)
    for entry in unique_reports:
        report_counts[entry.split(' ')[1]] += 1

    # Users reported >= k times are suspended; each of their distinct
    # reporters receives one notification mail.
    for entry in unique_reports:
        reporter, reported = entry.split(' ')
        if report_counts[reported] >= k:
            answer[id_list.index(reporter)] += 1
    return answer
30394243625 | """
FILES to YAML
"""
import argparse
import json
import json.decoder
import os
from pathlib import Path
import yaml
import yaml.scanner
def walk_thru(startdir: str) -> list:
    """Return every path under ``startdir`` relative to it, '/'-separated.

    Uses Path.relative_to/as_posix instead of str.replace: the old
    ``str(el).replace(startdir, '')`` stripped the prefix anywhere it
    reoccurred inside a nested path, not only at the start.
    """
    base = Path(startdir)
    return [entry.relative_to(base).as_posix() for entry in base.rglob('*')]
def read_file_content(filepath: str):
    """Read a file and return its parsed YAML or JSON, else the raw text.

    YAML is tried first, then JSON; if neither parser accepts the content
    the raw string is returned unchanged.
    """
    with open(filepath) as file:
        content = file.read()
    try:
        return yaml.safe_load(content)
    # yaml.YAMLError is the common base of ScannerError, ParserError etc.;
    # catching only ScannerError let malformed-but-scannable YAML crash
    # the directory walk.
    except yaml.YAMLError:
        pass
    try:
        return json.loads(content)
    except json.decoder.JSONDecodeError:
        pass
    return content
def dir_tree_dict(startdir: str) -> dict:
    """Build a nested dict mirroring the directory tree under ``startdir``.

    Directories become nested dicts; files map to their parsed content
    (via read_file_content).
    """
    # Walk the tree here instead of reading the module-level
    # ``dirs_and_files`` global, so the function works for any startdir
    # without external setup.
    tree = {}
    for item in walk_thru(startdir):
        node = tree
        parts = item.split('/')
        if os.path.isdir(startdir + item):
            for part in parts:
                node = node.setdefault(part, {})
        else:
            # Descend through the parent directories, then attach the file
            # content once (the original re-read the file for every path
            # component and setdefault'ed the content onto parents too).
            for part in parts[:-1]:
                node = node.setdefault(part, {})
            node.setdefault(parts[-1], read_file_content(startdir + item))
    return tree
if __name__ == '__main__':
    # Parse the directory to serialise from the command line.
    parser = argparse.ArgumentParser()
    parser.add_argument('startdir', type=str)
    args = parser.parse_args()
    startdir = args.startdir + '/'
    dirs_and_files = walk_thru(startdir)
    res = dir_tree_dict(startdir)
    # Wrap the tree under the name of the top-level directory.
    res = {startdir.split('/')[-2]: res}
    with open('week5/res.yaml', 'w', newline='') as newfile:
        yaml.dump(res, newfile, default_flow_style=False)
| MaksimPashkovsky/python-labs | week5/task1.py | task1.py | py | 1,452 | python | en | code | 0 | github-code | 36 |
20081039110 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
# !pip install numpy
# !pip install pandas
# !pip install matplotlib
# !pip install sklearn
# !pip install dmba
# !pip install statsmodels
# !pip install yellowbrick
# In[2]:
import pandas as pd
import numpy as np
df = pd.read_csv("data/medical_clean.csv")

# Regression target.
outcome = 'TotalCharge'

# Drop identifier and geographic columns, plus Additional_charges, which
# are excluded from the feature set.
df = df.drop(['CaseOrder', 'Customer_id', 'Interaction', 'UID', 'City',
              'State', 'County', 'Zip', 'Lat', 'Lng', 'Interaction', 'TimeZone',
              'Additional_charges'], axis=1)

cat_columns = df.select_dtypes(exclude="number").columns

# Give categorical columns a numeric value
for col in cat_columns:
    df[col] = pd.Categorical(df[col])
    df[col] = df[col].cat.codes

df.head()

# export prepared data
df.to_csv('data/medical_prepared.csv')

df['Complication_risk']
# In[5]:
import matplotlib.pyplot as plt
import seaborn as sns
# In[6]:
# perform univariate analysis on all columns (one histogram per column)
for col in df.columns:
    plt.hist(df[col])
    plt.title(col)
    path = 'plots/univariate-%s.png'%col
    plt.gcf().savefig(path)

# # Bivariate Analysis
#
# Since we are predicting Initial_days we will include Initial_days in our bivariate analysis of the features

for col in df:
    if col != outcome:
        df.plot(kind='scatter', x=outcome, y=col)
        path = 'plots/bivariate-%s-%s.png'%(outcome,col)
        plt.gcf().savefig(path)

# ## Correlation Matrix

correl = df.corr()
# ``display`` only exists inside IPython/Jupyter; in this exported script
# it raised a NameError, so print the correlation matrix instead.
print(correl)

# Features ranked by absolute correlation with the outcome.
abs(df.corr())[outcome].sort_values(ascending=False)

fig, ax = plt.subplots(figsize=(15,15))
heatmap = sns.heatmap(correl, xticklabels = correl.columns, yticklabels = correl.columns, cmap='RdBu')
heatmap.get_figure().savefig('plots/heatmap.png')
# # Regression Models
#
# We start with a regression model with all of the features
# In[11]:
import statsmodels.api as sm
# In[12]:
# Feature matrix: every column except the outcome; y is the target.
X = df.loc[:,df.columns!=outcome]
y = df[outcome]

# Baseline OLS model with all features; statsmodels needs an explicit
# intercept column, added via add_constant.
Xc = sm.add_constant(X)
initial_model = sm.OLS(y,Xc)
results = initial_model.fit()
results.summary()
# In[ ]:
# ## Data Reduction
# In[14]:
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
linear_regression = LinearRegression(normalize=False,fit_intercept=True)
Xc = sm.add_constant(X)
def r2_est(X,y):
    """Return the in-sample R^2 of the module-level `linear_regression` on (X, y).

    The estimator is re-fit on every call and scored on the same data, so this
    measures training fit, not generalization.
    """
    return r2_score(y,linear_regression.fit(X,y).predict(X))
r2_impact = list()
for j in range(X.shape[1]):
selection = [i for i in range(X.shape[1]) if i!=j]
r2_impact.append(((r2_est(Xc,y) - r2_est(Xc.values [:,selection],y)) ,Xc.columns[j]))
best_variables = list()
for imp, varname in sorted(r2_impact, reverse=True):
if imp >= 0.0005:
best_variables.append(varname)
print ('%6.5f %s' % (imp, varname))
# New dataset with reduced features
df_reduced = df[best_variables]
df_reduced.head()
# In[ ]:
# In[ ]:
# In[15]:
# export reduced data
df_reduced.to_csv('data/medical_reduced.csv')
# In[ ]:
# In[16]:
X_reduced = df_reduced.loc[:,df_reduced.columns!=outcome]
Xc_reduced = sm.add_constant(X_reduced)
model_reduced = sm.OLS(y,Xc_reduced)
results = model_reduced.fit()
results.summary()
# In[ ]:
# ## Residuals
# In[17]:
from sklearn.linear_model import Lasso, LassoCV, Ridge, RidgeCV
from sklearn.model_selection import train_test_split
from yellowbrick.regressor import AlphaSelection, PredictionError, ResidualsPlot
# In[18]:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
model = Ridge()
visualizer = ResidualsPlot(model)
visualizer.fit(X_train, y_train)
visualizer.score(X_test, y_test)
residual = visualizer.poof()
residual.get_figure().savefig('plots/residual-plot.png')
# In[19]:
model = Lasso()
visualizer = PredictionError(model)
visualizer.fit(X_train, y_train)
visualizer.score(X_test, y_test)
prediction_error = visualizer.poof()
prediction_error.get_figure().savefig('plots/prediction_error.png')
# In[ ]:
| cjhammons/Multiple-Linear-Regression-on-Medical-Data | submission/multiple-linear-regression.py | multiple-linear-regression.py | py | 4,028 | python | en | code | 0 | github-code | 36 |
71960613545 | import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import confusion_matrix
import itertools
from PIL import Image as PIL
def show_image(image, cmap=None):
    """Render a single image in a large (12 x 12 inch) figure."""
    fig = plt.figure(figsize=(12, 12))
    axis = fig.add_subplot(111)
    axis.imshow(image, cmap=cmap)
    plt.show()
def show_images(images, labels=None):
    """Open up to 64 image files and show them on an 8x8 grid.

    Args:
        images: sequence of image file paths (opened with PIL).
        labels: optional per-image labels, parallel to `images`; when given,
            each subplot is captioned and wider spacing is used.
    """
    if labels:
        fig = plt.figure(figsize=(12, 12))
        fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.2, wspace=0.2)
    else:
        fig = plt.figure(figsize=(12, 12))
        fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)
    # Cap at the 8x8 grid size and guard against fewer than 64 inputs
    # (the original unconditional range(64) raised IndexError on short lists).
    for i in range(min(64, len(images))):
        region = PIL.open(images[i])
        ax = fig.add_subplot(8, 8, i + 1, xticks=[], yticks=[])
        ax.imshow(region)
        if labels:
            ax.set_xlabel(labels[i])
    plt.show()
def show_importance(clf, columns):
    """Print and plot the feature importances of a fitted estimator.

    Args:
        clf: fitted estimator exposing `feature_importances_`.
        columns: feature names, in the same order as the training columns.
    """
    feat_num = len(columns)
    importances = clf.feature_importances_
    indices = np.argsort(importances)[::-1]  # descending by importance
    # Print the feature ranking
    print("Feature ranking:")
    for f in range(feat_num):
        # BUG FIX: the printed name must follow the sorted order
        # (columns[indices[f]]); the original printed columns[f], pairing
        # each importance value with the wrong feature name.
        print("{}. feature {} : {} ({})".format(f + 1, indices[f], columns[indices[f]], importances[indices[f]]))
    # Plot the feature importances of the forest
    plt.figure()
    plt.title("Feature importances")
    plt.bar(range(feat_num), importances[indices],
            color="r", align="center")
    # BUG FIX: tick labels must be reordered the same way as the bars.
    plt.xticks(range(feat_num), [columns[i] for i in indices], rotation=270)
    plt.xlim([-1, feat_num])
    plt.show()
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.

    Args:
        cm: confusion matrix as returned by sklearn's confusion_matrix.
        classes: tick labels for both axes, in matrix order.
        normalize: if True, each row is scaled to sum to 1.
        title: figure title.
        cmap: matplotlib colormap for the heatmap.
    """
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    if normalize:
        # Row-normalize: each cell becomes a fraction of its true-label row.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        thresh = cm.max() / 2.
        for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
            # White text on dark cells, black on light, for contrast.
            plt.text(j, i, '{:.2f}'.format(cm[i, j]),
                     horizontalalignment="center",
                     color="white" if cm[i, j] > thresh else "black")
        print("Normalized confusion matrix")
    else:
        thresh = cm.max() / 2.
        for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
            plt.text(j, i, cm[i, j],
                     horizontalalignment="center",
                     color="white" if cm[i, j] > thresh else "black")
        print('Confusion matrix, without normalization')
    # print(cm)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
def plot_confusion_matrix_wrapper(y_test, y_pred, classes):
    """Show both the raw and the row-normalized confusion matrix for y_pred."""
    # Compute confusion matrix
    cnf_matrix = confusion_matrix(y_test, y_pred)
    np.set_printoptions(precision=2)
    # Plot non-normalized confusion matrix
    plt.figure()
    plot_confusion_matrix(cnf_matrix, classes=classes)
    # Plot normalized confusion matrix
    plt.figure()
    plot_confusion_matrix(cnf_matrix, classes=classes, normalize=True)
    plt.show()
| fukuta0614/active_learning | shared/PIA/analysis.py | analysis.py | py | 3,289 | python | en | code | 0 | github-code | 36 |
30133687535 | #!/ust/bin/python3
import fire
import sys
from functools import reduce
import os.path
red = '\033[0;31m'
green = '\033[0;32m'
yellow = '\033[0;33m'
blue = '\033[0;34m'
purple = '\033[0;35m'
cyan = '\033[0;36m'
white = '\033[0;37m'
end = '\033[0m'
host_file = '/etc/hosts'
new_file_path = None
def log_error(*msg):
    """Print an ERROR-prefixed message to stdout in red.

    Accepts any number of positional parts, joined with single spaces.
    """
    # BUG FIX: `msg` is always a tuple (never None); the original `is None`
    # check could not trigger, and `reduce` raised TypeError on an empty
    # tuple. Test emptiness and join instead.
    if not msg:
        msg_str = ''
    else:
        msg_str = ' '.join(str(part) for part in msg)
    print('%s%s%s%s' % (red, 'ERROR | ', msg_str, end))
def log_info(msg):
    """Print an INFO-prefixed message to stdout in green."""
    print('%s%s%s%s' % (green, 'INFO | ', msg, end))
def print_param(verb, args, comment):
    """Print one colour-coded usage row: flag (green), args (yellow), description."""
    print(' %s%-5s %s%-12s %s%s' % (green, verb, yellow, args, end, comment))
def help():
    """Print CLI usage for all verbs.

    NOTE(review): shadows the builtin `help`; rename if interactive use matters.
    """
    print('run: python3 app.py %s %s<verb>%s <args>%s' % ('', green, yellow, end))
    print_param('-h', '', 'help')
    print_param('-a', 'group file', 'add host group')
    print_param('-r', 'group file', 'replace host group')
    print_param('-on', 'group', 'uncomment host group')
    print_param('-off', 'group', 'comment host group')
def get_group_start(value) -> str:
    """Return the marker line that opens the hosts block for `value`."""
    return '## op [ %s ]' % value
def get_group_end(value) -> str:
    """Return the marker line that closes the hosts block for `value`."""
    return '## ed [ %s ]' % value
def has_contain_group(group) -> bool:
    """Return True when the hosts file already contains the group's start marker."""
    marker = get_group_start(group)
    with open(host_file) as hosts:
        return any(marker in line for line in hosts)
def append_group(group, file_path):
    """Append a new marker-delimited host group to the hosts file.

    Copies the contents of `file_path` between start/end markers for `group`,
    echoing each copied line. Refuses to add a group that already exists.
    """
    if has_contain_group(group):
        log_error('group already exist', group)
        return
    assert_file(file_path)
    # Context managers so both handles are closed even on errors
    # (the original left the hosts-file handle open).
    with open(host_file, 'a') as host, open(file_path) as file:
        host.write('\n' + get_group_start(group) + '\n\n')
        for line in file:
            host.write(line)
            print(line, end='')
        print()
        host.write('\n\n' + get_group_end(group) + '\n')
    # Typo fix in the log message ("sucessful" -> "successful").
    log_info('append group successful')
# add # for content in group
def comment_content(result_lines, line, content_flag):
    """Append `line` to `result_lines`, prefixing '#' when inside the group."""
    needs_hash = content_flag and not line.startswith('#')
    result_lines.append('#' + line if needs_hash else line)
# remove # for content in group
def uncomment_content(result_lines, line, content_flag):
    """Append `line` to `result_lines`, stripping a leading '#' when inside the group."""
    strip_hash = content_flag and line.startswith('#')
    result_lines.append(line[1:] if strip_hash else line)
# read origin file, write back origin file with the result list
def replace_content(group, content_func=None, logic_func=None):
    """Rewrite the hosts file by running `content_func` over its lines.

    `content_func(group, lines, logic_func)` must return the complete new
    line list; `logic_func` is the per-line transform forwarded to it.
    Does nothing when the group is not present.
    """
    if not has_contain_group(group):
        log_error('group not exist')
        return
    result_lines = []
    with open(host_file) as file:
        lines = file.readlines()
        result_lines = content_func(group, lines, logic_func)
    write_to_hosts(result_lines)
# func value, trans into replace_content
def open_close_group(group, lines, logic_func) -> list:
    """Transform `lines`, applying `logic_func` to lines inside the group block.

    Marker lines are copied through unchanged; `logic_func` is called as
    logic_func(result_lines, line, inside_group).
    """
    # BUG FIX: validate once up front instead of re-checking on every line;
    # the original exited from inside the loop, after partial work.
    if logic_func is None:
        log_error('must have logic func')
        sys.exit()
    start_marker = get_group_start(group)
    end_marker = get_group_end(group)
    content_flag = False
    result_lines = []
    for line in lines:
        if start_marker in line:
            content_flag = True
            result_lines.append(line)
            continue
        if end_marker in line:
            content_flag = False
            result_lines.append(line)
            continue
        logic_func(result_lines, line, content_flag)
    return result_lines
# func value , trans into replace_content
def replace_group_content(group, lines, logic_func=None):
    """Return `lines` with the body of `group` replaced by the file at the
    module-level `new_file_path`.

    Lines outside the group pass through; the old group body is dropped and
    the replacement file's lines are inserted just before the end marker.
    `logic_func` is unused; the parameter only matches the content_func
    signature expected by replace_content().
    """
    result_lines = []
    content_flag = False
    for line in lines:
        if get_group_start(group) in line:
            content_flag = True
            result_lines.append(line)
            continue
        if get_group_end(group) in line:
            content_flag = False
            print(new_file_path)
            with open(new_file_path, 'r') as file:
                new_lines = file.readlines()
                for new_line in new_lines:
                    result_lines.append(new_line)
            result_lines.append('\n')
            result_lines.append(line)
            continue
        # Only keep lines while outside the group (old body is discarded).
        if not content_flag:
            result_lines.append(line)
    return result_lines
def write_to_hosts(lines):
    """Overwrite the hosts file with `lines`; no-op when `lines` is None."""
    if lines is None:
        return
    with open(host_file, 'w+') as hosts:
        hosts.write(''.join(lines))
def assert_file(file_path):
    """Exit with status 1 when `file_path` does not exist."""
    if os.path.exists(file_path):
        return
    log_error('file not found:', file_path)
    sys.exit(1)
def assert_param(args, count):
    """Exit with status 1 when fewer than `count` arguments were supplied."""
    if len(args) >= count:
        return
    log_error('invalid param, at least need', count)
    sys.exit(1)
def main(verb=None, *args):
    """CLI entry point dispatched by fire.Fire.

    verb: one of -h / -a / -r / -on / -off (see help()); args are the
    verb-specific parameters (group name, and a file path for -a/-r).
    """
    assert_file(host_file)
    if verb == '-h':
        help()
        sys.exit(0)
    if verb == '-a':
        assert_param(args, 2);
        append_group(group=args[0], file_path=args[1])
    if verb == '-r':
        assert_param(args, 2);
        # replace_group_content reads the replacement file via this global.
        global new_file_path
        new_file_path=args[1]
        replace_content(group=args[0], content_func=replace_group_content)
    if verb == '-on':
        assert_param(args, 1);
        replace_content(group=args[0], content_func=open_close_group, logic_func=uncomment_content)
    if verb == '-off':
        assert_param(args, 1);
        replace_content(group=args[0], content_func=open_close_group, logic_func=comment_content)
fire.Fire(main)
| Kuangcp/Script | python/tool/switch-host-group/app.py | app.py | py | 5,437 | python | en | code | 10 | github-code | 36 |
21678791673 | # Reverse a linked list from position m to n. Do it in-place and in one-pass.
# For example:
# Given 1->2->3->4->5->NULL, m = 2 and n = 4,
# return 1->4->3->2->5->NULL.
# Note:
# Given m, n satisfy the following condition:
# Definition for singly-linked list.
# 1. 逗号输出与赋值
class ListNode(object):
    """Singly linked list node whose repr prints the whole chain."""

    def __init__(self, x):
        self.val = x
        self.next = None

    def __repr__(self):
        # Node instances are always truthy; the guard mirrors a None check.
        if self:
            return f"{self.val} -> {self.next}"
class Solution(object):
    def reverseBetween(self, head, m, n):
        """
        Reverse the sublist from position m to n (1-indexed), in place,
        in a single pass; returns the (possibly new) head.

        :type head: ListNode
        :type m: int
        :type n: int
        :rtype: ListNode
        """
        if m == n:
            return head
        # Dummy node so the m == 1 case needs no special-casing.
        dummy = ListNode(0)
        dummy.next = head
        prev = dummy
        # Walk prev to the node just before position m.
        for i in range(m - 1):
            prev = prev.next
        reverse = None
        cur = prev.next
        # Standard pointer reversal. The tuple assignment evaluates the whole
        # right-hand side before rebinding, so the order shown is safe.
        for i in range(n - m + 1):
            # next = cur.next
            # cur.next = reverse
            # reverse = cur
            # cur = next
            cur.next, reverse, cur = reverse, cur, cur.next
            # reverse, cur, cur.next = cur, cur.next, reverse
        # Splice back: prev.next is the old run head (now the run tail), so
        # point it at cur (first node after n), then hang the reversed head
        # (reverse) off prev.
        prev.next.next, prev.next = cur, reverse
        # prev.next.next = cur
        # prev.next = reverse
        return dummy.next
if __name__ == "__main__":
cur, cur.next, cur.next.next, cur.next.next.next = ListNode(1), ListNode(2), ListNode(2), ListNode(1)
cur.next.next.next.next = ListNode(5)
print(cur)
print(Solution().reverseBetween(cur,2,4)) | WangsirCode/leetcode | Python/reverse-likned-lis-ii.py | reverse-likned-lis-ii.py | py | 1,569 | python | en | code | 0 | github-code | 36 |
6792942210 | import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
print("Current TF Version Is [%s]" % (tf.__version__))
print("Package Loaded")
mnist = input_data.read_data_sets('data/', one_hot = True)
n_hidden_1 = 256
n_hidden_2 = 128
n_hidden_3 = 64
n_input = 784
n_classes = 10
x = tf.placeholder("float", [None, n_input])
y = tf.placeholder("float", [None, n_classes])
stddev = 0.1
weights = {
'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1], stddev=stddev)),
'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2], stddev=stddev)),
'h3': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_3], stddev=stddev)),
'out': tf.Variable(tf.random_normal([n_hidden_3, n_classes], stddev=stddev))
}
biases = {
'b1': tf.Variable(tf.random_normal([n_hidden_1])),
'b2': tf.Variable(tf.random_normal([n_hidden_2])),
'b3': tf.Variable(tf.random_normal([n_hidden_3])),
'out': tf.Variable(tf.random_normal([n_classes]))
}
print("Network Ready")
def multilayer_perceptron(_x, _weights, _biases):
    """Build a 3-hidden-layer MLP graph: sigmoid hidden layers, linear output.

    Returns pre-softmax logits; softmax is applied by the loss
    (softmax_cross_entropy_with_logits) elsewhere in this script.
    """
    _layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(_x, _weights['h1']), _biases['b1']))
    _layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(_layer_1, _weights['h2']), _biases['b2']))
    _layer_3 = tf.nn.sigmoid(tf.add(tf.matmul(_layer_2, _weights['h3']), _biases['b3']))
    return (tf.matmul(_layer_3, _weights['out']) +_biases['out'])
pred = multilayer_perceptron(x, weights, biases)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits =pred))
optm = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)
corr = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accr = tf.reduce_mean(tf.cast(corr, "float"))
#init = tf.global_variables_initializer()
init = tf.initialize_all_variables()
#init = tf.global_norm()
init
print("Function Ready")
training_epoch = 20
batch_size = 100
display_step = 4
sess = tf.Session()
sess.run(init)
for epoch in range(training_epoch):
avg_cost = 0.
total_batch = int(mnist.train.num_examples/batch_size)
for i in range(total_batch):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
feeds = {x : batch_xs, y : batch_ys}
sess.run(optm, feed_dict= feeds)
avg_cost += sess.run(cost, feed_dict=feeds)
avg_cost = avg_cost/ total_batch
if(epoch +1) % display_step == 0:
print("Epoch: %03d/%03d cost : %.9f" % (epoch, training_epoch, avg_cost))
feeds = {x : batch_xs, y : batch_ys}
training_acc =sess.run(accr, feed_dict=feeds)
print ("Training Accuracy: %.3f" %training_acc)
feeds = {x : mnist.test.images, y : mnist.test.labels}
test_acc = sess.run(accr, feed_dict=feeds)
print("Training Accuracy: %.3f" % training_acc)
print("Optimization Finished") | MiloSi/python_tensorflow_study | multi_layer_perceptron.py | multi_layer_perceptron.py | py | 2,867 | python | en | code | 0 | github-code | 36 |
class Inventory:
    """Fixed-capacity container of string items."""

    def __init__(self, __capacity: int):
        # Private capacity (name-mangled); read via get_capacity().
        self.__capacity = __capacity
        self.items = []

    def add_item(self, item: str):
        """Store `item` if there is room; otherwise report the failure."""
        if len(self.items) >= self.__capacity:
            return "not enough room in the inventory"
        self.items.append(item)

    def get_capacity(self):
        """Return the total (not remaining) capacity."""
        return self.__capacity

    def __repr__(self):
        remaining = self.__capacity - len(self.items)
        return "Items: {}.\nCapacity left: {}".format(', '.join(self.items), remaining)
# Part below is test code from example
inventory = Inventory(2)
inventory.add_item("potion")
inventory.add_item("sword")
print(inventory.add_item("bottle"))
print(inventory.get_capacity())
print(inventory)
#################################### TASK CONDITION ############################
'''
6. Inventory
Create a class Inventory. The __init__ method should accept only
the __capacity: int (private attribute) of the inventory.
You can read more about private attributes here. Each inventory should
also have an attribute called items - empty list, where all the items will be stored.
The class should also have 3 methods:
• add_item(item: str) - adds the item in the inventory if there is space for it. Otherwise, returns
"not enough room in the inventory"
• get_capacity() - returns the value of __capacity
• __repr__() - returns "Items: {items}.\nCapacity left: {left_capacity}".
The items should be separated by ", "
_______________________________________________
Example
Test Code (no input data in this task)
inventory = Inventory(2)
inventory.add_item("potion")
inventory.add_item("sword")
print(inventory.add_item("bottle"))
print(inventory.get_capacity())
print(inventory)
Output
not enough room in the inventory
2
Items: potion, sword.
Capacity left: 0
'''
| qceka88/Fundametals-Module | 22 Objects and Classes - Exercise/06inventory.py | 06inventory.py | py | 1,872 | python | en | code | 8 | github-code | 36 |
19826180596 | """
--- Day 9: Smoke Basin ---
https://adventofcode.com/2021/day/9
"""
from aoc import *
inputs = puzzle_input(9, 2021, sample=False).split('\n')
inputs = [[int(x) for x in i] for i in inputs]
def valid_point(x, y):
    """True when (x, y) lies inside the module-level `inputs` grid."""
    return 0 <= x < len(inputs) and 0 <= y < len(inputs[0])
def is_lowest(x, y):
    """True when cell (x, y) is strictly lower than every in-grid orthogonal neighbour."""
    return inputs[x][y] < min(
        inputs[x + dx][y + dy] for (dx, dy) in [(1, 0), (-1, 0), (0, 1), (0, -1)]
        if valid_point(x + dx, y + dy)
    )
lowest = [[(x, y) for y in range(len(inputs[0])) if is_lowest(x, y)] for x in range(len(inputs))]
lowest = reduce(lambda a, b: a + b, lowest, []) # Flattened 2d array into 1d array
print(f'Part 1: {sum(inputs[x][y] + 1 for (x, y) in lowest)}')
def flood_fill(x, y):
    """Return the size of the basin containing (x, y); mutates `inputs`.

    Recursively grows across strictly-increasing, non-9 neighbours; visited
    cells are overwritten with 9 in the module-level grid to stop re-entry.

    NOTE(review): cells are marked *after* the recursive call returns, so a
    cell reachable via two increasing paths could in principle be counted
    twice — presumably fine for this puzzle input; verify if reused.
    """
    amt = 1
    for (dx, dy) in [(1, 0), (-1, 0), (0, 1), (0, -1)]:
        pt = (x + dx, y + dy)
        if valid_point(*pt) and 9 != inputs[x + dx][y + dy] > inputs[x][y]:
            amt += flood_fill(*pt)
            inputs[x+dx][y+dy] = 9  # set seen locations to 9 to prevent overlap
    return amt
flooded = (flood_fill(x, y) for (x, y) in lowest)
print(f'Part 2: {mult(sorted(flooded, reverse=True)[:3])}')
| BricksAndPieces/AdventOfCode | 2021/days/day09.py | day09.py | py | 1,160 | python | en | code | 1 | github-code | 36 |
31219990233 | import logging
import os
import sys
from logging import Logger
from typing import Any, Dict, List
import datasets
import torch
import transformers
import wandb
from transformers import TrainingArguments
from dp_arguments import DataTrainingArguments, ModelArguments
LABEL_DICT = {}
LABEL_DICT['ner'] = ['CARDINAL', 'DATE', 'EVENT', 'FAC', 'GPE', 'LANGUAGE',
'LAW', 'LOC', 'MONEY', 'NORP', 'ORDINAL', 'ORG', 'PERCENT', 'PERSON', 'PRODUCT',
'QUANTITY', 'TIME', 'WORK_OF_ART']
LABEL_DICT['pos'] = ['$', "''", ',', '-LRB-', '-RRB-', '.', ':', 'ADD', 'AFX',
'CC', 'CD', 'DT', 'EX', 'FW', 'HYPH', 'IN', 'JJ', 'JJR', 'JJS', 'LS', 'MD',
'NFP', 'NN', 'NNP', 'NNPS', 'NNS', 'PDT', 'POS', 'PRP', 'PRP$', 'RB', 'RBR',
'RBS', 'RP', 'SYM', 'TO', 'UH', 'VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ',
'WDT', 'WP', 'WP$', 'WRB', '``']
LABEL_DICT['const'] = ['ADJP', 'ADVP', 'CONJP', 'EMBED', 'FRAG', 'INTJ', 'LST',
'META', 'NAC', 'NML', 'NP', 'NX', 'PP', 'PRN', 'PRT', 'QP', 'RRC', 'S', 'SBAR',
'SBARQ', 'SINV', 'SQ', 'TOP', 'UCP', 'VP', 'WHADJP', 'WHADVP', 'WHNP', 'WHPP',
'X']
LABEL_DICT['coref'] = ['False', 'True']
LABEL_DICT['srl'] = ['ARG0', 'ARG1', 'ARG2', 'ARG3', 'ARG4', 'ARG5', 'ARGA',
'ARGM-ADJ', 'ARGM-ADV', 'ARGM-CAU', 'ARGM-COM', 'ARGM-DIR', 'ARGM-DIS', 'ARGM-DSP',
'ARGM-EXT', 'ARGM-GOL', 'ARGM-LOC', 'ARGM-LVB', 'ARGM-MNR', 'ARGM-MOD', 'ARGM-NEG',
'ARGM-PNC', 'ARGM-PRD', 'ARGM-PRP', 'ARGM-PRR', 'ARGM-PRX', 'ARGM-REC', 'ARGM-TMP',
'C-ARG0', 'C-ARG1', 'C-ARG2', 'C-ARG3', 'C-ARG4', 'C-ARGM-ADJ', 'C-ARGM-ADV',
'C-ARGM-CAU', 'C-ARGM-COM', 'C-ARGM-DIR', 'C-ARGM-DIS', 'C-ARGM-DSP', 'C-ARGM-EXT',
'C-ARGM-LOC', 'C-ARGM-MNR', 'C-ARGM-MOD', 'C-ARGM-NEG', 'C-ARGM-PRP', 'C-ARGM-TMP',
'R-ARG0', 'R-ARG1', 'R-ARG2', 'R-ARG3', 'R-ARG4', 'R-ARG5', 'R-ARGM-ADV', 'R-ARGM-CAU',
'R-ARGM-COM', 'R-ARGM-DIR', 'R-ARGM-EXT', 'R-ARGM-GOL', 'R-ARGM-LOC', 'R-ARGM-MNR',
'R-ARGM-MOD', 'R-ARGM-PNC', 'R-ARGM-PRD', 'R-ARGM-PRP', 'R-ARGM-TMP']
for task in LABEL_DICT:
LABEL_DICT[task] = {label: "label" + str(i) for i, label in enumerate(LABEL_DICT[task])}
def convert_gate_to_mask(gates, num_of_heads=None):
    """Binarize head gates into a 0/1 mask of the same shape.

    When `num_of_heads` is given, exactly that many highest-scoring gates are
    kept; otherwise every gate above 0.5 is kept.
    """
    if num_of_heads is None:
        return (gates > 0.5).float()
    flat_mask = torch.zeros_like(gates).view(-1)
    keep = gates.view(-1).sort(descending=True)[1][:num_of_heads]
    flat_mask[keep] = 1.0
    return flat_mask.view_as(gates)
class STEFunction(torch.autograd.Function):
    """Straight-through estimator: hard top-k threshold forward, identity backward."""
    @staticmethod
    def forward(ctx, input, k):
        # Keep entries strictly above the (k+1)-th largest value, i.e. the
        # top k elements when values are distinct.
        # NOTE(review): indexing sorted[0][k] assumes a 1-D input — confirm.
        threshold = input.sort(descending=True)[0][k]
        return (input > threshold).float()
    @staticmethod
    def backward(ctx, grad_output):
        # Straight-through: gradient passes unchanged; None for the k arg.
        return grad_output, None
def transform_dict(config_dict: Dict, expand: bool = True):
    """
    Recursively convert an arbitrary config dict into a wandb/YAML-safe form.

    Scalars and None pass through; list/tuple/set become index-keyed dicts
    (converted back to lists when `expand` is False); nested dicts recurse;
    anything else is rendered as a "module:name" string.
    """
    converted: Dict[str, Any] = {}
    for key, value in config_dict.items():
        if value is None or isinstance(value, (int, float, str)):
            converted[key] = value
        elif isinstance(value, (list, tuple, set)):
            indexed = transform_dict(dict(enumerate(value)), expand)
            converted[key] = indexed if expand else [indexed[i] for i in range(len(value))]
        elif isinstance(value, dict):
            converted[key] = transform_dict(value, expand)
        else:
            # Classes / callables / instances: "module:name" string fallback.
            label = value.__name__ if hasattr(value, '__name__') else value.__class__.__name__
            converted[key] = f"{value.__module__}:{label}"
    return converted
def hardmax2(t):
    """Return a one-hot tensor marking the argmax along the last dimension."""
    one_hot = torch.zeros_like(t)
    # scatter_ writes 1.0 at the argmax index of each last-dim slice —
    # equivalent to the flat-index bookkeeping of the original.
    one_hot.scatter_(-1, t.argmax(dim=-1, keepdim=True), 1.0)
    return one_hot
def hardmax(X):
    """Distribute probability mass uniformly over all maxima of the last dim."""
    peak = torch.max(X, dim=-1, keepdim=True)[0]
    winners = (peak == X).float()
    return winners / torch.sum(winners, dim=-1, keepdim=True)
# To test hardmax functions
# pre_x = [[-10, 2, 2, 2], [-100, 1, 0, 1]]
# X = torch.Tensor(pre_x)
# print(hardmax(X))
#
# for num_dims in range(1, 6):
# pre_x = [[-10, 2, 2, 2], [-100, 1, 0, 1]]
# for _ in range(num_dims - 1):
# pre_x = [pre_x]
# X = torch.Tensor(pre_x)
# print(X)
# print(hardmax2(X), '\n')
def bimodal_normal(x: torch.Tensor, mu: float, sigma: float) -> None:
    """
    Inits the weights (in-place) with the bimodal normal distribution (symmetric).

    NOTE(review): the sign-flipping step below is commented out, so this
    currently draws from a single normal distribution despite the name —
    confirm whether the bimodal behavior should be restored or the name changed.

    :param x: input tensor
    :param mu: mean of the normal distribution
    :param sigma: standard deviation of the normal distribution
    """
    x.normal_(mean=mu, std=sigma)
    # size = x.size()
    # mask = torch.randint(0, 2, size=size) * 2 - 1  # Randomly flip half the values to their opposite sign
    # x *= mask
def rescale_norm(x: torch.Tensor, norm: float) -> torch.Tensor:
    """
    Return a rescaled copy of `x` whose norm equals `norm`.

    NOTE(review): despite the original wording, this is NOT in-place —
    callers must use the returned tensor.
    :param x: input tensor
    :param norm: norm to rescale to
    """
    return x / torch.norm(x) * norm
def get_total_gpus() -> int:
    """
    Get total number of GPUs in the server by parsing `nvidia-smi --list-gpus`
    (one output line per GPU, plus a trailing empty line).
    :return: number of GPUs
    """
    import subprocess
    sp = subprocess.Popen(['nvidia-smi', '--list-gpus'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out_str = sp.communicate()
    out_list = out_str[0].decode("utf-8").split('\n')
    # Subtract one as the last line is empty
    num_gpus = len(out_list) - 1
    print(f"... {num_gpus} GPUs found")
    return num_gpus
def get_idle_gpus(num_gpus: int = 2) -> List[int]:
    """
    Get idle GPUs in the server ("idle" = lowest current utilization,
    as reported by `nvidia-smi --query-gpu=utilization.gpu`).
    :param num_gpus: requested number of GPUs
    :return: list of idle GPU IDs
    :raises ValueError: if more GPUs are requested than exist
    """
    import operator
    import subprocess
    total_gpus = get_total_gpus()
    if num_gpus > total_gpus:
        raise ValueError(f'Requested number of GPUs ({num_gpus}) exceeds available GPUs ({total_gpus})')
    sp = subprocess.Popen(
        ['nvidia-smi', '--format=csv', '--query-gpu=utilization.gpu'], stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    out_str = sp.communicate()
    out_list = out_str[0].decode("utf-8").split('\n')
    gpu_utilization = []
    # Skip the CSV header line and the trailing empty line.
    for i, gpu in enumerate(out_list[1:-1]):
        utilization = int(gpu.split(' ')[0])
        gpu_utilization.append((i, utilization))
    # Ascending utilization: least-busy GPUs come first.
    sorted_gpus = sorted(gpu_utilization, key=operator.itemgetter(1))
    idle_gpus = [gpu[0] for gpu in sorted_gpus[:num_gpus]]
    return idle_gpus
def set_gpu_env(num_gpus: int = 1):
    """
    Set GPU environments in the server: pins CUDA_VISIBLE_DEVICES to the
    least-utilized GPUs before torch enumerates devices.
    :param num_gpus: number of GPUs to use
    :return: PyTorch device ("cuda" if any GPU is visible, else "cpu")
    """
    import os
    import torch
    idle_gpus = get_idle_gpus(num_gpus)
    os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(map(str, idle_gpus))
    print(f"... Available GPUs {idle_gpus}")
    # list available GPUs
    gpu_list = [torch.cuda.get_device_name(i) for i in range(torch.cuda.device_count())]
    print(f"... {len(gpu_list)} visible 'logical' GPUs: {gpu_list}")
    # Set up GPUs for multi-GPU training
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"... using {device}")
    return device
def compute_metrics(eval_pred):
    """Aggregate per-row (correct, total) count pairs into a single accuracy."""
    counts, _ = eval_pred
    totals = counts.sum(axis=0)
    return {"accuracy": totals[0] / totals[1]}
def setup_logger(training_args: TrainingArguments) -> Logger:
    """Configure a stdout logger and sync datasets/transformers verbosity.

    The log level comes from `training_args` (rank-aware); a short run
    summary is logged before returning the module logger.
    """
    logger: Logger = logging.getLogger(__name__)
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}\n device: {training_args.device}\n n_gpu: {training_args.n_gpu} \n"
        f"distributed training: {bool(training_args.local_rank != -1)}\n 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
    return logger
def setup_wandb(training_args: TrainingArguments, model_args: ModelArguments, data_args: DataTrainingArguments) -> str:
    """Initialise the wandb run and return the run-name serial string.

    The project name encodes task and probe settings (MLP vs LR, dims,
    layers, one-hot); the serial encodes epochs, learning rate,
    randomized-vs-pretrained weights and dev-vs-test split.
    """
    serial = f"Epoch{int(training_args.num_train_epochs)}-LR{training_args.learning_rate}-"
    if model_args.randomized:
        serial += "Randomized-"
    else:
        serial += "Pretrained-"
    if model_args.dev:
        serial += "Dev"
    else:
        serial += "Test"
    # WanDB setup
    if model_args.use_mlp:
        wandb_proj_name = f"ConvergedProbe-{data_args.task}-DPMLP-Dim{model_args.mlp_dim}-Layer{model_args.mlp_layers}"
    else:
        wandb_proj_name = f"ConvergedProbe-{data_args.task}-DPLR-Dim{model_args.mlp_dim}-Layer{model_args.mlp_layers}"
    if model_args.onehot:
        wandb_proj_name += "-OneHot"
    os.environ["WANDB_PROJECT"] = wandb_proj_name
    wandb.init(
        project=wandb_proj_name,
        name=serial,
    )
    return serial
def record_num_of_params(model, logger: Logger) -> None:
    """Log trainable/total parameter counts and store them in the wandb run summary."""
    num_trainable_params = model.num_parameters(only_trainable=True)
    num_total_params = model.num_parameters()
    logger.info(f"Number of parameters to train (without adapters): {num_trainable_params}")
    logger.info(f"Total number of parameters (without adapters): {num_total_params}")
    wandb.run.summary["num_trainable_params"] = num_trainable_params
    wandb.run.summary["num_total_params"] = num_total_params
| yileitu/probing-via-prompting | utils.py | utils.py | py | 9,989 | python | en | code | null | github-code | 36 |
15602167748 | import nukescripts
sn = nuke.selectedNode()
#make a panel that we use to apply presets
class buttonPanel(nukescripts.PythonPanel):
    """Modal panel whose buttons apply a saved node preset knob-by-knob."""
    def knobChanged(self,knob):
        #manually set presets because nuke sets other knobs to default and I don't want that
        # The pressed button's name is the preset name; copy each stored knob
        # value onto the selected node, then close the panel.
        for knob_name, setting in nuke.getUserPresetKnobValues( nuke.selectedNode().Class() , knob.name()).items():
            nuke.selectedNode()[knob_name].fromScript(setting)
        self.ok()
panel = buttonPanel('Presets','presets')
panel.setMinimumSize(100, 100)
for preset in nuke.getUserPresets(sn):
python_button = nuke.PyScript_Knob(preset,preset)
python_button.setFlag(nuke.STARTLINE)
panel.addKnob(python_button)
confirm = panel.showModal() | KieranOwenShepherd/NukeTools | QuickScripts/NODE_quick_presets.py | NODE_quick_presets.py | py | 744 | python | en | code | 1 | github-code | 36 |
5411923106 | import requests
import json
from config import currency
class APIException(Exception):
    """User-facing conversion error (unknown currency, identical currencies, bad amount)."""
    pass
class Converter:
    """Thin wrapper around the apilayer fixer API for currency conversion."""

    @staticmethod
    def _resolve(code):
        """Map a user-supplied currency name to its API ticker via `currency`."""
        try:
            return currency[code.lower()]
        except KeyError:
            raise APIException(f'Валюта {code} не найдена!')

    @staticmethod
    def get_price(base, sym, amount):
        """Convert `amount` of `base` into `sym`; returns the result rounded to 2 places."""
        base_key = Converter._resolve(base)
        sym_key = Converter._resolve(sym)
        if base_key == sym_key:
            raise APIException(f'Невозможно конвертировать одинаковые валюты: {base}!')
        try:
            int(amount)
        except ValueError:
            raise APIException(f'Неправильно ввели количество: {amount}')
        url = f'https://api.apilayer.com/fixer/convert?to={sym_key}&from={base_key}&amount={amount}'
        headers = {
            "apikey": "6TESL4S9m67q6gZqaBFdf4CqRcAw8t8Z"
        }
        reply = requests.request("GET", url, headers=headers, data={})
        parsed = json.loads(reply.content)
        return round(parsed['result'], 2)
24754731347 | import dash_bootstrap_components as dbc
from explainerdashboard.custom import *
class FeaturesImportanceTab(ExplainerComponent):
    """
    A class for creating a 'Feature Impact' tab in an explainer dashboard.
    """
    def __init__(self, explainer, title="Feature Impact", name=None,
                    hide_descriptions=True, hide_selector=True, hide_popout=True, **kwargs):
        """
        Initialize a FeaturesImportanceTab instance.

        Args:
        - explainer (ClassifierExplainer): Explainer instance containing dataset and trained model.
        - title (str): Title of the component.
        - name (str): The name of the component.
        - hide_descriptions (bool): Whether to display descriptions of the variables.
        - hide_selector (bool): Whether to display a selector or hide it.
        - hide_popout (bool): Whether to display the popout button or hide it.
        - **kwargs: Optional keyword arguments.
        """
        # Call the parent constructor
        super().__init__(explainer, title, name)

        # Setting attributes
        self.importances = ImportancesComponent(explainer, name=self.name+"0", title="Feature Impact", subtitle="Average impact on predicted dropout",
                                                hide_selector=hide_selector, hide_descriptions=hide_descriptions, hide_popout=hide_popout,)

        # Without variable descriptions there is nothing to show, so force-hide them.
        if not self.explainer.descriptions:
            self.hide_descriptions=True

    def layout(self):
        """
        Layout of the component.

        Returns:
        - The layout of the component wrapped in a Bootstrap card.
        """
        # Create a Bootstrap container
        return dbc.Container([
            dbc.Row([
                # Display ImportancesComponent
                dbc.Col([
                    self.importances.layout(),
                ]),
            ], class_name="mt-4"),
        ], fluid=True)
18825417053 | import random
def shuffle(A):
    """Fisher–Yates shuffle of list A, in place (uses the global random state)."""
    last = len(A) - 1
    for i in range(len(A)):
        j = random.randint(i, last)
        A[i], A[j] = A[j], A[i]
def random_sample(m, n):
    """Return a random sample of m integers from [1,...,n] as a list.

    Iterative form of Floyd's sampling algorithm; makes the exact same
    sequence of random.randint calls as the recursive formulation.
    """
    S = []
    for upper in range(n - m + 1, n + 1):
        pick = random.randint(1, upper)
        # On collision, `upper` itself is guaranteed not yet in S.
        S.append(upper if pick in S else pick)
    return S
# if __name__ == '__main__':
# A = list(range(10))
# for i in range(10):
# shuffle(A)
# print(A)
# for i in range(10):
# print(random_sample(10, 100))
| tzyl/algorithms-python | algorithms/random/randomize.py | randomize.py | py | 701 | python | en | code | 2 | github-code | 36 |
34445125382 | #!/usr/bin/env python
import os
import sys
import fileinput
import datetime
import logging
logFile = 'setup.log'
logging.basicConfig( filename = logFile,filemode = 'w', level = logging.INFO,format = '%(asctime)s - %(levelname)s: %(message)s', datefmt = '%m/%d/%Y %I:%M:%S %p' )
#import poaMenu
def getVarFromFile(fileName):
    """Load config variables from `fileName` into the module-level global `data`.

    NOTE(review): uses the deprecated `imp` module (removed in Python 3.12),
    and executes the config file as code — migrate to importlib or a real
    config parser, and only load trusted files.
    """
    import imp
    f = open(fileName)
    global data
    data = imp.load_source('data', '', f)
    f.close()
def replace(oldStr,newStr,fileName):
    """Replace every occurrence of `oldStr` with `newStr` in `fileName`, in place.

    fileinput with inplace=1 redirects stdout into the file being rewritten,
    so each (possibly modified) line is written straight back.
    """
    # The loop index from the original enumerate() was unused; dropped.
    for line in fileinput.input(fileName, inplace=1):
        sys.stdout.write(line.replace(oldStr, newStr))
# Get all params from config.txt (later we will also add a menu)
getVarFromFile('config.txt')

# --- Base system packages -------------------------------------------------
logging.info('Updating your VM')
os.system('sudo apt-get update && sudo apt-get -y upgrade')
logging.info('Installing python-pip')
os.system('sudo apt-get install python-pip')
logging.info('Installing git')
os.system('sudo apt install git')

# --- Ansible and its dependencies -----------------------------------------
logging.info('Installing Ansible')
os.system('sudo pip install ansible')
logging.info('Installing boto and boto3')
os.system('sudo pip install boto')
os.system('sudo pip install boto3')
# Make sure you have latest AWS CLI
logging.info('Making sure you have latest AWS CLI')
os.system('pip install awscli --upgrade --user')

# --- Download and configure the deployment playbook ------------------------
logging.info('Downloading Ansible playbook')
os.chdir('/home/ubuntu')
os.system('git clone https://github.com/poanetwork/deployment-playbooks.git')
os.chdir('deployment-playbooks')
# Branch selection: "core" for mainnet, "sokol" for the testnet.
text = "Selecting correct branch based on specified network type: [" + data.networkType + "]"
logging.info(text)
os.system("git checkout " + data.networkType)
# check that you ended up on a correct branch (look where the `*` is)
os.system('git branch')

# --- Prepare SSH keys (assumes you already have SSH keys for remote server) -
logging.info('Copying SSH keys')
os.system('cat ~/.ssh/id_rsa.pub > files/admins.pub')
cmd = "cp files/admins.pub files/ssh_" + data.nodeType + ".pub"
# NOTE(review): this copy command is built but never executed (the original
# os.system call was commented out, and an even earlier version passed the
# literal string 'cmd').  Confirm whether files/ssh_<nodeType>.pub is needed.
text = 'Configuring based on node type: [' + data.nodeType + ']'
logging.info(text)
os.system('cat group_vars/all.network group_vars/' + data.nodeType + '.example > group_vars/all')

# --- Fill in user-specific values in group_vars/all -------------------------
os.chdir('/home/ubuntu/deployment-playbooks/group_vars')
logging.info("Updating files with your information...")
fileName = "all"
# (template placeholder, replacement built from config.txt values)
# The 14 copy-pasted replace stanzas were collapsed into this table.
substitutions = [
    ('access_key: "INSERT KEY HERE"', 'access_key: "' + data.access_key + '"'),
    ('secret_key: "INSERT SECRET HERE"', 'secret_key: "' + data.secret_key + '"'),
    ('awskeypair_name: "keypairname"', 'awskeypair_name: "' + data.awskeypair_name + '"'),
    ('NODE_FULLNAME: "INSERT NODENAME"', 'NODE_FULLNAME: "' + data.NODE_FULLNAME + '"'),
    ('NODE_ADMIN_EMAIL: "INSERT@EMAIL"', 'NODE_ADMIN_EMAIL: "' + data.NODE_ADMIN_EMAIL + '"'),
    ('NETSTATS_SERVER: "INSERT FULL URL"', 'NETSTATS_SERVER: "' + data.NETSTATS_SERVER + '"'),
    ('NETSTATS_SECRET: "INSERT SECRET"', 'NETSTATS_SECRET: "' + data.NETSTATS_SECRET + '"'),
    ("MINING_KEYFILE: 'INSERT HERE'", "MINING_KEYFILE: '" + data.MINING_KEYFILE + "'"),
    ('MINING_ADDRESS: "INSERT HERE"', 'MINING_ADDRESS: "' + data.MINING_ADDRESS + '"'),
    ('MINING_KEYPASS: "INSERT HERE"', 'MINING_KEYPASS: "' + data.MINING_KEYPASS + '"'),
    ('vpc_subnet_id = "subnet-ID-number"', 'vpc_subnet_id: ' + data.vpc_subnet_id),
    ('allow_validator_ssh: true', 'allow_validator_ssh: ' + data.allow_validator_ssh),
    ('allow_validator_p2p: true', 'allow_validator_p2p: ' + data.allow_validator_p2p),
    ('associate_validator_elastic_ip: false',
     'associate_validator_elastic_ip: ' + data.associate_validator_elastic_ip),
]
for oldStr, newStr in substitutions:
    replace(oldStr, newStr, fileName)

# --- Create hosts inventory and run the playbook ----------------------------
logging.info('Creating HOSTS file')
os.chdir('/home/ubuntu/deployment-playbooks')
os.system("echo [" + data.nodeType + "] > hosts")
os.system("echo " + data.SERVER_IP + " >> hosts")
# run this script to configure the instance (might want to use paramiko - ssh via python)
logging.info('Running Ansible playbook and deploying')
os.system('ansible-playbook -i hosts site.yml')
print("Done\n==========\n")
logging.info('Done!')
#### Additional items for improvements:
## Menu:
###-------------------------------------
#print ("Enter AWS access_key:")
#access_key = input( "> " )
#print ("Enter your FULL NAME. \n This would be visible to other members of the network")
#NODE_FULLNAME = input( "> " )
#print ("Enter your email. \n This would be visible to other members of the network")
#NODE_ADMIN_EMAIL = input( "> " )
#print ("Enter NETSTATS_SERVER - this should be a url provided to you by the Master of Ceremony")
#NETSTATS_SERVER = input( "> " )
#print ("Enter NETSTATS_SECRET - this should be a secret code provided to you by the Master of Ceremony")
#NETSTATS_SECRET = input( "> " )
#print ("Enter MINING_KEYFILE - this should be a secret code provided to you by the Master of Ceremony")
#MINING_KEYFILE = input( "> " )
## Also we could add a function to generate SSH keys and upload to remote server
# ssh-keygen -t rsa -b 4096 -C "your_email@example.com" | maratP/poa-devops | poa-node-setup.py | poa-node-setup.py | py | 7,598 | python | en | code | 1 | github-code | 36 |
5045789896 | #!/usr/bin/python
# coding:utf8
#
# Client configuration file
#
config = {
    # Server endpoint
    'url': 'https://127.0.0.1:15789',
    # Token used to verify that data exchanged between server and client matches
    'token': 'hahaha',
    # Obfuscation/encryption key string; its length should be 16, 32 or 64
    'CryptoKey': 'jikjhg457hgdetyh',
    # Host name
    'hostname': 'mzs-mac',
}
| mzs0207/automationTool | Client/config.py | config.py | py | 345 | python | en | code | 3 | github-code | 36 |
17498596227 | import time
import tkinter as tk
import tkinter.ttk as ttk
import tkinter.messagebox as tkmsg
from ttkthemes import ThemedTk
from PIL import ImageTk, Image
from geoCosiCorr3D.geoImageCorrelation.geoCorr_utils import splitcall, project_path, clamp
import geoCosiCorr3D.geoImageCorrelation.geoCorr_utils as utils
def run(window_constructor):
    """Library entry point: build the themed root, construct the main window on it, and start the tk mainloop.

    Args:
        window_constructor (Window): Constructor for the program's main window;
            invoked as window_constructor(None, root).
    """
    app_root = get_root()
    window_constructor(None, app_root)
    app_root.mainloop()
def get_root():
    """Initialize tk and styles.

    Registers the custom ttk styles the rest of the module relies on:
    ``Red.TEntry`` (invalid-input highlight), ``text.Horizontal.TProgressbar``
    and ``text.Vertical.TProgressbar`` (progress bars with a centered label),
    and ``Dark.TFrame`` (separator).

    Returns:
        ThemedTk: the root of the application.
    """
    root = ThemedTk(theme='yaru')
    style = ttk.Style(root)

    # text box with red text: clone the yaru Entry field element, then wrap
    # it in a layout so foreground/fieldbackground can be recolored.
    style.element_create("plain.field", 'from', 'yaru', 'Entry.field')
    style.layout("Red.TEntry",
                 [('Entry.plain.field', {'children': [(
                     'Entry.background', {'children': [(
                         'Entry.padding', {'children': [(
                             'Entry.textarea', {'sticky': 'nswe'})],
                             'sticky': 'nswe'})], 'sticky': 'nswe'})],
                     'border': '2', 'sticky': 'nswe'})])
    style.configure("Red.TEntry", foreground='red', fieldbackground='#f0dfdf')

    # progress bar with text over it: add a label element spanning the trough.
    style.layout('text.Horizontal.TProgressbar', [
        ('Horizontal.Progressbar.trough', {'sticky': 'ew', 'children':
            [("Horizontal.Progressbar.pbar", {'side': 'left', 'sticky': 'ns'})]}),
        ('Horizontal.Progressbar.label', {'sticky': 'nswe'})])
    style.configure('text.Horizontal.TProgressbar', anchor='center')  # , foreground='orange')
    style.layout('text.Vertical.TProgressbar', [
        ('Vertical.Progressbar.trough', {'sticky': 'ns', 'children':
            [("Vertical.Progressbar.pbar", {'side': 'top', 'sticky': 'ew'})]}),
        ('Vertical.Progressbar.label', {'sticky': 'nswe'})])
    style.configure('text.Vertical.TProgressbar', anchor='center')  # , foreground='orange')

    # dark colored frame used as a separator
    style.configure('Dark.TFrame', background='DarkGray')
    style.layout("DEBUGFRAME", [('Frame', {})])
    style.configure('DEBUGFRAME', background='red')
    return root
def findInFrame(frame, name):
    """Search *frame*'s children recursively for a widget whose str() equals *name*.

    Returns the matching widget, or None when nothing matches.  Recursion
    only descends into ttk.Frame children.
    """
    for child in frame.children.values():
        if str(child) == name:
            return child
        if isinstance(child, ttk.Frame):
            found = findInFrame(child, name)
            if found:
                return found
def reset_option_menu(menu, menu_var, options, index=None):
    """Replace the entries of an OptionMenu with *options*.

    When *index* is given, the menu variable is set to options[index].
    Otherwise the previous selection is kept if it is still one of the
    options, falling back to the first option.
    """
    previous = menu_var.get()
    inner = menu["menu"]
    inner.delete(0, "end")
    for label in options:
        # Default-argument trick binds each label at definition time.
        inner.add_command(label=label, command=lambda value=label: menu_var.set(value))
    if index is not None:
        menu_var.set(options[index])
    elif previous not in options:
        menu_var.set(options[0])
@splitcall
def disable_button(button):
    """Create a callback (via @splitcall) that disables *button* and swaps in an explanatory tooltip."""
    tip = getattr(button, 'info_tip', None)
    if tip is not None:
        # Remember the normal tooltip so enable_button can restore it.
        button._old_message = tip.text
        tip.text = 'Not all parameter requirements\nto press this button have been met.'
    button['state'] = tk.DISABLED
@splitcall
def enable_button(button):
    """Create a callback (via @splitcall) that re-enables *button* and restores its saved tooltip text."""
    try:
        # _old_message only exists when disable_button previously ran.
        button.info_tip.text = button._old_message
    except AttributeError:
        pass
    button['state'] = tk.NORMAL
# _entries holds the output entry variables, keyed as 'prompt.paramName'.
_entries = {}


def get_entries():
    """Return the module-wide mapping of parameter names to entry variables."""
    return _entries


# Stacks backing the step_in/step_out save-or-discard semantics.
entries_stack = []
invalids_stack = []
def step_in():
    """Push one layer onto the option-saving stack.

    Saving option selection (pressing Okay instead of Cancel) is done as a
    stack: snapshot the current entries and invalids, then continue working
    on a fresh copy.
    """
    global _entries, invalids
    entries_stack.append(_entries)
    _entries = dict(_entries)
    invalids_stack.append(invalids)
    invalids = {}
def step_out(save):
    """Pop one layer from the option-saving stack.

    Args:
        save (bool): When True, merge the working layer's values back into
            the restored entries before discarding it.
    """
    global _entries, invalids
    working = _entries
    _entries = entries_stack.pop()
    invalids = invalids_stack.pop()
    if not save:
        return
    for name, holder in working.items():
        if name in _entries:
            _entries[name].set(holder.get())
        else:
            # Entry did not exist in the outer layer: keep its value in a
            # plain container.
            _entries[name] = SimpleContainer(holder.get())
# map of widget to error message tooltip
invalids = {}


def addInvalid(entry, message, invalidated):
    """Mark *entry* invalid, showing *message* in its tooltip.

    Fires *invalidated* only on the transition from "no invalid entries"
    to "some invalid entries".

    BUG FIX: the old code assigned ``invalids[entry] = tip`` outside the
    membership check, so a second call for an already-invalid entry raised
    NameError on the unbound local ``tip`` (and the intermediate
    ``invalids[entry] = None`` assignment was redundant).
    """
    if len(invalids) == 0:
        invalidated()
    if entry not in invalids:
        tip = entry.info_tip
        tip.text = message
        invalids[entry] = tip
    entry.configure(style='Red.TEntry')
def removeInvalid(entry, validated):
    """Drop *entry* from the invalids map; fire *validated* once it empties."""
    if entry in invalids:
        invalids.pop(entry).reset_msg()
        if not invalids:
            validated()
    entry.configure(style='TEntry')
class SimpleContainer:
    """Minimal value holder mirroring the get/set interface of tk variables (IntVar, StringVar, ...)."""

    def __init__(self, value):
        self.value = value

    def set(self, value):
        self.value = value

    def get(self):
        return self.value

    def __str__(self):
        return "SimpleContainer: {}".format(self.value)

    __repr__ = __str__
def open_local_image(path):
    """Open *path* (resolved relative to the project root) and return it as an ImageTk.PhotoImage."""
    return ImageTk.PhotoImage(Image.open(project_path(path)))
# tkinter objects
class ToolTip:
    """
    Tooltip recipe from
    http://www.voidspace.org.uk/python/weblog/arch_d7_2006_07_01.shtml#e387

    Shows a borderless Toplevel with the tooltip text while the pointer is
    over *widget*.  Binding ids are kept so destroy() can unbind cleanly.
    """
    def __init__(self, widget, text):
        self.widget = widget
        self.tipwindow = None
        self.id = None
        self.x = self.y = 0
        self.text = text
        # add='+' keeps any existing <Enter>/<Leave> bindings on the widget.
        self.eid = self.widget.bind('<Enter>', self.showtip, add='+')
        self.lid = self.widget.bind('<Leave>', self.hidetip, add='+')

    def showtip(self, _):
        """Display text in tooltip window."""
        # Already showing, or nothing to show.
        if self.tipwindow or not self.text:
            return
        x, y, _, _ = self.widget.bbox("insert")
        # Place the tip just to the right of the widget.
        x = x + self.widget.winfo_rootx() + self.widget.winfo_width()
        y = y + self.widget.winfo_rooty()
        self.tipwindow = tw = tk.Toplevel(self.widget)
        # Strip window decorations so only the label is visible.
        tw.wm_overrideredirect(1)
        tw.wm_geometry("+%d+%d" % (x, y))
        try:
            # For Mac OS
            tw.tk.call("::tk::unsupported::MacWindowStyle",
                       "style", tw._w,
                       "help", "noActivates")
        except tk.TclError:
            pass
        label = tk.Label(tw, text=self.text, justify=tk.LEFT,
                         relief=tk.SOLID, borderwidth=1)
        label.pack(ipadx=1)

    def hidetip(self, _):
        # Clear the reference first so a re-entrant show can proceed.
        tw = self.tipwindow
        self.tipwindow = None
        if tw:
            tw.destroy()

    def destroy(self):
        # Remove only our own bindings (identified by the saved ids).
        self.widget.unbind('<Enter>', self.eid)
        self.widget.unbind('<Leave>', self.lid)
        if self.tipwindow: self.tipwindow.destroy()
class TimedToolTip:
    """
    ToolTip that appears on hover after a certain amount of time.
    Tooltip recipe from
    https://stackoverflow.com/a/36221216

    The tip is scheduled via widget.after() on <Enter> and cancelled on
    <Leave> or any button press.  reset_msg() restores the original text
    after it was temporarily replaced (e.g. by a validation error).
    """
    def __init__(self, widget, text='widget info'):
        self.waittime = 500  # miliseconds
        self.wraplength = 999  # pixels
        self.widget = widget
        self.text = text
        self.start_text = text
        # add='+' keeps existing bindings; ids are kept for destroy().
        self.enterid = self.widget.bind("<Enter>", self.enter, add='+')
        self.leaveid = self.widget.bind("<Leave>", self.leave, add='+')
        self.leavepid = self.widget.bind("<ButtonPress>", self.leave, add='+')
        self.id = None  # pending after() id, if any
        self.tw = None  # the tooltip Toplevel, if shown

    def enter(self, event=None):
        """Pointer entered: schedule the tip."""
        self.schedule()

    def leave(self, event=None):
        """Pointer left (or button pressed): cancel and hide."""
        self.unschedule()
        self.hidetip()

    def schedule(self):
        """(Re)arm the delayed showtip callback."""
        self.unschedule()
        self.id = self.widget.after(self.waittime, self.showtip)

    def unschedule(self):
        """Cancel a pending showtip callback, if any."""
        id = self.id
        self.id = None
        if id:
            self.widget.after_cancel(id)

    def showtip(self, event=None):
        """Create the tooltip window above the widget."""
        if not self.text: return
        # Shift the tip up by roughly one line-height per text line.
        line_count = self.text.count('\n') + 1
        x = y = 0
        x, y, cx, cy = self.widget.bbox("insert")
        x += self.widget.winfo_rootx() + 5
        y += self.widget.winfo_rooty() - 15 * line_count - 10
        # creates a toplevel window
        self.tw = tk.Toplevel(self.widget)
        # Leaves only the label and removes the app window
        self.tw.wm_overrideredirect(True)
        self.tw.wm_geometry("+%d+%d" % (x, y))
        label = tk.Label(self.tw, text=self.text, justify='left',
                         background="#ffffff", relief='solid', borderwidth=1,
                         wraplength=self.wraplength)
        label.pack(ipadx=1)

    def hidetip(self):
        """Destroy the tooltip window, if present."""
        tw = self.tw
        self.tw = None
        if tw:
            tw.destroy()

    def reset_msg(self):
        """Restore the tooltip text set at construction time."""
        self.text = self.start_text

    def destroy(self):
        """Tear down the tooltip window and remove this tip's bindings."""
        if self.tw: self.tw.destroy()
        self.widget.unbind("<Enter>", self.enterid)
        self.widget.unbind("<Leave>", self.leaveid)
        self.widget.unbind("<ButtonPress>", self.leavepid)
# Modified from https://gist.github.com/novel-yet-trivial/3eddfce704db3082e38c84664fc1fdf8
class VerticalScrolledFrame:
    """
    A vertically scrolled Frame that can be treated like any other Frame
    ie it needs a master and layout and it can be a master.
    :width:, :height:, :bg: are passed to the underlying Canvas
    :bg: and all other keyword arguments are passed to the inner Frame
    note that a widget layed out in this frame will have a self.master 3 layers deep,
    (outer Frame, Canvas, inner Frame) so
    if you subclass this there is no built in way for the children to access it.
    You need to provide the controller separately.
    """
    def __init__(self, master, **kwargs):
        # width/height go to the Canvas, everything else to the outer Frame.
        width = kwargs.pop('width', None)
        height = kwargs.pop('height', None)
        self.outer = ttk.Frame(master, **kwargs)

        self.vsb = ttk.Scrollbar(self.outer, orient=tk.VERTICAL)
        self.vsb.pack(fill=tk.Y, side=tk.RIGHT)
        self.canvas = tk.Canvas(self.outer, highlightthickness=0, width=width, height=height, background='#f5f6f7')
        self.canvas.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)
        self.canvas['yscrollcommand'] = self.vsb.set
        # mouse scroll does not seem to work with just "bind"; You have
        # to use "bind_all". Therefore to use multiple windows you have
        # to bind_all in the current widget
        self.canvas.bind("<Enter>", self._bind_mouse)
        self.canvas.bind("<Leave>", self._unbind_mouse)
        self.vsb['command'] = self.canvas.yview

        self.inner = ttk.Frame(self.canvas)
        # pack the inner Frame into the Canvas with the topleft corner 4 pixels offset
        self.wid = self.canvas.create_window(0, 0, window=self.inner, anchor='nw')
        self.canvas.bind('<Configure>', self._on_canvas_configure)
        self.inner.bind("<Configure>", self._on_frame_configure)

        self.outer_attr = set(dir(ttk.Widget))

    def __getattr__(self, item):
        if item in self.outer_attr:
            # geometry attributes etc (eg pack, destroy, tkraise) are passed on to self.outer
            return getattr(self.outer, item)
        else:
            # all other attributes (_w, children, etc) are passed to self.inner
            return getattr(self.inner, item)

    def _on_canvas_configure(self, event):
        # Keep the inner window as wide as the canvas.
        width = event.width
        self.canvas.itemconfig(self.wid, width=width)

    def _on_frame_configure(self, event=None):
        # Grow the scrollregion to the inner content, but never smaller
        # than the visible canvas height.
        x1, y1, x2, y2 = self.canvas.bbox("all")
        height = self.canvas.winfo_height()
        self.canvas.config(scrollregion=(0, 0, x2, max(y2, height)))

    def _bind_mouse(self, event=None):
        # <4>/<5> are the X11 wheel events; <MouseWheel> covers Windows/Mac.
        self.canvas.bind_all("<4>", self._on_mousewheel)
        self.canvas.bind_all("<5>", self._on_mousewheel)
        self.canvas.bind_all("<MouseWheel>", self._on_mousewheel)

    def _unbind_mouse(self, event=None):
        self.canvas.unbind_all("<4>")
        self.canvas.unbind_all("<5>")
        self.canvas.unbind_all("<MouseWheel>")

    def _on_mousewheel(self, event):
        """Linux uses event.num; Windows / Mac uses event.delta"""
        if event.num == 4 or event.delta > 0:
            self.canvas.yview_scroll(-1, "units")
        elif event.num == 5 or event.delta < 0:
            self.canvas.yview_scroll(1, "units")

    def __str__(self):
        return str(self.outer)
class Window:
    """Base class of all my windows that allows it to load child windows and contains helper functions for creating forms"""

    def __init__(self, parent, top_level, title, multichild=False):
        """Creates a base frame for a window.

        Args:
            top_level (tk.TopLevel): The toplevel or window to draw onto.
            title (str): The title of the window.
            multichild (bool, optional): If true, this window can have multiple children. If false, this window supports the _entries stack for input parameters. Defaults to False.
        """
        root_frame = ttk.Frame(top_level)
        root_frame.pack(fill='both', expand=True)
        self.root = root_frame
        self.top_level = top_level
        top_level.title(title)
        self.children = None
        self.parent = parent
        if multichild: self.children = []

    def new_window(self, constructor, *params, on_close=None, **kargs):
        """Generates a new window using the specified `Window`-descendant constructor, passing in the specified params and kargs.

        Args:
            constructor (constructor): The type of the specified window.
            on_close (f()): An additional callback to run when the window closes.

        Returns:
            Window: The child.
        """
        child = tk.Toplevel(self.root)
        if isinstance(self.children, list):
            # multichild mode: just track the new child.
            child = constructor(self, child, *params, **kargs)
            self.children.append(child)
        else:
            # single-child mode: save the entries layer and hide this window.
            step_in()
            child = constructor(self, child, *params, **kargs)
            self.top_level.withdraw()
            self.children = child
        child.close_callback = on_close
        child.top_level.protocol("WM_DELETE_WINDOW", self.child_close(child, False))
        return child

    def embed_window(self, master, constructor, *params, **kargs):
        """Embeds a Window onto master with specified parameters.

        Args:
            constructor: The Window constructor.

        Returns:
            Window: The embedded window.
        """
        # Window.__init__ calls title(); give non-toplevel masters a no-op.
        if not hasattr(master, 'title'): master.title = lambda _: None
        window = constructor(self, master, *params, **kargs)
        return window

    @splitcall
    def child_close(self, child, to_save):
        """Closes the current child window and bring this one back into view."""
        if child.close_callback:
            child.close_callback()
        if isinstance(self.children, list):
            self.children.remove(child)
            child.top_level.destroy()
            return
        # single-child mode: unwind the entries stack, optionally saving.
        step_out(to_save)
        self.top_level.deiconify()
        self.children.top_level.destroy()
        self.children = None

    @splitcall
    def back(self, to_save):
        """Go back, aka close the current window and return to the previous"""
        self.parent.child_close(self.parent.children, to_save)()

    def load_template(self, text):
        """Creates the standard template with self.params_f and self.buttons_f with an ok and cancel button."""
        self.params_f = ttk.LabelFrame(self.root, text=text)
        self.params_f.pack(padx=5, pady=5, fill="both")
        buttons_f = ttk.Frame(self.root)
        buttons_f.pack(fill=tk.X, ipadx=5, ipady=5)
        ok_b = ttk.Button(buttons_f, text="Ok", command=self.back(True))
        ok_b.pack(side=tk.RIGHT, padx=10, ipadx=10)
        # Ok is only pressable while every parameter in params_f is valid.
        self.register_on_invalidate(disable_button(ok_b), self.params_f)
        self.register_on_validate(enable_button(ok_b), self.params_f)
        ttk.Button(buttons_f, text="Cancel", command=self.back(False)).pack(side=tk.RIGHT, ipadx=10)

    def make_frame(self, **kargs):
        """Create and pack a basic frame with some default values set."""
        # Presence of row/column selects grid() over pack().
        grid = 'row' in kargs or 'column' in kargs
        kargs.setdefault('master', self.root)
        kargs.setdefault('text', None)
        if grid:
            kargs.setdefault('sticky', 'news')
        else:
            kargs.setdefault('fill', 'both')
        kargs.setdefault('padx', 5)
        kargs.setdefault('pady', 5)
        f = ttk.LabelFrame(kargs['master'], text=kargs['text']) if kargs['text'] else ttk.Frame(kargs['master'])
        del kargs['master']
        del kargs['text']
        if grid:
            f.grid(**kargs)
        else:
            f.pack(**kargs)
        return f

    def make_run_bar(self, command, param_f, run_tip, start_msg, complete_msg, horizontal=True):
        """Create a run bar that runs (on a separate thread) a command.

        Args:
            command ((0 -> ())): The callback to run on a separate thread upon press of the 'Run' button.
            param_f (Frame): The 'Run' button will only be enabled when this frame is validated.
            run_tip (str): The message on the run button's hover.
            start_msg (str): The message to show when run is pressed.
            complete_msg (str): The message to show when command is completed.
        """
        def prog(val):
            # Progress callback installed as utils.__mark_progress__ below.
            val = int(clamp(val, 1, 100))
            run_p['value'] = val
            ttk.Style().configure(style, text=str(val) + ' %')
            # if val == 100:
            #     tkmsg.showinfo('Complete', message=complete_msg)

        def run():
            command(callback=thread_callback)
            ttk.Style().configure(style, text='0 %')
            tkmsg.showinfo('Starting...', start_msg)

        def thread_callback():
            time.sleep(0.1)
            tkmsg.showinfo('Complete', message=complete_msg)

        utils.__mark_progress__ = prog
        if horizontal:
            b_side, p_side = tk.RIGHT, tk.LEFT
            fill = 'x'
            padx = ipadx = 10
            pady = ipady = 0
            style = 'text.Horizontal.TProgressbar'
            orient = 'horizontal'
        else:
            b_side, p_side = tk.BOTTOM, tk.TOP
            fill = 'y'
            padx = ipadx = 0
            pady = ipady = 10
            style = 'text.Vertical.TProgressbar'
            orient = 'vertical'
        self.runbar_f = self.make_frame(expand=1, padx=(0, 5), pady=(0, 5))
        run_b = ttk.Button(self.runbar_f, text="Run", command=run)
        run_b.pack(side=b_side, padx=padx, ipadx=ipadx, pady=pady, ipady=ipady)
        run_b.info_tip = TimedToolTip(run_b, text=run_tip)
        run_p = ttk.Progressbar(self.runbar_f, style=style, orient=orient)
        ttk.Style().configure(style, text='')
        run_p.pack(side=p_side, padx=padx, ipadx=ipadx, pady=pady, ipady=ipady, fill=fill, expand=True)
        run_p['value'] = 0
        self.register_on_invalidate(disable_button(run_b), param_f)
        self.register_on_validate(enable_button(run_b), param_f)

    def register_on_validate(self, function, frame):
        """Adds an additional function to the end of the `validated` event handler"""
        if hasattr(frame, 'on_validated'):
            # BUG FIX: this previously read frame.on_invalidated, which
            # chained the *invalidated* handler into the validated event.
            old = frame.on_validated
            def chained():
                old()
                function()
        else:
            chained = function
        frame.on_validated = chained

    def register_on_invalidate(self, function, frame):
        """Adds an additional function to the end of the `invalidated` event handler"""
        if hasattr(frame, 'on_invalidated'):
            old = frame.on_invalidated
            def chained():
                old()
                function()
        else:
            chained = function
        frame.on_invalidated = chained

    def redirect_validation(self, from_frame, to_frame):
        """Redirect validation events from from_frame to to_frame"""
        self.register_on_validate(lambda *params, **kargs: to_frame.on_validated(*params, **kargs), from_frame)
        self.register_on_invalidate(lambda *params, **kargs: to_frame.on_invalidated(*params, **kargs), from_frame)
| SaifAati/Geospatial-COSICorr3D | geoCosiCorr3D/geoCosiCorr3D_GUI/geoImageCorrelation_GUI/tk_utils.py | tk_utils.py | py | 20,960 | python | en | code | 37 | github-code | 36 |
70389328743 |
import constants as vals
import funcs as fun
def findingDepth(rpt, rpt2, tipThumb,tipThumb2, kThumb,kThumb2, tipIndex,tipIndex2,kIndex,kIndex2):
    """Compute stereo depth for four tracked points and push each into vals.depthBuff.

    For each point (thumb tip, thumb knuckle, index tip, index knuckle) the
    pixel disparity between the two camera views (rpt vs rpt2, indexed by
    the corresponding *Thumb/*Index arguments) is computed with
    fun.distanceVec, then converted via depth = focal / disparity * baseline.
    # assumes rpt/rpt2 are indexable collections of (x, y) points -- TODO confirm
    """
    focal=1380 #pixels, I found this online
    disparityTipThumb=fun.distanceVec([rpt[tipThumb][0]],[rpt[tipThumb][1]],\
    [rpt2[tipThumb2][0]],[rpt2[tipThumb2][1]])[0]
    disparityKThumb=fun.distanceVec([rpt[kThumb][0]],[rpt[kThumb][1]],\
    [rpt2[kThumb2][0]],[rpt2[kThumb2][1]])[0]
    disparityTipIndex=fun.distanceVec([rpt[tipIndex][0]],[rpt[tipIndex][1]],\
    [rpt2[tipIndex2][0]],[rpt2[tipIndex2][1]])[0]
    disparityKIndex=fun.distanceVec([rpt[kIndex][0]],[rpt[kIndex][1]],\
    [rpt2[kIndex2][0]], [rpt2[kIndex2][1]])[0]
    disparityList=[disparityTipThumb,disparityKThumb,disparityTipIndex,disparityKIndex]
    # NOTE(review): depthList is never used below -- presumably leftover.
    depthList=[]
    for i in xrange(len(disparityList)):
        # Near-zero disparity would blow up the division; report depth 0.
        if disparityList[i]<1:
            depth=0
        else:
            depth=1.0*focal/disparityList[i]*13.4 #cm, the b value
        vals.depthBuff[i].put(depth)
#Some notes about depth measurement
# Z = f * b/d
#where f is the focal length, b is the baseline, or distance between the cameras, and d the disparity between corresponding points.
#1cm=37.79527559055 pixels
#35cm=1322.834645669pixels
#6cm= 226.7716535433pixels
#35cm=f*6cm/disparity
#f=35cm/6cm*disparity
#lower >fingers >upper
def checkAllInBox():
    """Return True when every tracked LED's mean depth lies within the box limits."""
    return all(vals.boxLimit <= vals.depthBuff[i].mean() <= vals.boxLimitBottom
               for i in xrange(4))
def checkIndexInBox():
    """Return True when the index fingertip's mean depth is at or beyond the box's near limit."""
    return vals.depthBuff[2].mean() >= vals.boxLimit
def checkAllAboveBox():
    """Return True when every LED's mean depth is closer than the box's near limit (i.e. all above the box)."""
    return all(vals.depthBuff[i].mean() < vals.boxLimit for i in xrange(4))
def meanDepth():
    """Return the average of the four LEDs' mean depths."""
    return sum(vals.depthBuff[i].mean() for i in xrange(4)) / 4
"""
The following is code so that nothing will function if the depth of
each LED is below 25. I chose 25 as the magic number as around that
depth, the LEDs tend to go crazy because the Wiimotes cannot track them
accurately. I haven't tested this value for other setups but mine.
"""
def checkAllAboveTwentyFive():
for i in xrange(4):
if vals.depthBuff[i].mean()<=25: #if a single one is <25, false
return False;
return True; | julian-ramos/fingers | doDepth.py | doDepth.py | py | 2,671 | python | en | code | 0 | github-code | 36 |
14086949836 | import streamlit as st
from pages.common.queries import run_query
from pages.common.presenter import display_result
from pages.common.utils import convert_template
from pages.common.plotting import get_figure
import logging
logging.getLogger("pybatfish").setLevel(logging.WARNING)
APP = """This is a Streamlit app that enables the user to run network analysis
queries using [Batfish](https://www.batfish.org/).
The app allows the user to select a Batfish question by category and name.
The app runs the selected question and displays the results in a table.
All answered questions are saved.
Find more information about Batfish questions
[here](https://batfish.readthedocs.io/en/latest/index.html).
"""
# Start Page Here
st.set_page_config(layout="wide")
st.header("Network Analysis")
# st.markdown(APP)
# Get selected questions
qlist = st.session_state.get("qlist")
if "activesnap" in st.session_state:
st.subheader(f"Snapshot: {st.session_state.activesnap['name']}")
# Run selected questions
if qlist:
qs = convert_template(qlist)
q_names = [q["name"] for q in qs]
tabs = st.tabs(q_names)
for idx, tab in enumerate(tabs):
with tab:
if qs[idx].get("options"):
st.write(f"**Options:** {qs[idx]['options']}")
answer = run_query(qs[idx])
display_result(qs[idx]["fun"], answer)
# Plot some answers
if qs[idx]["fun"] in ["layer3Edges", "userProvidedLayer1Edges"]:
_, col, _ = st.columns([1, 2, 1])
fig = get_figure(answer.frame())
col.pyplot(fig)
else:
st.warning("Select some questions to proceed.")
else:
st.warning("Please add a snapshot to continue.")
| martimy/Bat-Q | pages/2_Analysis.py | 2_Analysis.py | py | 1,801 | python | en | code | 1 | github-code | 36 |
69930126823 | import pygame
import BulletClass
from EnemiesControllerClass import EnemiesController
class PlayerShip:
    """The player-controlled ship: movement, rendering, shooting, and collision checks."""

    ShipSpeed = 7
    BULLET_RESOURCE = "../Resources/Images/bullet.png"
    # Static variable containing all the bullets the ship has fired
    Bullets = []

    def __init__(self, imageLocation, screenSize):
        """Load and scale the ship sprite, position it, and load the firing sound."""
        self.shipSprite = pygame.image.load(imageLocation)
        self.shipSprite = pygame.transform.scale(self.shipSprite, (50, 50))
        self.shipRect = self.shipSprite.get_rect()
        self.shipRect.x = 320
        self.shipRect.y = 400
        self.bulletSound = pygame.mixer.Sound("../Resources/Sounds/bullet_sound.wav")
        self.screenSize = screenSize

    def move(self, x, y):
        """Move the ship by (x, y) pixels, bouncing off the screen's horizontal edges."""
        # Next two if statements constrain the ship to within the bounds of the screen
        if x < 0 and (self.shipRect.x + x) < 0:
            x = PlayerShip.ShipSpeed
        if x > 0 and ((self.shipRect.x + self.shipRect.w) + x) > self.screenSize[0]:
            x = -PlayerShip.ShipSpeed
        self.shipRect = self.shipRect.move([x, y])

    def display(self, screen):
        """Blit the ship sprite at its current position."""
        screen.blit(self.shipSprite, self.shipRect)

    def shoot(self):
        """Spawn a bullet just above the ship's nose and play the firing sound."""
        xLoc = self.shipRect.x + self.shipRect.w / 4
        yLoc = self.shipRect.y
        bullet = BulletClass.Bullet(xLoc, yLoc, PlayerShip.BULLET_RESOURCE, "player")
        PlayerShip.Bullets.append(bullet)
        self.bulletSound.play()

    def moveBulletsAndDisplay(self, screen):
        """Advance and draw all live player bullets.

        Returns True when the player has been hit (delegates to
        checkCollisionForEnemyBullets); otherwise None.
        """
        if self.checkCollisionForEnemyBullets():
            return True
        # BUG FIX: the old code removed off-screen bullets from the list
        # while iterating over it, which skips the element following each
        # removal.  Filter first, then move/draw the survivors.
        PlayerShip.Bullets = [b for b in PlayerShip.Bullets if b.bulletRect.y >= 0]
        for bullet in PlayerShip.Bullets:
            bullet.move("player")
            bullet.display(screen)

    def checkCollisionForEnemyBullets(self):
        """Return True when an enemy ship or one of its bullets hits the player."""
        for enemy in EnemiesController.EnemyList:
            # Check for actual collision with the enemy
            if enemy.enemyRect.colliderect(self.shipRect):
                EnemiesController.EnemyList.remove(enemy)
                print("You lose sucka!")
                return True
            # Check for a collision with that enemy's bullets
            for bullet in enemy.bullets:
                if bullet.bulletRect.colliderect(self.shipRect):
                    enemy.bullets.remove(bullet)
                    print("You lose sucka!")
                    return True
| ErikTillberg/Space_Invaders_Clone | Game/PlayerShipClass.py | PlayerShipClass.py | py | 2,586 | python | en | code | 0 | github-code | 36 |
11620940756 | import tkinter as tk
# Typography
FONT_FAMILY0 = ("Unispace", 9)
FONT_FAMILY1 = ("Unispace", 13)
# NOTE: FONT_FAMILY2 was assigned twice with the same value; deduplicated.
FONT_FAMILY2 = ("Unispace", 15)
FONT_FAMILY3 = ("Unispace", 17)
FONT_FAMILY4 = ("Unispace", 19)

COLOR_AMARILLO = "#fbf236"  # yellow accent color

# Default pack() options for buttons
BUTTON_LAYOUT = dict(fill="both", expand=True)

# GUI images
## Login screen
FONDO_INICIO = "images/fondo_login.gif"
MARCO_INICIO = "images/marco-27X34.png"
## Customization screen
MARCO_PERSONALIZACION = "images/marco-354x220_200%.png"

## Menu
# Races
RAZAS = ("Humanos", "Enanos", "Elfos")
# Classes
CLASES = ("Guerrero", "Cazador", "Bárbaro")

# Character image paths
RUTA_IMAGEN_PELO = "images/pelo2-removebg-preview.png"
RUTA_IMAGEN_CUERPO = "images/cuerpo.png"
RUTA_IMAGEN_OJOS = "images/ojos.png"
RUTA_IMAGEN_ROPA = "images/ropa.png"
RUTA_IMAGEN_ESPADA = ""
BG_MARCO_TIENDA="#8f563b" | AldoVR03/Prototipo | presentacion/constants.py | constants.py | py | 849 | python | es | code | 0 | github-code | 36 |
28522269867 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington
# See opus_core/LICENSE
from opus_core.variables.variable import Variable
from opus_core.misc import unique
from variable_functions import my_attribute_label
from numpy import zeros
from opus_core.logger import logger
class occupied_spaces(Variable):
    """units occupied by consumers, the unit is defined by unit_name in building_types
    table (either building_sqft or residential_units)
    This is the same as units_occupied at this moment
    """
    _return_type="int32"  # dtype of the per-building result array

    def dependencies(self):
        # Base dependencies only; the per-unit-name "occupied_<unit_name>"
        # variables are added lazily in compute() once names are known.
        return ["urbansim_parcel.building.building_sqft",
                "unit_name=building.disaggregate(building_type.unit_name)"
                ]

    def compute(self, dataset_pool):
        buildings = self.get_dataset()
        # One result slot per building, filled group-by-group below.
        results = zeros(buildings.size(), dtype=self._return_type)
        unit_names = unique(buildings["unit_name"])
        logger.log_note("Unit names: %s" % unit_names)
        for unit_name in unit_names:
            #should not count parcel_sqft
            if unit_name == "parcel_sqft":
                logger.log_warning("occupied_spaces: Skipping unit name %s" % unit_name)
                continue
            if unit_name == '':
                logger.log_warning("occupied_spaces: Skipping empty unit name")
                continue
            # Delegate to the matching "occupied_<unit_name>" building variable.
            vname = "occupied_" + unit_name
            self.add_and_solve_dependencies(["urbansim_parcel.building." + vname], dataset_pool)
            matched = buildings["unit_name"] == unit_name
            results[matched] = buildings[vname][matched].astype(self._return_type)
        return results

    def post_check(self, values, dataset_pool=None):
        # size = dataset_pool.get_dataset("building").size()
        # Occupied space can never be negative.
        self.do_check("x >= 0")
from opus_core.tests import opus_unittest
from opus_core.datasets.dataset_pool import DatasetPool
from opus_core.storage_factory import StorageFactory
from numpy import array
from opus_core.tests.utils.variable_tester import VariableTester
class Tests(opus_unittest.OpusTestCase):
def test_my_inputs(self):
tester = VariableTester(
__file__,
package_order=['urbansim_parcel','urbansim'],
test_data={
"building":{"building_id": array([1,2,3,4,5,6,7,8,9,10]),
"unit_name": array(["building_sqft","residential_units","residential_units","residential_units",
'building_sqft',"residential_units","building_sqft", "parcel_sqft",
"residential_units","residential_units",]),
"building_sqft": array([1,0,0,0,1,3,3,1,2,2])*1000,
"residential_units": array([0,3,1,2,0,1,0,1,2,4])
#occupied sqft 1400,0,0,0,0,0,1200,0,0,0
#occupied units 0,3,3,0,0,1,1, 0,1,1
},
"job":{"job_id": array([1,2,3,4,5,6,7,8,9,10]),
"building_id": array([1,1,1,4,5,7,7,7,9,10]),
"home_based_status": array([0,0,0,1,1,0,0,0,1,1]),
"sqft_imputed": array([3,3,1,2,2,1,2,3,2,4]) * 200
},
"household":{"household_id": array([1,2,3,4,5,6,7,8,9,10]),
"building_id": array([2,2,2,3,3,3,6,7,9,10]),
},
}
)
should_be = array([1000, 3, 1, 0, 0, 1, 1200, 0, 1, 1])
tester.test_is_close_for_variable_defined_by_this_module(self, should_be)
if __name__=='__main__':
opus_unittest.main() | psrc/urbansim | urbansim_parcel/building/occupied_spaces.py | occupied_spaces.py | py | 3,952 | python | en | code | 4 | github-code | 36 |
74429979942 | import streamlit as st
from fastai.vision.all import *
import plotly.express as px
import pathlib
from streamlit_option_menu import option_menu
from apps import home, app, contact
temp = pathlib.PosixPath
with st.sidebar:
navbar = option_menu("Main Menu", ["Home", "Project", "Contact"],
icons=['house', 'code-slash', "person-lines-fill"],
menu_icon="cast",
default_index=0,
styles={
"container": {"padding": "0!important", "background-color": "#0AAAB3"},
"icon": {"color": "orange", "font-size": "25px"},
"nav-link": {"font-size": "25px", "text-align": "left", "margin":"0px", "--hover-color": "#C8F3DB"},
"nav-link-selected": {"background-color": "green"},
}
)
if navbar == "Home":
home = home.app_func
home()
if navbar == "Project":
project = app.app_func
project()
if navbar == "Contact":
contact = contact.app_func
contact()
with st.sidebar:
st.sidebar.title("About")
st.title("Farkhod Khojikurbonov")
st.image("image/farkhod.jpg", width=150)
st.sidebar.info(
"""
Github: \nhttps://github.com/farkhod-developer
\nTelegram: \nhttps://t.me/Farkhod_Developerr
\nEmail: \nhttps://farhodand92@gmail.com
©️ 2022 Farkhod Khojikurbonov
"""
)
| farkhod-developer/DL_Image_Classification_Model | manage.py | manage.py | py | 1,434 | python | en | code | 0 | github-code | 36 |
37407105905 | import pandas as pd
from sklearn.tree import DecisionTreeClassifier, plot_tree
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
# read in your data
df = pd.read_excel("CreditRisk.xlsx")
# define your independent and dependent variables
X = df[['Volume', 'Value', 'Age']]
y = df['Status']
# encode your categorical variables
label = LabelEncoder()
X = X.apply(label.fit_transform)
print(X)
# fit your tree model
tree = DecisionTreeClassifier(min_samples_split = 30)
tree = tree.fit(X, y)
# visualize the tree
plt.figure(figsize = (10,5))
plot_tree(tree, filled = True, feature_names = X.columns, class_names = ["Unpaid", "paid"])
plt.show()
| HyperionDevBootcamps/C4_DS_lecture_examples | Lecture code/Machine Learning/Decision Trees/Decision_Trees_cat.py | Decision_Trees_cat.py | py | 702 | python | en | code | 37 | github-code | 36 |
33589796845 | import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
class fist_bookSpider(CrawlSpider):
name = 'fist_book'
allowed_domains = ['books.toscrape.com']
start_urls = ['http://books.toscrape.com/']
rules = (
Rule(LinkExtractor(), callback='parse_item', follow=True),
)
def parse_item(self, response):
all_headings = response.xpath('//h1/text()').getall()
for heading in all_headings:
yield {
'text': heading,
'page': response.url
}
| andycortex/blog-scraper | first_book.py | first_book.py | py | 598 | python | en | code | 0 | github-code | 36 |
18481411775 | def kartotajs():
with open("sakarto.txt", "r", encoding="utf-8") as fails:
dati = fails.readlines()
vardi = []
skaitļi = []
for i in range(len(dati)):
dati[i] = dati[i].rstrip()
if dati[i].isdecimal():
skaitļi.append(dati[i])
else:
vardi.append(dati[i])
vardi.sort()
skaitļi.sort()
print(vardi)
print(skaitļi)
| aleksspauls/aleksspauls | 12_c/12_04.py | 12_04.py | py | 414 | python | en | code | 0 | github-code | 36 |
18977859509 | from dask.distributed import Client, wait
import dask.dataframe as dd
import os
from shapely.geometry import LineString, Polygon, Point, box
from shapely import wkb
import rtree
import xarray as xr
import pandas as pd
import pyarrow as pa
index_url = './../data/roads'
df_url = './../data/osm_roads/roads.parquet'
client = Client('192.168.0.134:8786')
def get_neighbors(index, row):
# Query R-Tree by the bounding box of road 'x' for neighbors except itself
return [int(i) for i in index.intersection(wkb.loads(row.bbox).bounds) if int(i) != row.name]
def find_intersections(is_neighbors, neighbors, row):
intersections = [] # Container for street intersections
nodes = [] # Arrays of tuples for NetworkX MultiDiGraph
a = wkb.loads(row.geometry)
road = a.coords[:]
if is_neighbors:
for entry in neighbors.itertuples():
b = wkb.loads(entry.geometry)
# Check if road with 'fid' osm_id actually intersects road 'x'
if not (entry.bridge or entry.tunnel) and a.intersects(b):
pts = a.intersection(b)
if pts.type == 'MultiPoint':
(nodes.append((pt.coords[:][0], {'junction': [row.name, entry.Index]})) for pt in pts)
(intersections.append(pt) for pt in pts if pt.coords[:][0] != road[0] and pt.coords[:][0] != road[-1] and (pt.coords[:][0] not in intersections))
elif pts.type == 'Point':
nodes.append((pts.coords[:][0], {'junction': [row.name, entry.Index]}))
if pts.coords[:][0] != road[0] and pts.coords[:][0] != road[-1] and (pts.coords[:][0] not in intersections):
intersections.append(pts)
[nodes.append((pt, {'junction': [row.name]})) for pt in [road[0], road[-1]] if not nodes or pt not in tuple(zip(*nodes))[0]]
return nodes, intersections
def compute_edges(intersections, nodes, row):
road = wkb.loads(row.geometry).coords[:]
edges = []
segment_len = 0
# Coordinate keeping track of previous intersection/edge end
previous_node = road[0]
for idx in range(len(road)-1):
# LineString of straight line segment between two consecutive points
segment = LineString(road[idx:idx+2])
# Coordinate updating on every segment or when intersection encountered
segment_start = road[idx]
queue = [] # Point objects that intersect this particular road straight line segment
for pt in list(intersections):
if segment.intersects(pt):
# Put all junctions intersecting this segment into a queue
queue.append(pt)
# Remove the junction from left-over list of street intersections
intersections.remove(pt)
if not queue:
# If no junctions in this road segment, increase length by distance between LineString consecutive points
segment_len += segment.length
else:
for pt in list(queue):
line_lengths = [LineString([segment_start, p.coords[:][0]]).length for p in queue]
shortest_line = min(line_lengths)
next_node_idx = [k for k, l in enumerate(line_lengths) if l == shortest_line][0]
next_node = queue[next_node_idx].coords[:][0]
segment_len += LineString([segment_start, next_node]).length
if segment_len: # Multiple roads crossing at the same junction. Can skip. osm_id's on intersectinos are maintained by nodes array
edges.append((
previous_node,
next_node,
{
'length': segment_len,
'weight': segment_len/row.maxspeed/1000,
}))
if not row.oneway: # If both way street, add identical reverse relation between MultiDiGraph nodes
edges.append((
next_node,
previous_node,
{
'length': segment_len,
'weight': segment_len/row.maxspeed/1000,
}))
segment_len = 0
previous_node = next_node
segment_start = next_node
# Remove the junction from the queue
queue.remove(queue[next_node_idx])
# Get distance to the endpoint of the segment
segment_len += LineString([segment_start, road[idx+1]]).length
edges.append((
previous_node,
road[-1],
{
'length': segment_len,
'weight': segment_len/row.maxspeed/1000,
}))
if not row.oneway: # If both way street, add identical reverse relation between MultiDiGraph nodes
edges.append((
road[-1],
previous_node,
{
'length': segment_len,
'weight': segment_len/row.maxspeed/1000,
}))
return edges
def foo(row, df, index):
neighbors = None
is_neighbors = False
# Assumption that bridges and tunnels do not have intersections
if not (row.bridge or row.tunnel):
# Retreive from R-tree osm_id's of roads whose bounding box overlaps this road's
fids = get_neighbors(index, row)
# Retreive those roads from the dataset by indexing
neighbors = df.loc[fids].compute(scheduler='single-threaded')
is_neighbors = True
# Build up list of Graph nodes and list of intersections
(nodes, intersections) = find_intersections(is_neighbors, neighbors, row)
# Calculate graph edges between junction nodes
edges = compute_edges(intersections, nodes, row)
return nodes, edges
def process(fn, df_url, index_url):
df = dd.read_parquet(df_url, engine='pyarrow')
d = pd.read_parquet(fn)
index = rtree.index.Rtree(index_url)
d[['nodes','edges']] = d.apply(
foo,
args=(df, index),
axis=1,
result_type='expand')
return d
def write(df, fn, schema):
print('Writing processed data to '+fn)
df.to_parquet(fn, engine='pyarrow', schema=schema)
return
schema = pa.schema([
('osm_id', pa.int64()),
('code', pa.int64()),
('fclass', pa.string()),
('road_name', pa.string()),
('ref', pa.string()),
('oneway', pa.bool_()),
('maxspeed', pa.int64()),
('layer', pa.int64()),
('bridge', pa.bool_()),
('tunnel', pa.bool_()),
('geometry', pa.binary()),
('bbox', pa.binary()),
('nodes', pa.list_(
pa.struct([
('0', pa.list_(pa.float64(), 2)),
('1', pa.struct([
('junction', pa.list_(pa.int64())),
('altitude', pa.int64()),
]))
])
)),
('edges', pa.list_(
pa.struct([
('0', pa.list_(pa.float64(), 2)),
('1', pa.list_(pa.float64(), 2)),
('2', pa.struct([
('length', pa.float64()),
('weight', pa.float64()),
('flatness', pa.float64()),
]))
])
))
])
in_path = './../data/osm_roads/roads_partition.parquet/'
out_path = './../data/osm_roads/roads_intersected.parquet/'
futures = []
for fn in os.listdir(in_path)[0:4]:
a = client.submit(process, in_path + fn, df_url, index_url)
b = client.submit(write, a, out_path + fn, schema)
futures.append(b)
wait(futures)
| maximyudayev/YY-MANET-Protocol | local/network_graph_build_no_dem.py | network_graph_build_no_dem.py | py | 7,549 | python | en | code | 0 | github-code | 36 |
23295546136 | import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Flatten, Dense, Dropout, BatchNormalization, Conv2D, MaxPool2D
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing import image
print(tf.__version__)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from tqdm import tqdm
data = pd.read_csv("Movies-Poster_Dataset/train.csv")
img_width = 350
img_height = 350
X = []
for i in tqdm(range(data.shape[0])):
path = "Movies-Poster_Dataset/Images/" + data["Id"][i] + ".jpg"
img = image.load_img(path,target_size=(img_width,img_height,3))
img = image.img_to_array(img)/255.0
X.append(img)
X = np.array(X)
y = data.drop(['Id','Genre'],axis = 1)
y = y.to_numpy()
X_train, X_test, y_train, y_test = train_test_split(X,y,random_state=0, test_size = 0.15)
#Buidling the CNN
model = Sequential()
#First CNN Layer
model.add(Conv2D(16,(3,3),activation='relu',input_shape=X_train[0].shape))
model.add(BatchNormalization())
model.add(MaxPool2D(2,2))
model.add(Dropout(0.3))
#Second CNN Layer
model.add(Conv2D(32,(3,3),activation='relu'))
model.add(BatchNormalization())
model.add(MaxPool2D(2,2))
model.add(Dropout(0.3))
#Third CNN Layer
model.add(Conv2D(64,(3,3),activation='relu'))
model.add(BatchNormalization())
model.add(MaxPool2D(2,2))
model.add(Dropout(0.4))
#Fourth CNN Layer
model.add(Conv2D(128,(3,3),activation='relu'))
model.add(BatchNormalization())
model.add(MaxPool2D(2,2))
model.add(Dropout(0.5))
#First Fully connected layer
model.add(Flatten())
model.add(Dense(128,activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
#Second Fully connected layer
model.add(Flatten())
model.add(Dense(128,activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
#Output Layer
model.add(Dense(25, activation='sigmoid'))
# Include the epoch in the file name (uses `str.format`)
checkpoint_path = "training/cp-{epoch:04d}.ckpt"
# Create a callback that saves the model's weights every 5 epochs
cp_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_path,
verbose=1,
save_weights_only=True,
period=1)
model.compile(optimizer="adam",loss="binary_crossentropy", metrics=['accuracy'])
history = model.fit(X_train,
y_train,
epochs=5,
validation_data=(X_test,y_test),
callbacks=[cp_callback])
model.save('saved_model/workingModel')
#to Load the model:
# new_model = tf.keras.models.load_model('saved_model/my_model') | Ryan-Red/MoviePosters | main.py | main.py | py | 2,665 | python | en | code | 0 | github-code | 36 |
20583493606 | import numpy as np
import cv2
import math
cap = cv2.VideoCapture(0)
while(True):
ret,frame = cap.read()
img = cv2.imread('D:/HW/OpenCV Workshop - distro2/OpenCV Workshop/tek1.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.imshow('frame', gray)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
| Altair115/OpenCV2-Workshop | Opdrachten/Op2.py | Op2.py | py | 363 | python | en | code | 0 | github-code | 36 |
74028994663 | # script to parse raw logging from the four sensor/actuators into a single file
#
# Run after running log_terminal.py
#
# Some changes might have ro be changed on line 60-70 depending on the logging level used.
# settings
sps = 128 # samples per second used in the inner loop
ts = 1/sps
t = 0
files_to_log = {"sensor1": {"filename" : "tmp_sensorlog1.txt", "file": None, "lines": None},
"sensor2": {"filename" : "tmp_sensorlog2.txt", "file": None, "lines": None},
"sensor3": {"filename" : "tmp_sensorlog3.txt", "file": None, "lines": None},
"sensor4": {"filename" : "tmp_sensorlog4.txt", "file": None, "lines": None}}
# TODO: make a switch for line 54-56
file_out_name = "20210709_filtering_off_on_offtake_on_off_raw_only.csv"
sensors_to_log = [201,202,203,204]
sync_found = True # removed sync
sync_count = 0
sensor_count = 0
sensor_data = {}
# open files
max_lines = 1000000000
for d in files_to_log.keys():
files_to_log[d]["file"] = open(files_to_log[d]["filename"], 'r')
files_to_log[d]["lines"] = files_to_log[d]["file"].readlines()
max_lines = min(max_lines, len(files_to_log[d]["lines"]))
file_out = open(file_out_name, 'w')
line = 0
for line in range(max_lines):
sync_found = False
for d in files_to_log.keys():
data = files_to_log[d]["lines"][line].split(',')
# skip extra newlines at end
if len(data) < 4:
continue
else:
if len(data) > 8: #5
print("WARNING > TOO MANY VALUES ON A LINE")
continue
if data[0] == "Sync":
sync_found = True
else:
# output values needed to do identification (APP_SERIAL_NORMAL / CTRL_COMMAND_MANUAL)
sensor_data[int(data[1])] = "%s,%s,%s,%s" % (
data[1], data[4], data[5], data[6])
# output values needed to show control (APP_SERIAL_NORMAL / CTRL_COMMAND_AUTO)
# sensor_data[int(data[0])] = "%s,%s,%s,%s,%s,%s" % (
# data[0], data[1], data[2], data[3], data[4], data[5])
# # output values needed to show filtering and do identification (APP_SERIAL_NORMAL / CTRL_COMMAND_MANUAL)
# sensor_data[int(data[1])] = "%s,%s,%s,%s,%s,%s" % (
# data[1], data[2], data[3], data[4], data[5], data[6])
sensor_count += 1
if sync_found:
continue
# all sensors received
if sensor_count == len(sensors_to_log):
# write timestamp
file_out.write("%s," % int(t * 1000))
t += ts
# write sensor data
for i in range(len(sensors_to_log)):
file_out.write("%s" % sensor_data[sensors_to_log[i]].rstrip())
if i < len(sensors_to_log) - 1:
file_out.write(",")
else:
file_out.write("\n")
sensor_count = 0
for d in files_to_log.keys():
files_to_log[d]["file"].close()
file_out.close()
| basboot/WIS-com | parse_serial_log.py | parse_serial_log.py | py | 3,030 | python | en | code | 0 | github-code | 36 |
73721495784 | #!/usr/bin/env python
try:
import RPi.GPIO as GPIO
except RuntimeError:
print("Error importing RPi.GPIO! sudo?")
raise
from time import sleep
LED_COUNT = 32
SIN_PIN = 22
CLOCK_PIN = 27
LATCH_PIN = 17
def setup():
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup((SIN_PIN, CLOCK_PIN, LATCH_PIN), GPIO.OUT)
def cleanup():
GPIO.cleanup((SIN_PIN, CLOCK_PIN))
def pulse(pin):
GPIO.output(pin, True)
GPIO.output(pin, False)
def write(leds):
for led in leds:
GPIO.output(SIN_PIN, led)
pulse(CLOCK_PIN)
pulse(LATCH_PIN)
if __name__ == "__main__":
setup()
for i in xrange(LED_COUNT + 1):
write([i == j for j in xrange(LED_COUNT)])
time.sleep(0.2)
cleanup()
| yossizap/UV-Bicycle | src/pi/cat4016_uv_leds_test/cat4016_uv_leds_test.py | cat4016_uv_leds_test.py | py | 759 | python | en | code | 0 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.