', ['secgen@ipra.org',]\n\n subject = \"LinkedIn: I support the IPRA Code\"\n\n text_content = \"From: {}, Name: {} {}, Organisation: {}, Job Title: {}, Country: {} \" \\\n .format(email, first_name, last_name, organisation, job_title, country)\n\n html_content = \"From: {},
Name: {} {},
Organisation: {},
\" \\\n \"Job Title: {},
Country: {}\" \\\n .format(email, first_name, last_name, organisation, job_title, country)\n\n msg = EmailMultiAlternatives(subject, html_content, from_email, to, headers={'X-MC-PreserveRecipients': False})\n msg.attach_alternative(html_content, \"text/html\")\n msg.send()\n messages.add_message(request, messages.SUCCESS, 'Thanks for getting in touch')\n return redirect('/')\n else:\n form = LinkedinContactForm()\n\n context = {\"form\":form}\n \n return render(request, 'pages/member-services/code-of-conduct-linkedin.html', context)\n\n####BULK EMAIL 2\nfrom django_sendgrid.message import (SendGridEmailMessage,\n SendGridEmailMultiAlternatives, SendGridBulkEmailMultiAlternatives)\nfrom django_sendgrid.models import EmailMessage\nfrom django.views.decorators.csrf import ensure_csrf_cookie \n\nfrom my_cms.utils import (chunks, get_sendgrid_template,\n create_email_list, save_send_email, get_form_values,\n init_bulkemail_form, create_context, sendgrid_template_choices)\n\n\n@never_cache\n@staff_member_required\ndef bulkemail2(request):\n is_first_step = True\n context = {}\n # get form template choices from sendgrid if not already in session\n if request.session.get('sendgrid_template_choices') is None:\n template_choices = sendgrid_template_choices(request)\n else:\n template_choices = request.session['sendgrid_template_choices']\n\n form = BulkEmailForm(initial={'from_email':request.user}, \n is_first_step=is_first_step, \n template_choices=template_choices,)\n\n if 'testsend' in request.POST:\n form = BulkEmailForm(request.POST, request.FILES, \n is_first_step=is_first_step, \n template_choices=template_choices,)\n if form.is_valid():\n is_first_step = False\n (from_email, cc, bcc, subject, message, template, \n email_group, additional, attachments, message_id) = get_form_values(form)\n extra_data = {\n 'draft': True, 'email_group': email_group,\n 'additional': additional,\n 'template': dict(\n request.session['sendgrid_template_choices']\n )[template]\n }\n 
message_id = uuid.uuid4()\n save_send_email(\n request=request, cc=None, \n bcc=None, subject=subject, \n message=message, from_email=from_email.email,\n to=[from_email.email,], template=template, \n attachments=attachments, extra_data=extra_data, \n message_id=message_id, send=True, save=False,\n )\n confmessage = 'Sending test to '+ from_email.email\n messages.add_message(request, messages.SUCCESS, confmessage)\n template_id = form.cleaned_data['template']\n template = get_sendgrid_template(template_id)\n email_list = create_email_list(additional, email_group, context)\n create_context(context, template, form, email_list)\n init_bulkemail_form(is_first_step, template_choices, context, form, True)\n\n if 'next' in request.POST:\n form = BulkEmailForm(request.POST, request.FILES, \n is_first_step=is_first_step,\n template_choices=template_choices,)\n if form.is_valid():\n is_first_step = False\n template_id = form.cleaned_data['template']\n template = get_sendgrid_template(template_id)\n email_group = form.cleaned_data['email_group']\n additional = form.cleaned_data['additional']\n email_list = create_email_list(additional, email_group, context)\n create_context(context, template, form, email_list)\n init_bulkemail_form(is_first_step, template_choices, context, form, True)\n\n if 'send' in request.POST:\n form = BulkEmailForm(request.POST, request.FILES, \n is_first_step=is_first_step, \n template_choices=template_choices,)\n if form.is_valid():\n is_first_step = True\n (from_email, cc, bcc, subject, message, template, \n email_group, additional, attachments, message_id) = get_form_values(form)\n email_list = create_email_list(additional, email_group, context)\n if len(email_list) >= 1:\n split_list = chunks(email_list, 1000)\n extra_data = {\n 'draft': False, 'email_group': email_group,\n 'additional': additional, \n 'template': dict(\n request.session['sendgrid_template_choices']\n )[template]\n }\n if not message_id:\n message_id = uuid.uuid4()\n for portion in 
split_list:\n save_send_email(\n request=request, cc=cc, \n bcc=bcc, subject=subject, \n message=message, from_email=from_email.email,\n to=portion, template=template, \n attachments=attachments, extra_data=extra_data, \n message_id=message_id, send=True, save=True,\n )\n email_message = EmailMessage.objects.get(message_id=message_id)\n confmessage = 'Sending '+ str(len(email_list)) +' emails to Sendgrid'\n messages.add_message(request, messages.SUCCESS, confmessage)\n del request.session['sendgrid_template_choices']\n return HttpResponseRedirect(\"{}/django_sendgrid/emailmessage/{}/change\".format(settings.ADMIN_URL,email_message.id))\n \n else:\n is_first_step = True\n init_bulkemail_form(is_first_step, template_choices, context, form, False)\n confmessage = 'You have not set any recipients'\n messages.add_message(request, messages.ERROR, confmessage)\n\n if 'save' in request.POST:\n form = BulkEmailForm(request.POST, request.FILES,\n is_first_step=is_first_step, \n template_choices=template_choices,)\n if form.is_valid():\n is_first_step = True\n (from_email, cc, bcc, subject, message, template, \n email_group, additional, attachments, message_id) = get_form_values(form)\n email_list = create_email_list(additional, email_group, context)\n split_list = chunks(email_list, 1000)\n extra_data = {\n 'draft': True, 'email_group': email_group,\n 'additional': additional,\n 'template': dict(\n request.session['sendgrid_template_choices']\n )[template]\n }\n if not message_id:\n message_id = uuid.uuid4()\n\n save_send_email(\n request=request, cc=cc, \n bcc=bcc, subject=subject, \n message=message, from_email=from_email,\n to=[], template=template, \n attachments=attachments, extra_data=extra_data, \n message_id=message_id, send=False, save=True,\n )\n ##########\n confmessage = 'Saved as a draft'\n messages.add_message(request, messages.SUCCESS, confmessage)\n del request.session['sendgrid_template_choices']\n email_message = 
EmailMessage.objects.get(message_id=message_id)\n return HttpResponseRedirect(\"{}/django_sendgrid/emailmessage/{}/change\".format(settings.ADMIN_URL,email_message.id))\n # else:\n # print(form)\n # print('SAVING FORM INVALID')\n\n\n if request.POST.get(\"back\") is not None:\n is_first_step = True\n form = BulkEmailForm(request.POST, request.FILES, is_first_step=is_first_step, template_choices=template_choices,)\n #form.fields['template'].choices = template_choices\n if form.is_valid(): \n context['is_first_step'] = is_first_step\n init_bulkemail_form(is_first_step, template_choices, context, form, False)\n return render(request, 'admin/bulkemail.html', context)\n\n else:\n \n # look in session for user ids from user admin action + email user button on change_form\n if request.session.get('users_to_email') is not None:\n users_to_email = json.loads(request.session.get('users_to_email'))\n emails = User.objects.filter(id__in=users_to_email).values_list('email', flat=True)\n emails = ','.join(map(str, emails))\n context['additional'] = emails\n del request.session['users_to_email']\n\n if request.session.get('attendees_to_email') is not None:\n attendees_to_email = json.loads(request.session.get('attendees_to_email'))\n emails = EventAttendee.objects.filter(id__in=attendees_to_email).values_list('email', flat=True)\n emails = ','.join(map(str, emails))\n context['additional'] = emails\n del request.session['attendees_to_email']\n\n if request.session.get('addresses_to_email') is not None:\n addresses_to_email = json.loads(request.session.get('addresses_to_email'))\n emails = EmailAddress.objects.filter(id__in=addresses_to_email).values_list('email', flat=True)\n emails = ','.join(map(str, emails))\n context['additional'] = emails\n del request.session['addresses_to_email']\n\n # init from saved email message\n if request.session.get('send_email') is not None:\n email_message_id = json.loads(request.session.get('send_email'))\n\n if 
EmailMessage.objects.filter(id=email_message_id[0]).exists():\n \n email_message = EmailMessage.objects.get(id=email_message_id[0])\n # if hasattr(email_message, 'cc'):\n # cc = email_message.cc.data\n # else:\n # cc = None\n # if hasattr(email_message, 'bcc'):\n # bcc = email_message.bcc.data\n # else:\n # bcc = None\n if email_message.draft:\n message_id = email_message.message_id\n else:\n message_id = None\n\n form = BulkEmailForm(initial={\n # 'cc': cc,\n # 'bcc': bcc,\n 'subject': email_message.subject.data,\n 'message': email_message.body.data,\n 'from_email': request.user,\n 'template': email_message.template,\n 'email_group': email_message.emailgroup.all(),\n 'message_id': message_id,\n }, is_first_step=is_first_step,\n template_choices=template_choices,)\n form.fields['template'].choices = template_choices\n context['additional'] = email_message.additional\n \n del request.session['send_email']\n\n context['form'] = form\n context['is_first_step'] = is_first_step\n return render(request, 'admin/bulkemail.html', context)\n\n\n##########################\n# override cartride.shop.views.checkout_steps\n##########################\nfrom django.core.exceptions import ValidationError\nfrom django.forms import formset_factory\n\n@never_cache\ndef checkout_steps(request, form_class=MyCallbackUUIDOrderForm, extra_context=None):\n \"\"\"\n Display the order form and handle processing of each step.\n \"\"\"\n # Do the authentication check here rather than using standard\n # login_required decorator. 
This means we can check for a custom\n # LOGIN_URL and fall back to our own login view.\n # ADDED: checks for membership product_type to redirect past join-ipra page\n authenticated = request.user.is_authenticated()\n if not authenticated:\n red = False\n for item in request.cart:\n if item.product_type == 'ME':\n red = True\n ##request.session['purchase_signup'] = True\n \n if red:\n url = \"%s?next=%s\" % ('/accounts/signup/', reverse(\"shop_checkout\"))\n else:\n url = \"%s?next=%s\" % (settings.LOGIN_URL, reverse(\"shop_checkout\"))\n\n return redirect(url)\n\n# try:\n# settings.SHOP_CHECKOUT_FORM_CLASS\n# except AttributeError:\n# pass\n# else:\n# from warnings import warn\n# warn(\"The SHOP_CHECKOUT_FORM_CLASS setting is deprecated - please \"\n# \"define your own urlpattern for the checkout_steps view, \"\n# \"passing in your own form_class argument.\")\n# form_class = import_dotted_path(settings.SHOP_CHECKOUT_FORM_CLASS)\n\n initial = checkout.initial_order_data(request, form_class)\n step = int(request.POST.get(\"step\", None) or\n initial.get(\"step\", None) or\n checkout.CHECKOUT_STEP_FIRST)\n form = form_class(request, step, initial=initial)\n event_true, events = request.cart.cart_events()\n \n EventOrderFormset = formset_factory(\n EventOrderForm, min_num=1, validate_min=True, extra=0,\n )\n formset = EventOrderFormset(\n initial = events,\n form_kwargs={\n 'show_formset': event_true,\n 'step': step,\n 'request': request,\n }\n )\n data = request.POST\n checkout_errors = []\n\n if request.POST.get(\"back\") is not None:\n # Back button in the form was pressed - load the order form\n # for the previous step and maintain the field values entered.\n step -= 1\n form = form_class(request, step, initial=initial, data=data)\n formset = EventOrderFormset(request.POST, request.FILES, initial=events, form_kwargs={\n 'show_formset': event_true,\n 'step': step,\n 'request': request,\n })\n\n elif request.POST.get(\"stripeToken\", \"\") and 
request.cart.has_items():\n stripe_form = StripeSubmissionForm(request, step, initial=initial, data=data)\n form = form_class(request, step, initial=initial, data=data)\n formset = EventOrderFormset(\n request.POST, request.FILES,\n initial=events, form_kwargs={\n 'show_formset': event_true,\n 'step': step,\n 'request': request,\n })\n if stripe_form.is_valid() and form.is_valid() and formset.is_valid():\n\n request.session[\"order\"] = dict(form.cleaned_data)\n events_to_session(event_true, formset, request)\n sensitive_card_fields = (\"card_number\", \"card_expiry_month\",\n \"card_expiry_year\", \"card_ccv\")\n for field in sensitive_card_fields:\n if field in request.session[\"order\"]:\n del request.session[\"order\"][field]\n\n tid = stripe_process(request, form, stripe_form)\n if tid:\n\n order = form.save(commit=False)\n order.setup(request)\n user, order = update_order_details(order, request.cart, tid)\n has_tickets, events = request.cart.cart_events()\n events_details = cart_ticket_management(\n request.session, has_tickets, events, order\n )\n order.paid =True\n order.payment_type = 'ST'\n order.payment_time = order.time\n try:\n order_handler(request.cart, user, order)\n except Exception as e:\n raise(e)\n order.complete(request)\n messages.add_message(request, messages.SUCCESS, (\"Payment Accepted\"))\n membership_details(order, user)\n send_confirmation_email(order, user, events_details)\n subject = settings.SITE_TITLE + ' STRIPE Payment Recieved'\n send_admin_conf(subject, user, order, tid)\n response = redirect(\"shop_complete\")\n return response\n else:\n form = form_class(request, step, initial=initial)\n formset = EventOrderFormset(\n request.POST, request.FILES,\n initial=events, form_kwargs={\n 'show_formset': event_true,\n 'step': step,\n 'request': request,\n })\n else:\n step = 1\n checkout_errors.append(form.errors)\n checkout_errors.append(formset.errors)\n form = form_class(request, step, initial=initial, data=data,\n 
errors=checkout_errors)\n formset = EventOrderFormset(\n request.POST, request.FILES,\n initial=events, form_kwargs={\n 'show_formset': event_true,\n 'step': step,\n 'request': request,\n })\n\n elif request.method == \"POST\" and request.cart.has_items():\n form = form_class(request, step, initial=initial, data=data)\n formset = EventOrderFormset(\n request.POST, request.FILES,\n initial=events, form_kwargs={\n 'show_formset': event_true,\n 'step': step,\n 'request': request,\n })\n if form.is_valid() and formset.is_valid():\n # Copy the current form fields to the session so that\n # they're maintained if the customer leaves the checkout\n # process, but remove sensitive fields from the session\n # such as the credit card fields so that they're never\n # stored anywhere.\n request.session[\"order\"] = dict(form.cleaned_data)\n # add formset to session\n events_to_session(event_true, formset, request)\n # remove sensitives\n sensitive_card_fields = (\"card_number\", \"card_expiry_month\",\n \"card_expiry_year\", \"card_ccv\")\n for field in sensitive_card_fields:\n if field in request.session[\"order\"]:\n del request.session[\"order\"][field]\n\n # FIRST CHECKOUT STEP - handle discount code. This needs to\n # be set before shipping, to allow for free shipping to be\n # first set by a discount code.\n if step == checkout.CHECKOUT_STEP_FIRST:\n form.set_discount()\n # ALL STEPS - run billing/tax handlers. 
These are run on\n # all steps, since all fields (such as address fields) are\n # posted on each step, even as hidden inputs when not\n # visible in the current step.\n try:\n billship_handler(request, form)\n #tax_handler(request, form)\n except checkout.CheckoutError as e:\n checkout_errors.append(e)\n\n # FINAL CHECKOUT STEP - run payment handler and process order.\n if step == checkout.CHECKOUT_STEP_LAST and not checkout_errors:\n uuid = form.cleaned_data['callback_uuid']\n order = form.save(commit=False)\n order.setup(request)\n user, order = update_order_details(order, request.cart, uuid)\n has_tickets, events = request.cart.cart_events()\n events_details = cart_ticket_management(\n request.session, has_tickets, events, order\n )\n order.payment_type = 'BA'\n order.complete(request)\n send_confirmation_email(order, user, events_details)\n # send admin\n subject = settings.SITE_TITLE + ' Payment Pending'\n send_admin_conf(subject, user, order, order.callback_uuid)\n response = redirect(\"shop_pending\")\n messages.add_message(request, messages.SUCCESS, (\"Payment Pending\"))\n return response\n\n # If any checkout errors, assign them to a new form and\n # re-run is_valid. 
If valid, then set form to the next step.\n form = form_class(request, step, initial=initial, data=data,\n errors=checkout_errors) \n \n formset = EventOrderFormset(\n request.POST, request.FILES,\n initial=events, form_kwargs={\n 'show_formset': event_true,\n 'step': step,\n 'request': request,\n }) \n if form.is_valid() and formset.is_valid():\n step += 1\n form = form_class(request, step, initial=initial)\n formset = EventOrderFormset(\n request.POST, request.FILES,\n initial=events, form_kwargs={\n 'show_formset': event_true,\n 'step': step,\n 'request': request,\n }) \n\n else:\n step = 1\n checkout_errors.append(form.errors)\n checkout_errors.append(formset.errors)\n # Update the step so that we don't rely on POST data to take us back to\n # the same point in the checkout process.\n try:\n request.session[\"order\"][\"step\"] = step\n request.session.modified = True\n except KeyError:\n pass\n\n step_vars = checkout.CHECKOUT_STEPS[step - 1]\n template = \"shop/%s.html\" % step_vars[\"template\"]\n\n # fails on init\n try:\n name_for_invoice = str(initial['billing_detail_first_name'] + ' ' + initial['billing_detail_last_name'])\n except:\n name_for_invoice = ''\n context = {\"CHECKOUT_STEP_FIRST\": step == checkout.CHECKOUT_STEP_FIRST,\n \"CHECKOUT_STEP_LAST\": step == checkout.CHECKOUT_STEP_LAST,\n \"CHECKOUT_STEP_PAYMENT\": (settings.SHOP_PAYMENT_STEP_ENABLED and\n step == checkout.CHECKOUT_STEP_PAYMENT),\n \"step_title\": step_vars[\"title\"], \"step_url\": step_vars[\"url\"],\n \"steps\": checkout.CHECKOUT_STEPS, \"step\": step, \"form\": form, \"formset\": formset,\n \"order_full_name\": name_for_invoice,\n \"event\": events, \"show_formset\": event_true,\n }\n context.update(extra_context or {})\n return TemplateResponse(request, template, context)\n\n@require_POST\n@csrf_exempt\ndef stripe_upstream(request):\n #stripe.api_key = settings.STRIPE_API_KEY\n event_json = json.loads(request.body.decode('utf-8'))\n s_key = 
event_json['order']['metadata']['s_key']\n SessionStore = import_module(settings.SESSION_ENGINE) \\\n .SessionStore\n session = SessionStore(s_key)\n if \"shipping_total\" in session:\n shipping_total = int((float(session[\"shipping_total\"]) * 100))\n shipping_title = 'Shipping'\n else:\n shipping_total = 0\n shipping_title = 'Shipping n/a'\n return_data = {\n \"order_update\": {\n \"order_id\": event_json['order']['id'],\n \"shipping_methods\": [\n {\n \"id\": shipping_title,\n \"description\": shipping_title,\n \"amount\": shipping_total,\n \"currency\": settings.STRIPE_CURRENCY,\n }\n ],\n }\n }\n return HttpResponse(json.dumps(return_data), status=200)\n\n\n\n\n###########################\n# ACCOUNTS\n#########################\n\ndef login(request, template=\"accounts/account_login.html\",\n form_class=LoginForm, extra_context=None):\n \"\"\"\n Login form.\n \"\"\"\n form = form_class(request.POST or None)\n if request.method == \"POST\" and form.is_valid():\n if request.session.get('ow_returnurl', None):\n authenticated_user = form.save()\n #info(request, _(\"Successfully logged in\"))\n auth_login(request, authenticated_user)\n authenticated_user.profile.gwa = True\n authenticated_user.profile.save()\n # profile_info = {'f2e55e0d-a7a7-4748-b42c-ef3d45a78e96': new_user.profile.organisation,\n # 'f139e8f2-3755-4804-81b7-6c363022bc08': new_user.profile.job_title}\n token = jwt.encode({'TimestampUtc': str(timezone.now()),\n 'Email': authenticated_user.email,\n 'FirstName': authenticated_user.first_name,\n 'LastName': authenticated_user.last_name,\n 'UserNameExists': True,\n 'UserValidatedSuccessfully': True,\n 'UserIsMember': True,\n \n #'ProfileTextFieldData': profile_info\n }, settings.OW_KEY, algorithm='HS256')\n \n return_url = request.session['ow_returnurl']\n del request.session['ow_returnurl']\n return redirect(\"{0}&token={1}\".format(return_url, token.decode(encoding=\"utf-8\")))\n\n authenticated_user = form.save()\n info(request, _(\"Successfully 
logged in\"))\n auth_login(request, authenticated_user)\n return login_redirect(request)\n\n # check if 'purchase_signup' is in session, set by checkout_steps \n # determines flow if a membership product is \n # being purchased to skip join-ipra page\n if 'purchase_signup' in request.session:\n purchase_signup = True\n else:\n purchase_signup = False\n\n context = {\"form\": form, \"title\": _(\"Log in\"),\n \"next\": (next_url(request) or \"/\"),\n \"purchase_signup\": purchase_signup}\n context.update(extra_context or {})\n return TemplateResponse(request, template, context)\n\n# ### over ridden mezzanine accounts profile view to use paid member decorator\n\n#@login_required\n#@paid_member(login_url='/member-services/join-ipra/', message=\"You are currently not a paid member.\")\ndef profile_update(request, template=\"accounts/account_profile_update.html\",\n extra_context=None):\n \"\"\"\n Profile update form.\n \"\"\"\n \n profile_form = get_profile_form()\n form = profile_form(request.POST or None, request.FILES or None,\n instance=request.user)\n if request.method == \"POST\" and form.is_valid():\n user = form.save()\n info(request, _(\"Profile updated\"))\n try:\n return redirect(\"profile\", username=user.username)\n except NoReverseMatch:\n return redirect(\"profile_update\")\n\n membertype = request.user.profile.membertype\n if membertype == 'SM' or membertype == 'PM':\n is_paying_member = True\n try:\n expiring_in = request.user.profile.expires - timezone.now()\n if expiring_in.days > 30:\n is_expiring = False\n else:\n is_expiring = True\n except:\n is_expiring = False\n else:\n is_paying_member = False\n is_expiring = False\n if membertype == 'SU' or membertype == 'CU' or membertype == 'EM' or membertype == '':\n can_upgrade = True\n else:\n can_upgrade = False\n\n\n #downloads = request.user.profile.purchased_products.all()\n context = {\"form\": form, \"title\": _(\"Update Profile\"),\n #\"downloads\": downloads,\n \"is_paying_member\": 
is_paying_member,\n \"is_expiring\": is_expiring,\n \"can_upgrade\": can_upgrade}\n context.update(extra_context or {})\n return TemplateResponse(request, template, context)\n\n#@login_required\n#@paid_member(login_url='/member-services/join-ipra/', message=\"You are currently not a paid member.\")\ndef profile(request, username, template=\"accounts/account_profile.html\",\n extra_context=None):\n \"\"\"\n Display a profile.\n \"\"\"\n lookup = {\"username__iexact\": username, \"is_active\": True}\n try:\n prof = User.objects.get(**lookup)\n except User.DoesNotExist:\n raise Http404()\n if prof.profile.membertype != 'PM':\n\n\n if request.user.is_authenticated():\n\n user_membertype = request.user.profile.membertype\n if user_membertype == 'PM' or user_membertype == 'SM' \\\n or user_membertype == 'ME' or user_membertype == 'AM' \\\n or request.user.is_staff or user_membertype == 'AD':\n\n \n context = {\"profile_user\": get_object_or_404(User, **lookup),\n \"example_page\": False}\n context.update(extra_context or {})\n return TemplateResponse(request, template, context)\n elif str(request.user.username) == str(get_object_or_404(User, **lookup)):\n context = {\"profile_user\": get_object_or_404(User, **lookup),\n \"example_page\": True}\n context.update(extra_context or {})\n return TemplateResponse(request, template, context)\n else:\n raise Http404()\n else:\n raise Http404()\n else:\n context = {\"profile_user\": get_object_or_404(User, **lookup),\n \"example_page\": False}\n context.update(extra_context or {})\n return TemplateResponse(request, template, context)\n\n\ndef logout(request):\n \"\"\"\n Log the user out. Redirect to home page\n \"\"\"\n auth_logout(request)\n info(request, _(\"Successfully logged out\"))\n #return redirect(next_url(request) or get_script_prefix())\n return redirect('/')\n\n\ndef signup(request, template=\"accounts/account_signup.html\",\n extra_context=None):\n \"\"\"\n Signup form. 
Overide to redirect back to OW\n \"\"\"\n profile_form = get_profile_form()\n form = profile_form(request.POST or None, request.FILES or None)\n if request.method == \"POST\" and form.is_valid():\n new_user = form.save()\n # unused as no ACCOUNTS_APPROVAL_REQUIRED etc\n # if not new_user.is_active:\n # if settings.ACCOUNTS_APPROVAL_REQUIRED:\n # send_approve_mail(request, new_user)\n # info(request, _(\"Thanks for signing up! You'll receive \"\n # \"an email when your account is activated.\"))\n # else:\n # send_verification_mail(request, new_user, \"signup_verify\")\n # info(request, _(\"A verification email has been sent with \"\n # \"a link for activating your account.\"))\n # return redirect(next_url(request) or \"/\")\n # else:\n if request.session.get('ow_returnurl', None):\n auth_login(request, new_user)\n new_user.profile.gwa = True\n new_user.profile.save()\n profile_info = {'f2e55e0d-a7a7-4748-b42c-ef3d45a78e96': new_user.profile.organisation,\n 'f139e8f2-3755-4804-81b7-6c363022bc08': new_user.profile.job_title}\n token = jwt.encode({'TimestampUtc': str(timezone.now()),\n 'Email': new_user.email,\n 'FirstName': new_user.first_name,\n 'LastName': new_user.last_name,\n 'UserNameExists': True,\n 'UserValidatedSuccessfully': True,\n 'UserIsMember': True,\n #'UserData': 'Entrant',\n 'ProfileTextFieldData': profile_info}, settings.OW_KEY, algorithm='HS256')\n \n return_url = request.session['ow_returnurl']\n del request.session['ow_returnurl']\n return redirect(\"{0}&token={1}\".format(return_url, token.decode(encoding=\"utf-8\")))\n\n info(request, _(\"Successfully logged in\"))\n auth_login(request, new_user)\n return login_redirect(request)\n context = {\"form\": form, \"title\": _(\"Sign up\")}\n context.update(extra_context or {})\n return TemplateResponse(request, template, context)\n\ndef open_water(request):\n if request.user.is_authenticated():\n token = jwt.encode({'TimestampUtc': str(timezone.now()),\n 'Email': request.user.email,\n 'FirstName': 
request.user.first_name,\n 'LastName': request.user.last_name,\n 'UserNameExists': True,\n 'UserValidatedSuccessfully': True,\n 'UserIsMember': True,\n #'UserData': 'Entrant',\n }, settings.OW_KEY, algorithm='HS256')\n return_url = request.GET.get('returnUrl')\n return redirect(\"{0}&token={1}\".format(return_url, token.decode(encoding=\"utf-8\")))\n else:\n request.session['ow_returnurl'] = request.GET.get('returnUrl')\n return redirect('login')\n\n\n\ndef send_verification_mail(request, user, verification_type):\n \"\"\"\n Sends an email with a verification link to users when\n ``ACCOUNTS_VERIFICATION_REQUIRED`` is ```True`` and they're signing\n up, or when they reset a lost password.\n The ``verification_type`` arg is both the name of the urlpattern for\n the verification link, as well as the names of the email templates\n to use.\n \"\"\"\n verify_url = reverse(verification_type, kwargs={\n \"uidb36\": int_to_base36(user.id),\n \"token\": default_token_generator.make_token(user),\n }) + \"?next=\" + (next_url(request) or \"/\")\n context = {\n \"request\": request,\n \"user\": user,\n \"verify_url\": verify_url,\n }\n subject_template_name = \"email/%s_subject.txt\" % verification_type\n subject = subject_template(subject_template_name, context)\n\n send_mail_template(subject, \"email/%s\" % verification_type,\n settings.DEFAULT_FROM_EMAIL, user.email,\n context=context,\n addr_bcc=settings.SHOP_ORDER_EMAIL_BCC or None)\n\n\n\ndef password_reset(request, template=\"accounts/account_password_reset.html\",\n form_class=PasswordResetForm, extra_context=None):\n form = form_class(request.POST or None)\n if request.method == \"POST\" and form.is_valid():\n user = form.save()\n send_verification_mail(request, user, \"password_reset_verify\")\n #send_admin_error('user requested pw reset',user.email,'')\n info(request, _(\"A verification email has been sent with \"\n \"a link for resetting your password.\"))\n context = {\"form\": form, \"title\": _(\"Password 
Reset\")}\n context.update(extra_context or {})\n return TemplateResponse(request, template, context)\n\n@never_cache\ndef complete(request, template=\"shop/complete.html\", extra_context=None):\n \"\"\"\n override cartridge.shop.views.complete \n \"\"\"\n\n context = {}\n return TemplateResponse(request, template, context)\n\n@never_cache\ndef pending(request, template=\"shop/pending.html\", extra_context=None):\n try:\n order = Order.objects.from_request(request)\n except Order.DoesNotExist:\n raise Http404\n items = order.items.all()\n # Assign product names to each of the items since they're not\n # stored.\n skus = [item.sku for item in items]\n variations = ProductVariation.objects.filter(sku__in=skus)\n names = {}\n for variation in variations.select_related(\"product\"):\n names[variation.sku] = variation.product.title\n for i, item in enumerate(items):\n setattr(items[i], \"name\", names[item.sku])\n context = {\"order\": order, \"items\": items, \"has_pdf\": True,\n \"steps\": checkout.CHECKOUT_STEPS}\n context.update(extra_context or {})\n return TemplateResponse(request, template, context)\n\n\n\n\ndef members_ajax(request):\n if request.method == 'POST' and request.is_ajax():\n search = request.POST.get('search', None)\n if search == '':\n contact_list = User.objects.all()\n else:\n contact_list = User.objects.all()\n contact_list = contact_list.filter(Q(first_name__icontains=search) | Q(last_name__icontains=search) |\n Q(profile__organisation__icontains=search) | Q(profile__website__icontains=search) | \n Q(profile__bio__icontains=search)|Q(profile__city__icontains=search) |\n Q(profile__nationality__icontains=search)|Q(profile__county__icontains=search)\n )\n\n contact_list = contact_list.filter(Q(profile__membertype='PM') |\n Q(profile__membertype='SM') |\n Q(profile__membertype='ME') |\n Q(profile__membertype='AM') |\n Q(profile__membertype='AD')).exclude(id=1)\n contact_list = contact_list.values('profile__organisation','profile__photo',\n 
'profile__nationality','profile__website',\n 'profile__id','username','first_name','last_name',\n 'profile__country')\n contact_list = tuple(contact_list.order_by('last_name'))\n paginator = Paginator(contact_list, 100) # Show 100 members per page\n page = int(request.POST.get('page_count', None))\n\n try:\n contacts = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n contacts = paginator.page(1)\n except EmptyPage :\n # If page is out of range (e.g. 9999), deliver last page of results.\n contacts = paginator.page(paginator.num_pages)\n count = contacts.paginator.count\n num_pages = contacts.paginator.num_pages\n has_next = contacts.has_next()\n has_previous = contacts.has_previous()\n\n return JsonResponse({'contacts':list(contacts),\n 'count':count,\n 'num_pages':num_pages,\n 'page':page,\n 'has_next':has_next,\n 'has_previous':has_previous}, safe=False)\n\n else:\n raise Http404\n\ndef pros_ajax(request):\n if request.method == 'POST' and request.is_ajax():\n \n search = request.POST.get('search', None)\n if search == '':\n contact_list = User.objects.all()\n else:\n contact_list = User.objects.all()\n contact_list = contact_list.filter(Q(first_name__icontains=search) | Q(last_name__icontains=search) |\n Q(profile__organisation__icontains=search) | Q(profile__website__icontains=search) | \n Q(profile__bio__icontains=search)|Q(profile__city__icontains=search) |\n Q(profile__nationality__icontains=search)|Q(profile__county__icontains=search)\n )\n\n contact_list = contact_list.filter(Q(profile__membertype='PM'))\n contact_list = contact_list.values('profile__organisation','profile__photo',\n 'profile__nationality','profile__website',\n 'profile__id','username','first_name','last_name',\n 'profile__country')\n contact_list = tuple(contact_list.order_by('last_name'))\n #contact_list = contact_list.values('organisation','photo','emailwork')\n paginator = Paginator(contact_list, 100) # Show 25 contacts per page\n\n 
page = int(request.POST.get('page_count', None))\n\n try:\n contacts = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n contacts = paginator.page(1)\n except EmptyPage :\n # If page is out of range (e.g. 9999), deliver last page of results.\n contacts = paginator.page(paginator.num_pages)\n count = contacts.paginator.count\n num_pages = contacts.paginator.num_pages\n has_next = contacts.has_next()\n has_previous = contacts.has_previous()\n\n return JsonResponse({'contacts':list(contacts),\n 'count':count,\n 'num_pages':num_pages,\n 'page':page,\n 'has_next':has_next,\n 'has_previous':has_previous}, safe=False)\n\n else:\n raise Http404\n \n#####################################\n# OVERIDE TO ALLOW IMAGES IN PDF VERSION OF INVOICES\n#####################################\n\nimport logging\nimport datetime\n\nfrom mezzanine.utils.sites import current_site_id\nfrom django.http import HttpResponse, HttpRequest\nfrom django.template import Context\nfrom django.template.loader import get_template\n\nfrom io import StringIO\nimport pywkhtmltopdf as pdf\ndef invoice(request, order_id, template=\"shop/order_invoice.html\",\n template_pdf=\"shop/order_invoice_pdf.html\", extra_context=None):\n \"\"\"\n Display a plain text invoice for the given order. 
The order must\n belong to the user which is checked via session or ID if\n authenticated, or if the current user is staff.\n \"\"\"\n try:\n order = Order.objects.get_for_user(order_id, request)\n except Order.DoesNotExist:\n raise Http404\n context = {\"order\": order}\n context.update(order.details_as_dict())\n context.update(extra_context or {})\n context = RequestContext(request, context)\n if request.GET.get(\"format\") == \"pdf\":\n\n html = get_template(template_pdf).render(context)\n\n c = pdf.HTMLToPDFConverter(path_to_bin='/usr/local/bin/wkhtmltopdf')\n output = c.convert(html)\n\n response = HttpResponse(output,content_type=\"application/pdf\")\n name = slugify(\"%s-invoice-%s\" % (settings.SITE_TITLE, order.id))\n response[\"Content-Disposition\"] = \"attachment; filename=%s.pdf\" % name\n\n # with open('test.pdf', 'wb') as fp:\n # fp.write(output)\n\n return response\n return TemplateResponse(request, template, context)\n\ndef certificate(request, user_id, template=\"accounts/certificate.html\",\n template_pdf=\"accounts/certificate_pdf.html\", extra_context=None):\n \"\"\"\n \n \"\"\"\n user = request.user\n context = {'user': user}\n context = RequestContext(request, context)\n if request.GET.get(\"format\") == \"pdf\":\n html = get_template(template_pdf).render(context)\n c = pdf.HTMLToPDFConverter(path_to_bin='/usr/local/bin/wkhtmltopdf')\n output = c.convert(html, orientation='landscape')\n response = HttpResponse(output,content_type=\"application/pdf\")\n name = slugify(\"%s-%s\" % (settings.SITE_TITLE, 'Certificate'))\n response[\"Content-Disposition\"] = \"attachment; filename=%s.pdf\" % name\n return response\n return TemplateResponse(request, template, context)\n\n\ndef example_ticket(request, event_id, template=\"shop/order_ticket.html\",\n template_pdf=\"accounts/certificate_pdf.html\", extra_context=None):\n \"\"\"\n Display a plain text invoice for the given order. 
The order must\n belong to the user which is checked via session or ID if\n authenticated, or if the current user is staff.\n \"\"\"\n # try:\n # order = Order.objects.get_for_user(order_id, request)\n # except Order.DoesNotExist:\n # raise Http404\n # context = {\"order\": order}\n # context.update(order.details_as_dict())\n # context.update(extra_context or {})\n event = EventPage.objects.get(id=event_id)\n\n site = Site.objects.get_current()\n site_settings = Site_setting.objects.get(site_id=current_site_id())\n attendee_info = {\n \"name\": 'Example Name',\n \"country\": 'XX'\n }\n reference = \"{}-{}-{}\".format(\n event.title.upper().replace(\" \", \"\"), \n 'XX',\n 'XX'\n )\n\n signature = mark_safe(site_settings.document_signature)\n company_logo = site_settings.main_logo\n site_static = \"https://{}{}\".format(site.domain,settings.MEDIA_URL)\n context = {\n \"site_static\": site_static,\n \"event\": event,\n \"attendee\": attendee_info,\n #\"order\": order,\n \"reference\":reference,\n \"signature\": signature,\n \"company_logo\": company_logo,\n }\n #context.update(order.details_as_dict())\n #context.update(extra_context or {})\n return TemplateResponse(request, template, context)\n\n\n \n\n#################################\n#### create admin order/invoice\n#################################\n\n#override default \"resend_order_email\" from cartridge\n\n@login_required\ndef invoice_resend_email(request, order_id):\n \"\"\"\n Re-sends the order complete email for the given order and redirects\n to the previous page.\n \"\"\"\n try:\n order = Order.objects.get_for_user(order_id, request)\n except Order.DoesNotExist:\n raise Http404\n if request.method == \"POST\":\n if EventAttendee.objects.filter(order=order).exists():\n event_details = []\n attendees = EventAttendee.objects.filter(order=order)\n site_settings = Site_setting.objects.get(site_id=current_site_id())\n for attendee in attendees:\n details = {\n \"event\": attendee.event,\n \"attendees\": [{\n 
'name': attendee.name,\n 'country': attendee.country,\n 'email': attendee.email,\n 'id': attendee.id\n }],\n \"site_settings\": site_settings,\n }\n event_details.append(details)\n else:\n event_details = False\n user = User.objects.get(id=order.user_id)\n send_confirmation_email(order, user, event_details) \n msg = _(\"The order email for order ID %s has been re-sent\" % order_id)\n info(request, msg)\n # Determine the URL to return the user to.\n redirect_to = next_url(request)\n if redirect_to is None:\n if request.user.is_staff:\n redirect_to = reverse(\"admin:shop_order_change\", args=[order_id])\n else:\n redirect_to = reverse(\"shop_order_history\")\n return redirect(redirect_to)\n\n\n\n\n###################################################\n# GWA WINNERS\n####################################################\n\ndef gwa_winners(request):\n \"\"\"\n returns all gwa winner entries.\n if user is a member (standard, pro, emeritus), return \"is_member\" True, \n if is non member but they have downloads associated with their profile,\n return owns_downloads True\n \"\"\"\n winners = GwaWinner.objects.all()\n try:\n\n if request.user.profile.membertype == 'SM' or \\\n request.user.profile.membertype == 'PM' or \\\n request.user.profile.membertype == 'ME' or \\\n request.user.profile.membertype == 'AM' or \\\n request.user.is_staff:\n is_member = True\n owned_dls = False\n else:\n is_member = False\n if request.user.profile.purchased_downloads.all().exists():\n owned_dls = request.user.profile. 
\\\n purchased_downloads.all().values_list('product', flat=True)\n else:\n owned_dls = False\n\n except:\n is_member= False\n owned_dls = False\n\n\n c = {'winners': winners, 'is_member': is_member, 'owned_dls': owned_dls}\n return render(request, 'pages/golden-world-awards/winners.html', c)\n\ndef gwa_winners_updated(request):\n \"\"\"\n returns all gwa winner entries.\n if user is a member (standard, pro, emeritus), return \"is_member\" True, \n if is non member but they have downloads associated with their profile,\n return owns_downloads True\n \"\"\"\n winners = GwaWinner.objects.all()\n try:\n\n if request.user.profile.membertype == 'SM' or \\\n request.user.profile.membertype == 'PM' or \\\n request.user.profile.membertype == 'ME' or \\\n request.user.is_staff:\n is_member = True\n owned_dls = False\n else:\n is_member = False\n if request.user.profile.purchased_downloads.all().exists():\n owned_dls = request.user.profile. \\\n purchased_downloads.all().values_list('product', flat=True)\n else:\n owned_dls = False\n\n except:\n is_member= False\n owned_dls = False\n\n\n c = {'winners': winners, 'is_member': is_member, 'owned_dls': owned_dls}\n return render(request, 'pages/golden-world-awards/winners-updated.html', c)\n\n\nfrom itertools import chain\n@never_cache\n@staff_member_required\ndef mega_search(request, template=\"admin/mega_search_results.html\", extra_context=None):\n \"\"\"\n Display search results. 
Takes an optional \"contenttype\" GET parameter\n in the form \"app-name.ModelName\" to limit search results to a single model.\n \"\"\"\n query = request.GET.get(\"q\", \"\")\n page = request.GET.get(\"page\", 1)\n #per_page = settings.SEARCH_PER_PAGE\n per_page = 1000\n max_paging_links = settings.MAX_PAGING_LINKS\n\n search_type = 'Users'\n user_results = User.objects.filter(Q(id__icontains=query) |\n Q(username__icontains=query) |\n Q(email__icontains=query) |\n Q(profile__emailalternative__icontains=query) |\n Q(profile__emailwork__icontains=query) |\n Q(profile__emailpay__icontains=query) |\n Q(first_name__icontains=query) |\n Q(last_name__icontains=query) |\n Q(profile__phone__icontains=query) |\n Q(profile__organisation__icontains=query) |\n Q(profile__job_title__icontains=query) |\n Q(profile__website__icontains=query) |\n Q(profile__bio__icontains=query) |\n Q(profile__city__icontains=query) |\n Q(profile__nationality__icontains=query) |\n Q(profile__county__icontains=query) |\n Q(profile__zippostcode__icontains=query) |\n Q(profile__country__icontains=query) |\n Q(profile__region__icontains=query) |\n Q(profile__membertype__icontains=query) |\n Q(profile__notes__icontains=query)\n ).exclude(is_active=False)\n inactive_user_results = User.objects.filter(Q(id__icontains=query) |\n Q(username__icontains=query) |\n Q(email__icontains=query) |\n Q(profile__emailalternative__icontains=query) |\n Q(profile__emailwork__icontains=query) |\n Q(profile__emailpay__icontains=query) |\n Q(first_name__icontains=query) |\n Q(last_name__icontains=query) |\n Q(profile__phone__icontains=query) |\n Q(profile__organisation__icontains=query) |\n Q(profile__job_title__icontains=query) |\n Q(profile__website__icontains=query) |\n Q(profile__bio__icontains=query) |\n Q(profile__city__icontains=query) |\n Q(profile__nationality__icontains=query) |\n Q(profile__county__icontains=query) |\n Q(profile__zippostcode__icontains=query) |\n Q(profile__country__icontains=query) |\n 
Q(profile__region__icontains=query) |\n Q(profile__membertype__icontains=query) |\n Q(profile__notes__icontains=query)\n ).exclude(is_active=True)\n emailgroup_results = EmailGroup.objects.filter(Q(id__icontains=query) |\n Q(title__icontains=query) \n )\n emailaddress_results = EmailAddress.objects.filter(Q(id__icontains=query) |\n Q(email__icontains=query) |\n Q(first_name__icontains=query) |\n Q(last_name__icontains=query) |\n Q(organisation__icontains=query) |\n Q(job_title__icontains=query) |\n Q(country__icontains=query) |\n Q(source__icontains=query) |\n Q(join_date__icontains=query)\n )\n board_results = BoardMember.objects.filter(Q(id__icontains=query) |\n Q(name__icontains=query) |\n Q(bio__icontains=query)\n )\n itl_author_results = Itle_author.objects.filter(Q(id__icontains=query) |\n Q(name__icontains=query) |\n Q(bio__icontains=query) |\n Q(email__icontains=query) |\n Q(website__icontains=query)\n )\n itl_results = Itle.objects.filter(Q(id__icontains=query) |\n Q(title__icontains=query) |\n Q(subtext__icontains=query) |\n Q(content__icontains=query) \n )\n gwa_winner_results = GwaWinner.objects.filter(Q(id__icontains=query) |\n Q(category__icontains=query) |\n Q(pr_agency__icontains=query) |\n Q(title__icontains=query) |\n Q(company__icontains=query) |\n Q(special_prize__icontains=query) \n )\n order_results = Order.objects.filter(Q(id__icontains=query) |\n Q(billing_detail_first_name__icontains=query) |\n Q(billing_detail_last_name__icontains=query) |\n Q(billing_detail_street__icontains=query) |\n Q(billing_detail_city__icontains=query) |\n Q(billing_detail_state__icontains=query) |\n Q(billing_detail_postcode__icontains=query) |\n Q(billing_detail_country__icontains=query) |\n Q(billing_detail_phone__icontains=query) |\n Q(billing_detail_email__icontains=query) |\n Q(shipping_detail_first_name__icontains=query) |\n Q(shipping_detail_last_name__icontains=query) |\n Q(shipping_detail_street__icontains=query) |\n 
Q(shipping_detail_city__icontains=query) |\n Q(shipping_detail_state__icontains=query) |\n Q(shipping_detail_postcode__icontains=query) |\n Q(shipping_detail_country__icontains=query) |\n Q(shipping_detail_phone__icontains=query) |\n Q(additional_instructions__icontains=query) |\n Q(callback_uuid__icontains=query) |\n Q(product_type__icontains=query) |\n Q(username__icontains=query) |\n Q(user_email__icontains=query) |\n Q(user_first_name__icontains=query) |\n Q(user_last_name__icontains=query) |\n Q(invoice_number__icontains=query) |\n Q(organisation__icontains=query) |\n Q(items__description__icontains=query) \n )\n product_results = Product.objects.filter(Q(id__icontains=query) |\n Q(title__icontains=query) |\n Q(slug__icontains=query) \n )\n blog_results = BlogPost.objects.filter(Q(id__icontains=query) |\n Q(title__icontains=query) |\n Q(slug__icontains=query) \n )\n\n\n result_list = list(chain(\n user_results,\n inactive_user_results,\n emailgroup_results,\n emailaddress_results,\n board_results,\n itl_author_results,\n itl_results,\n gwa_winner_results,\n order_results,\n product_results,\n blog_results\n )\n )\n paginated = paginate(result_list, page, per_page, max_paging_links)\n context = {\"query\": query, \"results\": paginated,\n \"search_type\": search_type}\n context.update(extra_context or {})\n return TemplateResponse(request, template, context)\n\n\n@never_cache\ndef serve_private_file(request, path):\n \"\"\"Hooks into cartridge_downloads to update Purchase info\n eg download count. 
Also uses the x-sendfile header to\n restrict access to private files\"\"\"\n if not request.user.is_staff:\n # not staff, try to get membertype\n try:\n mt = request.user.profile.membertype\n except:\n mt = False\n # is a paid member allow\n if mt == 'PM' or mt =='SM':\n allow = True \n elif mt == 'CU':\n orders = Order.objects.filter(user_id=request.user.id).values_list('id', flat=True)\n if Purchase.objects.filter(download__file=path, order__in=orders).exists():\n purchase = Purchase.objects.filter(download__file=path, order_id__in=orders)[0]\n # check if download has reached limit\n if purchase.download_count >= purchase.download_limit:\n messages.add_message(\n request,\n messages.ERROR,\n 'Download Limit Exceeded. Please contact us for assistance.')\n return redirect(request.META.get('HTTP_REFERER', '/'))\n # Otherwise proceed with download.\n else:\n purchase.download_count = F('download_count') + 1\n purchase.save()\n allow = True\n # all faild, disallow\n else:\n allow = False\n else:\n # is staff\n allow = True\n if allow:\n # get full file path\n full_file_path = os.path.join(settings.PRIVATE_MEDIA_ROOT,\n path.lstrip(os.path.sep))\n if os.path.isfile(full_file_path):\n # file exists\n if settings.DEBUG:\n response = HttpResponse(open(full_file_path, 'rb'))\n else:\n response = HttpResponse(full_file_path)\n response['X-Sendfile'] = full_file_path\n response['Content-Length'] = os.path.getsize(full_file_path)\n response['Content-Type'] = 'application/%s' % (os.path.splitext(path)[1].lower)\n response['Content-Disposition'] = 'inline; filename=%s' % (path)\n response[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\" # HTTP 1.1.\n response[\"Pragma\"] = \"no-cache\" # HTTP 1.0.\n response[\"Expires\"] = \"0\" # Proxies.\n return response\n else:\n raise Http404()\n else:\n raise Http404()\n \n\n# https://docs.djangoproject.com/en/stable/topics/logging/#django-request\n# logger = logging.getLogger('django.request')\ndef download_index(request):\n 
\"Display users available purchased downloads and/or members package\"\n if request.user.is_authenticated():\n if request.user.is_staff:\n allow = True\n show_members_package = True\n user_downloads = []\n elif request.user.profile.membertype in ['SM', 'PM']:\n allow = True\n show_members_package = True\n user_downloads = []\n elif Order.objects.filter(user_id=request.user.id).exists():\n dl_orders = Order.objects.filter(user_id=request.user.id).values_list('id', flat=True)\n user_downloads = [\n acq.page for acq in\n Purchase.objects.filter(order_id__in=dl_orders)\n ]\n allow = True\n show_members_package = False\n else:\n allow = False\n show_members_package = False\n user_downloads = []\n if allow:\n return render(request,\n 'shop/downloads/index.html',\n {'user_downloads': user_downloads,\n 'show_members_package': show_members_package})\n raise Http404()\n\ndef renew_membership(request):\n if request.user.is_authenticated():\n try:\n mt = request.user.profile.membertype\n except:\n mt = False\n\n if mt == 'PM' or mt =='SM':\n expires = request.user.profile.expires\n if mt == 'SM':\n product_type = Product.objects.filter(title='IPRA Standard Membership').latest('date_added')\n if mt == 'PM':\n product_type = Product.objects.filter(title='IPRA Professional Membership').latest('date_added')\n c = {'expires': expires,\n 'product_type': product_type}\n return render(request, 'pages/member-services/renew-membership.html', c)\n\n raise Http404()\n\n\ndef documentation_index(request):\n if request.user.is_staff:\n context = {'doc_entries': DocumentationSubject.objects.all(),\n 'entry': False}\n return TemplateResponse(request,\n \"admin/documentation-index.html\", context)\n raise Http404()\n\ndef documentation_entry(request, slug):\n if request.user.is_staff:\n entry = DocumentationEntry.objects.get(slug=slug)\n context = {'doc_entry': entry,\n 'doc_entries': DocumentationSubject.objects.all()}\n return TemplateResponse(request,\n \"admin/documentation-index.html\", 
context)\n raise Http404\n\n\n\n##########\n# Events\n##########\n\n@never_cache\n@staff_member_required\ndef events(request):\n events = EventPage.objects.all()\n \n context = {\"events\": events}\n \n return render(request, 'admin/events.html', context)\n\nimport xlwt\nfrom io import BytesIO\ndef event_attendee_export(request, event_id):\n event = EventPage.objects.get(id=event_id)\n attendees = EventAttendee.objects.filter(event=event)\n\n filename = \"%s-%s\" % (settings.SITE_TITLE, event.title)[:31]\n \n response = HttpResponse(content_type='application/ms-excel')\n response[\"Content-Disposition\"] = \"attachment; filename=%s.xls\" % filename\n\n wb = xlwt.Workbook(encoding='utf-8')\n ws = wb.add_sheet(filename)\n\n row_num = 0\n\n columns = [\n (u'Name', 5000),\n (u'Email', 5000),\n (u'Country', 5000),\n (u'Entry', 5000),\n (u'Order ID', 5000),\n ]\n\n font_style = xlwt.XFStyle()\n font_style.font.bold = True\n\n for col_num in range(len(columns)):\n ws.write(row_num, col_num, columns[col_num][0], font_style)\n # set column width\n ws.col(col_num).width = columns[col_num][1]\n\n font_style = xlwt.XFStyle()\n font_style.alignment.wrap = 1\n borders = xlwt.Borders()\n borders.bottom = xlwt.Borders.THIN\n borders.right = xlwt.Borders.THIN\n #borders.bottom = xlwt.Style.colour_map['white']\n font_style.borders = borders\n\n # add new colour to palette and set RGB colour value\n xlwt.add_palette_colour(\"custom_paid\", 0x21)\n wb.set_colour_RGB(0x21, 187, 224, 168)\n xlwt.add_palette_colour(\"custom_pending\", 0x22)\n wb.set_colour_RGB(0x22, 224, 168, 168)\n xlwt.add_palette_colour(\"custom_abandoned\", 0x23)\n wb.set_colour_RGB(0x23, 193, 193, 193)\n xlwt.add_palette_colour(\"custom_none\", 0x23)\n wb.set_colour_RGB(0x24, 255, 255, 255)\n\n # now you can use the colour in styles\n #sheet1 = book.add_sheet('Sheet 1')\n #style = xlwt.easyxf('pattern: pattern solid, fore_colour custom_colour')\n #sheet1.write(0, 0, 'Some text', style)\n\n \n \n\n for attendee in 
attendees:\n try:\n # row coloring\n pattern = xlwt.Pattern()\n pattern.pattern = xlwt.Pattern.SOLID_PATTERN\n \n if attendee.order:\n if attendee.order.paid == False:\n pattern.pattern_fore_colour = xlwt.Style.colour_map['custom_pending']\n font_style.pattern = pattern\n elif attendee.order.abandoned and not attendee.order.paid:\n pattern.pattern_fore_colour = xlwt.Style.colour_map['custom_abandoned']\n font_style.pattern = pattern\n else:\n pattern.pattern_fore_colour = xlwt.Style.colour_map['custom_paid']\n font_style.pattern = pattern\n else:\n pattern.pattern_fore_colour = xlwt.Style.colour_map['custom_none']\n font_style.pattern = pattern\n\n \n\n row_num += 1\n\n if attendee.entry:\n attendee_entry = attendee.entry.title\n else:\n attendee_entry = None\n if attendee.order:\n attendee_order = attendee.order.id\n else:\n attendee_order = None\n\n row = [\n str(attendee.name),\n str(attendee.email),\n str(attendee.country.name),\n str(attendee_entry),\n str(attendee_order)\n ]\n for col_num in range(len(row)):\n ws.write(row_num, col_num, row[col_num], font_style)\n except Exception as e:\n raise Exception(e)\n # if as_attachment:\n # if as_attachment:\n # xlsfile = BytesIO()\n # wb.save(xlsfile)\n # return xlsfile\n # else:\n wb.save(response)\n return response\n\n\n\n # order_report = generate_pdf_report(orders,date_range)\n # response = HttpResponse(order_report,content_type=\"application/pdf\")\n # filename = \"%s-report-%s\" % (settings.SITE_TITLE, date_range)\n # response[\"Content-Disposition\"] = \"attachment; filename=%s.pdf\" % filename\n # return response\n# def event_detail(request, event_id):\n# event = EventPage.objects.get(id=event_id)\n# if request.POST.get(\"eventsave\", \"\"):\n# form = EventPageForm(request.POST, request.FILES, instance=event)\n# if form.is_valid():\n# banner = form.cleaned_data['banner_image']\n# form.save()\n# context = {\n# \"event\": event,\n# \"form\": form\n# }\n# return render(request, 'admin/event_detail.html', 
context) \n# form = EventPageForm(instance=event)\n# context = {\n# \"event\": event,\n# \"form\": form\n# }\n# return render(request, 'admin/event_detail.html', context)\n\n# def event_attendees(request, event_id):\n# event = EventPage.objects.get(id=event_id)\n# attendees = event.attendee_event.all().order_by('order')\n# form = EventPageForm(instance=event)\n# context = {\n# \"event\": event,\n# \"attendees\": attendees,\n# \"form\": form\n# }\n \n# return render(request, 'admin/event_attendees.html', context)\n\n\n############\n#link butt\n#####\n@never_cache\ndef editor_link_handler(request):\n links = []\n sorted_links = sorted(links, key=lambda link: (link[0], link[1]['value']))\n return HttpResponse(dumps([link[1] for link in sorted_links]))\n\n\n\n########################\n## temporary view to pass session data to emailaddressadmin from emailmessageadmin\n##########################\nfrom django_sendgrid.models import EmailMessage\ndef filter_emailaddresses_by_id(request, emailmessage_id, event_name):\n sg_events = EmailMessage.objects.get(id=emailmessage_id).event_set.all().values_list(\"email\", \"event_type__name\", \"last_modified_time\")\n count = len(sg_events)\n a = []\n if count >= 1:\n all_emails = EmailAddress.objects.all().values_list(\"email\", \"id\")\n for event in sg_events:\n if event[0] in (i[0] for i in all_emails):\n email_id = [x for x in all_emails if event[0] in x][0][1]\n if event[0] not in (i[0] for i in a):\n a.append([event[0], event[1], event[2], email_id])\n else:\n x = [x for x in a if event[0] in x][0]\n\n if x[2] > event[2]:\n a[a.index(x)][1] = event[1]\n a[a.index(x)][2] = event[2]\n a[a.index(x)][3] = email_id\n out_ids = []\n for e in a:\n if e[1]== event_name:\n out_ids.append(e[3])\n request.session[\"filter_by_id\"] = json.dumps(out_ids)\n return 
redirect(settings.ADMIN_URL+'/my_cms/emailaddress/')","sub_path":"my_cms/my_cms/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":88276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"205910089","text":"import logging\n\nfrom .models import Log as model\n\n\nclass DBHandler(logging.Handler):\n\n def __init__(self):\n logging.Handler.__init__(self)\n\n def emit(self, record):\n log_entry = model(user=None,\n action=record.levelname,\n extra={'pathname': record.pathname,\n 'lineno': record.lineno,\n 'message': record.getMessage()})\n log_entry.save()\n","sub_path":"pinax/eventlog/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"608690936","text":"#!/usr/bin/python3\n#$ -P P_comet\n#$ -j y\n#$ -cwd\n#$ -M m.dubouchet18@imperial.ac.uk\n#$ -m be\n#$ -q mc_gpu_long\n#$ -pe multicores_gpu 4\n#$ -l sps=1,GPU=1,GPUtype=V100\nimport os\nimport sys\nsys.path.append(os.getcwd())\nimport argparse\nimport os\nimport torch\nimport numpy as np\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchsummary\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\nparser = argparse.ArgumentParser('Train CDC GAN', argparse.SUPPRESS)\nparser.add_argument('--ngf', type=int)\nparser.add_argument('--ndf', type=int)\nparser.add_argument('--latent-dims', type=int, default=256)\nparser.add_argument('--sequence-length', type=int, default=2048)\nparser.add_argument('--job-id', type=int)\nparser.add_argument('--epoch', type=int)\nparser.add_argument('--net-version', type=int)\nparser.add_argument('--seed', type=int, default=1337)\nparser.add_argument('--gfx', type=bool, default=False)\nargs = parser.parse_args()\noutput_dir = 'output_%d/' % (args.job_id)\n\nimport glob\ndef find_last_epoch():\n last_epoch = 0\n for save_file in glob.glob(output_dir+'states_*.pt'):\n idx = int(save_file.split('/')[1].split('_')[1].split('.')[0])\n if idx > last_epoch:\n last_epoch = idx\n return last_epoch\n\nif args.epoch is None:\n args.epoch = find_last_epoch()\nif args.ndf is None or args.ngf is None:\n log_file = open(output_dir+'output.log', 'rt')\n contents = log_file.read()\n log_file.close()\n g_s = contents.rfind(\"ngf=\")\n g_s = contents.find('=', g_s) + 1\n g_e = contents.find('\\n', g_s)\n if g_s == -1:\n print(\"Couldn't find ngf\")\n exit(1)\n args.ngf = int(contents[g_s:g_e])\n d_s = contents.rfind(\"ndf=\")\n d_s = contents.find('=', d_s) + 1\n d_e = contents.find('\\n', d_s)\n if d_s == -1:\n print(\"Couldn't find ndf\")\n exit(1)\n args.ndf = int(contents[d_s:d_e])\nif args.net_version is None:\n log_file = open(output_dir+'output.log', 'rt')\n contents = 
log_file.read()\n log_file.close()\n n_s = contents.rfind('networks=')\n if n_s == -1:\n print(\"Couldn't find networks version\")\n exit(1)\n n_s = contents.find('=', n_s) + 1\n n_e = contents.find('\\n', n_s)\n args.net_version = int(contents[n_s:n_e])\n\n\nprint('Evaluating job %d in %s at epoch %d' % (args.job_id, output_dir, args.epoch))\n\nngf = args.ngf\nndf = args.ndf\nprint('ngf:', ngf)\nprint('ndf:', ndf)\nlatent_dims = args.latent_dims\nseq_len = args.sequence_length\n\ndef to_device(x):\n if torch.cuda.is_available():\n return x.cuda()\n else:\n return x\n\ntorch.manual_seed(args.seed)\nnp.random.seed(args.seed)\n\nif torch.cuda.is_available():\n torch.cuda.manual_seed(args.seed)\n\nimport dataset\ndata = dataset.Data()\n\nimport geom_util\ngu = geom_util.GeomUtil(data.get_cdc_tree())\n\nimport importlib\nnetworks = importlib.import_module('networks%s' % (args.net_version))\nprint('Evaluating with networks version %d' % (networks.__version__))\n# Initialize networks\ngen = to_device(networks.Gen(ngf=ngf, latent_dims=latent_dims, seq_len=seq_len))\ndisc = to_device(networks.Disc(ndf=ndf, seq_len=seq_len))\nprint('Gen summary:')\ntorchsummary.summary(gen, input_size=(latent_dims,))\nprint('Disc summary:')\ntorchsummary.summary(disc, input_size=[(3, seq_len), (2, seq_len)])\n\noptimizer_gen = torch.optim.Adam(gen.parameters(), lr=1e-4, betas=(0.5, 0.999))\noptimizer_disc = torch.optim.Adam(disc.parameters(), lr=1e-4, betas=(0.5, 0.999))\ndisciminator_losses = []\ngenerator_losses = []\noccupancy_losses = []\ngradient_penalty = []\ntau = 0\nn_epochs = 0\n\n# Load network states\ndef load_states(path):\n print('Loading GAN states from %s...' 
% (path))\n device = torch.device('cpu')\n if torch.cuda.is_available():\n device = torch.device('cuda')\n states = torch.load(path, map_location=device)\n disc.load_state_dict(states['disc'])\n optimizer_disc.load_state_dict(states['d_opt'])\n global discriminator_losses\n discriminator_losses = states['d_loss']\n gen.load_state_dict(states['gen'])\n optimizer_gen.load_state_dict(states['g_opt'])\n global generator_losses\n generator_losses = states['g_loss']\n global tau\n tau = states['tau']\n global n_epochs\n n_epochs = states['n_epochs']\n global data\n data.qt = states['qt']\n data.minmax = states['minmax']\n global occupancy_losses\n occupancy_losses = states['occupancy_loss']\n global gradient_penalty\n if 'gradient_penalty' in states:\n gradient_penalty = states['gradient_penalty']\n print('OK')\nload_states('output_%d/states_%d.pt' % (args.job_id, args.epoch))\n\ngen.eval()\n\ndef sample_fake(batch_size, tau):\n noise = to_device(torch.randn((batch_size, latent_dims), requires_grad=True))\n sample = gen(noise, 0.0, tau)\n return sample\n\nprint('tau:', tau)\np, w = sample_fake(1, tau)\nprint(p.shape, w.shape)\n\nimport numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nplt.rcParams['savefig.bbox'] = 'tight'\nplt.rcParams['savefig.transparent'] = False\nplt.rcParams['axes.labelsize'] = 'large'\nplt.rcParams['axes.titlesize'] = 'x-large'\nplt.rcParams['savefig.facecolor'] = 'white'\n\ninv_p = data.inv_preprocess(p.permute(0, 2, 1).flatten(0, 1))\nprint(inv_p.shape)\n\n# Load in the training data for comparisons\nprint(\"Loading training data\")\ndata.load()\nprint(\"OK\")\n###########\n\nfrom matplotlib.patches import Ellipse\ndef only_walls(ax):\n # Draw walls\n inner = Ellipse((0, 0), 488*2, 488*2, facecolor=(0, 0, 0, 0), edgecolor='gray')\n outer = Ellipse((0, 0), 786*2, 786*2, facecolor=(0, 0, 0, 0), edgecolor='gray')\n\n ax.add_patch(inner)\n ax.add_patch(outer);\n \n ax.set(xlim=(-800,800), ylim=(-800,800), 
xlabel='x [mm]', ylabel='y [mm]')\n\nfw = torch.argmax(w, dim=1).flatten().detach().cpu()\nprint(fw.shape)\nplt.figure(figsize=(6,6))\nplt.scatter(gu.wire_x[fw], gu.wire_y[fw], s=inv_p[:,0] * 1e3, c=inv_p[:,2], cmap='inferno',\n vmin=data.doca.min(), vmax=data.doca.max()\n )\n# Draw lines between consecutive hits\nimport matplotlib.lines as lines\nscatter_l = lines.Line2D(gu.wire_x[fw], gu.wire_y[fw], linewidth=0.1, color='gray', alpha=0.7)\nax = plt.gca()\nax.set_aspect(1.0)\nax.add_line(scatter_l)\nonly_walls(ax)\nplt.savefig(output_dir+'gen_scatter.png', dpi=240)\nplt.close()\n\nx = gu.wire_x[fw]\ny = gu.wire_y[fw]\nfake_dist = np.sqrt((x[1:] - x[:-1])**2 + (y[1:] - y[:-1])**2)\n\nplt.figure()\nplt.hist(np.log10(inv_p[:,0]).cpu(), bins=50)\nplt.savefig(output_dir+'gen_edep.png', dpi=120)\nplt.close()\n\n\n\n\n\n# Features and correlations\nn_feat = 3\nfig, axes = plt.subplots(n_feat, n_feat, figsize=(16,16))\naxis_labels = [r'log($E$)', r'log($t$)', 'dca']\naxis_ranges = [[np.log10(data.edep).min(), np.log10(data.edep).max()], \n [np.log10(data.t).min(), np.log10(data.t).max()], [data.doca.min(), data.doca.max()]]\nn_seqs = 8\nn_its = data.edep.size // (n_seqs * seq_len)\nn_elements = n_its * n_seqs * seq_len\ninv_p = np.zeros((n_feat, n_elements))\n\nfor i in range(n_its):\n gen.eval()\n fake_p, fake_w = sample_fake(n_seqs, tau)\n fake_p = fake_p.permute(0, 2, 1).flatten(0, 1)\n _inv_p = data.inv_preprocess(fake_p).cpu().numpy().T\n\n inv_p[0, i*n_seqs*seq_len:(i+1)*n_seqs*seq_len] = np.log10(_inv_p[0])\n inv_p[1, i*n_seqs*seq_len:(i+1)*n_seqs*seq_len] = np.log10(_inv_p[1])\n inv_p[2, i*n_seqs*seq_len:(i+1)*n_seqs*seq_len] = _inv_p[2]\n\nfor i in range(n_feat):\n for j in range(n_feat):\n ax = axes[i][j]\n if j == 0:\n ax.set(ylabel=axis_labels[i])\n if i == n_feat - 1:\n ax.set(xlabel=axis_labels[j])\n \n if j == i:\n ax.hist(inv_p[i], bins=50, range=axis_ranges[i], color='orange')\n else:\n _x = inv_p[j]#.flatten().cpu().detach().numpy()\n _y = 
inv_p[i]#.flatten().cpu().detach().numpy()\n \n if i > j:\n ax.hist2d(_x, _y, bins=50, range=[axis_ranges[j], axis_ranges[i]])\n else:\n ax.remove()\n # ax.hist2d(_x, _y, bins=50, range=[[-1, 1], [-1, 1]], norm=mcolors.PowerNorm(0.5))\nplt.savefig(output_dir+'feature_matrix_fake.png', dpi=240, bbox_inches='tight')\nplt.close()\n\n#############\n# Scatterplot comparison\np, w = sample_fake(1, tau)\n\nfw = torch.argmax(w, dim=1).flatten().detach().cpu()\ninv_p = data.inv_preprocess(p.permute(0, 2, 1).flatten(0, 1))\nfig, ax = plt.subplots(1, 2, figsize=(12, 6))\nax[0].set_title('GAN')\nax[0].scatter(gu.wire_x[fw], gu.wire_y[fw], s=inv_p[:,0] * 1e3, c=inv_p[:,2], cmap='inferno',\n vmin=data.doca.min(), vmax=data.doca.max())\nax[0].set_aspect(1.0)\nonly_walls(ax[0])\n\nfirst_idx = np.random.randint(0, data.dbg_z.size - seq_len)\nlast_idx = first_idx + seq_len\nax[1].set_title('G4')\nax[1].scatter(data.dbg_z[first_idx:last_idx]-7650, data.dbg_y[first_idx:last_idx], s=data.edep[first_idx:last_idx]*1e3, c=data.doca[first_idx:last_idx], cmap='inferno',\n vmin=data.doca.min(), vmax=data.doca.max())\nax[1].set_aspect(1.0)\nonly_walls(ax[1])\nplt.savefig(output_dir+'comp_scatter.png', dpi=120)\nplt.close()\n\nplt.figure(figsize=(6,6))\nplt.scatter(data.dbg_z[first_idx:last_idx]-7650, data.dbg_y[first_idx:last_idx], s=data.edep[first_idx:last_idx]*1e3, c=data.doca[first_idx:last_idx], cmap='inferno',\n vmin=data.doca.min(), vmax=data.doca.max())\nscatter_l = lines.Line2D(data.dbg_z[first_idx:last_idx]-7650, data.dbg_y[first_idx:last_idx],\n linewidth=0.1, color='gray', alpha=0.7)\nax = plt.gca()\nax.set_aspect(1.0)\nax.add_line(scatter_l)\nplt.savefig(output_dir+'real_scatter.png', dpi=240)\nplt.close()\n\n# Distance distribution comparison\nplt.figure()\n# Real\nx = data.dbg_z[first_idx:last_idx]-7650\ny = data.dbg_y[first_idx:last_idx]\nreal_dist = np.sqrt((x[1:] - x[:-1])**2 + (y[1:] - y[:-1])**2)\nplt.hist(real_dist[real_dist!=0], bins=50, 
alpha=0.7);\nplt.hist(fake_dist[fake_dist!=0], bins=50, alpha=0.7);\nplt.savefig(output_dir+'comp_dist.png', dpi=120)\nplt.close()\n\n# Time difference distribution comparison\nplt.figure()\nt = data.t[first_idx:last_idx]\nt_diff = t[1:] - t[:-1]\nplt.hist(t_diff, bins=50, alpha=0.7)\nplt.savefig(output_dir+'comp_time_diff.png', dpi=120)\n\n\n\n##########\n# Feature histogram comparison\np, w = sample_fake(16, tau)\n\nfw = torch.argmax(w, dim=1).flatten().detach().cpu()\ninv_p = data.inv_preprocess(p.permute(0, 2, 1).flatten(0, 1))\n\nplt.figure()\n_min = np.log10(data.edep).min()\n_max = np.log10(data.edep).max()\nplt.hist(np.log10(data.edep), bins=50, alpha=0.7, density=True, label='G4', range=[_min, _max])\nplt.hist(np.log10(inv_p[:,0].cpu()), bins=50, alpha=0.7, density=True,\n label='GAN', range=[_min, _max])\nplt.xlabel('log(Edep [MeV])')\nplt.legend()\nplt.savefig(output_dir+'comp_edep.png', dpi=120)\n\nplt.figure()\n_min = np.log10(data.t).min()\n_max = np.log10(data.t).max()\nplt.hist(np.log10(data.t), bins=50, alpha=0.7, density=True, label='G4', range=[_min, _max])\nplt.hist(np.log10(inv_p[:,1].cpu()), bins=50, alpha=0.7, density=True,\n label='GAN', range=[_min, _max])\nplt.xlabel('log(t [ns])')\nplt.legend()\nplt.savefig(output_dir+'comp_t.png', dpi=120)\n\nplt.figure()\n_min = data.doca.min()\n_max = data.doca.max()\nplt.hist(data.doca, bins=50, alpha=0.7, density=True, label='G4', range=[_min, _max])\nplt.hist(inv_p[:,2].cpu(), bins=50, alpha=0.7, density=True, label='GAN', range=[_min, _max])\nplt.xlabel('doca [mm]')\nplt.legend()\nplt.savefig(output_dir+'comp_doca.png', dpi=120)\n\n# Loss plots\nplt.figure()\nn_critic = len(discriminator_losses) // len(generator_losses)\nplt.plot(np.linspace(0, n_epochs, num=len(discriminator_losses)), discriminator_losses,\n label='Discriminator', alpha=0.7)\nplt.plot(np.linspace(0, n_epochs, num=len(generator_losses)), generator_losses, alpha=0.7,\n label='Generator')\nplt.ylabel('WGAN-GP 
loss')\nplt.xlabel('Epoch')\n#plt.ylim(-200, 200)\nplt.legend()\nplt.savefig(output_dir+'losses.png', dpi=120)\n\n# Occupancy loss plots\nplt.figure()\nplt.plot(np.linspace(0, n_epochs, num=len(occupancy_losses)), occupancy_losses, alpha=0.7)\nplt.ylabel('Occupancy loss')\nplt.xlabel('Epoch')\n#plt.ylim(-200, 200)\nplt.savefig(output_dir+'eval_occupancy_losses.png', dpi=120)\n\n# Critic score (-(D loss - GP))\nif len(gradient_penalty) > 0:\n plt.figure()\n plt.plot(np.linspace(0, n_epochs, num=len(discriminator_losses)), \n -np.subtract(discriminator_losses, gradient_penalty))\n plt.ylabel('Critic score')\n plt.xlabel('Epoch')\n plt.savefig(output_dir+'critic_score.png', dpi=120)\n\n# GP loss plot\nplt.figure()\nplt.plot(np.linspace(0, n_epochs, num=len(gradient_penalty)), gradient_penalty)\nplt.ylabel('Gradient penalty')\nplt.xlabel('Epoch')\nplt.savefig(output_dir+'gp.png', dpi=120)\n\n\n# Sample real sequences of 2048 hits from the training set.\n# Real data in evaluation is not pre-processed, so it does not need to be inv_preprocessed.\ndef sample_real(batch_size):\n idx = np.random.randint(0, data.edep.size - seq_len, size=(batch_size,))\n start = idx\n stop = idx + 2048\n slices = np.zeros((batch_size, 2048), dtype=np.int64)\n for i in range(batch_size):\n slices[i] = np.r_[start[i]:stop[i]] \n edep = data.edep[slices]\n t = data.t[slices]\n doca = data.doca[slices]\n w = data.wire[slices]\n #one_hot_w = F.one_hot(w, num_classes=gu.cum_n_wires[-1]).squeeze(1).permute(0, 2, 1)\n return np.array([edep, t, doca]), w\n\n_p, _w = sample_real(12)\n\nimport matplotlib.gridspec as gridspec\nfig = plt.figure(figsize=(12,12))\nn_samples=4\ngs1 = gridspec.GridSpec(n_samples, n_samples, figure=fig, wspace=0.025, hspace=0.05)\nfor i in range(n_samples):\n for j in range(n_samples):\n p, w = sample_real(1)\n p = p.squeeze()\n w = w.squeeze()\n #plt.title('G4')\n ax = plt.subplot(gs1[i*4 + j])\n only_walls(ax)\n ax.set_xlim(-800,800)\n ax.set_ylim(-800,800)\n 
ax.set_aspect(1)\n ax.scatter(gu.wire_x[w], gu.wire_y[w], s=p[0] * 1e2+0, alpha=0.7, c=p[2], cmap='inferno', vmin=data.doca.min(), vmax=data.doca.max())\n ax.axis('off')\nplt.savefig(output_dir+'grid_real.png', dpi=240)\n\n# Same for fake samples\nfig = plt.figure(figsize=(12,12))\nn_samples=4\ngs1 = gridspec.GridSpec(n_samples, n_samples, figure=fig, wspace=0.025, hspace=0.05)\ngs1.update(wspace=0.025, hspace=0.05) # set the spacing between axes. \nfor i in range(n_samples):\n for j in range(n_samples):\n p, w = sample_fake(1, tau)\n p = data.inv_preprocess(p.permute(0,2,1).flatten(0,1))\n p = p.squeeze().detach().cpu().T\n w = torch.argmax(w, dim=1).squeeze().detach().cpu()\n #plt.title('G4')\n ax = plt.subplot(gs1[i*4 + j])\n only_walls(ax)\n ax.set_xlim(-800,800)\n ax.set_ylim(-800,800)\n ax.set_aspect(1)\n ax.scatter(gu.wire_x[w], gu.wire_y[w], s=p[0] * 1e2+0, alpha=0.7, c=p[2], cmap='inferno', vmin=data.doca.min(), vmax=data.doca.max())\n ax.axis('off')\nplt.savefig(output_dir+'grid_fake.png', dpi=240)\n\n# Activated wires per sequence histogram\n\nn_fake_uq = []\nn_real_uq = []\nn_seq = 256\nplt.figure()\nfor i in range(n_seq):\n fake_p, fake_w = sample_fake(1, tau)\n real_p, real_w = sample_real(1)\n\n fw = torch.argmax(fake_w, dim=1).flatten().detach().cpu()\n rw = real_w.squeeze()\n\n n_fake_uq.append(np.unique(fw).size)\n n_real_uq.append(np.unique(rw).size)\n#print(n_fake_uq)\n\nplt.hist(n_real_uq, bins=50, alpha=0.7, label='G4', range=[0,800]);\nplt.hist(n_fake_uq, bins=50, alpha=0.7, label='GAN', range=[0, 800]);\n#plt.xlim(0, 1200)\nplt.legend()\nplt.title('Number of activated wires per sequence')\nplt.savefig(output_dir+'activated_wires.png', dpi=120)\n\n\n# Wire, Radius and Theta plots\nn_seqs = 8\nn = data.wire.size // seq_len // n_seqs\nfake_wire = np.zeros(n_seqs * seq_len * n, dtype=int)\nfake_edep = np.zeros(n_seqs * seq_len * n)\nfake_t = np.zeros(n_seqs * seq_len * n)\nfake_doca = np.zeros(n_seqs * seq_len * n)\nfor i in range(n):\n with 
torch.no_grad():\n gen.eval()\n \n latent_var = to_device(torch.randn((n_seqs, latent_dims)))\n p, w = gen(latent_var, 0, tau)\n fake_wire[i*seq_len*n_seqs:(i+1)*seq_len*n_seqs] = torch.argmax(w, dim=1).cpu().flatten()\n inv_p = data.inv_preprocess(p.permute(0,2,1).flatten(0,1)) \n fake_edep[i*seq_len*n_seqs:(i+1)*seq_len*n_seqs] = inv_p[:,0]\n fake_t[i*seq_len*n_seqs:(i+1)*seq_len*n_seqs] = inv_p[:,1]\n fake_doca[i*seq_len*n_seqs:(i+1)*seq_len*n_seqs] = inv_p[:,2]\n\nprint(data.wire.shape)\nprint(fake_wire.shape)\n\nplt.figure()\nplt.hist(data.wire, bins=200, alpha=0.7, density=True, label='G4');\nplt.hist(fake_wire, bins=200, alpha=0.7, density=True, label='GAN');\nplt.legend()\nplt.savefig(output_dir+'comp_wire.png', dpi=120)\n\n\nplt.figure()\nfake_radius = np.sqrt(gu.wire_x[fake_wire]**2 + gu.wire_y[fake_wire]**2)\nreal_radius = np.sqrt((data.dbg_z - 7650)**2 + data.dbg_y**2)\nfake_layer = (np.round((gu.n_layers-1) * (fake_radius - real_radius.min()) / (real_radius.max() - real_radius.min()))).astype(int)\nplt.hist(data.layer, bins=gu.n_layers, range=[0,gu.n_layers], rwidth=0.8, align='left',\n alpha=0.7, label='G4', density=True)\nplt.hist(fake_layer, bins=gu.n_layers, range=[0,gu.n_layers], rwidth=0.8, align='left',\n alpha=0.7, label='GAN', density=True)\nplt.legend()\nplt.xticks(np.arange(0, gu.n_layers))\nplt.savefig(output_dir+'comp_layer.png', dpi=120)\n#plt.figure()\n#plt.hist(real_radius, alpha=0.7, density=True, label='G4', bins=50)\n# #range=[real_radius.min(), real_radius.max()], label='G4')\n#plt.hist(fake_radius, alpha=0.7, density=True, label='GAN', bins=50);\n# #range=[real_radius.min(), real_radius.max()], label='GAN')\n#plt.legend()\n#plt.savefig(output_dir+'comp_radius.png', dpi=120)\n\nplt.figure()\nfake_theta = np.arctan2(gu.wire_y[fake_wire], gu.wire_x[fake_wire])\nreal_theta = np.arctan2(data.dbg_y, (data.dbg_z-7650))\nplt.hist(real_theta, bins=50, alpha=0.7, density=True, label='G4');\nplt.hist(fake_theta, bins=50, alpha=0.7, 
density=True, label='GAN');\nplt.legend()\nplt.savefig(output_dir+'comp_theta.png', dpi=120)\n\n# Edep per layer comparison\nplt.figure()\nfake_edep_pl = np.zeros(gu.n_layers)\nreal_edep_pl = np.zeros(gu.n_layers)\nfor i in range(gu.n_layers):\n fake_edep_pl[i] = fake_edep[fake_layer == i].sum() \n real_edep_pl[i] = data.edep[data.layer == i].sum()\n\nplt.bar(np.arange(0, gu.n_layers), real_edep_pl, alpha=0.7, width=0.8, label='G4')\nplt.bar(np.arange(0, gu.n_layers), fake_edep_pl, alpha=0.7, width=0.8, label='GAN')\nplt.xticks(np.arange(0, gu.n_layers))\nplt.legend()\nplt.savefig(output_dir+'comp_edep_per_layer.png', dpi=120)\n\n","sub_path":"eval5.py","file_name":"eval5.py","file_ext":"py","file_size_in_byte":18512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"578513547","text":"import math\npi = 3.14\nx = int(input(\"Введите x: \"))\na = int(input(\"Введите a: \"))\nv = str(input(\"Введите букву функции, которую xотите вычислить \"))\nF = 0\nG = 0\nY = 0\nif v == \"G\":\n G = -(8 * (12 * pow(a, 2) + 68 * a * x + 63 * pow(x, 2))) / (4 * pow(a, 2) + a * x - 5 * pow(x, 2))\n print(\"G =\", G)\nelif v == \"Y\":\n Y = -7 * pow(a, 2) + 40 * a * x + 63 * pow(x, 2) + 1\n print(\"Y =\", Y)\nelif v == \"F\":\n f_without_sin = int(pi * (40 * a ** 2 - 61 * a * x + 7 * x ** 2) / (pi * (40 * a ** 2 - 61 * a * x + 7 * x ** 2)))\n if f_without_sin:\n F = math.sin(f_without_sin)\n print(\"F =\", F)\n else:\n print('Не существует синуса к заданной функции')\nelse:\n print(\"Введите букву функции, которую xотите вычислить\")\n","sub_path":"lab2/lab2.py","file_name":"lab2.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"287977946","text":"import os.path\nimport subprocess\n\nfrom click.testing import CliRunner\nfrom pruun.commands import pruun\nfrom pruun.utils import get_dependency_names\n\n\ndef test_deployment_package():\n \"\"\"\n Test parsing of installed packages, creation of .zip, and finally validate integrity of .zip.\n \"\"\"\n runner = CliRunner()\n with runner.isolated_filesystem():\n with open(\"handler.py\", \"w\") as f: # create dummy lambda handler file\n pass\n\n result = runner.invoke(pruun, [\"package\", \"handler.py\"])\n\n # check for no exceptions\n assert result.exit_code == 0\n\n # check for existence of .zip file\n assert os.path.isfile(\"deployment_package.zip\") == True\n\n # verify integrity of .zip\n depens = get_dependency_names()\n for depen in depens:\n underscored_name = depen.replace(\"-\", \"_\").strip()\n cmd = f\"unzip -l deployment_package.zip {underscored_name}*\"\n subprocess.run(\n cmd,\n stdout=subprocess.DEVNULL,\n stderr=subprocess.STDOUT,\n shell=True,\n check=True,\n )\n\n f.close()\n","sub_path":"tests/test_pruun.py","file_name":"test_pruun.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"266657530","text":"def func(d):\n try:\n return 42 / d\n except ZeroDivisionError:\n print('Error: Invalid argument')\n\ndef cats():\n catNames = []\n while True:\n print('Enter the name of cat ' + str(int(len(catNames) + 1)) + '\\nOr enter nothing to stop')\n name = input()\n if name != '':\n catNames = catNames + [aname]\n else:\n break\n print(catNames)\n\ndef getStringFromList(list):\n result = ''\n for i in range(len(list)):\n result += (list[i] + ', ')\n trim = len(result) - 2\n return result[:trim]\n\n\n\nlist = ['Hello', 'World', 'Universe']\ns = getStringFromList(list)\nprint(s)","sub_path":"LearnPython/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"97669772","text":"# -*- coding: utf-8 -*-\n# Copyright © 2016, German Neuroinformatics Node (G-Node)\n#\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted under the terms of the BSD License. See\n# LICENSE file in the root of the Project.\nimport numpy as np\n\n\nclass DataSet(object):\n \"\"\"\n Data IO object for DataArray.\n \"\"\"\n\n def __array__(self):\n return self._read_data()[:]\n\n def __getitem__(self, index):\n return self._read_data(index)\n\n def __setitem__(self, index, value):\n self._write_data(value, index)\n\n def __len__(self):\n return self.len()\n\n def __iter__(self):\n for idx in range(self.len()):\n yield self[idx]\n\n def len(self):\n \"\"\"\n Length of the first dimension. Equivalent to `DataSet.shape[0]`.\n\n :type: int or long\n \"\"\"\n return self.shape[0]\n\n @property\n def shape(self):\n \"\"\"\n :type: tuple of data array dimensions.\n \"\"\"\n return self.data_extent\n\n @property\n def size(self):\n \"\"\"\n Number of elements in the DataSet, i.e. the product of the\n elements in :attr:`~nixio.data_array.DataSet.shape`.\n\n :type: int\n \"\"\"\n return np.prod(self.shape)\n\n @property\n def dtype(self):\n \"\"\"\n :type: :class:`numpy.dtype` object holding type infromation about\n the data stored in the DataSet.\n \"\"\"\n return np.dtype(self._get_dtype())\n\n def write_direct(self, data):\n \"\"\"\n Directly write all of ``data`` to the\n :class:`~nixio.data_array.DataSet`. 
The supplied data must be a\n :class:`numpy.ndarray` that matches the DataSet's shape and must have\n C-style contiguous memory layout (see :attr:`numpy.ndarray.flags` and\n :class:`~numpy.ndarray` for more information).\n\n :param data: The array which contents is being written\n :type data: :class:`numpy.ndarray`\n \"\"\"\n self._write_data(data)\n\n def read_direct(self, data):\n \"\"\"\n Directly read all data stored in the :class:`~nixio.data_array.DataSet`\n into ``data``. The supplied data must be a :class:`numpy.ndarray` that\n matches the DataSet's shape, must have C-style contiguous memory layout\n and must be writeable (see :attr:`numpy.ndarray.flags` and\n :class:`~numpy.ndarray` for more information).\n\n :param data: The array where data is being read into\n :type data: :class:`numpy.ndarray`\n \"\"\"\n data[:] = self._read_data()\n\n def append(self, data, axis=0):\n \"\"\"\n Append ``data`` to the DataSet along the ``axis`` specified.\n\n :param data: The data to append. 
Shape must agree except for the\n specified axis\n :param axis: Along which axis to append the data to\n \"\"\"\n data = np.ascontiguousarray(data)\n if len(self.shape) != len(data.shape):\n raise ValueError(\n \"Data and DataArray must have the same dimensionality\"\n )\n\n if any([s != ds for i, (s, ds) in\n enumerate(zip(self.shape, data.shape)) if i != axis]):\n raise ValueError(\"Shape of data and shape of DataArray must match \"\n \"in all dimension but axis!\")\n\n offset = tuple(0 if i != axis else x for i, x in enumerate(self.shape))\n count = data.shape\n enlarge = tuple(self.shape[i] + (0 if i != axis else x)\n for i, x in enumerate(data.shape))\n self.data_extent = enlarge\n sl = tuple(slice(o, c+o) for o, c in zip(offset, count))\n self._write_data(data, sl)\n\n @staticmethod\n def __index_to_tuple(index):\n tidx = type(index)\n\n if tidx == tuple:\n return index\n elif tidx == int or tidx == slice:\n return (index, )\n elif tidx == type(Ellipsis):\n return ()\n else:\n raise IndexError(\"Unsupported index\")\n\n @staticmethod\n def __complete_slices(shape, index):\n if type(index) is slice:\n if index.step is not None:\n raise IndexError('Invalid index, stepping unsupported')\n start = index.start\n stop = index.stop\n if start is None:\n start = 0\n elif start < 0:\n start = shape + start\n if stop is None:\n stop = shape\n elif stop < 0:\n stop = shape + stop\n index = slice(start, stop, index.step)\n elif type(index) is int:\n if index < 0:\n index = shape + index\n index = slice(index, index+1)\n else:\n index = slice(index, index+1)\n elif index is None:\n index = slice(0, shape)\n else:\n raise IndexError('Invalid index')\n return index\n\n @staticmethod\n def __fill_none(shape, index, to_replace=1):\n size = len(shape) - len(index) + to_replace\n return tuple([None] * size)\n\n def __tuple_to_count_offset_shape(self, index):\n # precondition: type(index) == tuple and len(index) >= 1\n fill_none = self.__fill_none\n shape = self.shape\n\n if 
index[0] is Ellipsis:\n index = fill_none(shape, index) + index[1:]\n if index[-1] is Ellipsis:\n # if we have a trailing ellipsis we just cut it away\n # and let complete_slices do the right thing\n index = index[:-1]\n\n # here we handle Ellipsis in the middle of the tuple\n # we *can* only handle one, if there are more, then\n # __complete_slices will raise a InvalidIndex error\n pos = index.index(Ellipsis) if Ellipsis in index else -1\n if pos > -1:\n index = index[:pos] + fill_none(shape, index) + index[pos+1:]\n\n # in python3 map does not work with None therefore if\n # len(shape) != len(index) we wont get the expected\n # result. We therefore need to fill up the missing values\n index = index + fill_none(shape, index, to_replace=0)\n\n completed = list(map(self.__complete_slices, shape, index))\n combined = list(map(lambda s: (s.start, s.stop), completed))\n count = tuple(x[1] - x[0] for x in combined)\n offset = [x for x in zip(*combined)][0]\n\n # drop all indices from count that came from single ints\n # NB: special case when we only have ints, e.g. (int, ) then\n # we get back the empty tuple and this is what we want,\n # because it indicates a scalar result\n squeezed = map(lambda i, c: c if type(i) != int\n else None, index, count)\n shape = list(filter(lambda x: x is not None, squeezed))\n\n return count, offset, shape\n\n def _write_data(self, data, sl=None):\n dataset = self._h5group.get_dataset(\"data\")\n dataset.write_data(data, sl)\n\n def _read_data(self, sl=None):\n dataset = self._h5group.get_dataset(\"data\")\n return dataset.read_data(sl)\n\n @property\n def data_extent(self):\n \"\"\"\n The size of the data.\n\n :type: tuple of int\n \"\"\"\n dataset = self._h5group.get_dataset(\"data\")\n return dataset.shape\n\n @data_extent.setter\n def data_extent(self, extent):\n dataset = self._h5group.get_dataset(\"data\")\n dataset.shape = extent\n\n @property\n def data_type(self):\n \"\"\"\n The data type of the data stored in the DataArray. 
This is a read only\n property.\n\n :type: DataType\n \"\"\"\n return self._get_dtype()\n\n def _get_dtype(self):\n dataset = self._h5group.get_dataset(\"data\")\n return dataset.dtype\n","sub_path":"nixio/data_set.py","file_name":"data_set.py","file_ext":"py","file_size_in_byte":7802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"359880018","text":"import sys\nfrom math import sqrt\n\nfor line in sys.stdin:\n\tline = line.strip().split()\n\ta = int(line[0])\n\tb = int(line[1])\n\tc = int(line[2])\n\ttry:\n\t\troot_1 = (-b+sqrt((b**2)-4*a*c))/(2*a)\n\t\troot_2 = (-b-sqrt((b**2)-4*a*c))/(2*a)\n\t\tprint(\"r1 = {}, r2 = {}\".format(root_1, root_2))\n\texcept ValueError:\n\t\tprint(\"None\")\n\texcept ZeroDivisionError:\n\t\tprint(\"None\")\n","sub_path":"z_only_python/roots_62.py","file_name":"roots_62.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"620362121","text":"def cria_matriz (num_linhas, num_colunas, valor):\n\tmatriz = []\n\tfor i in range(num_linhas):\n\t\tlinha = []\n\t\tfor j in range(num_colunas):\n\t\t\tlinha.append(valor)\n\t\tmatriz.append(linha)\n\treturn matriz\n\ndef soma_matrizes (m1, m2):\n\tnum_linha = len(m1)\n\tnum_coluna = len(m1[0])\n\tmatriz_soma = cria_matriz(num_linha, num_coluna, valor = 0)\n\tfor lin in range(num_linha):\n\t\tfor col in range(num_coluna):\n\t\t\tmatriz_soma[lin][col] = m1[lin][col] + m2[lin][col]\n\n\treturn matriz_soma\n\ndef multiplica_matrizes(m1,m2):\n\tlinhas = len(m1)\n\tcolunas = len(m2[0])\n\n\tif linhas != colunas:\n\t\tbreak\n\n\tmatriz_multiplica = cria_matriz(linhas, colunas, 0)\n\n\tfor lin in range(linhas):\n\t\tfor col in range(colunas):\n\t\t\tlistas = define_listas(m1,m2,lin,col)\n\t\t\tmatriz_multiplica[lin,col] = multplica_listas(listas)\n\ndef define_listas(m1,m2,lin,col):\n\tl1 = m1[lin]\n\tl2 = []\n\tfor i in range(len(m2)):\n\t\tl2.append(m2[i][col])\n\ndef multiplica_listas (listas):\n\tif len(listas[0]) != len(listas[1]):\n\t\tbreak\n\n\tlista_resultado = []\n\n\tfor i in range(len(listas[0])):\n\t\tlista_resultado.append(listas[0][i] * listas[1][i])\n\n\n\ndef teste ():\n\tm1 = [[1,2,3], [4,5,6],[7,8,9]]\n\tm2 = cria_matriz(3,3,1)\n\tteste = soma_matrizes(m1,m2)\n\n\tprint(teste)\n\nif __name__ == \"__main__\":\n\tteste()\n\n\n\n","sub_path":"Coursera/Intro_Python_P2/Semana 3/aula.py","file_name":"aula.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"354374971","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nGAMMA = u'\\u03b3'\nLOGBINBASE = 1.2\n\ndef drawhistogram(vertices, nbins=20, normed=False, facecolor='green', alpha=0.5, histtype='bar', log=False):\n # the histogram of the degree distribution\n\n degrees = [vertices.getvertex(i).getindegree() for i in range(vertices.getnbofvertices())]\n n, bins, patches = plt.hist(degrees, bins=nbins, normed=normed, facecolor=facecolor, alpha=alpha, histtype=histtype, log=log)\n\n normedlabel = ''\n if normed:\n normedlabel = ' (normalized)'\n\n plt.title('Degree distribution')\n plt.xlabel('Indegree')\n plt.ylabel('Number of nodes'+ normedlabel)\n plt.show()\n\ndef logbins(amax, amin=0, base=LOGBINBASE):\n bins = [amin]\n num = amin + 1\n i = 1\n\n while num <= amax:\n bins.append(num)\n num += base**i\n i += 1\n\n bins.append(amax)\n \n return bins\n\ndef drawloglogdist(vertices, lbinsbase=LOGBINBASE, density=False):\n # the degree distribution in loglog scale\n\n lbins = logbins(vertices.getmaxindegree(), amin=0, base=lbinsbase)\n\n degrees = [vertices.getvertex(i).getindegree() for i in range(vertices.getnbofvertices())]\n #y, bins = np.histogram(degrees, bins=nbins, density=density)\n y, bins = np.histogram(degrees, bins=lbins, density=density)\n x = bins[:-1]\n\n xforlog = []; logx = []; logy = []\n for i in range(len(y)):\n if x[i] and y[i]:\n xforlog.append(x[i])\n logx.append(np.log10(x[i]))\n logy.append(np.log10(y[i]))\n \n gamma, logA = np.polyfit(logx, logy, 1)\n p = np.poly1d([gamma, logA])\n plt.plot(x, y, 'bo', xforlog, 10**p(logx), 'r-')\n plt.loglog()\n plt.title(u'Degree distribution (%s = %#.2f)' % (GAMMA, -1 * gamma))\n plt.xlabel('Indegree')\n plt.ylabel('Number of nodes')\n plt.show()\n\n","sub_path":"ns-allinone-3.21/ns-3.21/scratch/svc_routines.py","file_name":"svc_routines.py","file_ext":"py","file_size_in_byte":1840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"487000704","text":"import torch\nfrom torch.utils.data import TensorDataset, DataLoader, RandomSampler\nfrom transformers import BertModel, BertTokenizer, AdamW, BertPreTrainedModel, BertConfig, BertForSequenceClassification, BertModel\nfrom keras.preprocessing.sequence import pad_sequences\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport json\nfrom tqdm import trange\nimport re\nfrom itertools import chain\nfrom google.colab import drive\nimport os\nfrom sklearn.metrics import accuracy_score,f1_score\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nn_gpu = torch.cuda.device_count()\ntorch.cuda.get_device_name(0)\nprint(device)\nclass MyBert(BertPreTrainedModel):\n def __init__(self, config, dropout_prob):\n super().__init__(config)\n self.num_labels = 2\n\n self.bert = BertModel(config)\n self.dropout = torch.nn.Dropout(dropout_prob)\n self.classifier = torch.nn.Linear(config.hidden_size, self.num_labels)\n\n self.init_weights()\n\n def forward(self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,):\n\n outputs = self.bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n )\n\n pooled_output = outputs[1]\n\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n\n outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here\n if labels is not None:\n loss_fct = torch.nn.CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n outputs = (loss,) + outputs\n\n return outputs # (loss), logits, (hidden_states), (attentions)\n\n\nclass SentiPred():\n def __init__(self,train_file,test_file,max_len=128):\n self.x_train,self.y_train = self.read_sentihood(train_file)\n self.x_test,self.y_test = 
self.read_sentihood(test_file)\n #self.target_token = {'T':'\n self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)\n self.max_len=max_len\n\n def train(self,epochs=4,batch_size=32,dropout_prob=0.1):\n self.model = MyBert.from_pretrained('bert-base-uncased',dropout_prob=dropout_prob)\n self.model.cuda()\n self.setup_optimizer()\n \n train_data_loader = self.preprocess(self.x_train,self.y_train,batch_size=batch_size)\n validation_data_loader = self.preprocess(self.x_test,self.y_test,batch_size=batch_size)\n losses = []\n accuracy = []\n for epoch in trange(epochs):\n self.model.train()\n tr_loss=0\n tr_steps=0\n for input_id,mask,label in train_data_loader:\n outputs = self.model(input_id.to(device),attention_mask=mask.to(device),labels=label.to(device))\n loss=outputs[0]\n losses.append(loss.item())\n\n\n self.model.zero_grad()\n loss.backward()\n self.optimizer.step()\n \n tr_loss += loss\n tr_steps+=1\n print(\"Training loss: {}\".format(tr_loss/tr_steps))\n\n self.model.eval()\n val_acc=0\n val_steps=0\n for input_id,mask,label in validation_data_loader:\n with torch.no_grad():\n outputs = self.model(input_id.to(device),attention_mask=mask.to(device))\n logits=outputs[0]\n pred=np.argmax(logits.to('cpu').numpy(),axis=1) #take argmax to find class\n acc = np.sum(pred==label.to('cpu').numpy())/len(pred)\n accuracy.append(acc)\n val_acc+=acc\n val_steps+=1\n print(\"Validation Accuracy {}\".format(val_acc/val_steps))\n\n return losses,accuracy\n \n\n def validate(self,x,y,batch_size=32):\n pred = []\n labels = []\n validation_data_loader = self.preprocess(x,y,batch_size=batch_size)\n self.model.eval()\n for input_id,mask,label in validation_data_loader:\n with torch.no_grad():\n outputs = self.model(input_id.to(device),attention_mask=mask.to(device))\n logits=outputs[0]\n pred.append(logits.to('cpu').numpy())\n labels.append(label.to('cpu').numpy())\n\n labels = np.array([l.item() for l in chain.from_iterable(labels)])\n pred = 
np.array(list(chain.from_iterable([np.argmax(p, axis=1).flatten() for p in pred]))) #take the argmax to get which class\n acc=np.sum(pred==labels)/len(labels)\n return pred,labels,acc\n\n \n def setup_optimizer(self):\n param_optimizer = list(self.model.named_parameters())\n no_decay = ['bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],\n 'weight_decay_rate': 0.01},\n {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],\n 'weight_decay_rate': 0.0}]\n self.optimizer = AdamW(optimizer_grouped_parameters,lr=3e-5) \n \n \n def preprocess(self,x,y,batch_size=32): \n labels = []\n new_x = []\n for text,entry in zip(x,y):\n for label in entry:\n new_x.append(re.sub(label[0],'[TARGET]',text))\n #new_y.append(label)\n if label[1]=='Positive':\n labels.append(1)\n elif label[1]=='Negative':\n labels.append(0)\n else:\n print('Error on {}'.format(text))\n labels = torch.tensor(labels)\n input_ids =[self.tokenizer.encode(text,add_special_tokens=True) for text in new_x]\n input_ids = torch.tensor(pad_sequences(input_ids,maxlen=self.max_len, dtype=\"long\", truncating=\"post\", padding=\"post\")).to(torch.int64) \n attention_masks=[]\n for seq in input_ids:\n seq_mask = [float(i>0) for i in seq]\n attention_masks.append(seq_mask)\n \n masks = torch.tensor(attention_masks)\n data = TensorDataset(input_ids, masks, labels)\n sampler = RandomSampler(data)\n dataloader = DataLoader(data, sampler=sampler, batch_size=batch_size)\n return dataloader \n \n def read_sentihood(self,file):\n with open(file,'r') as f:\n data=json.load(f)\n x = []\n y = []\n for entry in data:\n aspects = [(e['target_entity'],e['sentiment']) for e in entry['opinions'] if e['aspect']=='general']\n if len(aspects) == 0:\n continue\n x.append(entry['text'])\n y.append(aspects)\n return x,y\n\ncls = SentiPred('sentihood-train.json','sentihood-test.json')\ntr_loss,tr_acc = 
cls.train(epochs=8,batch_size=32,dropout_prob=0.75)\npred,labels,acc = cls.validate(*cls.read_sentihood('sentihood-dev.json'),batch_size=8)\nprint(accuracy_score(labels,pred),f1_score(labels,pred,average='macro'))\n","sub_path":"Module Testing/sentihood_bert.py","file_name":"sentihood_bert.py","file_ext":"py","file_size_in_byte":7302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"567131148","text":"import pandas as pd\nimport numpy as np\nimport math\nfrom sklearn.linear_model import LinearRegression\nfrom xgboost import XGBRegressor\nfrom sklearn.model_selection import GridSearchCV\n\nimport matplotlib.pyplot as plt\nfrom datetime import timedelta\nfrom datetime import datetime\n\ndef run_model(predict_from_date):\t\t\n\tdata = pd.read_csv('xgboost_input.csv')\n\t\n\ttmp_predict_from_date = datetime.strptime(predict_from_date, '%Y-%m-%d')\n\tsplit_date = (tmp_predict_from_date - timedelta(7)).strftime('%Y-%m-%d')\n\t\n\ttrain = data[data['date'] < split_date]\n\ttrain = train.replace(np.inf, np.nan)\n\ttrain = train.dropna()\n\ttest = data[(data['date'] >= split_date) & (data['date'] < predict_from_date)]\n\n\tX = train.drop(['date', 'change_coming_3_days', 'Region', 'cases'], axis=1)\n\ty = train['change_coming_3_days']\n\n\tparam_grid = {'n_estimators': [30, 50, 100, 200, 300],\n\t 'learning_rate': [0.01, 0.03, 0.05, 0.1],\n\t 'max_depth': [3, 4, 5, 6, 7, 8]\n\t }\n\n\tmodel = XGBRegressor(n_estimators=200, learning_rate=0.01, max_depth=4)\n\tmodel.fit(X, y)\n\tX_test = test.drop(['date', 'change_coming_3_days', 'Region', 'cases'], axis=1)\n\ty_test = test['change_coming_3_days']\n\tpred = model.predict(X_test)\n\ttest['predicted_change'] = pred\n\ttest['predicted_nbr_in_3_days'] = test['iva']*test['predicted_change']\n\ttest['predicted_nbr_in_3_days'] = [int(val) if not math.isnan(val) else val for val in test['predicted_nbr_in_3_days']]\n\ttest['iva_in_3_days'] = test['iva']*test['change_coming_3_days']\n\ttest['absolute_error_%'] = abs(test['predicted_change']-test['change_coming_3_days'])/test['change_coming_3_days']\n\ttest['absolute_error_%'].mean()\n\n\treturn X, train, test, model\n\n\ndef results(input_data, output_data):\n results = output_data[['date', 'Region', 'predicted_nbr_in_3_days']]\n utskrivningar = input_data[['date', 'Region', 'iva']]\n utskrivningar['date'] = [str(datetime.strptime(day, 
'%Y-%m-%d').date()+timedelta(days=12)) for day in utskrivningar['date']]\n utskrivningar.columns = ['date', 'Region', 'utskrivningar']\n results['date'] = [str(datetime.strptime(day, '%Y-%m-%d').date()+timedelta(days=3)) for day in results['date']]\n results.columns = ['date', 'Region', 'predicted']\n\n results = results.merge(input_data, on=['date', 'Region'], how='left')[['date', 'Region', 'predicted', 'iva']]\n results = results.merge(utskrivningar, on=['date', 'Region'], how='left')\n results['predicted'] = results['predicted']-results['utskrivningar']\n results['iva'] = results['iva']-results['utskrivningar']\n results['absolute_error_%'] = abs(results['predicted']-results['iva'])/results['iva']\n \n return results","sub_path":"src/forecast/run_model.py","file_name":"run_model.py","file_ext":"py","file_size_in_byte":2669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"506047050","text":"import asyncio\n\nfrom wtforms import (Form, StringField, validators,\n PasswordField, ValidationError)\n\nfrom managers import (get_token, create_user, get_feed, create_feed,\n update_feed_items, get_id_by_token, update_user_feed,\n subscribe_feed, get_items_by_feed_id)\nfrom utils import get_feed_by_direct_url, feed_to_json\n\n\nclass RegisterForm(Form):\n name = StringField(u'Full name', [validators.required(),\n validators.length(max=255)])\n password = PasswordField(u'Password', [validators.required(),\n validators.length(max=255)])\n\n @asyncio.coroutine\n def save(form, engine):\n ''' P.S. - я в курсе, что проверку наличия существующего пользователя\n надо делать в методе валидации, но пока там слишком много гемора '''\n token = yield from get_token(engine, form.data)\n if not token:\n user = yield from create_user(engine, form.data)\n return {'token': user['token']}\n else:\n return {'error': 'Exists user'}\n\n\nclass BasicAuthForm(Form):\n name = StringField(u'Full name', [validators.required(),\n validators.length(max=255)])\n password = PasswordField(u'Password', [validators.required(),\n validators.length(max=255)])\n\n @asyncio.coroutine\n def save(self, engine):\n ''' Authorization by Login and Password, return auth token '''\n token = yield from get_token(engine, self.data)\n if token:\n return {'token': token}\n else:\n return {'error': 'User not exists'}\n\n\nclass AddFeedForm(Form):\n url = StringField(u'Url or name', [validators.required(),\n validators.length(max=255)])\n\n @asyncio.coroutine\n def save(self, engine):\n ''' Add Feed to database '''\n feed = yield from get_feed_by_direct_url(self.url.data)\n feed = feed_to_json(feed) # SLOW\n items = feed['items']\n del feed['items']\n feed_id = yield from create_feed(engine, feed)\n\n def f(item):\n item['feed_id'] = feed_id\n return item\n for item in map(f, items):\n yield from update_feed_items(engine, item)\n\n\nclass AddFeedToUser(Form):\n url = 
StringField(u'Url or name')\n token = StringField(u'User token')\n\n @asyncio.coroutine\n def save(self, engine):\n ''' Create must records to database and return feed '''\n # Get feed\n feed = yield from get_feed(engine, self.url.data)\n form = AddFeedForm(data={'url': self.url.data})\n if not feed and form.validate():\n yield from form.save(engine)\n feed = dict((yield from get_feed(engine, self.url.data)))\n user_id = yield from get_id_by_token(engine, self.token.data)\n\n sub_id = yield from subscribe_feed(engine, feed['id'], user_id)\n items = yield from get_items_by_feed_id(engine, feed['id'])\n # Write feed items to user database\n feed['items'] = []\n for item in items:\n yield from update_user_feed(engine, item['id'], sub_id)\n tmp = dict(item)\n tmp['published'] = str(tmp['published'])\n feed['items'].append(tmp)\n return feed\n","sub_path":"forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"474083873","text":"def woord_frequentie(zinnen):\n zinnen = zinnen.lower()\n lijst, uitv = zinnen.split(), {}\n for element in lijst:\n if element[-1] == ',' or element[-1] == '.':\n lijst[lijst.index(element)] = element[:-1]\n for element in lijst:\n if element not in uitv:\n uitv[element] = 1\n else:\n uitv[element] += 1\n return uitv\n\ndef woorden_per_frequentie(zinnen):\n lijst, uitv = woord_frequentie(zinnen), {}\n for element in lijst:\n if lijst[element] in uitv:\n uitv[lijst[element]].append(element)\n else:\n uitv[lijst[element]] = [element]\n return uitv\n\ndef meest_gebruikte_woorden(zinnen):\n verzameling = woorden_per_frequentie(zinnen)\n return verzameling[max(verzameling)]\n\n##########################################################################################\"\"\"\n\nprint(woord_frequentie('Dit is een zin. En nog een zin. En een laatste zin.'))\nprint(woorden_per_frequentie('Dit is een zin. En nog een zin. En een laatste zin.'))\nprint(meest_gebruikte_woorden('Dit is een zin. En nog een zin. En een laatste zin.'))\n\n\n","sub_path":"13 - Dictionaries/05 - Woorden tellen.py","file_name":"05 - Woorden tellen.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"127721683","text":"import gym\nimport gym.spaces\nimport numpy as np\n\n\ndef check_policy_init(initial_policy):\n assert type(initial_policy) in (np.ndarray, np.matrix)\n assert np.allclose(initial_policy, 1. / n_actions)\n assert np.allclose(np.sum(initial_policy, axis=1), 1)\n print('Policy initialization: Ok!')\n\n\ndef check_generate_session_func(generation_func):\n s, a, r = generation_func(policy)\n assert type(s) == type(a) == list\n assert len(s) == len(a)\n assert type(r) in [float, np.float]\n print('Session generation function: Ok!')\n\n\ndef check_update_policy_func(update_func):\n elite_states, elite_actions = ([1, 2, 3, 4, 2, 0, 2, 3, 1], [0, 2, 4, 3, 2, 0, 1, 3, 3])\n new_policy = update_func(elite_states, elite_actions, 5, 6)\n\n assert np.isfinite(new_policy).all(), 'Your new policy contains NaNs or +-inf. Make sure you do not divide by zero.'\n assert np.all(new_policy >= 0), 'Your new policy should not have negative action probabilities'\n assert np.allclose(new_policy.sum(axis=-1), 1), \\\n 'Your new policy should be a valid probability distribution over actions'\n\n reference_answer = np.array([\n [1., 0., 0., 0., 0.],\n [0.5, 0., 0., 0.5, 0.],\n [0., 0.33333333, 0.66666667, 0., 0.],\n [0., 0., 0., 0.5, 0.5]])\n assert np.allclose(new_policy[:4, :5], reference_answer)\n print('Update policy function: Ok!')\n\n\ndef check_select_elites_func(select_elite_func):\n states_batch = [[1, 2, 3], [4, 2, 0, 2], [3, 1]]\n actions_batch = [[0, 2, 4], [3, 2, 0, 1], [3, 3]]\n rewards_batch = [3, 4, 5]\n\n test_result_0 = select_elite_func(states_batch, actions_batch, rewards_batch, percentile=0)\n test_result_40 = select_elite_func(states_batch, actions_batch, rewards_batch, percentile=30)\n test_result_90 = select_elite_func(states_batch, actions_batch, rewards_batch, percentile=90)\n test_result_100 = select_elite_func(states_batch, actions_batch, rewards_batch, percentile=100)\n\n assert np.all(test_result_0[0] == [1, 2, 3, 4, 2, 0, 2, 3, 1]) 
and \\\n np.all(test_result_0[1] == [0, 2, 4, 3, 2, 0, 1, 3, 3]), \\\n 'For percentile 0 you should return all states and actions in chronological order'\n\n assert np.all(test_result_40[0] == [4, 2, 0, 2, 3, 1]) and \\\n np.all(test_result_40[1] == [3, 2, 0, 1, 3, 3]), \\\n 'For percentile 30 you should only select states/actions from two first'\n\n assert np.all(test_result_90[0] == [3, 1]) and \\\n np.all(test_result_90[1] == [3, 3]), \\\n 'For percentile 90 you should only select states/actions from one game'\n\n assert np.all(test_result_100[0] == [3, 1]) and \\\n np.all(test_result_100[1] == [3, 3]), \\\n 'Please make sure you use >=, not >. Also double-check how you compute percentile.'\n print('Select elites function : Ok!')\n\n\ndef generate_session(policy, t_max=10 ** 5):\n \"\"\"\n Play game until end or for t_max ticks.\n :param policy: an array of shape [n_states,n_actions] with action probabilities\n :returns: list of states, list of actions and sum of rewards\n \"\"\"\n states, actions = [], []\n total_reward = 0.\n\n s = env.reset()\n\n for t in range(t_max):\n # Choose action from policy\n # You can use np.random.choice() func\n # a = ?\n a = np.random.choice(n_actions, p=policy[s])\n\n # Do action `a` to obtain new_state, reward, is_done,\n new_s, r, is_done, _ = env.step(a)\n\n # Record state, action and add up reward to states, actions and total_reward accordingly.\n # states\n # actions\n # total_reward\n\n states.append(s)\n actions.append(a)\n total_reward += r\n\n # Update s for new cycle iteration\n s = new_s\n\n if is_done:\n break\n\n return states, actions, total_reward\n\n\ndef select_elites(states_batch, actions_batch, rewards_batch, percentile=50):\n \"\"\"\n Select states and actions from games that have rewards >= percentile\n :param states_batch: list of lists of states, states_batch[session_i][t]\n :param actions_batch: list of lists of actions, actions_batch[session_i][t]\n :param rewards_batch: list of rewards, 
rewards_batch[session_i][t]\n\n :returns: elite_states,elite_actions, both 1D lists of states and respective actions from elite sessions\n\n Please return elite states and actions in their original order\n [i.e. sorted by session number and timestep within session]\n\n If you're confused, see examples below. Please don't assume that states are integers (they'll get different later).\n \"\"\"\n\n states_batch, actions_batch, rewards_batch = map(np.array, [states_batch, actions_batch, rewards_batch])\n\n # Compute reward threshold\n reward_threshold = np.percentile(rewards_batch, q=percentile)\n\n # Compute elite states using reward threshold\n elite_states = states_batch[rewards_batch >= reward_threshold]\n\n # Compute elite actions using reward threshold\n elite_actions = actions_batch[rewards_batch >= reward_threshold]\n\n elite_states, elite_actions = map(np.concatenate, [elite_states, elite_actions])\n\n return elite_states, elite_actions\n\n\ndef update_policy(elite_states, elite_actions, n_states, n_actions):\n \"\"\"\n Given old policy and a list of elite states/actions from select_elites,\n return new updated policy where each action probability is proportional to\n\n policy[s_i,a_i] ~ #[occurences of si and ai in elite states/actions]\n\n Don't forget to normalize policy to get valid probabilities and handle 0/0 case.\n In case you never visited a state, set probabilities for all actions to 1./n_actions\n\n :param elite_states: 1D list of states from elite sessions\n :param elite_actions: 1D list of actions from elite sessions\n\n \"\"\"\n new_policy = np.zeros([n_states, n_actions])\n\n # Compute updated policy\n for state, action in zip(elite_states, elite_actions):\n new_policy[state][action] += 1\n\n for i, state in enumerate(new_policy):\n if np.sum(state) > 0:\n new_policy[i] = state / np.sum(state)\n else:\n new_policy[i] = np.ones(n_actions) / n_actions\n\n return new_policy\n\n\ndef rl_cross_entropy():\n # Useful constants, all should be applied 
somewhere in your code\n n_sessions = 200 # generate n_sessions for analysis\n percentile = 50 # take this percentage of 'elite' states/actions\n alpha = 0.3 # alpha-blending for policy updates\n total_iterations = 150\n visualize = True\n log = []\n\n # Create random uniform policy\n policy = np.ones(shape=(n_states, n_actions)) / 6\n check_policy_init(policy)\n\n if visualize:\n import matplotlib.pyplot as plt\n plt.figure(figsize=[10, 4])\n\n for i in range(total_iterations):\n\n # Generate n_sessions for further analysis.\n sessions = [generate_session(policy) for _ in range(n_sessions)]\n\n states_batch, actions_batch, rewards_batch = zip(*sessions)\n\n # Select elite states & actions.\n elite_states, elite_actions = select_elites(states_batch, actions_batch, rewards_batch, percentile=percentile)\n\n # Update policy using elite_states, elite_actions.\n new_policy = update_policy(elite_states, elite_actions, n_states, n_actions)\n\n # Alpha blending of old & new policies for stability.\n policy = alpha * new_policy + (1 - alpha) * policy\n\n # Info for debugging\n mean_reward = np.mean(rewards_batch)\n threshold = np.percentile(rewards_batch, percentile)\n log.append([mean_reward, threshold])\n\n print('Iteration = %.0f, Mean Reward = %.3f, Threshold = %.3f' % (i, mean_reward, threshold))\n\n # Visualize training\n if visualize:\n\n plt.subplot(1, 2, 1)\n plt.plot(list(zip(*log))[0], label='Mean rewards', color='red')\n plt.plot(list(zip(*log))[1], label='Reward thresholds', color='green')\n\n if i == 0:\n plt.legend()\n plt.grid()\n\n plt.subplot(1, 2, 2)\n plt.hist(rewards_batch, range=[-990, +10], color='blue', label='Rewards distribution')\n plt.vlines([np.percentile(rewards_batch, percentile)], [0], [100], label='Percentile', color='red')\n\n plt.legend()\n plt.grid()\n\n plt.pause(0.1)\n plt.cla()\n\n\nif __name__ == '__main__':\n # Create environment 'Taxi-v2'\n env = gym.make('Taxi-v2')\n env.reset()\n env.render()\n\n # Compute number of states for 
this environment\n n_states = env.observation_space.n\n # Compute number of actions for this environment\n n_actions = env.action_space.n\n\n print('States number = %i, Actions number = %i' % (n_states, n_actions))\n\n # Initialize policy - let's say random uniform\n policy = np.ones(shape=(n_states, n_actions)) / n_actions\n check_policy_init(policy)\n\n # Complete generate session function\n check_generate_session_func(generate_session)\n\n # Complete select elites function\n check_select_elites_func(select_elites)\n\n # Complete update policy function\n check_update_policy_func(update_policy)\n\n # Complete rl_cross_entropy()\n rl_cross_entropy()\n\n # Close environment when everything is done\n env.close()\n input(\"Press Enter to continue...\")\n","sub_path":"rl_1/cross_entropy_template.py","file_name":"cross_entropy_template.py","file_ext":"py","file_size_in_byte":9288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"267826104","text":"\"\"\"\nТройка Пифагора - три натуральных числа a < b < c, для которых выполняется равенство\na^2 + b^2 = c^2\n\nНапример, 3^2 + 4^2 = 9 + 16 = 25 = 5^2.\n\nСуществует только одна тройка Пифагора, для которой a + b + c = 1000.\nНайдите произведение abc.\n\"\"\"\n\n\ndef is_pifagor(a, b, c) -> bool:\n if a < b < c and a**2 + b**2 == c**2:\n return True\n return False\n\n\ndef is_thousand(a, b, c) -> bool:\n if a + b + c == 1000:\n return True\n return False\n\n\ndef search():\n for x in range(1000):\n for y in range(1000):\n for z in range(1000):\n if is_thousand(x, y, z):\n if is_pifagor(x, y, z):\n return x, y, z\n\n\nres1, res2, res3 = search()\nprint(res1 * res2 * res3)\n","sub_path":"9.py","file_name":"9.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"234973675","text":"class TreeNode:\n\tdef __init__(self, left=None, right=None, val=\"0\"):\n\t\tself.val = val\n\t\tself.left = left\n\t\tself.right = right\n\n\nclass Solution:\n\tdef testSymmetricBinaryTree(self, root):\n\t\tq = [root]\n\t\tlevels = []\n\t\twhile not all([item.val == \"*\" for item in q]):\n\t\t\tfor i in range((len(q) + 1)//2):\n\t\t\t\tif q[i].val != q[len(q)-i-1].val:\n\t\t\t\t\treturn False\n\t\t\tlevels.append(q)\n\t\t\tnextQ = []\n\t\t\tfor node in q:\n\t\t\t\tnextQ.append(node.left if node.left else TreeNode(val=\"*\"))\n\t\t\t\tnextQ.append(node.right if node.right else TreeNode(val=\"*\"))\n\t\t\tq = nextQ\n\t\treturn True\n\n\tdef testSymmetricBinaryTree2(self, root):\n\t\tdef util(left, right):\n\t\t\tif not left or not right:\n\t\t\t\treturn not left and not right\n\t\t\tif left.val != right.val:\n\t\t\t\treturn False\n\t\t\treturn util(left.right, right.left) and util(left.left, right.right)\n\t\tif not root:\n\t\t\treturn True\n\t\treturn util(root.left, root.right)\n\n\n# test case 1\nr = TreeNode(val = \"314\")\nr.left = TreeNode(val = \"6\")\nr.left.right = TreeNode(val = \"561\")\nr.left.right.right = TreeNode(val = \"3\")\nr.right = TreeNode(val = \"6\")\nr.right.left = TreeNode(val = \"2\")\nr.right.left.left = TreeNode(val = \"3\")\n\nprint(Solution().testSymmetricBinaryTree(r) == False)\n\n# test case 2\nr1 = TreeNode(val = \"314\")\nr1.left = TreeNode(val = \"6\")\nr1.left.right = TreeNode(val = \"2\")\nr1.left.right.right = TreeNode(val = \"3\")\nr1.right = TreeNode(val = \"6\")\nr1.right.left = TreeNode(val = \"2\")\nr1.right.left.left = TreeNode(val = \"3\")\n\nprint(Solution().testSymmetricBinaryTree(r1) == True)\n\n# test with recursive func\nprint(Solution().testSymmetricBinaryTree2(r) == False)\nprint(Solution().testSymmetricBinaryTree2(r1) == 
True)","sub_path":"Tree/test_symmetric_binary_tree.py","file_name":"test_symmetric_binary_tree.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"147943340","text":"#!/usr/bin/env python3\nimport sys\nimport os\nimport csv\nfrom datetime import datetime\nfrom Pegasus.DAX3 import *\nfrom optparse import OptionParser\n\n# API Documentation: http://pegasus.isi.edu/documentation\n\n# Options\nparser = OptionParser(usage=\"usage: %prog [options]\", version=\"%prog 1.0\")\n\nparser.add_option('-d', '--dax', action='store', dest='daxfile', default='1000genome.dax', help='DAX filename')\nparser.add_option('-D', '--dataset', action='store', dest='dataset', default='20130502', help='Dataset folder')\nparser.add_option('-f', '--datafile', action='store', dest='datafile', default='data.csv', help='Data file with list of input data')\nparser.add_option('-b', '--bash-jobs', action='store_true', dest='use_bash', help='Use original bash scripts for individuals, individuals_merge and sifting')\nparser.add_option('-i', '--ind-jobs', action='store', dest='ind_jobs', type=int, default=250,\n help='Number of individuals jobs that will be created for each chromosome \\\n (if larger than the total number of rows in the data for that chromosome, \\\n then it will be set to the number of rows so each job will process one row)')\n\n\n(options, args) = parser.parse_args()\n\n# Sanity check to ensure it is an integer and a positive one\noptions.ind_jobs = max(int(options.ind_jobs), 1)\n\nbase_dir = os.path.abspath('.')\n\n# Create a abstract dag\nts = datetime.utcnow().strftime(\"%Y%m%dT%H%M%SZ\")\n#workflow = ADAG(\"1000genome-%s\" % options.dataset)\nworkflow = ADAG(\"1000genome-%s\" % ts)\n\nsuffix = \".py\"\n\nif options.use_bash:\n suffix = \"\"\n\n# Executables\ne_individuals = Executable('individuals', arch='x86_64', installed=False)\ne_individuals.addPFN(PFN('file://' + base_dir + '/bin/individuals'+suffix, 'local'))\nworkflow.addExecutable(e_individuals)\n\ne_individuals_merge = Executable('individuals_merge', arch='x86_64', installed=False)\ne_individuals_merge.addPFN(\n PFN('file://' + base_dir + 
'/bin/individuals_merge'+suffix, 'local'))\nworkflow.addExecutable(e_individuals_merge)\n\ne_sifting = Executable('sifting', arch='x86_64', installed=False)\ne_sifting.addPFN(PFN('file://' + base_dir + '/bin/sifting'+suffix, 'local'))\nworkflow.addExecutable(e_sifting)\n\ne_mutation = Executable('mutation_overlap', arch='x86_64', installed=False)\ne_mutation.addPFN(PFN('file://' + base_dir + '/bin/mutation_overlap.py', 'local'))\nworkflow.addExecutable(e_mutation)\n\ne_freq = Executable('frequency', arch='x86_64', installed=False)\ne_freq.addPFN(PFN('file://' + base_dir + '/bin/frequency.py', 'local'))\nworkflow.addExecutable(e_freq)\n\nf_columns = File('columns.txt')\nf_columns.addPFN(PFN('file://' + base_dir + '/data/' + options.dataset + '/columns.txt', 'condorpool'))\nworkflow.addFile(f_columns)\n\n# Population Files\npopulations = []\nfor base_file in os.listdir('data/populations'):\n f_pop = File(base_file)\n f_pop.addPFN(PFN('file://' + base_dir + '/data/populations' + '/' + base_file, 'local'))\n workflow.addFile(f_pop)\n populations.append(f_pop)\n\nf = open(options.datafile)\ndatacsv = csv.reader(f)\n#step = 1000\nc_nums = []\nindividuals_files = []\nsifted_files = []\nsifted_jobs = []\nindividuals_merge_jobs = []\n\nfor row in datacsv:\n base_file = row[0]\n threshold = int(row[1])\n # To ensure we do not create too many individuals jobs\n ind_jobs = min(options.ind_jobs, threshold)\n step = threshold // ind_jobs\n rest = threshold % ind_jobs\n if rest != 0: \n sys.exit(\"ERROR: for file {}: required individuals jobs {} does not divide the number of rows {}.\".format(base_file, ind_jobs, threshold))\n\n counter = 1\n\n individuals_jobs = []\n output_files = []\n\n # Individuals Jobs\n f_individuals = File(base_file)\n f_individuals.addPFN(PFN('file://' + base_dir + '/data/' + options.dataset + '/' + base_file, 'condorpool'))\n workflow.addFile(f_individuals)\n\n c_num = base_file[base_file.find('chr')+3:]\n c_num = c_num[0:c_num.find('.')]\n 
c_nums.append(c_num)\n\n while counter < threshold:\n stop = counter + step\n\n out_name = 'chr%sn-%s-%s.tar.gz' % (c_num, counter, stop)\n output_files.append(out_name)\n f_chrn = File(out_name)\n\n j_individuals = Job(name='individuals')\n j_individuals.uses(f_individuals, link=Link.INPUT)\n j_individuals.uses(f_columns, link=Link.INPUT)\n j_individuals.uses(f_chrn, link=Link.OUTPUT, transfer=False, register=False)\n j_individuals.addArguments(f_individuals, c_num, str(counter), str(stop), str(threshold))\n\n individuals_jobs.append(j_individuals)\n workflow.addJob(j_individuals)\n\n counter = counter + step\n\n # merge job\n j_individuals_merge = Job(name='individuals_merge')\n j_individuals_merge.addArguments(c_num)\n\n for out_name in output_files:\n f_chrn = File(out_name)\n j_individuals_merge.uses(f_chrn, link=Link.INPUT)\n j_individuals_merge.addArguments(f_chrn)\n\n individuals_filename = 'chr%sn.tar.gz' % c_num\n f_chrn_merged = File(individuals_filename)\n individuals_files.append(f_chrn_merged)\n j_individuals_merge.uses(f_chrn_merged, link=Link.OUTPUT, transfer=False, register=False)\n\n workflow.addJob(j_individuals_merge)\n individuals_merge_jobs.append(j_individuals_merge)\n\n for job in individuals_jobs:\n workflow.depends(j_individuals_merge, job)\n\n # Sifting Job\n j_sifting = Job(name='sifting')\n \n f_sifting = File(row[2])\n f_sifting.addPFN(PFN('file://' + base_dir +'/data/' + options.dataset + '/sifting/' + row[2], 'condorpool'))\n workflow.addFile(f_sifting)\n\n f_sifted = File('sifted.SIFT.chr%s.txt' % c_num)\n sifted_files.append(f_sifted)\n\n j_sifting = Job(name='sifting')\n j_sifting.uses(f_sifting, link=Link.INPUT)\n j_sifting.uses(f_sifted, link=Link.OUTPUT, transfer=False, register=False)\n j_sifting.addArguments(f_sifting, c_num)\n\n workflow.addJob(j_sifting)\n sifted_jobs.append(j_sifting)\n\n# Analyses jobs\nfor i in range(len(individuals_files)):\n for f_pop in populations:\n # Mutation Overlap Job\n j_mutation = 
Job(name='mutation_overlap')\n j_mutation.addArguments('-c', c_nums[i], '-pop', f_pop)\n j_mutation.uses(individuals_files[i], link=Link.INPUT)\n j_mutation.uses(sifted_files[i], link=Link.INPUT)\n j_mutation.uses(f_pop, link=Link.INPUT)\n j_mutation.uses(f_columns, link=Link.INPUT)\n\n f_mut_out = File('chr%s-%s.tar.gz' % (c_nums[i], f_pop.name))\n j_mutation.uses(f_mut_out, link=Link.OUTPUT, transfer=True, register=False)\n \n workflow.addJob(j_mutation)\n workflow.depends(j_mutation, individuals_merge_jobs[i])\n workflow.depends(j_mutation, sifted_jobs[i])\n\n # Frequency Mutations Overlap Job\n j_freq = Job(name='frequency')\n j_freq.addArguments('-c', c_nums[i], '-pop', f_pop)\n j_freq.uses(individuals_files[i], link=Link.INPUT)\n j_freq.uses(sifted_files[i], link=Link.INPUT)\n j_freq.uses(f_pop, link=Link.INPUT)\n j_freq.uses(f_columns, link=Link.INPUT)\n\n f_freq_out = File('chr%s-%s-freq.tar.gz' % (c_nums[i], f_pop.name))\n j_freq.uses(f_freq_out, link=Link.OUTPUT, transfer=True, register=False)\n\n workflow.addJob(j_freq)\n workflow.depends(j_freq, individuals_merge_jobs[i])\n workflow.depends(j_freq, sifted_jobs[i])\n\n# Write the DAX to file\nf = open(options.daxfile, \"w\")\nworkflow.writeXML(f)\nf.close()\n","sub_path":"daxgen.py","file_name":"daxgen.py","file_ext":"py","file_size_in_byte":7174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"55915388","text":"# Scoutmaster 9001\n# Scoutmaster 9001\n# Match Handler\n# Antares Chen\n\n\n# Imports\nimport database as db\nimport utilities as util\n\n\n\n# Setup and database initialization\ncurrent_year = util.conf_lookup(\"year\")\nmatches = None # Holds matches for all events\nevents = None # Hplds information on events\nevents_collection = None\n\n\n# Library functions\ndef get_match(event, qual, number):\n \"\"\"Get match by event id, qualification, and number.\"\"\"\n # Match ID\n match_id = event + \"_\"\n\n # Convert the qualification into a BlueAlliance value\n if qual == \"final\": match_id += \"f1\"\n elif qual[:6] == \"sfinal\": match_id += \"sf\" + qual[6:]\n elif qual[:6] == \"qfinal\": match_id += \"qf\" + qual[6:]\n elif qual == \"qual\": match_id += \"qm\"\n\n # Add the match number\n match_id += 'm' + str(number)\n\n # Look up and return the match (this may be incorrect)\n matches = database.get_collection(event, events_collection) \n return matches.find_one(key=match_id)\n\ndef get_match_team(event, team):\n \"\"\"Get all the matches that a team participates in at an event\"\"\"\n result = []\n try:\n collection = db.get_collection(event)\n # results = db.get_document(matches, collection, {\"\"}) Incorrect key to\n # search by! \n except db.DatabaseNotFoundException:\n print(\"Warning: team or event not found!\")\n return result\n\ndef get_match_all(event):\n \"\"\"Get all the matches from an event\"\"\"\n result = []\n try:\n collection = db.get_collection(event)\n results = db.get_document(matches, collection, {})\n except db.DatabaseNotFoundException:\n print(\"Warning: event not found!\")\n return result\n\ndef refresh_events(year=current_year):\n \"\"\"Refreshes all the events in that year. 
This function will always\n force an update\"\"\"\n print(\"Refreshing events database!\")\n event_list = uti.get_events(year, force=True)\n for event in event_list:\n # Delete the alliances information in event json and add to collection\n del(event[\"alliances\"])\n db.update_document(events, events_collection, \"key\", event)\n\ndef refresh_matches(force, events=[], year=current_year):\n \"\"\"Refreshes all the matches in given by the events field.\"\"\"\n print(\"Refreshing matches database!\")\n # If no events are predefined then refresh all events.\n if events == []:\n # Get the list of events\n events = [event[\"key\"] for event in util.get_events(year, force=True)]\n\n # Iterate through all event keys\n for event_key in events:\n try:\n # Check if the collection\n # print(\"\\nFind the collection: \" + event_key)\n event_collection = db.get_collection(matches, event_key).name\n except db.CollectionNotFoundException:\n # print(\"Collection Not found creating new one!\")\n # Add the collection and force all events to be refreshed\n event_collection = db.add_collection(matches, event_key).name\n try:\n match_list = util.get_matches(event_key, force=force)\n # print(\"Adding matches!\")\n for match in match_list:\n db.update_document(matches, event_collection, \"key\", match)\n except util.UpdateNotFoundException:\n print(\"No update found!\")\n\n\n# Main\ndef init():\n \"\"\"Initialize the event handler\"\"\"\n # Create the event table if it doesn't exist\n global matches\n global events\n global events_collection\n \n try:\n # Check if the database exists\n matches = db.get_database(\"matches\").name\n events = db.get_database(\"events\").name\n events_collection = db.get_collection(events, \"events\").name\n except db.DatabaseNotFoundException:\n # Create the matches database\n matches = db.add_database(\"matches\").name\n # Create the events database\n events = db.add_database(\"events\").name\n events_collection = db.add_collection(events, 
\"events\").name\n except db.CollectionNotFoundException:\n # Create the events table in the events database\n events_collection = db.add_collection(events, \"events\").name\n\n #refresh_events()\n #refresh_matches(True)\n","sub_path":"server/handlers/match.py","file_name":"match.py","file_ext":"py","file_size_in_byte":4171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"222624616","text":"import telegram\nimport json\n\n\nwith open(\"lib/settings.json\", \"r\") as settings_json:\n settings = json.load(settings_json)\n notification_settings = settings[\"NotificationSettings\"][\"Telegram\"]\n\n\n'''\nBasic Notification\n- acc_name: e.g. \"admin\"\n- message: message to send\n'''\nclass Notification:\n def __init__(self, acc_name, message):\n self.acc_name = acc_name\n self.message = message\n\n\n'''\nSend Notifications via Telegram Bot\n'''\nclass TelegramNotification(Notification):\n def __init__(self, acc_name, message):\n super().__init__(acc_name, message)\n self.token = notification_settings[\"Bot_API_Key\"]\n\n def send_message(self):\n if notification_settings[\"Get_Notification\"]:\n tgb = telegram.Bot(token=self.token)\n try:\n tgb.send_message(\n notification_settings[\"Chat_ID\"],\n self.message,\n parse_mode='Markdown'\n )\n # any more elegant possibility to catch all network related errors?\n except Exception as e:\n print(f\"Connection Error\\n{e}\")","sub_path":"lib/Notification.py","file_name":"Notification.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"160626870","text":"import os\nimport subprocess\n\n#this function splits a table into two tables\ndef splitTables(tableFile, splitFile1, splitFile2, outputFolder):\n with open(tableFile) as f:\n lines = f.readlines()\n data = []\n for i in range(0, len(lines)):\n line = lines[i].split()\n data.append(line) \n #split the table into two tables\n table1 = []\n table2 = [] \n dict1 = {} \n dict2 = {}\n for i in range(2, len(data)):\n line = data[i]\n line1 = []\n line2 = [] \n line1.append(line[0])\n line1.append(line[1])\n line1.append(line[2])\n someString = ''.join(line1)\n if(someString not in dict1):\n dict1[someString] = None\n line1.append(line[3])\n table1.append('\\t'.join(line1))\n line2.append(line[0])\n line2.append(line[4])\n line2.append(line[5])\n if(len(line) > 6):\n line2.append(line[6])\n someString = ''.join(line2)\n if(someString not in dict2):\n dict2[someString] = None\n if(len(line2) > 7):\n line2.append(line[7])\n table2.append('\\t'.join(line2))\n #create the output files\n fromFileName = splitFile1.split('.')[0] + '_' + splitFile2\n printTable(table1, splitFile1, fromFileName, outputFolder)\n printTable(table2, splitFile2, fromFileName, outputFolder)\n#this function outputs a table after splitting\ndef printTable(table, splitFile, fromFileName, outputFolder):\n splitFile = outputFolder + '/' + splitFile.split('.')[0] + '_from_' + fromFileName\n resultFile = open(splitFile, 'w')\n for i in table:\n print>>resultFile, i\n\n#first, get the path for the three folders and for the PeakAnnotator\ninputFolder1 = raw_input(\"Enter the name of the first input folder: \")\ninputFolder2 = raw_input(\"Enter the name of the second input folder: \")\noutputFolder = raw_input(\"Enter the name of the output folder: \")\nprint(\"You must enter the path for the Peak Annotator; it is something like `/home/username/PeakAnnotator/PeakAnnotator_Linux64/\")\npeakAnnotatorPath = raw_input(\"Enter the path for the Peak Annotator: \")\npeakAnnotatorPath += 
\"PeakAnnotator\"\n#parse the folders\nfor file1 in os.listdir(inputFolder1):\n inputFile1 = inputFolder1 + '/' + file1\n for file2 in os.listdir(inputFolder2):\n inputFile2 = inputFolder2 + '/' + file2\n outputFile = outputFolder + '/' + file1.split('.')[0] + '_' + file2.split('.')[0]\n #replace with the path to your PeakAnnotator \n subprocess.call([peakAnnotatorPath, \"ODS\", inputFile1, inputFile2, outputFile])\n splitTables(outputFile, file1, file2, outputFolder)\n\n\n ","sub_path":"tables/UsingPeakAnnotator.py","file_name":"UsingPeakAnnotator.py","file_ext":"py","file_size_in_byte":2688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"202653520","text":"import os\nimport sys\nimport json\nimport pickle\n\nimport nltk\nimport tqdm\n# from torchvision import transforms\n# from PIL import Image\n# from transforms import Scale\n\n\"\"\"\nOnly keep questions with certain keywords\n\"\"\"\nkeywords = [\"left\", \"right\"]\n\n\ndef process_question(root, split, word_dic=None, answer_dic=None):\n\n if word_dic is None:\n word_dic = {}\n\n if answer_dic is None:\n answer_dic = {}\n\n with open(os.path.join(root, 'questions',\n f'CLEVR_{split}_questions.json')) as f:\n data = json.load(f)\n\n result = []\n word_index = 1\n answer_index = 0\n\n for question in tqdm.tqdm(data['questions']):\n words = nltk.word_tokenize(question['question'])\n question_token = []\n\n contains_keywords = False\n for kw in keywords:\n if kw in words:\n contains_keywords = True\n break\n\n if not contains_keywords:\n continue\n\n for word in words:\n try:\n question_token.append(word_dic[word])\n\n except:\n question_token.append(word_index)\n word_dic[word] = word_index\n word_index += 1\n\n answer_word = question['answer']\n\n try:\n answer = answer_dic[answer_word]\n\n except:\n answer = answer_index\n answer_dic[answer_word] = answer_index\n answer_index += 1\n\n result.append((question['image_filename'], question_token, answer,\n question['question_family_index']))\n\n if not os.path.exists('data/keywords_only'):\n os.makedirs('data/keywords_only')\n\n print(\"Number of exmaples in {} split with keywords {}:\\t{}\".format(split, keywords, len(result)))\n with open(f'data/keywords_only/{split}.pkl', 'wb') as f:\n pickle.dump(result, f)\n\n return word_dic, answer_dic\n\n\nif __name__ == '__main__':\n\n if len(sys.argv) != 2:\n print(\"Usage: python ... 
[CLEVR-dir]\", file=sys.stderr)\n exit(1)\n\n root = sys.argv[1]\n\n word_dic, answer_dic = process_question(root, 'train')\n process_question(root, 'val', word_dic, answer_dic)\n\n with open('data/dic.pkl', 'wb') as f:\n pickle.dump({'word_dic': word_dic, 'answer_dic': answer_dic}, f)","sub_path":"constraints/keyword_preprocess.py","file_name":"keyword_preprocess.py","file_ext":"py","file_size_in_byte":2279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"441301875","text":"#!/usr/bin/python3.6\n\nfrom getpass import getuser\nfrom json import load\nfrom time import sleep\nfrom sys import argv, stdout\nfrom subprocess import getoutput\nfrom platform import system\n\n'''\n Finding the correct json file depending on the OS\n GPMDP only supported on Linux, MacOS and windows\n'''\n\n\ndef json_location(user):\n dir1 = 'Google Play Music Desktop Player'\n dir2 = 'json_store'\n filename = 'playback.json'\n\n if system() == 'Darwin':\n return \"/Users/{0}/Library/Application Support/{1}/{2}/{3}\".format(user, dir1, dir2, filename)\n elif system() == 'Linux':\n return \"/home/{0}/.config/{1}/{2}/{3}\".format(user, dir1, dir2, filename)\n # elif system() == 'Windows':\n # return \"%APPDATA%\\\\{}\\\\{}\\\\{}\".format(dir1, dir2, filename)\n\n\n'''\n converts time in human readable time\n'''\n\n\ndef human_time(time_in_ms):\n minutes = int(time_in_ms / 60000)\n seconds = int((time_in_ms - (int(minutes) * 60000)) / 1000)\n\n if seconds < 10:\n time_for_humanz = str(minutes) + ':0' + str(seconds)\n else:\n time_for_humanz = str(minutes) + ':' + str(seconds)\n\n return time_for_humanz\n\n\n'''\n Run program only if google play music is up.\n'''\n\n\ndef gpm_run_check():\n command = \"ps -Aef | grep -i \\\"Google Play Music Desktop Player\\\" | grep -v grep | wc -l\"\n if int(getoutput(command)) > 0:\n return True\n else:\n return False\n\n\ndef format_song_info(title, artist, ablum):\n if title.find('|') != -1:\n s = list(title)\n s[title.find('|')] = \"-\"\n title = ''.join(s)\n return \"{}, {}, {}\".format(title, artist, ablum)\n\n\ndef string_format(icon, song_info, time):\n return \" {}{} {}\".format(icon, song_info, time)\n\n\ndef show_icon():\n if \"noicon\" in argv:\n return \"\"\n else:\n return \" \"\n\n\ndef format_time(current, total):\n time = \"- {}/{} \".format(current, total)\n if \"shorttime\" in argv:\n time = \"- {} \".format(current)\n elif \"notime\" in argv:\n time = \"\"\n\n return 
time\n\n\n'''\n print continuously if \"clear or \"rotate\" option is set\n \"rotate\" option will print song info, rotating from right to left\n \"clear\" clears the terminal\n'''\n\n\nclass CurrentSong:\n def __init__(self):\n self.title = ''\n self.artist = ''\n self.album = ''\n\n\ndef cont_print():\n json_info = json_location(getuser())\n\n current_song = CurrentSong()\n i = 0\n\n while True:\n with open(json_info, 'r') as json_file:\n info = load(json_file)\n\n if info['song']['title'] is not None:\n # reset counter if song changed\n if info['song']['title'] != current_song.title or \\\n info['song']['artist'] != current_song.artist or info['song']['album'] != current_song.album:\n current_song.title = info['song']['title']\n current_song.artist = info['song']['artist']\n current_song.album = info['song']['album']\n i = 0\n\n song_info = format_song_info(info['song']['title'], info['song']['artist'], info['song']['album'])\n\n if \"rotate\" in argv:\n song_info = \"{} | {}\".format(song_info[i:], song_info[:i])\n\n if i < len(song_info):\n i += 1\n else:\n i = 0\n\n if \"clear\" in argv:\n print(\"\\033[H\\033[J\")\n\n if \"short\" in argv:\n song_info = song_info[0:20]\n\n icon = show_icon()\n\n time = format_time(human_time(info['time']['current']), human_time(info['time']['total']))\n\n print(string_format(icon, song_info, time))\n stdout.flush()\n sleep(1)\n\n\ndef single_print():\n json_info = json_location(getuser())\n\n with open(json_info, 'r') as json_file:\n info = load(json_file)\n\n song_info = format_song_info(info['song']['title'], info['song']['artist'], info['song']['album'])\n\n time = format_time(human_time(info['time']['current']), human_time(info['time']['total']))\n icon = show_icon()\n\n if \"short\" in argv:\n song_info = song_info[0:20]\n time = \"\"\n\n icon = show_icon()\n\n if info['song']['title'] is not None:\n print(string_format(icon, song_info, time))\n\n\ndef main():\n if \"cont\" in argv or \"clear\" in argv or \"rotate\" in 
argv:\n cont_print()\n else:\n single_print()\n\n\nif __name__ == '__main__':\n if gpm_run_check():\n main()\n else:\n print(\" \")\n","sub_path":".config/polybar/gpmdpinfo.py","file_name":"gpmdpinfo.py","file_ext":"py","file_size_in_byte":4473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"563888536","text":"# Author: Christian Brodbeck \n\"\"\"Data containers and basic operations\n\nData Representation\n===================\n\nData is stored in three main vessels:\n\n:class:`Factor`:\n stores categorical data\n:class:`Var`:\n stores numeric data\n:class:`NDVar`:\n stores numerical data where each cell contains an array if data (e.g., EEG\n or MEG data)\n\n\nmanaged by\n\n * Dataset\n\n\nEffect types\n------------\n\nThese are elementary effects in a Model, and identified by :func:`is_effect`\n\n - _Effect\n - Factor\n - Interaction\n - NestedEffect\n - Var\n - NonBasicEffect\n\n\n\"\"\"\nfrom collections import Iterator, OrderedDict, Sequence\nfrom copy import deepcopy\nfrom functools import partial\nimport itertools\nfrom itertools import chain\nfrom keyword import iskeyword\nfrom math import ceil, log\nfrom numbers import Integral, Number\nfrom pathlib import Path\nimport pickle\nimport operator\nimport os\nimport re\nimport string\n\nfrom matplotlib.ticker import (\n FixedLocator, FormatStrFormatter, FuncFormatter, IndexFormatter)\nimport mne\nfrom mne.source_space import label_src_vertno_sel\nfrom nibabel.freesurfer import read_annot\nimport numpy as np\nfrom numpy import newaxis\nimport scipy.signal\nimport scipy.stats\nfrom scipy.linalg import inv, norm\nfrom scipy.optimize import leastsq\nfrom scipy.spatial import ConvexHull\nfrom scipy.spatial.distance import cdist, pdist, squareform\n\nfrom . 
import fmtxt, _info\nfrom ._exceptions import DimensionMismatchError, IncompleteModel\nfrom ._data_opt import gaussian_smoother\nfrom ._utils import (\n intervals, ui, LazyProperty, n_decimals, natsorted)\nfrom ._utils.numpy_utils import (\n INT_TYPES, FULL_SLICE, FULL_AXIS_SLICE,\n apply_numpy_index, digitize_index, digitize_slice_endpoint,\n index_length, index_to_int_array, slice_to_arange)\nfrom .mne_fixes import MNE_EPOCHS, MNE_EVOKED, MNE_RAW, MNE_LABEL\nfrom functools import reduce\n\n\npreferences = dict(fullrepr=False, # whether to display full arrays/dicts in __repr__ methods\n repr_len=5, # length of repr\n dataset_str_n_cases=500,\n var_repr_n_cases=100,\n factor_repr_n_cases=100,\n bool_fmt='%s',\n float_fmt='%.6g',\n int_fmt='%s',\n factor_repr_use_labels=True,\n short_repr=True, # \"A % B\" vs \"Interaction(A, B)\"\n )\n\n\nSRC_RE = re.compile('^(ico|vol)-(\\d+)(?:-(\\w+))?$')\nUNNAMED = '>'\nLIST_INDEX_TYPES = (*INT_TYPES, slice)\n_pickled_ds_wildcard = (\"Pickled Dataset (*.pickled)\", '*.pickled')\n_tex_wildcard = (\"TeX (*.tex)\", '*.tex')\n_tsv_wildcard = (\"Plain Text Tab Separated Values (*.txt)\", '*.txt')\n_txt_wildcard = (\"Plain Text (*.txt)\", '*.txt')\nEVAL_CONTEXT = vars(np) # updated at end of file\n\n\ndef _effect_eye(n):\n \"\"\"Effect coding for n categories. 
E.g.::\n\n Examples\n --------\n >>> _effect_eye(4)\n array([[ 1, 0, 0],\n [ 0, 1, 0],\n [ 0, 0, 1],\n [-1, -1, -1]])\n \"\"\"\n x = np.zeros((n, n - 1))\n np.fill_diagonal(x, 1.)\n x[-1] = -1.\n return x\n\n\ndef _effect_interaction(a, b):\n k = a.shape[1]\n out = [a[:, i, None] * b for i in range(k)]\n return np.hstack(out)\n\n\ndef cellname(cell, delim=' '):\n \"\"\"Consistent ``str`` representation for cells.\n\n * for Factor cells: the cell (str)\n * for Interaction cell: delim.join(cell).\n\n \"\"\"\n if isinstance(cell, str):\n return cell\n elif isinstance(cell, (list, tuple)):\n return delim.join(cell)\n elif cell is None:\n return ''\n else:\n return str(cell)\n\n\ndef longname(x):\n if getattr(x, 'name', None) is not None:\n return x.name\n elif isnumeric(x) and 'longname' in x.info:\n return x.info['longname']\n elif np.isscalar(x):\n return repr(x)\n return ''\n\n\ndef nice_label(x, labels={}):\n if x.name in labels:\n return labels[x.name]\n elif 'label' in x.info:\n return x.info['label']\n else:\n return longname(x)\n\n\ndef array_repr(a):\n \"Concise array repr where class does not matter\"\n if a.ndim == 0:\n return str(a)\n elif a.ndim == 1:\n return '[%s]' % ', '.join(map(str, a))\n else:\n raise RuntimeError(\"Array with ndim > 1\")\n\n\ndef dataobj_repr(obj):\n \"\"\"Describe data-objects as parts of __repr__\"\"\"\n if obj is None:\n return 'None'\n elif isdataobject(obj) and obj.name is not None:\n return obj.name\n else:\n return '<%s>' % obj.__class__.__name__\n\n\ndef rank(x, tol=1e-8):\n \"\"\"Rank of a matrix\n\n http://mail.scipy.org/pipermail/numpy-discussion/2008-February/031218.html\n\n \"\"\"\n s = np.linalg.svd(x, compute_uv=0)\n return np.sum(np.where(s > tol, 1, 0))\n\n\ndef check_length(objs, n=None):\n for obj in objs:\n if obj is None:\n pass\n elif n is None:\n n = len(obj)\n elif n != len(obj):\n err = (\"%r has wrong length: %i (%i needed).\" %\n (obj.name, len(obj), n))\n raise ValueError(err)\n\n\ndef isbalanced(x):\n 
\"\"\"Determine whether x is balanced\n\n Parameters\n ----------\n x : categorial\n Categorial Model, Factor or Interaction.\n \"\"\"\n if isinstance(x, Model):\n return all(isbalanced(e) for e in x.effects)\n else:\n return len({np.sum(x == c) for c in x.cells}) <= 1\n\n\ndef iscategorial(x):\n \"Determine wether x is categorial\"\n if isinstance(x, (Factor, NestedEffect)):\n return True\n elif isinstance(x, Interaction):\n return x.is_categorial\n elif isinstance(x, Model):\n return all(iscategorial(e) for e in x.effects)\n else:\n return False\n\n\n# type checks\n#############\n# _Effect -> Factor, Interaction, NestedEffect\ndef isdatacontainer(x):\n \"Determine whether x is a data-object, including Datasets\"\n return isinstance(x, (Datalist, Dataset, Model, NDVar, Var, _Effect,\n NonbasicEffect))\n\n\ndef isdataobject(x):\n \"Determine whether x is a data-object, excluding Datasets\"\n return isinstance(x, (Datalist, Model, NDVar, Var, _Effect, NonbasicEffect))\n\n\ndef isdatalist(x, contains=None, test_all=True):\n \"\"\"Test whether x is a Datalist instance\n\n Parameters\n ----------\n x : object\n Object to test.\n contains : None | class\n Test whether the content is instances of a specific class.\n test_all : bool\n If contains is provided, test all items' class (otherwise just test the\n first item).\n \"\"\"\n is_dl = isinstance(x, Datalist)\n if is_dl and contains:\n if test_all:\n is_dl = all(isinstance(item, contains) for item in x)\n else:\n is_dl = isinstance(x[0], contains)\n return is_dl\n\n\ndef iseffect(x):\n return isinstance(x, (Var, _Effect, NonbasicEffect))\n\n\ndef isnestedin(item, item2):\n \"Determine whether ``item`` is nested in ``item2``\"\n if isinstance(item, NestedEffect):\n return item2 in find_factors(item.nestedin)\n else:\n return False\n\n\ndef partially_nested(item1, item2):\n \"\"\"Determine whether there is a complete or partial nesting relationship\n\n Used to determine whether a model should include an interaction 
effect\n between item1 and item2.\n \"\"\"\n if isinstance(item2, NestedEffect):\n if isinstance(item1, NestedEffect):\n raise NotImplementedError(\n \"Interaction between two nested effects is not implemented. \"\n \"Please specify model explicitly\")\n return partially_nested(item2, item1)\n elif isinstance(item1, NestedEffect):\n nestedin = find_factors(item1.nestedin)\n return any(e in nestedin for e in find_factors(item2))\n else:\n return False\n\n\ndef isnumeric(x):\n \"Determine wether x is numeric (a Var or an NDVar)\"\n return isinstance(x, (NDVar, Var))\n\n\ndef isuv(x, interaction=False):\n \"Determine whether x is univariate (a Var or a Factor)\"\n if interaction:\n return isinstance(x, (Factor, Var, Interaction))\n else:\n return isinstance(x, (Factor, Var))\n\n\ndef isboolvar(x):\n \"Determine whether x is a Var whose data type is boolean\"\n return isinstance(x, Var) and x.x.dtype.kind == 'b'\n\n\ndef isintvar(x):\n \"Determine whether x is a Var whose data type is integer\"\n return isinstance(x, Var) and x.x.dtype.kind in 'iu'\n\n\ndef is_higher_order_effect(e1, e0):\n \"\"\"Determine whether e1 is a higher order term of e0\n\n Return True if e1 is a higher order term of e0 (i.e., if all factors in\n e0 are contained in e1).\n\n Parameters\n ----------\n e1, e0 : effects\n The effects to compare.\n \"\"\"\n f1s = find_factors(e1)\n return all(f in f1s for f in find_factors(e0))\n\n\ndef empty_cells(x):\n return [cell for cell in x.cells if not np.any(x == cell)]\n\n\ndef assert_has_no_empty_cells(x):\n \"\"\"Raise a ValueError iff a categorial has one or more empty cells\"\"\"\n if isinstance(x, Factor):\n return\n elif isinstance(x, Interaction):\n if not x.is_categorial:\n return\n empty = empty_cells(x)\n if empty:\n raise NotImplementedError(\"%s contains empty cells: %s\" %\n (dataobj_repr(x), ', '.join(empty)))\n elif isinstance(x, Model):\n empty = []\n for e in x.effects:\n if isinstance(e, Interaction) and e.is_categorial:\n empty_in_e = 
empty_cells(e)\n if empty_in_e:\n empty.append((dataobj_repr(e), ', '.join(empty_in_e)))\n if empty:\n items = ['%s (%s)' % pair for pair in empty]\n raise NotImplementedError(\"%s contains empty cells in %s\" %\n (dataobj_repr(x), ' and '.join(items)))\n else:\n raise TypeError(\"Need categorial (got %s)\" % repr(x))\n\n\ndef hasrandom(x):\n \"\"\"True if x is or contains a random effect, False otherwise\"\"\"\n if isinstance(x, (Factor, NestedEffect)):\n return x.random\n elif isinstance(x, Interaction):\n for e in x.base:\n if isinstance(e, Factor) and e.random:\n return True\n elif isinstance(x, Model):\n return any(hasrandom(e) for e in x.effects)\n return False\n\n\ndef as_case_identifier(x, ds=None):\n \"Coerce input to a variable that can identify each of its cases\"\n if isinstance(x, str):\n if ds is None:\n raise TypeError(\"Parameter was specified as string, but no Dataset \"\n \"was specified\")\n x = ds.eval(x)\n\n if isinstance(x, Var):\n n = len(x.values)\n elif isinstance(x, Factor):\n n = x.n_cells\n elif isinstance(x, Interaction):\n n = len(set(x))\n else:\n raise TypeError(\"Need a Var, Factor or Interaction to identify cases, \"\n \"got %s\" % repr(x))\n\n if n < len(x):\n raise ValueError(\"%s can not serve as a case identifier because it has \"\n \"at least one non-unique value\" % x.name.capitalize())\n\n return x\n\n\ndef asarray(x, kind=None):\n \"Coerce input to array\"\n if isinstance(x, Var):\n x = x.x\n else:\n x = np.asarray(x)\n\n if kind is not None and x.dtype.kind not in kind:\n # boolean->int conversion\n if 'i' in kind and x.dtype.kind == 'b':\n x = x.astype(int)\n else:\n raise TypeError(\"Expected array of kind %r, got %r (%s)\"\n % (kind, x.dtype.kind, x.dtype))\n return x\n\n\ndef ascategorial(x, sub=None, ds=None, n=None):\n if isinstance(x, str):\n if ds is None:\n err = (\"Parameter was specified as string, but no Dataset was \"\n \"specified\")\n raise TypeError(err)\n x = ds.eval(x)\n\n if iscategorial(x):\n pass\n 
elif isinstance(x, Interaction):\n x = Interaction([e if isinstance(e, Factor) else e.as_factor() for\n e in x.base])\n else:\n x = asfactor(x)\n\n if sub is not None:\n x = x[sub]\n\n if n is not None and len(x) != n:\n raise ValueError(\"Arguments have different length\")\n\n return x\n\n\ndef asdataobject(x, sub=None, ds=None, n=None):\n \"Convert to any data object or numpy array.\"\n if isinstance(x, str):\n if ds is None:\n err = (\"Data object was specified as string, but no Dataset was \"\n \"specified\")\n raise TypeError(err)\n x = ds.eval(x)\n\n if isdataobject(x):\n pass\n elif isinstance(x, np.ndarray):\n pass\n else:\n x = Datalist(x)\n\n if sub is not None:\n x = x[sub]\n\n if n is not None and len(x) != n:\n raise ValueError(\"Arguments have different length\")\n\n return x\n\n\ndef asepochs(x, sub=None, ds=None, n=None):\n \"Convert to mne Epochs object\"\n if isinstance(x, str):\n if ds is None:\n err = (\"Epochs object was specified as string, but no Dataset was \"\n \"specified\")\n raise TypeError(err)\n x = ds.eval(x)\n\n if isinstance(x, MNE_EPOCHS):\n pass\n else:\n raise TypeError(\"Need mne Epochs object, got %s\" % repr(x))\n\n if sub is not None:\n x = x[sub]\n\n if n is not None and len(x) != n:\n raise ValueError(\"Arguments have different length\")\n\n return x\n\n\ndef asfactor(x, sub=None, ds=None, n=None):\n if isinstance(x, str):\n if ds is None:\n err = (\"Factor was specified as string, but no Dataset was \"\n \"specified\")\n raise TypeError(err)\n x = ds.eval(x)\n\n if isinstance(x, Factor):\n pass\n elif hasattr(x, 'as_factor'):\n x = x.as_factor(name=x.name)\n else:\n x = Factor(x)\n\n if sub is not None:\n x = x[sub]\n\n if n is not None and len(x) != n:\n raise ValueError(\"Arguments have different length\")\n\n return x\n\n\ndef asmodel(x, sub=None, ds=None, n=None):\n if isinstance(x, str):\n if ds is None:\n raise TypeError(\"Model was specified as string, but no Dataset was \"\n \"specified\")\n elif sub is not None:\n 
# need to sub dataset before building model to get right number of\n # df\n names = set(re.findall('\\w+', x))\n if isinstance(sub, str):\n names.update(re.findall('\\w+', sub))\n names.intersection_update(ds)\n ds = ds[names].sub(sub)\n sub = None\n x = ds.eval(x)\n\n if isinstance(x, Model):\n pass\n else:\n x = Model(x)\n\n if sub is not None:\n x = x[sub]\n\n if n is not None and len(x) != n:\n raise ValueError(\"Arguments have different length\")\n\n return x\n\n\ndef asndvar(x, sub=None, ds=None, n=None, dtype=None):\n if isinstance(x, str):\n if ds is None:\n err = (\"Ndvar was specified as string, but no Dataset was \"\n \"specified\")\n raise TypeError(err)\n x = ds.eval(x)\n\n # convert MNE objects\n if isinstance(x, NDVar):\n pass\n elif isinstance(x, MNE_EPOCHS):\n from .load.fiff import epochs_ndvar\n x = epochs_ndvar(x)\n elif isinstance(x, MNE_EVOKED):\n from .load.fiff import evoked_ndvar\n x = evoked_ndvar(x)\n elif isinstance(x, MNE_RAW):\n from .load.fiff import raw_ndvar\n x = raw_ndvar(x)\n elif isinstance(x, list):\n if isinstance(x[0], MNE_EVOKED):\n from .load.fiff import evoked_ndvar\n x = evoked_ndvar(x)\n else:\n x = combine(map(asndvar, x))\n else:\n raise TypeError(\"NDVar required, got %s\" % repr(x))\n\n if sub is not None:\n x = x[sub]\n\n if n is not None and len(x) != n:\n raise ValueError(\"Arguments have different length\")\n\n if dtype is not None and x.x.dtype != dtype:\n x = x.astype(dtype)\n\n return x\n\n\ndef asnumeric(x, sub=None, ds=None, n=None):\n \"Var, NDVar\"\n if isinstance(x, str):\n if ds is None:\n raise TypeError(\"Numeric argument was specified as string, but no \"\n \"Dataset was specified\")\n x = ds.eval(x)\n\n if not isnumeric(x):\n raise TypeError(\"Numeric argument required (Var or NDVar), got %s\" % repr(x))\n\n if sub is not None:\n x = x[sub]\n\n if n is not None and len(x) != n:\n raise ValueError(\"Arguments have different length\")\n\n return x\n\n\ndef assub(sub, ds=None):\n \"Interpret the sub 
argument.\"\n if sub is None:\n return None\n elif isinstance(sub, str):\n if ds is None:\n raise TypeError(\"the sub parameter was specified as string, but no \"\n \"Dataset was specified\")\n sub = ds.eval(sub)\n\n if isinstance(sub, Var):\n return sub.x\n elif not isinstance(sub, np.ndarray):\n raise TypeError(\"sub parameters needs to be Var or array, got %r\" % (sub,))\n return sub\n\n\ndef asuv(x, sub=None, ds=None, n=None, interaction=False):\n \"Coerce to Var or Factor\"\n if isinstance(x, str):\n if ds is None:\n raise TypeError(\"Parameter was specified as string, but no Dataset \"\n \"was specified\")\n x = ds.eval(x)\n\n if isuv(x, interaction):\n pass\n elif all(isinstance(v, str) for v in x):\n x = Factor(x)\n else:\n x = Var(x)\n\n if sub is not None:\n x = x[sub]\n\n if n is not None and len(x) != n:\n raise ValueError(\"Arguments have different length\")\n\n return x\n\n\ndef asvar(x, sub=None, ds=None, n=None):\n \"Coerce to Var\"\n if isinstance(x, str):\n if ds is None:\n raise TypeError(\"Var was specified as string, but no Dataset was \"\n \"specified\")\n x = ds.eval(x)\n\n if not isinstance(x, Var):\n x = Var(x)\n\n if sub is not None:\n x = x[sub]\n\n if n is not None and len(x) != n:\n raise ValueError(\"Arguments have different length\")\n\n return x\n\n\ndef index_ndim(index):\n \"\"\"Determine the dimensionality of an index\n\n Parameters\n ----------\n index : numpy_index\n Any valid numpy index.\n\n Returns\n -------\n ndim : int\n Number of index dimensions: 0 for an index to a single element, 1 for\n an index to a sequence.\n \"\"\"\n if isinstance(index, slice):\n return 1\n elif isinstance(index, Integral):\n return 0\n elif np.iterable(index):\n return 1\n else:\n raise TypeError(\"unknown index type: %s\" % repr(index))\n\n\ndef _empty_like(obj, n=None, name=None):\n \"Create an empty object of the same type as obj\"\n n = n or len(obj)\n name = name or obj.name\n if isinstance(obj, Factor):\n return Factor([''], repeat=n, 
name=name)\n elif isinstance(obj, Var):\n return Var(np.empty(n) * np.NaN, name=name)\n elif isinstance(obj, NDVar):\n shape = (n,) + obj.shape[1:]\n return NDVar(np.empty(shape) * np.NaN, dims=obj.dims, name=name)\n elif isdatalist(obj):\n return Datalist([None] * n, name, obj._fmt)\n else:\n err = \"Type not supported: %s\" % type(obj)\n raise TypeError(err)\n\n\ndef all_equal(a, b, nan_equal=False):\n \"\"\"Test two data-objects for equality\n\n Equivalent to ``numpy.all(a == b)`` but faster and capable of treating nans\n as equal.\n\n Paramaters\n ----------\n a : Var | Factor | NDVar\n Variable to compare.\n a : Var | Factor | NDVar\n Variable to compare.\n nan_equal : bool\n Treat ``nan == nan`` as ``True``.\n\n Returns\n -------\n all_equal : bool\n True if all entries in\n \"\"\"\n if a.__class__ is not b.__class__:\n raise TypeError(\"Comparing %s with %s\" % (a.__class__, b.__class__))\n elif len(a) != len(b):\n raise ValueError(\"a and b have different lengths (%i vs %i)\" %\n (len(a), len(b)))\n elif isinstance(a, Factor):\n if a._codes == b._codes:\n return np.array_equal(a.x, b.x)\n else:\n return np.all(a == b)\n elif isinstance(a, (Var, NDVar)):\n if not nan_equal or a.x.dtype.kind in 'ib': # can't be nan\n return np.array_equal(a.x, b.x)\n else:\n mask = np.isnan(a.x)\n buf = np.isnan(b.x)\n mask &= buf\n np.equal(a.x, b.x, buf)\n buf |= mask\n return buf.all()\n else:\n raise TypeError(\"Comparison for %s is not implemented\" % a.__class__)\n\n\n# --- sorting ---\n\ndef align(d1, d2, i1='index', i2=None, out='data'):\n \"\"\"Align two data-objects based on index variables\n\n Before aligning, two data-objects ``d1`` and ``d2`` describe the same cases,\n but their order does not correspond. :func:`align` uses the index variables\n ``i1`` and ``i2`` to match each case in ``d2`` to a case in ``d1`` (i.e.,\n ``d1`` is used as the basis for the case order in the output), and returns\n reordered versions of of ``d1`` and ``d2`` with matching cases. 
Cases that\n are present in only one of ``d1`` and ``d2`` are dropped.\n\n Parameters\n ----------\n d1, d2 : data-object\n Two data objects which are to be aligned\n i1, i2 : str | Var | Factor | Interaction\n Indexes for cases in d1 and d2. If d1 and d2 are Datasets, i1 and i2\n can be keys for variables in d1 and d2. If i2 is identical to i1 it can\n be omitted. Indexes have to supply a unique value for each case.\n out : 'data' | 'index'\n **'data'**: returns the two aligned data objects. **'index'**: returns\n two indices index1 and index2 which can be used to align the datasets\n with ``d1[index1]; d2[index2]``.\n\n Returns\n -------\n d1_aligned : data-object | array\n Aligned copy of ``d1`` (or index to align ``d1`` if ``out='index'``).\n d2_aligned : data-object | array\n Aligned copy of ``d2`` (or index to align ``d2`` if ``out='index'``).\n\n See Also\n --------\n align1 : Align one data-object to an index variable\n\n Examples\n --------\n See `examples/datasets/align.py `_.\n \"\"\"\n if i2 is None and isinstance(i1, str):\n i2 = i1\n i1 = as_case_identifier(i1, ds=d1)\n i2 = as_case_identifier(i2, ds=d2)\n if type(i1) is not type(i2):\n raise TypeError(\"i1 and i2 need to be of the same type, got: \\n\"\n \"i1=%r\\ni2=%r\" % (i1, i2))\n\n idx1 = []\n idx2 = []\n for i, case_id in enumerate(i1):\n if case_id in i2:\n idx1.append(i)\n idx2.append(i2.index(case_id)[0])\n\n if out == 'data':\n if all(i == v for i, v in enumerate(idx1)):\n return d1, d2[idx2]\n else:\n return d1[idx1], d2[idx2]\n elif out == 'index':\n return idx1, idx2\n else:\n raise ValueError(\"Invalid value for out parameter: %r\" % out)\n\n\ndef align1(d, idx, d_idx='index', out='data'):\n \"\"\"Align a data object to an index variable\n\n Parameters\n ----------\n d : data-object, n_cases = n1\n Data object with cases that should be aligned to ``idx``.\n idx : Var | array_like, len = n2\n Index array to which ``d`` should be aligned.\n d_idx : str | index array, len = n1\n Variable 
labeling cases in ``d`` for aligning them to ``idx``. If ``d``\n is a Dataset, ``d_idx`` can be the name of a variable in ``d``.\n out : 'data' | 'index'\n Return a restructured copy of ``d`` (default) or an index array into\n ``d``.\n\n Returns\n -------\n d_aligned : data-object | array\n Aligned copy of ``d`` (or index to align ``d`` to ``idx`` if\n ``out='index'``).\n\n See Also\n --------\n align : Align two data-objects\n \"\"\"\n idx = asuv(idx)\n if not isinstance(d_idx, str):\n # check d_idx length\n if isinstance(d, Dataset):\n if len(d_idx) != d.n_cases:\n msg = (\"d_idx does not have the same number of cases as d \"\n \"(d_idx: %i, d: %i)\" % (len(d_idx), d.n_cases))\n raise ValueError(msg)\n else:\n if len(d_idx) != len(d):\n msg = (\"d_idx does not have the same number of cases as d \"\n \"(d_idx: %i, d: %i)\" % (len(d_idx), len(d)))\n raise ValueError(msg)\n d_idx = asuv(d_idx, ds=d)\n\n align_idx = np.empty(len(idx), int)\n for i, v in enumerate(idx):\n where = d_idx.index(v)\n if len(where) == 1:\n align_idx[i] = where[0]\n elif len(where) == 0:\n raise ValueError(\"%s does not occur in d_idx\" % v)\n else:\n raise ValueError(\"%s occurs more than once in d_idx\" % v)\n\n if out == 'data':\n return d[align_idx]\n elif out == 'index':\n return align_idx\n else:\n ValueError(\"Invalid value for out parameter: %r\" % out)\n\n\ndef choose(choice, sources, name=None):\n \"\"\"Combine data-objects picking from a different object for each case\n\n Parameters\n ----------\n choice : array of int\n Array specifying for each case from which of the sources the data should\n be taken.\n sources : list of data-objects\n Data that should be combined.\n name : str\n Name for the new data-object (optional).\n\n Notes\n -----\n Analogous to :func:`numpy.choose`. 
Only implemented for NDVars at this time.\n \"\"\"\n choice = asarray(choice, 'i')\n if choice.min() < 0:\n raise ValueError(\"Choice can not be < 0\")\n elif choice.max() > len(sources) - 1:\n raise ValueError(\"choice contains values exceeding the number of sources\")\n\n s0 = sources[0]\n s1 = sources[1:]\n if isinstance(s0, NDVar):\n if not all(isinstance(s, NDVar) for s in s1):\n raise TypeError(\"Sources have different types\")\n elif any(s.dims != s0.dims for s in s1):\n raise DimensionMismatchError(\"Sources have different dimensions\")\n x = np.empty_like(s0.x)\n index_flat = np.empty(len(choice), bool)\n index = index_flat.reshape(index_flat.shape + (1,) * (x.ndim - 1))\n for i, s in enumerate(sources):\n np.equal(choice, i, index_flat)\n np.copyto(x, s.x, where=index)\n return NDVar(x, s0.dims, {}, name)\n else:\n raise NotImplementedError\n\n\ndef shuffled_index(n, cells=None):\n \"\"\"Return an index to shuffle a data-object\n\n Parameters\n ----------\n n : int\n Number of cases in the index.\n cells : categorial\n Only shuffle cases within cells.\n\n Returns\n -------\n index : array of int\n Array with in indexes for shuffling a data-object.\n\n Notes\n -----\n If ``cells`` is not specified, this is equivalent to\n ``numpy.random.permutation(n)``.\n \"\"\"\n if cells is None:\n return np.random.permutation(n)\n cells = ascategorial(cells, n=n)\n out = np.arange(n)\n for cell in cells.cells:\n index = cells == cell\n out[index] = np.random.permutation(out[index])\n return out\n\n\ndef combine(items, name=None, check_dims=True, incomplete='raise'):\n \"\"\"Combine a list of items of the same type into one item.\n\n Parameters\n ----------\n items : sequence\n Sequence of data objects to combine (Dataset, Var, Factor, \n NDVar or Datalist). A sequence of numbers is converted to :class:`Var`, \n a sequence of strings is converted to :class:`Factor`.\n name : None | str\n Name for the resulting data-object. 
If None, the name of the combined\n item is the common prefix of all items.\n check_dims : bool\n For NDVars, check dimensions for consistency between items (e.g.,\n channel locations in a Sensor dimension). Default is ``True``. Set to\n ``False`` to ignore non-fatal mismatches.\n incomplete : \"raise\" | \"drop\" | \"fill in\"\n Only applies when combining Datasets: how to handle variables that are\n missing from some of the input Datasets. With ``\"raise\"`` (default), a\n KeyError to be raised. With ``\"drop\"``, partially missing variables are\n dropped. With ``\"fill in\"``, they are retained and missing values are\n filled in with empty values (``\"\"`` for factors, ``NaN`` for variables).\n\n Notes\n -----\n The info dict inherits only entries that are equal (``x is y or\n np.array_equal(x, y)``) for all items.\n \"\"\"\n if not isinstance(incomplete, str):\n raise TypeError(\"incomplete=%s, need str\" % repr(incomplete))\n elif incomplete not in ('raise', 'drop', 'fill in'):\n raise ValueError(\"incomplete=%s\" % repr(incomplete))\n\n # check input\n if isinstance(items, Iterator):\n items = tuple(items)\n if len(items) == 0:\n raise ValueError(\"combine() called with empty sequence %s\" % repr(items))\n\n # find type\n first_item = items[0]\n if isinstance(first_item, Number):\n return Var(items, name=name)\n elif isinstance(first_item, str):\n return Factor(items)\n stype = type(first_item)\n if isinstance(first_item, mne.BaseEpochs):\n return mne.concatenate_epochs(items)\n elif not isdatacontainer(first_item):\n return Datalist(items)\n elif any(type(item) is not stype for item in items[1:]):\n raise TypeError(\"All items to be combined need to have the same type, \"\n \"got %s.\" %\n ', '.join(str(i) for i in {type(i) for i in items}))\n\n # find name\n if name is None:\n names = tuple(filter(None, (item.name for item in items)))\n name = os.path.commonprefix(names) or None\n\n # combine objects\n if stype is Dataset:\n out = Dataset(name=name, 
info=_info.merge_info(items))\n if incomplete == 'fill in':\n # find all keys and data types\n keys = list(first_item.keys())\n sample = dict(first_item)\n for item in items:\n for key in item:\n if key not in keys:\n keys.append(key)\n sample[key] = item[key]\n # create output\n for key in keys:\n pieces = [ds[key] if key in ds else\n _empty_like(sample[key], ds.n_cases) for ds in items]\n out[key] = combine(pieces, check_dims=check_dims)\n else:\n keys = set(first_item)\n if incomplete == 'raise':\n if any(set(item) != keys for item in items[1:]):\n raise KeyError(\"Datasets have unequal keys. Use with \"\n \"incomplete='drop' or incomplete='fill in' \"\n \"to combine anyways.\")\n out_keys = first_item\n else:\n keys.intersection_update(*items[1:])\n out_keys = (k for k in first_item if k in keys)\n\n for key in out_keys:\n out[key] = combine([ds[key] for ds in items])\n return out\n elif stype is Var:\n x = np.hstack(i.x for i in items)\n return Var(x, name, info=_info.merge_info(items))\n elif stype is Factor:\n random = set(f.random for f in items)\n if len(random) > 1:\n raise ValueError(\"Factors have different values for random parameter\")\n random = random.pop()\n labels = first_item._labels\n if all(f._labels == labels for f in items[1:]):\n x = np.hstack(f.x for f in items)\n return Factor(x, name, random, labels=labels)\n else:\n x = sum((i.as_labels() for i in items), [])\n return Factor(x, name, random)\n elif stype is NDVar:\n v_have_case = [v.has_case for v in items]\n if all(v_have_case):\n has_case = True\n all_dims = (item.dims[1:] for item in items)\n elif any(v_have_case):\n raise DimensionMismatchError(\"Some items have a 'case' dimension, \"\n \"others do not\")\n else:\n has_case = False\n all_dims = (item.dims for item in items)\n\n dims = reduce(lambda x, y: intersect_dims(x, y, check_dims), all_dims)\n idx = {d.name: d for d in dims}\n # reduce data to common dimension range\n sub_items = []\n for item in items:\n if 
def find_factors(obj):
    """Return the list of all factors contained in ``obj``.

    Parameters
    ----------
    obj : effect | Model
        Object to search: an EffectList, a univariate effect, a Model, a
        NestedEffect, an Interaction or a NonbasicEffect.

    Returns
    -------
    factors : EffectList
        All factors found in ``obj`` (deduplicated by identity), except for
        Interaction, whose ``base`` is returned directly.

    Raises
    ------
    TypeError
        If ``obj`` has no ``factors`` attribute (i.e. is not a recognized
        effect type).
    """
    if isinstance(obj, EffectList):
        # deduplicate by object identity while preserving order
        f = OrderedDict((id(f), f) for e in obj for f in find_factors(e))
        return EffectList(f.values())
    elif isuv(obj):
        return EffectList([obj])
    elif isinstance(obj, Model):
        f = OrderedDict((id(f), f) for e in obj.effects for f in find_factors(e))
        return EffectList(f.values())
    elif isinstance(obj, NestedEffect):
        return find_factors(obj.effect)
    elif isinstance(obj, Interaction):
        return obj.base
    else:  # NonbasicEffect
        try:
            return EffectList(obj.factors)
        except AttributeError as error:
            # A bare ``except`` here would also swallow KeyboardInterrupt
            # etc.; only a missing ``factors`` attribute means "not an
            # effect".  Chain the cause so the original traceback survives.
            raise TypeError("%r has no factors" % obj) from error


class EffectList(list):
    """List of effects with value-based containment and lookup.

    Unlike a plain list, ``in`` and ``index`` compare effects by name,
    type, length and data rather than by identity.
    """

    def __repr__(self):
        return 'EffectList((%s))' % ', '.join(self.names())

    def __contains__(self, item):
        # Two effects count as equal when name, type, length and data match.
        for f in self:
            if ((f.name == item.name) and (type(f) is type(item)) and
                    (len(f) == len(item)) and np.all(item == f)):
                return True
        return False

    def index(self, item):
        # NOTE: matches on length and data only (name/type not checked here).
        for i, f in enumerate(self):
            if (len(f) == len(item)) and np.all(item == f):
                return i
        raise ValueError("Factor %r not in EffectList" % item.name)

    def names(self):
        names = [e.name if isuv(e) else repr(e) for e in self]
        return [UNNAMED if n is None else n for n in names]
Multidimensional arrays\n are flattened as long as only 1 dimension is longer than 1.\n name : str | None\n Name of the variable\n repeat : int | array of int\n repeat each element in ``x``, either a constant or a different number\n for each element.\n tile : int\n Repeat ``x`` as a whole ``tile`` many times.\n info : dict\n Info dictionary. The \"longname\" entry is used for display purposes.\n\n Attributes\n ----------\n x : numpy.ndarray\n The data stored in the Var.\n name : None | str\n The Var's name.\n\n Notes\n -----\n While :py:class:`Var` objects support a few basic operations in a\n :py:mod:`numpy`-like fashion (``+``, ``-``, ``*``, ``/``, ``//``), their\n :py:attr:`Var.x` attribute provides access to the corresponding\n :py:class:`numpy.array` which can be used for anything more complicated.\n :py:attr:`Var.x` can be read and modified, but should not be replaced.\n \"\"\"\n ndim = 1\n\n def __init__(self, x, name=None, repeat=1, tile=1, info=None):\n if isinstance(x, str):\n raise TypeError(\"Var can't be initialized with a string\")\n\n if isinstance(x, Iterator):\n x = tuple(x)\n x = np.asarray(x)\n if x.dtype.kind == 'O':\n raise TypeError(\"Var can not handle object-type arrays. Consider \"\n \"using a Datalist.\")\n elif x.ndim > 1:\n if sum(i > 1 for i in x.shape) <= 1:\n x = np.ravel(x)\n else:\n raise ValueError(\n f\"x with shape {x.shape}; x needs to be one-dimensional. 
\"\n f\"Use NDVar class for data with more than one dimension.\")\n\n if not (isinstance(repeat, Integral) and repeat == 1):\n x = np.repeat(x, repeat)\n\n if tile > 1:\n x = np.tile(x, tile)\n\n if info is None:\n info = {}\n elif not isinstance(info, dict):\n raise TypeError(\"type(info)=%s; need dict\" % type(info))\n\n self.__setstate__((x, name, info))\n\n def __setstate__(self, state):\n if len(state) == 3:\n x, name, info = state\n else:\n x, name = state\n info = {}\n # raw\n self.name = name\n self.x = x\n self.info = info\n # constants\n self._n_cases = len(x)\n self.df = 1\n self.random = False\n\n def __getstate__(self):\n return (self.x, self.name, self.info)\n\n def __repr__(self, full=False):\n n_cases = preferences['var_repr_n_cases']\n\n if isintvar(self):\n fmt = preferences['int_fmt']\n elif isboolvar(self):\n fmt = preferences['bool_fmt']\n else:\n fmt = preferences['float_fmt']\n\n if full or len(self.x) <= n_cases:\n x = [fmt % v for v in self.x]\n else:\n x = [fmt % v for v in self.x[:n_cases]]\n x.append('... 
(N=%s)' % len(self.x))\n\n args = ['[%s]' % ', '.join(x)]\n if self.name is not None:\n args.append('name=%r' % self.name)\n\n if self.info:\n args.append('info=%r' % self.info)\n\n return \"Var(%s)\" % ', '.join(args)\n\n def __str__(self):\n return self.__repr__(True)\n\n __array_priority__ = 15\n\n @property\n def __array_interface__(self):\n return self.x.__array_interface__\n\n # container ---\n def __len__(self):\n return self._n_cases\n\n def __getitem__(self, index):\n if isinstance(index, Factor):\n raise TypeError(\"Factor can't be used as index\")\n elif isinstance(index, Var):\n index = index.x\n x = self.x[index]\n if isinstance(x, np.ndarray):\n return Var(x, self.name, info=self.info.copy())\n else:\n return x\n\n def __setitem__(self, index, value):\n self.x[index] = value\n\n def __contains__(self, value):\n return value in self.x\n\n # numeric ---\n def __neg__(self):\n x = -self.x\n info = self.info.copy()\n info['longname'] = '-' + longname(self)\n return Var(x, info=info)\n\n def __pos__(self):\n return self\n\n def __abs__(self):\n return self.abs()\n\n def __add__(self, other):\n if isdataobject(other):\n # ??? should Var + Var return sum or Model?\n return Model((self, other))\n\n x = self.x + other\n info = self.info.copy()\n info['longname'] = longname(self) + ' + ' + longname(other)\n return Var(x, info=info)\n\n def __iadd__(self, other):\n self.x += other.x if isinstance(other, Var) else other\n return self\n\n def __radd__(self, other):\n if np.isscalar(other):\n x = other + self.x\n elif len(other) != len(self):\n raise ValueError(\"Objects have different length (%i vs \"\n \"%i)\" % (len(other), len(self)))\n else:\n x = other + self.x\n\n info = self.info.copy()\n info['longname'] = longname(other) + ' + ' + longname(self)\n return Var(x, info=info)\n\n def __sub__(self, other):\n \"subtract: values are assumed to be ordered. 
Otherwise use .sub method.\"\n if np.isscalar(other):\n x = self.x - other\n elif len(other) != len(self):\n err = (\"Objects have different length (%i vs \"\n \"%i)\" % (len(self), len(other)))\n raise ValueError(err)\n else:\n x = self.x - other.x\n\n info = self.info.copy()\n info['longname'] = longname(self) + ' - ' + longname(other)\n return Var(x, info=info)\n\n def __isub__(self, other):\n self.x -= other.x if isinstance(other, Var) else other\n return self\n\n def __rsub__(self, other):\n if np.isscalar(other):\n x = other - self.x\n elif len(other) != len(self):\n raise ValueError(\"Objects have different length (%i vs \"\n \"%i)\" % (len(other), len(self)))\n else:\n x = other - self.x\n\n info = self.info.copy()\n info['longname'] = longname(other) + ' - ' + longname(self)\n return Var(x, info=info)\n\n def __mul__(self, other):\n if isinstance(other, Model):\n return Model((self,)) * other\n elif iscategorial(other):\n return Model((self, other, self % other))\n elif isinstance(other, Var):\n x = self.x * other.x\n else:\n x = self.x * other\n\n info = self.info.copy()\n info['longname'] = longname(self) + ' * ' + longname(other)\n return Var(x, info=info)\n\n def __imul__(self, other):\n self.x *= other.x if isinstance(other, Var) else other\n return self\n\n def __rmul__(self, other):\n if np.isscalar(other):\n x = other * self.x\n elif len(other) != len(self):\n raise ValueError(\"Objects have different length (%i vs \"\n \"%i)\" % (len(other), len(self)))\n else:\n x = other * self.x\n\n info = self.info.copy()\n info['longname'] = longname(other) + ' * ' + longname(self)\n return Var(x, info=info)\n\n def __floordiv__(self, other):\n if isinstance(other, Var):\n x = self.x // other.x\n else:\n x = self.x // other\n\n info = self.info.copy()\n info['longname'] = longname(self) + ' // ' + longname(other)\n return Var(x, info=info)\n\n def __ifloordiv__(self, other):\n self.x //= other.x if isinstance(other, Var) else other\n return self\n\n def 
__mod__(self, other):\n if isinstance(other, Model):\n return Model(self) % other\n elif isinstance(other, Var):\n x = self.x % other.x\n elif isdataobject(other):\n return Interaction((self, other))\n else:\n x = self.x % other\n\n info = self.info.copy()\n info['longname'] = longname(self) + ' % ' + longname(other)\n return Var(x, info=info)\n\n def __imod__(self, other):\n self.x %= other.x if isinstance(other, Var) else other\n return self\n\n def __lt__(self, y):\n return self.x < y\n\n def __le__(self, y):\n return self.x <= y\n\n def __eq__(self, y):\n return self.x == y\n\n def __ne__(self, y):\n return self.x != y\n\n def __gt__(self, y):\n return self.x > y\n\n def __ge__(self, y):\n return self.x >= y\n\n def __truediv__(self, other):\n return self.__div__(other)\n\n def __itruediv__(self, other):\n return self.__idiv__(other)\n\n def __div__(self, other):\n if isinstance(other, Var):\n x = self.x / other.x\n elif iscategorial(other): # separate slope for each level (for ANCOVA)\n dummy_factor = other.as_dummy_complete\n codes = dummy_factor * self.as_effects\n # center\n means = codes.sum(0) / dummy_factor.sum(0)\n codes -= dummy_factor * means\n # create effect\n name = '%s per %s' % (self.name, other.name)\n return NonbasicEffect(codes, [self, other], name,\n beta_labels=other.dummy_complete_labels)\n else:\n x = self.x / other\n\n info = self.info.copy()\n info['longname'] = longname(self) + ' / ' + longname(other)\n return Var(x, info=info)\n\n def __idiv__(self, other):\n self.x /= other.x if isinstance(other, Var) else other\n return self\n\n def __pow__(self, other):\n if isinstance(other, Var):\n x = self.x ** other.x\n else:\n x = self.x ** other\n info = self.info.copy()\n info['longname'] = longname(self) + ' ** ' + longname(other)\n return Var(x, info=info)\n\n def __round__(self, n=0):\n return Var(np.round(self.x, n), self.name)\n\n def _coefficient_names(self, method):\n return longname(self),\n\n def abs(self, name=None):\n \"Return a 
Var with the absolute value.\"\n info = self.info.copy()\n info['longname'] = 'abs(' + longname(self) + ')'\n return Var(np.abs(self.x), name, info=info)\n\n def argmax(self):\n \"\"\":func:`numpy.argmax`\"\"\"\n return np.argmax(self.x)\n\n def argmin(self):\n \"\"\":func:`numpy.argmin`\"\"\"\n return np.argmin(self.x)\n\n def argsort(self, kind='quicksort'):\n \"\"\":func:`numpy.argsort`\n\n Parameters\n ----------\n kind : 'quicksort' | 'mergesort' | 'heapsort'\n Sorting algorithm (default 'quicksort').\n\n Returns\n -------\n index_array : array of int\n Array of indices that sort `a` along the specified axis.\n In other words, ``a[index_array]`` yields a sorted `a`.\n \"\"\"\n return np.argsort(self.x, kind=kind)\n\n def astype(self, dtype):\n \"\"\"Copy of the Var with data cast to the specified type\n\n Parameters\n ----------\n dtype : numpy dtype\n Numpy data-type specification (see :meth:`numpy.ndarray.astype`).\n \"\"\"\n return Var(self.x.astype(dtype), self.name, info=self.info.copy())\n\n @property\n def as_dummy(self):\n \"For dummy coding\"\n return self.x[:, None]\n\n @property\n def as_effects(self):\n \"For effect coding\"\n return self.x[:, None] - self.x.mean()\n\n def as_factor(self, labels='%r', name=True, random=False):\n \"\"\"Convert the Var into a Factor\n\n Parameters\n ----------\n labels : str | dict\n Either a format string for converting values into labels (default:\n ``'%r'``) or a dictionary mapping values to labels (see examples).\n In a dictionary, multiple values can be assigned the same label by\n providing multiple keys in a tuple. 
A special key 'default' can be\n used to assign values that are not otherwise specified in the\n dictionary (by default this is the empty string ``''``).\n name : None | True | str\n Name of the output Factor, ``True`` to keep the current name\n (default ``True``).\n random : bool\n Whether the Factor is a random Factor (default ``False``).\n\n Examples\n --------\n >>> v = Var([0, 1, 2, 3])\n >>> v.as_factor()\n Factor(['0', '1', '2', '3'])\n >>> v.as_factor({0: 'a', 1: 'b'})\n Factor(['a', 'b', '', ''])\n >>> v.as_factor({(0, 1): 'a', (2, 3): 'b'})\n Factor(['a', 'a', 'b', 'b'])\n >>> v.as_factor({0: 'a', 1: 'b', 'default': 'c'})\n Factor(['a', 'b', 'c', 'c'])\n \"\"\"\n labels_ = {}\n if isinstance(labels, dict):\n # flatten\n for key, v in labels.items():\n if (isinstance(key, Sequence) and not isinstance(key, str)):\n for k in key:\n labels_[k] = v\n else:\n labels_[key] = v\n\n default = labels_.pop('default', '')\n if default is not None:\n for key in np.unique(self.x):\n if key not in labels_:\n labels_[key] = default\n else:\n for value in np.unique(self.x):\n labels_[value] = labels % value\n\n if name is True:\n name = self.name\n\n return Factor(self.x, name, random, labels=labels_)\n\n def copy(self, name=True):\n \"Return a deep copy\"\n x = self.x.copy()\n if name is True:\n name = self.name\n return Var(x, name, info=deepcopy(self.info))\n\n def count(self):\n \"\"\"Count the number of occurrence of each value\n\n Notes\n -----\n Counting starts with zero (see examples). 
This is to facilitate\n integration with indexing.\n\n Examples\n --------\n >>> v = Var([1, 2, 3, 1, 1, 1, 3])\n >>> v.count()\n Var([0, 0, 0, 1, 2, 3, 1])\n \"\"\"\n x = np.empty(len(self.x), int)\n index = np.empty(len(self.x), bool)\n for v in np.unique(self.x):\n np.equal(self.x, v, index)\n x[index] = np.arange(index.sum())\n return Var(x)\n\n def aggregate(self, x, func=np.mean, name=True):\n \"\"\"Summarize cases within cells of x\n\n Parameters\n ----------\n x : categorial\n Model defining cells in which to aggregate.\n func : callable\n Function that converts arrays into scalars, used to summarize data\n within each cell of x.\n name : None | True | str\n Name of the output Var, ``True`` to keep the current name (default\n ``True``).\n\n Returns\n -------\n aggregated_var : Var\n A Var instance with a single value for each cell in x.\n \"\"\"\n if len(x) != len(self):\n err = \"Length mismatch: %i (Var) != %i (x)\" % (len(self), len(x))\n raise ValueError(err)\n\n x_out = []\n for cell in x.cells:\n x_cell = self.x[x == cell]\n if len(x_cell) > 0:\n x_out.append(func(x_cell))\n\n if name is True:\n name = self.name\n\n return Var(x_out, name, info=self.info.copy())\n\n @property\n def beta_labels(self):\n return [self.name]\n\n def diff(self, to_end=None, to_begin=None):\n \"\"\"The differences between consecutive values\n\n Parameters\n ----------\n to_end : scalar (optional)\n Append ``to_end`` at the end.\n to_begin : scalar (optional)\n Add ``to_begin`` at the beginning.\n\n Returns\n -------\n diff : Var\n Difference.\n \"\"\"\n if len(self) == 0:\n return Var(np.empty(0), info=self.info.copy())\n return Var(np.ediff1d(self.x, to_end, to_begin), info=self.info.copy())\n\n # def difference(self, x, v1, v2, match):\n # \"\"\"\n # Subtract x==v2 from x==v1; sorts values according to match (ascending)\n #\n # Parameters\n # ----------\n # x : categorial\n # Model to define cells.\n # v1, v2 : str | tuple\n # Cells on x for subtraction.\n # match : 
categorial\n # Model that defines how to mach cells in v1 to cells in v2.\n # \"\"\"\n # raise NotImplementedError\n # # FIXME: use celltable\n # assert isinstance(x, Factor)\n # I1 = (x == v1); I2 = (x == v2)\n # Y1 = self[I1]; Y2 = self[I2]\n # m1 = match[I1]; m2 = match[I2]\n # s1 = np.argsort(m1); s2 = np.argsort(m2)\n # y = Y1[s1] - Y2[s2]\n # name = \"{n}({x1}-{x2})\".format(n=self.name,\n # x1=x.cells[v1],\n # x2=x.cells[v2])\n # return Var(y, name, info=self.info.copy())\n\n @classmethod\n def from_dict(cls, base, values, name=None, default=0, info=None):\n \"\"\"\n Construct a Var object by mapping ``base`` to ``values``.\n\n Parameters\n ----------\n base : sequence\n Sequence to be mapped to the new Var.\n values : dict\n Mapping from values in base to values in the new Var.\n name : None | str\n Name for the new Var.\n default : scalar\n Default value to supply for entries in ``base`` that are not in\n ``values``.\n\n Examples\n --------\n >>> base = Factor('aabbcde')\n >>> Var.from_dict(base, {'a': 5, 'e': 8}, default=0)\n Var([5, 5, 0, 0, 0, 0, 8])\n \"\"\"\n return cls([values.get(b, default) for b in base], name, info=info)\n\n @classmethod\n def from_apply(cls, base, func, name=None, info=None):\n \"\"\"\n Construct a Var instance by applying a function to each value in a base\n\n Parameters\n ----------\n base : sequence, len = n\n Base for the new Var. 
Can be an NDVar, if ``func`` is a\n dimensionality reducing function such as :func:`numpy.mean`.\n func : callable\n A function that when applied to each element in ``base`` returns\n the desired value for the resulting Var.\n \"\"\"\n if isinstance(base, (Var, NDVar)):\n base = base.x\n\n if isinstance(func, np.ufunc):\n x = func(base)\n elif getattr(base, 'ndim', 1) > 1:\n x = func(base.reshape((len(base), -1)), axis=1)\n else:\n x = np.array([func(val) for val in base])\n\n return cls(x, name, info=info)\n\n def index(self, value):\n \"``v.index(value)`` returns an array of indices where v equals value\"\n return np.flatnonzero(self == value)\n\n def isany(self, *values):\n \"Boolean index, True where the Var is equal to one of the values\"\n return np.in1d(self.x, values)\n\n def isin(self, values):\n \"Boolean index, True where the Var value is in values\"\n if isinstance(values, dict):\n values = tuple(values)\n return np.in1d(self.x, values)\n\n def isnot(self, *values):\n \"Boolean index, True where the Var is not equal to one of the values\"\n return np.in1d(self.x, values, invert=True)\n\n def isnotin(self, values):\n \"Boolean index, True where the Var value is not in values\"\n return np.in1d(self.x, values, invert=True)\n\n def log(self, base=None, name=None):\n \"\"\"Element-wise log\n\n Parameters\n ----------\n base : scalar\n Base of the log (default is the natural log).\n name : str\n Name of the output Var (default is the current name).\n \"\"\"\n if base is None:\n x = np.log(self.x)\n elif base == 2:\n x = np.log2(self.x)\n elif base == 10:\n x = np.log10(self.x)\n else:\n x = np.log(self.x)\n x /= log(base)\n info = self.info.copy()\n if base is None:\n info['longname'] = f'log({longname(self)})'\n else:\n info['longname'] = f'log{base}({longname(self)})'\n return Var(x, name, info=info)\n\n def max(self):\n \"The highest value\"\n return self.x.max()\n\n def mean(self):\n \"The mean\"\n return self.x.mean()\n\n def min(self):\n \"The smallest 
value\"\n return self.x.min()\n\n def repeat(self, repeats, name=True):\n \"\"\"\n Repeat each element ``repeats`` times\n\n Parameters\n ----------\n repeats : int | array of int\n Number of repeats, either a constant or a different number for each\n element.\n name : None | True | str\n Name of the output Var, ``True`` to keep the current name (default\n ``True``).\n \"\"\"\n if name is True:\n name = self.name\n return Var(self.x.repeat(repeats), name, info=self.info.copy())\n\n def split(self, n=2, name=None):\n \"\"\"\n A Factor splitting y into ``n`` categories with equal number of cases\n\n Parameters\n ----------\n n : int\n number of categories\n name : str\n Name of the output Factor.\n\n Examples\n --------\n Use n = 2 for a median split::\n\n >>> y = Var([1,2,3,4])\n >>> y.split(2)\n Factor(['0', '0', '1', '1'])\n\n >>> z = Var([7, 6, 5, 4, 3, 2])\n >>> z.split(3)\n Factor(['2', '2', '1', '1', '0', '0'])\n\n \"\"\"\n y = self.x\n\n d = 100. / n\n percentile = np.arange(d, 100., d)\n values = [scipy.stats.scoreatpercentile(y, p) for p in percentile]\n x = np.zeros(len(y), dtype=int)\n for v in values:\n x += y > v\n return Factor(x, name)\n\n def std(self):\n \"The standard deviation\"\n return self.x.std()\n\n def sort_index(self, descending=False):\n \"\"\"Create an index that could be used to sort the Var.\n\n Parameters\n ----------\n descending : bool\n Sort in descending instead of an ascending order.\n \"\"\"\n idx = np.argsort(self.x, kind='mergesort')\n if descending:\n idx = idx[::-1]\n return idx\n\n def sum(self):\n \"The sum over all values\"\n return self.x.sum()\n\n @property\n def values(self):\n return np.unique(self.x)\n\n\nclass _Effect(object):\n # numeric ---\n def __add__(self, other):\n return Model(self) + other\n\n def __mul__(self, other):\n if partially_nested(other, self):\n return Model((self, other))\n return Model((self, other, self % other))\n\n def __mod__(self, other):\n if isinstance(other, Model):\n return Model((self 
    def as_var(self, labels, default=None, name=None):
        """Convert into a Var

        Parameters
        ----------
        labels : dict
            A ``{old_value: new_value}`` mapping.
        default : None | scalar
            Default value for old values not mentioned in ``labels``. If not
            specified, old values missing from ``labels`` will raise a
            ``KeyError``.
        name : str
            Name of the output Var (default is the old object's name).
        """
        # NOTE(review): ``None`` doubles as the "no default" sentinel, so
        # ``None`` itself cannot be used as a fill value for missing keys.
        if default is None:
            x = [labels[v] for v in self]  # missing key -> KeyError
        else:
            x = [labels.get(v, default) for v in self]
        return Var(x, name or self.name)
'b', 'c'])\n >>> f.index('b')\n array([1, 4, 7])\n >>> f[f.index('b')] = 'new_b'\n >>> f\n Factor(['a', 'new_b', 'c', 'a', 'new_b', 'c', 'a', 'new_b', 'c'])\n \"\"\"\n return np.flatnonzero(self == cell)\n\n def index_opt(self, cell):\n \"\"\"Find an optimized index for a given cell.\n\n Returns\n -------\n index : slice | array\n If possible, a ``slice`` object is returned. Otherwise, an array\n of indices (as with ``e.index(cell)``).\n \"\"\"\n index = np.flatnonzero(self == cell)\n d_values = np.unique(np.diff(index))\n if len(d_values) == 1:\n start = index.min() or None\n step = d_values[0]\n stop = index.max() + 1\n if stop > len(self) - step:\n stop = None\n if step == 1:\n step = None\n index = slice(start, stop, step)\n return index\n\n def sort_index(self, descending=False, order=None):\n \"\"\"Create an index that could be used to sort this data_object.\n\n Parameters\n ----------\n descending : bool\n Sort in descending instead of the default ascending order.\n order : None | sequence\n Sequence of cells to define a custom order. Any cells that are not\n present in ``order`` will be omitted in the sort_index, i.e. 
the\n sort_index will be shorter than its source.\n\n Returns\n -------\n sort_index : array of int\n Array which can be used to sort a data_object in the desired order.\n \"\"\"\n idx = np.empty(len(self), dtype=np.intp)\n if order is None:\n cells = self.cells\n else:\n cells = order\n idx.fill(-1)\n\n for i, cell in enumerate(cells):\n idx[self == cell] = i\n\n sort_idx = np.argsort(idx, kind='mergesort')\n if order is not None:\n excluded = np.count_nonzero(idx == -1)\n if excluded:\n sort_idx = sort_idx[excluded:]\n\n if descending:\n if not isinstance(descending, bool):\n raise TypeError(\"descending=%s, need bool\" % repr(descending))\n sort_idx = sort_idx[::-1]\n\n return sort_idx\n\n\nclass Factor(_Effect):\n \"\"\"Container for categorial data.\n\n Parameters\n ----------\n x : iterator\n Sequence of Factor values (see also the ``labels`` kwarg).\n name : str\n Name of the Factor.\n random : bool\n Treat Factor as random factor (for ANOVA; default is False).\n repeat : int | array of int\n repeat each element in ``x``, either a constant or a different number\n for each element.\n tile : int\n Repeat ``x`` as a whole ``tile`` many times.\n labels : dict | OrderedDict | tuple\n An optional dictionary mapping values as they occur in ``x`` to the\n Factor's cell labels. Since :class`dict`s are unordered, labels are\n sorted alphabetically by default. 
In order to define cells in a\n different order, use a :class:`collections.OrderedDict` object or\n define labels as ``((key, value), ...)`` tuple.\n\n Attributes\n ----------\n .name : None | str\n The Factor's name.\n .cells : tuple of str\n Sorted names of all cells.\n .random : bool\n Whether the Factor is defined as random factor (for ANOVA).\n\n Examples\n --------\n The most obvious way to initialize a Factor is a list of strings::\n\n >>> Factor(['in', 'in', 'in', 'out', 'out', 'out'])\n Factor(['in', 'in', 'in', 'out', 'out', 'out'])\n\n The same can be achieved with a list of integers plus a labels dict::\n\n >>> Factor([1, 1, 1, 0, 0, 0], labels={1: 'in', 0: 'out'})\n Factor(['in', 'in', 'in', 'out', 'out', 'out'])\n\n Or more parsimoniously:\n\n >>> Factor([1, 0], labels={1: 'in', 0: 'out'}, repeat=3)\n Factor(['in', 'in', 'in', 'out', 'out', 'out'])\n\n Since the Factor initialization simply iterates over the ``x``\n argument, a Factor with one-character codes can also be initialized\n with a single string::\n\n >>> Factor('iiiooo')\n Factor(['i', 'i', 'i', 'o', 'o', 'o'])\n \"\"\"\n def __init__(self, x, name=None, random=False, repeat=1, tile=1, labels={}):\n if isinstance(x, Iterator):\n x = tuple(x)\n n_cases = len(x)\n\n if n_cases == 0 or not (np.any(repeat) or np.any(tile)):\n self.__setstate__({'x': np.empty(0, np.uint32), 'labels': {},\n 'name': name, 'random': random})\n return\n\n # find mapping and ordered values\n if isinstance(labels, dict):\n labels_dict = labels\n label_values = labels.values()\n if not isinstance(labels, OrderedDict):\n label_values = natsorted(label_values)\n else:\n labels_dict = dict(labels)\n label_values = [pair[1] for pair in labels]\n\n if isinstance(x, Factor):\n labels_dict = {x._codes.get(s): d for s, d in labels_dict.items()}\n labels_dict.update({code: label for code, label in x._labels.items()\n if code not in labels_dict})\n x = x.x\n\n if isinstance(x, np.ndarray) and x.dtype.kind in 'ifb':\n assert 
x.ndim == 1\n unique = np.unique(x)\n # find labels corresponding to unique values\n u_labels = [labels_dict[v] if v in labels_dict else str(v) for\n v in unique]\n # merge identical labels\n u_label_index = np.array([u_labels.index(label) for label in\n u_labels])\n\n x_ = u_label_index[np.digitize(x, unique, True)]\n # {label: code}\n codes = dict(zip(u_labels, u_label_index))\n else:\n # convert x to codes\n highest_code = -1\n codes = {} # {label -> code}\n x_ = np.empty(n_cases, dtype=np.uint32)\n for i, value in enumerate(x):\n if value in labels_dict:\n label = labels_dict[value]\n elif isinstance(value, str):\n label = value\n else:\n label = str(value)\n\n if label in codes:\n x_[i] = codes[label]\n else: # new code\n x_[i] = codes[label] = highest_code = highest_code + 1\n\n if highest_code >= 2**32:\n raise RuntimeError(\"Too many categories in this Factor\")\n\n # collect ordered_labels\n ordered_labels = OrderedDict(((codes[label], label) for label in\n label_values if label in codes))\n for label in natsorted(set(codes).difference(label_values)):\n ordered_labels[codes[label]] = label\n\n if not (isinstance(repeat, int) and repeat == 1):\n x_ = x_.repeat(repeat)\n\n if tile > 1:\n x_ = np.tile(x_, tile)\n\n self.__setstate__({'x': x_, 'ordered_labels': ordered_labels,\n 'name': name, 'random': random})\n\n def __setstate__(self, state):\n self.x = x = state['x']\n self.name = state['name']\n self.random = state['random']\n if 'ordered_labels' in state:\n # 0.13: ordered_labels replaced labels\n self._labels = state['ordered_labels']\n self._codes = {lbl: code for code, lbl in self._labels.items()}\n else:\n labels = state['labels']\n cells = natsorted(labels.values())\n self._codes = codes = {lbl: code for code, lbl in labels.items()}\n self._labels = OrderedDict([(codes[label], label) for label in cells])\n\n self._n_cases = len(x)\n\n def __getstate__(self):\n state = {'x': self.x,\n 'name': self.name,\n 'random': self.random,\n 'ordered_labels': 
self._labels}\n return state\n\n def __repr__(self, full=False):\n use_labels = preferences['factor_repr_use_labels']\n n_cases = preferences['factor_repr_n_cases']\n\n if use_labels:\n values = self.as_labels()\n else:\n values = self.x.tolist()\n\n if full or len(self.x) <= n_cases:\n x = repr(values)\n else:\n x = [repr(v) for v in values[:n_cases]]\n x.append('<... N=%s>' % len(self.x))\n x = '[' + ', '.join(x) + ']'\n\n args = [x]\n\n if self.name is not None:\n args.append('name=%r' % self.name)\n\n if self.random:\n args.append('random=True')\n\n if not use_labels:\n args.append('labels=%s' % self._labels)\n\n return 'Factor(%s)' % ', '.join(args)\n\n def __str__(self):\n return self.__repr__(True)\n\n # container ---\n def __len__(self):\n return self._n_cases\n\n def __getitem__(self, index):\n if isinstance(index, Var):\n index = index.x\n\n x = self.x[index]\n if isinstance(x, np.ndarray):\n return Factor(x, self.name, self.random, labels=self._labels)\n else:\n return self._labels[x]\n\n def __setitem__(self, index, x):\n # convert x to code\n if isinstance(x, str):\n self.x[index] = self._get_code(x)\n else:\n self.x[index] = tuple(map(self._get_code, x))\n\n # obliterate redundant labels\n for code in set(self._labels).difference(self.x):\n del self._codes[self._labels.pop(code)]\n\n def _get_code(self, label):\n \"Add the label if it does not exists and return its code\"\n try:\n return self._codes[label]\n except KeyError:\n code = 0\n while code in self._labels:\n code += 1\n\n if code >= 2**32:\n raise ValueError(\"Too many categories in this Factor.\")\n\n self._labels[code] = label\n self._codes[label] = code\n return code\n\n def __iter__(self):\n return (self._labels[i] for i in self.x)\n\n def __contains__(self, value):\n return value in self._codes\n\n # numeric ---\n def __eq__(self, other):\n return self.x == self._encode(other)\n\n def __ne__(self, other):\n return self.x != self._encode(other)\n\n def _encode(self, x):\n if isinstance(x, 
str):\n return self._codes.get(x, -1)\n elif len(x) == 0:\n return np.empty(0, dtype=int)\n elif isinstance(x, Factor):\n mapping = [self._codes.get(x._labels.get(xcode, -1), -1) for\n xcode in range(x.x.max() + 1)]\n return np.array(mapping)[x.x]\n else:\n return np.array([self._codes.get(label, -1) for label in x])\n\n def __call__(self, other):\n \"\"\"Create a nested effect.\n\n A factor A is nested in another factor B if\n each level of A only occurs together with one level of B.\n\n \"\"\"\n return NestedEffect(self, other)\n\n @property\n def as_dummy(self): # x_dummy_coded\n codes = np.empty((self._n_cases, self.df))\n for i, cell in enumerate(self.cells[:-1]):\n codes[:, i] = (self == cell)\n return codes\n\n @property\n def as_dummy_complete(self):\n x = self.x\n categories = np.unique(x)\n out = np.empty((len(self), len(categories)))\n for i, cat in enumerate(categories):\n np.equal(x, cat, out[:, i])\n return out\n\n @property\n def as_effects(self): # x_deviation_coded\n shape = (self._n_cases, self.df)\n codes = np.empty(shape)\n for i, cell in enumerate(self.cells[:-1]):\n codes[:, i] = (self == cell)\n\n contrast = (self == self.cells[-1])\n codes -= contrast[:, None]\n return codes\n\n def _coefficient_names(self, method):\n if method == 'dummy':\n return [\"%s:%s\" % (self.name, cell) for cell in self.cells[:-1]]\n contrast_cell = self.cells[-1]\n return [\"%s:%s-%s\" % (self.name, cell, contrast_cell)\n for cell in self.cells[:-1]]\n\n def as_labels(self):\n \"Convert the Factor to a list of str\"\n return [self._labels[v] for v in self.x]\n\n @property\n def beta_labels(self):\n cells = self.cells\n txt = '{0}=={1}'\n return [txt.format(cells[i], cells[-1]) for i in range(len(cells) - 1)]\n\n @property\n def cells(self):\n return tuple(self._labels.values())\n\n def _cellsize(self):\n \"int if all cell sizes are equal, otherwise a {cell: size} dict\"\n buf = np.empty(self.x.shape, bool)\n ns = {value: np.equal(self.x, code, buf).sum() for code, 
value\n in self._labels.items()}\n n_set = set(ns.values())\n if len(n_set) == 1:\n return n_set.pop()\n else:\n return ns\n\n def aggregate(self, x, name=True):\n \"\"\"\n Summarize the Factor by collapsing within cells in `x`.\n\n Raises an error if there are cells that contain more than one value.\n\n Parameters\n ----------\n x : categorial\n A categorial model defining cells to collapse.\n name : None | True | str\n Name of the output Factor, ``True`` to keep the current name\n (default ``True``).\n\n Returns\n -------\n f : Factor\n A copy of self with only one value for each cell in x\n \"\"\"\n if len(x) != len(self):\n raise ValueError(\n f\"x={dataobj_repr(x)} of length {len(x)} for Factor \"\n f\"{dataobj_repr(self)} of length {len(self)}\")\n\n x_out = []\n for cell in x.cells:\n idx = (x == cell)\n if np.sum(idx):\n x_i = np.unique(self.x[idx])\n if len(x_i) > 1:\n labels = tuple(self._labels[code] for code in x_i)\n raise ValueError(\n f\"Can not determine aggregated value for Factor \"\n f\"{dataobj_repr(self)} in cell {cell!r} because the \"\n f\"cell contains multiple values {labels}. 
Set \"\n f\"drop_bad=True in order to ignore this inconsistency \"\n f\"and drop the Factor.\")\n else:\n x_out.append(x_i[0])\n\n if name is True:\n name = self.name\n\n return Factor(x_out, name, self.random, labels=self._labels)\n\n def copy(self, name=True, repeat=1, tile=1):\n \"A deep copy\"\n if name is True:\n name = self.name\n return Factor(self.x, name, self.random, repeat, tile, self._labels)\n\n @property\n def df(self):\n return max(0, len(self._labels) - 1)\n\n def endswith(self, substr):\n \"\"\"An index that is true for all cases whose name ends with ``substr``\n\n Parameters\n ----------\n substr : str\n String for selecting cells that end with substr.\n\n Returns\n -------\n idx : boolean array, len = len(self)\n Index that is true wherever the value ends with ``substr``.\n\n Examples\n --------\n >>> a = Factor(['a1', 'a2', 'b1', 'b2'])\n >>> a.endswith('1')\n array([True, False, True, False], dtype=bool)\n \"\"\"\n values = [v for v in self.cells if v.endswith(substr)]\n return self.isin(values)\n\n def floodfill(self, regions, empty=''):\n \"\"\"Fill in empty regions in a Factor from the nearest non-empty value\n\n Parameters\n ----------\n regions : array_like | str\n How to define regions to fill. Can be an object with same length as\n the factor that indicates regions to fill (see example). 
Can also\n be ``\"previous\"``, in which case the last value before the empty\n cell is used.\n empty : str\n Value that is to be treated as empty (default is '').\n\n Examples\n --------\n >>> f = Factor(['', '', 'a', '', '', '', 'b', ''])\n >>> f.floodfill([1, 1, 1, 1, 2, 2, 2, 2])\n Factor(['a', 'a', 'a', 'a', 'b', 'b', 'b', 'b'])\n >>> f.floodfill([1, 1, 1, 2, 2, 2, 2, 2])\n Factor(['a', 'a', 'a', 'b', 'b', 'b', 'b', 'b'])\n >>> f.floodfill([1, 1, 1, 1, 1, 1, 1, 1])\n Factor(['a', 'a', 'a', 'a', 'a', 'a', 'b', 'b'])\n \"\"\"\n if isinstance(regions, str) and regions not in ('previous',):\n raise ValueError(\"demarcation=%r\" % (regions,))\n\n out = self.copy(None)\n if empty not in self._codes:\n return out\n empty = out._codes[empty]\n x = out.x\n\n if isinstance(regions, str):\n if regions == 'previous':\n is_empty = np.flatnonzero(x == empty)\n if is_empty[0] == 0:\n is_empty = is_empty[1:]\n for i in is_empty:\n x[i] = x[i - 1]\n else:\n raise RuntimeError(\"demarcation=%r\" % (regions,))\n else:\n assert(len(regions) == self._n_cases)\n i_region_start = 0\n region_v = -1 if regions[0] is None else None\n fill_with = empty\n for i in range(self._n_cases):\n if regions[i] == region_v:\n if x[i] == empty:\n if fill_with != empty:\n x[i] = fill_with\n else:\n if fill_with == empty:\n x[i_region_start:i] = x[i]\n fill_with = x[i]\n else: # region change\n region_v = regions[i]\n fill_with = x[i]\n if fill_with == empty:\n i_region_start = i\n\n # remove redundant label\n if empty not in x:\n del out._codes[out._labels.pop(empty)]\n\n return out\n\n def get_index_to_match(self, other):\n \"\"\"Generate index to conform to another Factor's order\n\n Assuming that ``other`` is a reordered version of self,\n ``get_index_to_match()`` generates an index to transform from the order\n of ``self`` to the order of ``other``.\n To guarantee exact matching, each value can only occur once in ``self``.\n\n Examples\n --------\n >>> index = 
factor1.get_index_to_match(factor2)\n >>> all(factor1[index] == factor2)\n True\n\n \"\"\"\n assert self._labels == other._labels\n index = []\n for v in other.x:\n where = np.where(self.x == v)[0]\n if len(where) == 1:\n index.append(where[0])\n else:\n msg = \"%r contains several cases of %r\" % (self, v)\n raise ValueError(msg)\n return np.array(index)\n\n def isany(self, *values):\n \"\"\"Find the index of entries matching one of the ``*values``\n\n Returns\n -------\n index : array of bool\n For each case True if the value is in values, else False.\n\n Examples\n --------\n >>> a = Factor('aabbcc')\n >>> b.isany('b', 'c')\n array([False, False, True, True, True, True], dtype=bool)\n \"\"\"\n return self.isin(values)\n\n def isin(self, values):\n \"\"\"Find the index of entries matching one of the ``values``\n\n Returns\n -------\n index : array of bool\n For each case True if the value is in values, else False.\n\n Examples\n --------\n >>> f = Factor('aabbcc')\n >>> f.isin(('b', 'c'))\n array([False, False, True, True, True, True], dtype=bool)\n \"\"\"\n return np.in1d(self.x, self._encode(values))\n\n def isnot(self, *values):\n \"\"\"Find the index of entries not in ``values``\n\n Returns\n -------\n index : array of bool\n For each case False if the value is in values, else True.\n \"\"\"\n return self.isnotin(values)\n\n def isnotin(self, values):\n \"\"\"Find the index of entries not in ``values``\n\n Returns\n -------\n index : array of bool\n For each case False if the value is in values, else True.\n \"\"\"\n return np.in1d(self.x, self._encode(values), invert=True)\n\n def label_length(self, name=None):\n \"\"\"Create Var with the length of each label string\n\n Parameters\n ----------\n name : str\n Name of the output Var (default ``None``).\n\n Examples\n --------\n >>> f = Factor(['a', 'ab', 'long_label'])\n >>> f.label_length()\n Var([1, 2, 10])\n \"\"\"\n label_lengths = {code: len(label) for code, label in self._labels.items()}\n x = 
np.empty(len(self))\n for i, code in enumerate(self.x):\n x[i] = label_lengths[code]\n\n if name:\n longname = name\n elif self.name:\n longname = self.name + '.label_length()'\n else:\n longname = 'label_length'\n\n return Var(x, name, info={\"longname\": longname})\n\n @property\n def n_cells(self):\n return len(self._labels)\n\n def update_labels(self, labels):\n \"\"\"Change one or more labels in place\n\n Parameters\n ----------\n labels : dict\n Mapping from old labels to new labels. Existing labels that are not\n in ``labels`` are kept.\n\n Examples\n --------\n >>> f = Factor('aaabbbccc')\n >>> f\n Factor(['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c'])\n >>> f.update_labels({'a': 'v1', 'b': 'v2'})\n >>> f\n Factor(['v1', 'v1', 'v1', 'v2', 'v2', 'v2', 'c', 'c', 'c'])\n\n In order to create a copy of the Factor with different labels just\n use the labels argument when initializing a new Factor:\n\n >>> Factor(f, labels={'c': 'v3'})\n Factor(['v1', 'v1', 'v1', 'v2', 'v2', 'v2', 'v3', 'v3', 'v3'])\n\n Notes\n -----\n If ``labels`` contains a key that is not a label of the Factor, a\n ``KeyError`` is raised.\n \"\"\"\n missing = tuple(old for old in labels if old not in self._codes)\n if missing:\n if len(missing) == 1:\n msg = (\"Factor does not contain label %r\" % missing[0])\n else:\n msg = (\"Factor does not contain labels %s\"\n % ', '.join(repr(m) for m in missing))\n raise KeyError(msg)\n\n # check for merged labels\n new_labels = {c: labels.get(l, l) for c, l in self._labels.items()}\n codes_ = sorted(new_labels)\n labels_ = tuple(new_labels[c] for c in codes_)\n for i, label in enumerate(labels_):\n if label in labels_[:i]:\n old_code = codes_[i]\n new_code = codes_[labels_.index(label)]\n self.x[self.x == old_code] = new_code\n del new_labels[old_code]\n\n self._labels = new_labels\n self._codes = {l: c for c, l in new_labels.items()}\n\n def sort_cells(self, order):\n \"\"\"Reorder the cells of the Factor (in-place)\n\n The cell order controls the 
order in which data are displayed in tables\n and plots.\n\n Parameters\n ----------\n order : sequence of str\n New cell order. Needs to contain each cell exactly once.\n \"\"\"\n new_order = tuple(order)\n new = set(new_order)\n old = set(self.cells)\n if new != old:\n invalid = new.difference(old)\n if invalid:\n raise ValueError(\"Factor does not have cells: %s\" % ', '.join(invalid))\n missing = old.difference(new)\n if missing:\n raise ValueError(\"Factor has cennls not in order: %s\" % ', '.join(missing))\n raise RuntimeError(\"Factor.sort_cells comparing %s and %s\" % (old, new))\n self._labels = OrderedDict((self._codes[cell], cell) for cell in new_order)\n\n def startswith(self, substr):\n \"\"\"An index that is true for all cases whose name starts with ``substr``\n\n Parameters\n ----------\n substr : str\n String for selecting cells that start with substr.\n\n Returns\n -------\n idx : boolean array, len = len(self)\n Index that is true wherever the value starts with ``substr``.\n\n Examples\n --------\n >>> a = Factor(['a1', 'a2', 'b1', 'b2'])\n >>> a.startswith('b')\n array([False, False, True, True], dtype=bool)\n \"\"\"\n values = [v for v in self.cells if v.startswith(substr)]\n return self.isin(values)\n\n def table_categories(self):\n \"A table containing information about categories\"\n table = fmtxt.Table('rll')\n table.title(self.name)\n for title in ['i', 'Label', 'n']:\n table.cell(title)\n table.midrule()\n for code, label in self._labels.items():\n table.cell(code)\n table.cell(label)\n table.cell(np.sum(self.x == code))\n return table\n\n def repeat(self, repeats, name=True):\n \"\"\"Repeat each element ``repeats`` times\n\n Parameters\n ----------\n repeats : int | array of int\n Number of repeats, either a constant or a different number for each\n element.\n name : None | True | str\n Name of the output Var, ``True`` to keep the current name (default\n ``True``).\n \"\"\"\n if name is True:\n name = self.name\n return Factor(self.x, 
name, self.random, repeats, labels=self._labels)\n\n def tile(self, repeats, name=True):\n \"\"\"Construct a Factor by repeating ``self`` ``repeats`` times\n\n Parameters\n ----------\n repeats : int\n Number of repeats.\n name : None | True | str\n Name of the output Var, ``True`` to keep the current name (default\n ``True``).\n \"\"\"\n if name is True:\n name = self.name\n return Factor(self.x, name, self.random, tile=repeats, labels=self._labels)\n\n\nclass NDVar(object):\n \"\"\"Container for n-dimensional data.\n\n Parameters\n ----------\n x : array_like\n The data.\n dims : Sequence of Dimension\n The dimensions characterizing the axes of the data. If present, ``Case``\n should always occupy the first position.\n info : dict\n A dictionary with data properties (can contain arbitrary\n information that will be accessible in the info attribute).\n name : str\n Name for the NDVar.\n\n\n Notes\n -----\n An :class:`NDVar` consists of the following components:\n\n - A :class:`numpy.ndarray`, stored in the :attr:`.x` attribute.\n - Meta-information describing each axis of the array using a\n :class:`Dimension` object (for example, :class:`UTS` for uniform\n time series, or :class:`Sensor` for a sensor array). These\n dimensions are stored in the :attr:`.dims` attribute, with the ith\n element of :attr:`.dims` describing the ith axis of :attr:`.x`.\n - A dictionary containing other meta-information stored in the\n :attr:`.info` attribute.\n - A name stored in the :attr:`.name` attribute.\n\n *Indexing*: For classical indexing, indexes need to be provided in the\n correct sequence. For example, assuming ``ndvar``'s first axis is time,\n ``ndvar[0.1]`` retrieves a slice at time = 0.1 s. 
If time is the second\n axis, the same can be achieved with ``ndvar[:, 0.1]``.\n In :meth:`NDVar.sub`, dimensions can be specified as keywords, for example,\n ``ndvar.sub(time=0.1)``, regardless of which axis represents the time\n dimension.\n\n *Shallow copies*: When generating a derived NDVars, :attr:`x` and\n :attr:`dims` are generated without copying data whenever possible.\n A shallow copy of :attr:`info` is stored. This means that modifying a\n derived NDVar in place can affect the NDVar it was derived from.\n When indexing an NDVar, the new NDVar will contain a view\n on the data whenever possible based on the underlying array (See `NumPy\n Indexing `_). This only matters when explicitly modifying an NDVar in place\n (e.g., ``ndvar += 1``) because NDVar methods that return NDVars never\n implicitly modify the original NDVars in place (see `this note\n `_).\n\n\n Examples\n --------\n Create an NDVar for 600 time series of 80 time points each:\n\n >>> data.shape\n (600, 80)\n >>> time = UTS(-.2, .01, 80)\n >>> ndvar = NDVar(data, dims=(Case, time))\n\n Baseline correction:\n\n >>> ndvar -= ndvar.mean(time=(None, 0))\n\n \"\"\"\n def __init__(self, x, dims, info={}, name=None):\n # check data shape\n if (isinstance(dims, Dimension) or dims is Case or isinstance(dims, str)):\n dims_ = [dims]\n else:\n dims_ = list(dims)\n\n x = np.asanyarray(x)\n if len(dims_) != x.ndim:\n raise DimensionMismatchError(\n \"Unequal number of dimensions (data: %i, dims: %i)\" %\n (x.ndim, len(dims_)))\n\n first_dim = dims_[0]\n if first_dim is Case or (isinstance(first_dim, str) and first_dim == 'case'):\n dims_[0] = Case(len(x))\n\n if not all(isinstance(dim, Dimension) for dim in dims_):\n raise TypeError(\n \"Invalid dimension in dims=%r. 
All dimensions need to be \"\n \"Dimension subclass objects, with the exception of the \"\n \"first dimension which can also be 'case'\" % (dims,))\n elif any(isinstance(dim, Case) for dim in dims_[1:]):\n raise TypeError(\n \"Invalid dimension in dims=%r. Only the first dimension can be \"\n \"Case.\" % (dims,))\n\n # check dimensions\n for dim, n in zip(dims_, x.shape):\n if len(dim) != n:\n raise DimensionMismatchError(\n \"Dimension %r length mismatch: %i in data, %i in dimension \"\n \"%r\" % (dim.name, n, len(dim), dim.name))\n\n self.x = x\n self.dims = tuple(dims_)\n self.info = dict(info)\n self.name = name\n self._init_secondary()\n\n def _init_secondary(self):\n self.has_case = isinstance(self.dims[0], Case)\n self._truedims = self.dims[self.has_case:]\n self.dimnames = tuple(dim.name for dim in self.dims)\n self.ndim = len(self.dims)\n self.shape = self.x.shape\n self._dim_2_ax = {dimname: i for i, dimname in enumerate(self.dimnames)}\n # Dimension attributes\n for dim in self._truedims:\n if hasattr(self, dim.name):\n raise ValueError(\"Invalid dimension name: %r (name is reserved \"\n \"for an NDVar attribute)\" % dim.name)\n else:\n setattr(self, dim.name, dim)\n\n def __setstate__(self, state):\n # backwards compatibility\n if 'properties' in state:\n state['info'] = state.pop('properties')\n if isinstance(state['dims'][0], str):\n state['dims'] = (Case(len(state['x'])),) + state['dims'][1:]\n\n self.x = state['x']\n self.dims = state['dims']\n self.name = state['name']\n self.info = state['info']\n self._init_secondary()\n\n def __getstate__(self):\n return {'dims': self.dims,\n 'x': self.x,\n 'name': self.name,\n 'info': self.info}\n\n __array_priority__ = 15\n\n @property\n def __array_interface__(self):\n return self.x.__array_interface__\n\n # numeric ---\n def __neg__(self):\n return NDVar(-self.x, self.dims, self.info.copy(), self.name)\n\n def __pos__(self):\n return self\n\n def __abs__(self):\n return self.abs()\n\n def __invert__(self):\n 
return NDVar(~self.x, self.dims, self.info.copy(), self.name)\n\n def __lt__(self, other):\n return NDVar(self.x < self._ialign(other),\n self.dims, _info.for_boolean(self.info), self.name)\n\n def __le__(self, other):\n return NDVar(self.x <= self._ialign(other),\n self.dims, _info.for_boolean(self.info), self.name)\n\n def __eq__(self, other):\n return NDVar(self.x == self._ialign(other),\n self.dims, _info.for_boolean(self.info), self.name)\n\n def __ne__(self, other):\n return NDVar(self.x != self._ialign(other),\n self.dims, _info.for_boolean(self.info), self.name)\n\n def __gt__(self, other):\n return NDVar(self.x > self._ialign(other),\n self.dims, _info.for_boolean(self.info), self.name)\n\n def __ge__(self, other):\n return NDVar(self.x >= self._ialign(other),\n self.dims, _info.for_boolean(self.info), self.name)\n\n def _align(self, other):\n \"\"\"Align data from 2 NDVars.\n\n Notes\n -----\n For unequal but overlapping dimensions, the intersection is used.\n For example, ``c = a + b`` with ``a`` [-100 300] ms and ``b`` [0 400] ms\n ``c`` will be [0 300] ms.\n \"\"\"\n if isinstance(other, Var):\n return self.dims, self.x, self._ialign(other)\n elif isinstance(other, NDVar):\n dimnames = list(self.dimnames)\n i_add = 0\n for dimname in other.dimnames:\n if dimname not in dimnames:\n dimnames.append(dimname)\n i_add += 1\n\n # find data axes\n self_axes = self.dimnames\n if i_add:\n self_axes += (None,) * i_add\n other_axes = tuple(name if name in other.dimnames else None\n for name in dimnames)\n\n # find dims\n dims = []\n crop = False\n crop_self = []\n crop_other = []\n for name, other_name in zip(self_axes, other_axes):\n if name is None:\n dim = other.get_dim(other_name)\n cs = co = FULL_SLICE\n elif other_name is None:\n dim = self.get_dim(name)\n cs = co = FULL_SLICE\n else:\n self_dim = self.get_dim(name)\n other_dim = other.get_dim(other_name)\n if self_dim == other_dim:\n dim = self_dim\n cs = co = FULL_SLICE\n else:\n dim = 
self_dim.intersect(other_dim)\n crop = True\n cs = self_dim._array_index(dim)\n co = other_dim._array_index(dim)\n dims.append(dim)\n crop_self.append(cs)\n crop_other.append(co)\n\n x_self = self.get_data(self_axes)\n x_other = other.get_data(other_axes)\n if crop:\n x_self = x_self[tuple(crop_self)]\n x_other = x_other[tuple(crop_other)]\n return dims, x_self, x_other\n elif np.isscalar(other):\n return self.dims, self.x, other\n else:\n raise TypeError(\"%r; need NDVar, Var or scalar\")\n\n def _ialign(self, other):\n \"Align for self-modifying operations (+=, ...)\"\n if np.isscalar(other):\n return other\n elif isinstance(other, Var):\n assert self.has_case\n n = len(other)\n shape = (n,) + (1,) * (self.x.ndim - 1)\n return other.x.reshape(shape)\n elif isinstance(other, NDVar):\n assert all(dim in self.dimnames for dim in other.dimnames)\n i_other = []\n for dim in self.dimnames:\n if dim in other.dimnames:\n i_other.append(dim)\n else:\n i_other.append(None)\n return other.get_data(i_other)\n else:\n raise TypeError(\"%r; need NDVar, Var or scalar\")\n\n def __add__(self, other):\n dims, x_self, x_other = self._align(other)\n return NDVar(x_self + x_other, dims, self.info.copy(), self.name)\n\n def __iadd__(self, other):\n self.x += self._ialign(other)\n return self\n\n def __radd__(self, other):\n return NDVar(other + self.x, self.dims, self.info.copy(), self.name)\n\n def __div__(self, other):\n dims, x_self, x_other = self._align(other)\n return NDVar(x_self / x_other, dims, self.info.copy(), self.name)\n\n def __idiv__(self, other):\n self.x /= self._ialign(other)\n return self\n\n def __rdiv__(self, other):\n return NDVar(other / self.x, self.dims, self.info.copy(), self.name)\n\n def __truediv__(self, other):\n return self.__div__(other)\n\n def __itruediv__(self, other):\n return self.__idiv__(other)\n\n def __rtruediv__(self, other):\n return self.__rdiv__(other)\n\n def __mul__(self, other):\n dims, x_self, x_other = self._align(other)\n return 
NDVar(x_self * x_other, dims, self.info.copy(), self.name)\n\n def __imul__(self, other):\n self.x *= self._ialign(other)\n return self\n\n def __rmul__(self, other):\n return NDVar(other * self.x, self.dims, self.info.copy(), self.name)\n\n def __pow__(self, other):\n dims, x_self, x_other = self._align(other)\n return NDVar(np.power(x_self, x_other), dims, self.info.copy(),\n self.name)\n\n def __ipow__(self, other):\n self.x **= self._ialign(other)\n return self\n\n def __rpow__(self, other):\n return NDVar(other ** self.x, self.dims, self.info.copy(), self.name)\n\n def __round__(self, n=0):\n return NDVar(np.round(self.x, n), self.dims, self.info.copy(), self.name)\n\n def __sub__(self, other):\n dims, x_self, x_other = self._align(other)\n return NDVar(x_self - x_other, dims, self.info.copy(), self.name)\n\n def __isub__(self, other):\n self.x -= self._ialign(other)\n return self\n\n def __rsub__(self, other):\n return NDVar(other - self.x, self.dims, self.info.copy(), self.name)\n\n def __and__(self, other):\n dims, x_self, x_other = self._align(other)\n return NDVar(x_self & x_other, dims, self.info.copy(), self.name)\n\n def __iand__(self, other):\n self.x &= self._ialign(other)\n return self\n\n def __rand__(self, other):\n return NDVar(other & self.x, self.dims, self.info.copy(), self.name)\n\n def __xor__(self, other):\n dims, x_self, x_other = self._align(other)\n return NDVar(x_self ^ x_other, dims, self.info.copy(), self.name)\n\n def __ixor__(self, other):\n self.x ^= self._ialign(other)\n return self\n\n def __rxor__(self, other):\n return NDVar(other ^ self.x, self.dims, self.info.copy(), self.name)\n\n def __or__(self, other):\n dims, x_self, x_other = self._align(other)\n return NDVar(x_self | x_other, dims, self.info.copy(), self.name)\n\n def __ior__(self, other):\n self.x |= self._ialign(other)\n return self\n\n def __ror__(self, other):\n return NDVar(other | self.x, self.dims, self.info.copy(), self.name)\n\n # container ---\n def 
_dim_index_unravel(self, index):\n \"Convert ravelled array index to dimension index\"\n if self.ndim == 1:\n return self.dims[0]._dim_index(index)\n return self._dim_index(np.unravel_index(index, self.x.shape))\n\n def _dim_index(self, index):\n \"Convert array index to dimension index\"\n if isinstance(index, tuple):\n return tuple(dim._dim_index(i) for dim, i in zip(self.dims, index))\n return self.dims[0]._dim_index(index)\n\n def __getitem__(self, index):\n if isinstance(index, tuple):\n return self.sub(*index)\n else:\n return self.sub(index)\n\n def __setitem__(self, key, value):\n if isinstance(value, NDVar):\n raise NotImplementedError(\"Setting NDVar to NDVar\")\n self.x[self._array_index(key)] = value\n\n def __len__(self):\n return len(self.x)\n\n def __iter__(self):\n dim = self.dims[0]\n name = self.dimnames[0]\n for value in dim:\n yield self.sub(**{name: value})\n\n def __repr__(self):\n return '' % {\n 'name': '' if self.name is None else ' %r' % self.name,\n 'dims': ', '.join('%i %s' % (len(dim), dim.name) for dim in\n self.dims)}\n\n def abs(self, name=None):\n \"\"\"Compute the absolute values\n\n Parameters\n ----------\n name : str\n Name of the output NDVar (default is the current name).\n\n Returns\n -------\n abs : NDVar\n NDVar with same dimensions and absolute values.\n \"\"\"\n return NDVar(np.abs(self.x), self.dims, self.info.copy(),\n name or self.name)\n\n def all(self, dims=(), **regions):\n \"\"\"Whether all values are nonzero over given dimensions\n\n Parameters\n ----------\n dims : str | tuple of str | boolean NDVar\n Dimensions over which to operate. A str is used to specify a single\n dimension, a tuple of str to specify several dimensions, None to\n compute whether there are any nonzero values at all.\n An boolean NDVar with the same dimensions as the data can be used\n to find nonzero values in specific elements (if the NDVar has cases\n on a per case basis).\n *regions*\n Regions over which to aggregate as keywords. 
\n For example, to check whether all values between time=0.1 and \n time=0.2 are non-zero, use ``ndvar.all(time=(0.1, 0.2))``.\n name : str\n Name of the output NDVar (default is the current name).\n\n Returns\n -------\n any : NDVar | Var | bool\n Boolean data indicating presence of nonzero value over specified\n dimensions. Return a Var if only the case dimension remains, and a\n boolean if the function collapses over all data.\n \n Examples\n --------\n Examples for\n \n >>> ndvar\n \n\n Check whether all values are nonzero:\n \n >>> ndvar.all()\n True\n \n Check whether each time point contains at least one non-zero value\n \n >>> ndvar.all(('case', 'sensor'))\n \n \n Check for nonzero values between time=0.1 and time=0.2\n \n >>> ndvar.all(time=(0.1, 0.2))\n \n \n \"\"\"\n return self._aggregate_over_dims(dims, regions, np.all)\n\n def any(self, dims=(), **regions):\n \"\"\"Compute presence of any value other than zero over given dimensions\n\n Parameters\n ----------\n dims : str | tuple of str | boolean NDVar\n Dimensions over which to operate. A str is used to specify a single\n dimension, a tuple of str to specify several dimensions, None to\n compute whether there are any nonzero values at all.\n An boolean NDVar with the same dimensions as the data can be used\n to find nonzero values in specific elements (if the NDVar has cases\n on a per case basis).\n *regions*\n Regions over which to aggregate. For example, to check for nonzero\n values between time=0.1 and time=0.2, use\n ``ndvar.any(time=(0.1, 0.2))``.\n name : str\n Name of the output NDVar (default is the current name).\n\n Returns\n -------\n any : NDVar | Var | bool\n Boolean data indicating presence of nonzero value over specified\n dimensions. 
Return a Var if only the case dimension remains, and a\n boolean if the function collapses over all data.\n \"\"\"\n return self._aggregate_over_dims(dims, regions, np.any)\n\n def argmax(self):\n \"\"\"Find the index of the largest value.\n\n ``ndvar[ndvar.argmax()]`` is equivalent to ``ndvar.max()``.\n\n Returns\n -------\n argmax : index | tuple\n Index appropriate for the NDVar's dimensions. If NDVar has more\n than one dimensions, a tuple of indices.\n \"\"\"\n return self._dim_index_unravel(self.x.argmax())\n\n def argmin(self):\n \"\"\"Find the index of the smallest value.\n\n ``ndvar[ndvar.argmin()]`` is equivalent to ``ndvar.min()``.\n\n Returns\n -------\n argmin : index | tuple\n Index appropriate for the NDVar's dimensions. If NDVar has more\n than one dimensions, a tuple of indices.\n \"\"\"\n return self._dim_index_unravel(self.x.argmin())\n\n def _array_index(self, arg):\n \"Convert dimension index to array index\"\n if isinstance(arg, NDVar):\n if arg.x.dtype.kind != 'b':\n raise IndexError(\"Only boolean NDVar can be used as index\")\n elif arg.dims == self.dims:\n return arg.x\n target_dims = tuple(dim if dim in arg.dimnames else None for dim in self.dimnames)\n shared_dims = tuple(filter(None, target_dims))\n self_dims = self.get_dims(shared_dims)\n args_dims = arg.get_dims(shared_dims)\n if args_dims != self_dims:\n raise DimensionMismatchError(\n f'The index has different dimensions than the NDVar\\n'\n f'NDVar: {self_dims}\\nIndex: {args_dims}')\n x = arg.x\n if arg.dimnames != shared_dims:\n if any(dim not in shared_dims for dim in arg.dimnames):\n missing = (dim for dim in arg.dimnames if dim not in shared_dims)\n raise DimensionMismatchError(\n f\"Index has dimensions {', '.join(missing)} not in {self}\")\n source_axes = tuple(range(arg.ndim))\n dest_axes = tuple(shared_dims.index(dim) for dim in arg.dimnames)\n x = np.moveaxis(x, source_axes, dest_axes)\n for axis, dim in enumerate(target_dims):\n if dim is None:\n x = np.expand_dims(x, 
axis)\n x = np.repeat(x, self.shape[axis], axis)\n return x\n elif isinstance(arg, tuple):\n return tuple(dim._array_index(i) for dim, i in zip(self.dims, arg))\n elif isinstance(arg, np.ndarray) and arg.ndim > 1:\n raise NotImplementedError\n else:\n return self.dims[0]._array_index(arg)\n\n def assert_dims(self, dims):\n if self.dimnames != dims:\n err = \"Dimensions of %r do not match %r\" % (self, dims)\n raise DimensionMismatchError(err)\n\n def aggregate(self, x, func=np.mean, name=None):\n \"\"\"\n Summarize data in each cell of ``x``.\n\n Parameters\n ----------\n x : categorial\n Categorial whose cells define which cases to aggregate.\n func : function with axis argument\n Function that is used to create a summary of the cases falling\n into each cell of x. The function needs to accept the data as\n first argument and ``axis`` as keyword-argument. Default is\n ``numpy.mean``.\n name : str\n Name of the output NDVar (default is the current name).\n\n Returns\n -------\n aggregated_ndvar : NDVar\n NDVar with data aggregated over cells of ``x``.\n \"\"\"\n if not self.has_case:\n raise DimensionMismatchError(\"%r has no case dimension\" % self)\n if len(x) != len(self):\n err = \"Length mismatch: %i (Var) != %i (x)\" % (len(self), len(x))\n raise ValueError(err)\n\n x_out = []\n for cell in x.cells:\n idx = (x == cell)\n if np.sum(idx):\n x_cell = self.x[idx]\n x_out.append(func(x_cell, axis=0))\n\n # update info for summary\n info = self.info.copy()\n if 'summary_info' in info:\n info.update(info.pop('summary_info'))\n\n return NDVar(np.array(x_out), (Case(len(x_out)),) + self.dims[1:], info, name or self.name)\n\n def _aggregate_over_dims(self, axis, regions, func):\n name = regions.pop('name', self.name)\n if regions:\n data = self.sub(**regions)\n additional_axis = [dim for dim in regions if data.has_dim(dim)]\n if additional_axis:\n if isinstance(axis, NDVar):\n data = data.sub(axis)\n axis = additional_axis\n elif not axis:\n axis = additional_axis\n 
elif isinstance(axis, str):\n axis = [axis] + additional_axis\n else:\n axis = list(axis) + additional_axis\n return data._aggregate_over_dims(axis, {'name': name}, func)\n elif not axis:\n return func(self.x)\n elif isinstance(axis, NDVar):\n if axis.ndim == 1:\n dim = axis.dims[0]\n if self.get_dim(dim.name) != dim:\n raise DimensionMismatchError(\"Index dimension %s does not \"\n \"match data dimension\" %\n dim.name)\n dim_axis = self.get_axis(dim.name)\n index = FULL_AXIS_SLICE * dim_axis + (axis.x,)\n x = func(self.x[index], dim_axis)\n dims = tuple(self.dims[i] for i in range(self.ndim) if i != dim_axis)\n else:\n # if the index does not contain all dimensions, numpy indexing\n # is weird\n if self.ndim - self.has_case != axis.ndim - axis.has_case:\n raise NotImplementedError(\n \"If the index is not one dimensional, it needs to have \"\n \"the same dimensions as the data.\")\n dims, self_x, index = self._align(axis)\n if self.has_case:\n if axis.has_case:\n x = np.array([func(x_[i]) for x_, i in zip(self_x, index)])\n else:\n index = index[0]\n x = np.array([func(x_[index]) for x_ in self_x])\n return Var(x, name, info=_info.for_data(x, self.info))\n elif axis.has_case:\n raise IndexError(\"Index with case dimension can not be \"\n \"applied to data without case dimension\")\n else:\n return func(self_x[index])\n elif isinstance(axis, str):\n axis = self._dim_2_ax[axis]\n x = func(self.x, axis=axis)\n dims = tuple(self.dims[i] for i in range(self.ndim) if i != axis)\n else:\n axes = tuple(self._dim_2_ax[dim_name] for dim_name in axis)\n x = func(self.x, axes)\n dims = tuple(self.dims[i] for i in range(self.ndim) if i not in axes)\n\n return self._package_aggregated_output(x, dims, _info.for_data(x, self.info), name)\n\n def astype(self, dtype):\n \"\"\"Copy of the NDVar with data cast to the specified type\n\n Parameters\n ----------\n dtype : numpy dtype\n Numpy data-type specification (see :meth:`numpy.ndarray.astype`).\n \"\"\"\n return 
NDVar(self.x.astype(dtype), self.dims, self.info.copy(),\n self.name)\n\n def bin(self, step=None, start=None, stop=None, func=None, dim=None,\n name=None, nbins=None):\n \"\"\"Bin the data along a given dimension (default ``'time'``)\n\n Parameters\n ----------\n step : scalar\n Time step between bins.\n start : None | scalar\n Earliest time point (default is from the beginning).\n stop : None | scalar\n End of the data to use (the default is to use as many whole\n ``tstep`` intervals as fit in the data).\n func : callable | str\n How to summarize data in each time bin. Can be the name of a numpy\n function that takes an axis parameter (e.g., 'sum', 'mean', 'max') or\n 'extrema' which selects the value with the maximum absolute value.\n The default depends on ``ndvar.info['meas']``:\n 'p': minimum;\n 'f': maximum;\n 't', 'r': extrema;\n otherwise: mean.\n dim : str\n Dimension over which to bin. If the NDVar has more than one\n dimension, the default is ``'time'``.\n name : str\n Name of the output NDVar (default is the current name).\n nbins : int\n Instead of specifying ``step``, ``nbins`` can be specified to divide\n ``dim`` into an even number of bins.\n\n Returns\n -------\n binned_ndvar : NDVar\n NDVar with data binned along the time axis (i.e., each time point\n reflects one time bin).\n \"\"\"\n if nbins is not None:\n if step is not None:\n raise TypeError(\"can only specify one of step and nbins\")\n elif not isinstance(nbins, int):\n raise TypeError(\"nbins needs to be int, got %r\" % (nbins,))\n elif nbins < 1:\n raise ValueError(\"nbins needs to be >= 1, got %r\" % (nbins,))\n elif step is None and nbins is None:\n raise TypeError(\"need to specify one of step and nbins\")\n\n if dim is None:\n if len(self.dims) == 1 + self.has_case:\n dim = self.dims[-1].name\n elif self.has_dim('time'):\n dim = 'time'\n else:\n raise TypeError(\"NDVar has more then 1 dimensions, the dim \"\n \"argument needs to be specified\")\n\n # summary-func\n if func is None:\n 
meas = self.info.get('meas', '').lower()\n if meas == 'p':\n func = np.min\n elif meas == 'f':\n func = np.max\n elif meas in ('t', 'r'):\n func = extrema\n else:\n func = np.mean\n elif isinstance(func, str):\n if func not in EVAL_CONTEXT:\n raise ValueError(\"Unknown summary function: func=%r\" % func)\n func = EVAL_CONTEXT[func]\n elif not callable(func):\n raise TypeError(\"func=%s\" % repr(func))\n\n axis = self.get_axis(dim)\n dim = self.get_dim(dim)\n edges, out_dim = dim._bin(start, stop, step, nbins)\n\n out_shape = list(self.shape)\n out_shape[axis] = len(edges) - 1\n x = np.empty(out_shape)\n bins = []\n idx_prefix = FULL_AXIS_SLICE * axis\n for i, bin_ in enumerate(intervals(edges)):\n src_idx = idx_prefix + (dim._array_index(bin_),)\n dst_idx = idx_prefix + (i,)\n x[dst_idx] = func(self.x[src_idx], axis=axis)\n bins.append(bin_)\n\n dims = list(self.dims)\n dims[axis] = out_dim\n info = self.info.copy()\n info['bins'] = tuple(bins)\n return NDVar(x, dims, info, name or self.name)\n\n def clip(self, min=None, max=None, name=None, out=None):\n \"\"\"Clip data (see :func:`numpy.clip`)\n\n Parameters\n ----------\n min : scalar | Var | NDVar\n Minimum value.\n max : scalar | Var | NDVar\n Maximum value.\n name : str\n Name of the output NDVar (default is the current name).\n out : NDVar\n Container for output.\n \"\"\"\n if min is not None:\n min = self._ialign(min)\n if max is not None:\n max = self._ialign(max)\n if out is not None:\n if out is not self:\n assert out.dims == self.dims\n x = self.x.clip(min, max, out.x)\n else:\n x = self.x.clip(min, max)\n if out is None:\n return NDVar(x, self.dims, self.info.copy(), name or self.name)\n else:\n return out\n\n def copy(self, name=None):\n \"\"\"A deep copy of the NDVar's data\n\n Parameters\n ----------\n name : str\n Name of the output NDVar (default is the current name).\n\n Returns\n -------\n ndvar_copy : NDVar\n An copy of the ndvar with a deep copy of the data.\n\n Notes\n -----\n The info 
dictionary is still a shallow copy.
        """
        return NDVar(self.x.copy(), self.dims, self.info.copy(),
                     name or self.name)

    def diff(self, dim=None, n=1, pad=True, name=None):
        """Discrete difference

        Parameters
        ----------
        dim : str
            Dimension along which to operate.
        n : int
            Number of times to difference (default 1).
        pad : bool
            Pad the ``dim`` dimension of the result to conserve NDVar shape
            (default).
        name : str
            Name of the output NDVar (default is the current name).

        Returns
        -------
        diff : NDVar
            NDVar with the ``n`` th differences.
        """
        if dim is None:
            # with a single non-case dimension the target axis is unambiguous
            if self.ndim - self.has_case == 1:
                axis = -1
            else:
                raise TypeError("Need to specify dimension over which to "
                                "differentiate")
        else:
            axis = self.get_axis(dim)

        x = np.diff(self.x, n, axis)
        if pad == 1:
            # prepend n zero-slices so the output keeps the input's shape
            # (allows reuse of self.dims below)
            idx = FULL_AXIS_SLICE * axis + (slice(0, n),)
            x = np.concatenate((np.zeros_like(x[idx]), x), axis)
        else:
            # NOTE(review): pad=False is documented above but not implemented —
            # it would require shortening the dimension object as well; confirm
            raise NotImplementedError("pad != 1")

        return NDVar(x, self.dims, self.info.copy(), name or self.name)

    def dot(self, ndvar, dim=None, name=None):
        """Dot product

        Parameters
        ----------
        ndvar : NDVar
            Second NDVar, has to have at least the dimension ``dim``.
        dim : str
            Dimension over which to form the dot product (default is the last
            dimension).
        name : str
            Name of the output NDVar (default is ``ndvar.name``).

        Examples
        --------
        >>> to_dss, from_dss = dss(x)
        >>> x_dss_6 = to_dss[:6].dot(x, 'sensor')
        """
        if dim is None:
            # default: last dimension of self that ndvar also has
            for dim in self.dimnames[::-1]:
                if ndvar.has_dim(dim):
                    break
        if dim == 'case':
            raise NotImplementedError("dim='case': dot-product along Case dimension")
        dim_x1 = self.get_dim(dim)
        dim_x2 = ndvar.get_dim(dim)
        if dim_x1 == dim_x2:
            x1_index = x2_index = None
        else:
            # dimension objects differ: restrict both operands to the
            # intersection of the two dimensions
            out_dim = dim_x1.intersect(dim_x2)
            x1_index = None if dim_x1 == out_dim else dim_x1._array_index_to(out_dim)
            x2_index = None if dim_x2 == out_dim else dim_x2._array_index_to(out_dim)

v1_dimnames = self.get_dimnames((None,) * (self.ndim - 1) + (dim,))\n dims = tuple(self.get_dim(d) for d in v1_dimnames[:-1])\n\n v2_dimnames = (dim,)\n if ndvar.has_case:\n v2_dimnames = ('case',) + v2_dimnames\n dims = ('case',) + dims\n v2_dimnames += (None,) * (ndvar.ndim - ndvar.has_case - 1)\n v2_dimnames = ndvar.get_dimnames(v2_dimnames)\n dims += tuple(ndvar.get_dim(d) for d in v2_dimnames[1 + ndvar.has_case:])\n\n x1 = self.get_data(v1_dimnames)\n if x1_index is not None:\n x1 = np.take(x1, x1_index, -1)\n x2 = ndvar.get_data(v2_dimnames)\n if x2_index is not None:\n x2 = np.take(x2, x2_index, v2_dimnames.index(dim))\n\n if ndvar.has_case:\n x = np.array([np.tensordot(x1, x2_, 1) for x2_ in x2])\n else:\n x = np.tensordot(x1, x2, 1)\n return NDVar(x, dims, {}, name or ndvar.name)\n\n def envelope(self, dim='time', name=None):\n \"\"\"Compute the Hilbert envelope of a signal\n\n Parameters\n ----------\n dim : str\n Dimension over which to compute the envelope (default 'time').\n name : str\n Name of the output NDVar (default is the current name).\n\n Returns\n -------\n envelope : NDVar\n NDVar with identical dimensions containing the envelope.\n\n Notes\n -----\n The Hilbert envelope is computed with :func:`scipy.signal.hilbert`::\n\n >>> numpy.abs(scipy.signal.hilbert(x))\n\n This function can be very slow when the number of time samples is\n uneven.\n \"\"\"\n x = np.abs(scipy.signal.hilbert(self.x, axis=self.get_axis(dim)))\n info = self.info.copy()\n return NDVar(x, self.dims, info, name or self.name)\n\n def extrema(self, dims=(), **regions):\n \"\"\"Extrema (value farthest away from 0) over given dimensions\n\n For each data point,\n ``extremum = max(x) if max(x) >= abs(min(x)) else min(x)``.\n\n Parameters\n ----------\n dims : str | tuple of str | boolean NDVar\n Dimensions over which to operate. 
A str is used to specify a single\n dimension, a tuple of str to specify several dimensions, ``None`` to\n compute the maximum over all dimensions.\n An boolean NDVar with the same dimensions as the data can be used\n to compute the extrema in specific elements (if the data has a case\n dimension, the extrema are computed for each case).\n *regions*\n Regions over which to aggregate. For example, to get the maximum\n between time=0.1 and time=0.2, use ``ndvar.max(time=(0.1, 0.2))``.\n name : str\n Name of the output NDVar (default is the current name).\n\n Returns\n -------\n extrema : NDVar | Var | float\n Extrema over specified dimensions. Return a Var if only the\n case dimension remains, and a float if the function collapses over\n all data.\n \"\"\"\n return self._aggregate_over_dims(dims, regions, extrema)\n\n def fft(self, dim=None, name=None):\n \"\"\"Fast fourier transform\n\n Parameters\n ----------\n dim : str\n Dimension along which to operate (the default is the ``time``\n dimension if present).\n name : str\n Name of the output NDVar (default is the current name).\n\n Returns\n -------\n fft : NDVar\n NDVar containing the FFT, with the ``time`` dimension replaced by\n ``frequency``.\n \"\"\"\n if dim is None:\n if self.ndim - self.has_case == 1:\n dim = self.dimnames[-1]\n elif self.has_dim('time'):\n dim = 'time'\n else:\n raise ValueError(\"NDVar has more than one dimension, you need \"\n \"to specify along which dimension to operate.\")\n axis = self.get_axis(dim)\n x = np.abs(np.fft.rfft(self.x, axis=axis))\n if dim == 'time':\n uts = self.get_dim(dim)\n freqs = np.fft.rfftfreq(len(uts), uts.tstep)\n freq = Scalar('frequency', freqs, 'Hz')\n else:\n n = self.shape[axis]\n freqs = np.fft.rfftfreq(n, 1. 
/ n)\n freq = Scalar('frequency', freqs, 'Hz')\n dims = self.dims[:axis] + (freq,) + self.dims[axis + 1:]\n info = _info.default_info('Amplitude', self.info)\n return NDVar(x, dims, info, name or self.name)\n\n def flatnonzero(self):\n \"\"\"Return indices where a 1-d NDVar is non-zero\n\n Like :func:`numpy.flatnonzero`.\n \"\"\"\n if self.ndim != 1:\n raise ValueError(\"flatnonzero only applies to 1-d NDVars\")\n dim = self.dims[0]\n return [dim._dim_index(index) for index in np.flatnonzero(self.x)]\n\n def get_axis(self, name):\n \"Return the data axis for a given dimension name\"\n if self.has_dim(name):\n return self._dim_2_ax[name]\n else:\n raise DimensionMismatchError(\"%r has no dimension named %r\" %\n (self, name))\n\n def get_data(self, dims):\n \"\"\"Retrieve the NDVar's data with a specific axes order.\n\n Parameters\n ----------\n dims : str | sequence of str\n Sequence of dimension names (or single dimension name). The array\n that is returned will have axes in this order. To insert a new\n axis with size 1 use ``numpy.newaxis``/``None``.\n\n Notes\n -----\n A shallow copy of the data is returned. 
To retrieve the data with the
        stored axes order use the .x attribute.
        """
        if isinstance(dims, str):
            dims = (dims,)

        # requested names, excluding np.newaxis placeholders
        dims_ = tuple(d for d in dims if d is not newaxis)
        if set(dims_) != set(self.dimnames) or len(dims_) != len(self.dimnames):
            raise DimensionMismatchError("Requested dimensions %r from %r" %
                                         (dims, self))

        # transpose
        axes = tuple(self.dimnames.index(d) for d in dims_)
        x = self.x.transpose(axes)

        # insert axes
        if len(dims) > len(dims_):
            for ax, dim in enumerate(dims):
                if dim is newaxis:
                    x = np.expand_dims(x, ax)

        return x

    def get_dim(self, name):
        "Return the Dimension object named ``name``"
        return self.dims[self.get_axis(name)]

    def get_dimnames(self, names=None, last=None):
        """Fill in a partially specified tuple of Dimension names

        Parameters
        ----------
        names : sequence of {str | None}
            Dimension names. Names specified as ``None`` are inferred.
        last : str
            Instead of providing ``names``, specify a constraint on the last
            dimension only.

        Returns
        -------
        inferred_names : tuple of str
            Dimension names in the same order as in ``names``.
        """
        if last is not None:
            if names is not None:
                raise TypeError("Can only specify names or last, not both")
            elif last not in self.dimnames:
                raise ValueError(f"{self} has no dimension called {last!r}")
            # keep the stored order, but move ``last`` to the final position
            dims = list(self.dimnames)
            dims.remove(last)
            dims.append(last)
            return tuple(dims)

        if not all(n is None or n in self.dimnames for n in names):
            raise ValueError(f"{names} contains dimension not in {self}")
        elif len(names) != len(self.dims):
            raise ValueError(f"{names}: wrong number of dimensions for {self}")
        elif any(names.count(n) > 1 for n in names if n is not None):
            raise ValueError(f"{names}: duplicate name")
        elif None in names:
            # NOTE(review): this check can never fire — the identical
            # comparison already raised "wrong number of dimensions" above
            if len(names) != len(self.dims):
                raise ValueError(f"{names}: ambiguous (more than one unspecified dimension)")
            none_dims = [n for n in self.dimnames if n not in
names]\n return tuple(n if n is not None else none_dims.pop(0) for\n n in names)\n else:\n return tuple(names)\n\n def get_dims(self, names):\n \"\"\"Return a tuple with the requested Dimension objects\n\n Parameters\n ----------\n names : sequence of {str | None}\n Names of the dimension objects. If ``None`` is inserted in place of\n names, these dimensions are inferred.\n\n Returns\n -------\n dims : tuple of Dimension\n Dimension objects in the same order as in ``names``.\n \"\"\"\n if None in names:\n names = self.get_dimnames(names)\n return tuple(self.get_dim(name) for name in names)\n\n def has_dim(self, name):\n return name in self._dim_2_ax\n\n def label_clusters(self, threshold=0, tail=0, name=None):\n \"\"\"Find and label clusters of values exceeding a threshold\n\n Parameters\n ----------\n threshold : scalar\n Threshold value for clusters (default 0 to find clusters of\n non-zero values).\n tail : 0 | -1 | 1\n Whether to label cluster smaller than threshold, larger than\n threshold, or both (default).\n name : str\n Name of the output NDVar (default is the current name).\n\n Returns\n -------\n clusters : NDVar\n NDVar of int, each cluster labeled with a unique integer value.\n ``clusters.info['cids']`` contains an array of all cluster IDs.\n \"\"\"\n from ._stats.testnd import Connectivity, label_clusters\n\n custom = [dim._connectivity_type == 'custom' for dim in self.dims]\n if any(custom):\n if sum(custom) > 1:\n raise NotImplementedError(\"More than one non-adjacent dimension\")\n nad_ax = custom.index(True)\n else:\n nad_ax = 0\n\n if nad_ax:\n x = self.x.swapaxes(0, nad_ax)\n connectivity = Connectivity(\n (self.dims[nad_ax],) + self.dims[:nad_ax] + self.dims[nad_ax + 1:])\n else:\n x = self.x\n connectivity = Connectivity(self.dims)\n\n cmap, cids = label_clusters(x, threshold, tail, connectivity, None)\n\n if nad_ax:\n cmap = cmap.swapaxes(0, nad_ax)\n\n info = self.info.copy()\n info['cids'] = cids\n return NDVar(cmap, self.dims, info, name 
                     or self.name)

    def log(self, base=None, name=None):
        """Element-wise log

        Parameters
        ----------
        base : scalar
            Base of the log (default is the natural log).
        name : str
            Name of the output NDVar (default is the current name).
        """
        if base is None:
            x = np.log(self.x)
        elif base == 2:
            x = np.log2(self.x)
        elif base == 10:
            x = np.log10(self.x)
        else:
            # generic base via change of base: log_b(x) = ln(x) / ln(b)
            x = np.log(self.x)
            x /= log(base)
        return NDVar(x, self.dims, self.info.copy(), name or self.name)

    def mask(self, mask, name=None):
        """Create a masked version of this NDVar (see :class:`numpy.ma.MaskedArray`)

        Parameters
        ----------
        mask : bool NDVar
            Mask, with equal dimensions (``True`` values will be masked).
        name : str
            Name of the output NDVar (default is the current name).
        """
        # align the mask's axes/elements to this NDVar's layout
        x_mask = self._ialign(mask)
        if x_mask.dtype.kind != 'b':
            x_mask = x_mask.astype(bool)
        x = np.ma.MaskedArray(self.x, x_mask)
        return NDVar(x, self.dims, self.info.copy(), name or self.name)

    def max(self, dims=(), **regions):
        """Compute the maximum over given dimensions

        Parameters
        ----------
        dims : str | tuple of str | boolean NDVar
            Dimensions over which to operate. A str is used to specify a single
            dimension, a tuple of str to specify several dimensions, None to
            compute the maximum over all dimensions.
            A boolean NDVar with the same dimensions as the data can be used
            to compute the maximum in specific elements (if the data has a case
            dimension, the maximum is computed for each case).
        *regions*
            Regions over which to aggregate. For example, to get the maximum
            between time=0.1 and time=0.2, use ``ndvar.max(time=(0.1, 0.2))``.
        name : str
            Name of the output NDVar (default is the current name).

        Returns
        -------
        max : NDVar | Var | float
            The maximum over specified dimensions. 
Return a Var if only the\n case dimension remains, and a float if the function collapses over\n all data.\n \"\"\"\n return self._aggregate_over_dims(dims, regions, np.max)\n\n def mean(self, dims=(), **regions):\n \"\"\"Compute the mean over given dimensions\n\n Parameters\n ----------\n dims : str | tuple of str | boolean NDVar\n Dimensions over which to operate. A str is used to specify a single\n dimension, a tuple of str to specify several dimensions, None to\n compute the mean over all dimensions.\n A boolean NDVar with the same dimensions as the data can be used\n to compute the mean in specific elements (if the data has a case\n dimension, the mean is computed for each case).\n *regions*\n Regions over which to aggregate. For example, to get the mean\n between time=0.1 and time=0.2, use ``ndvar.mean(time=(0.1, 0.2))``.\n name : str\n Name of the output NDVar (default is the current name).\n\n Returns\n -------\n mean : NDVar | Var | float\n The mean over specified dimensions. Return a Var if only the case\n dimension remains, and a float if the function collapses over all\n data.\n \"\"\"\n return self._aggregate_over_dims(dims, regions, np.mean)\n\n def min(self, dims=(), **regions):\n \"\"\"Compute the minimum over given dimensions\n\n Parameters\n ----------\n dims : str | tuple of str | boolean NDVar\n Dimensions over which to operate. A str is used to specify a single\n dimension, a tuple of str to specify several dimensions, None to\n compute the minimum over all dimensions.\n A boolean NDVar with the same dimensions as the data can be used\n to compute the minimum in specific elements (if the data has a case\n dimension, the minimum is computed for each case).\n *regions*\n Regions over which to aggregate. 
For example, to get the minimum\n between time=0.1 and time=0.2, use ``ndvar.min(time=(0.1, 0.2))``.\n name : str\n Name of the output NDVar (default is the current name).\n\n Returns\n -------\n min : NDVar | Var | float\n The minimum over specified dimensions. Return a Var if only the\n case dimension remains, and a float if the function collapses over\n all data.\n \"\"\"\n return self._aggregate_over_dims(dims, regions, np.min)\n\n def norm(self, dim, ord=2, name=None):\n \"\"\"Norm over ``dim``\n\n Parameters\n ----------\n dim : str\n Dimension over which to operate.\n ord : scalar\n See description of vector norm for :func:`scipy.linalg.norm` \n (default 2).\n name : str\n Name of the output NDVar (default is the current name).\n\n Returns\n -------\n norm : NDVar\n Norm over ``dim``.\n\n Examples\n --------\n To normalize ``x`` along the sensor dimension:\n\n >>> x /= x.norm('sensor')\n \"\"\"\n axis = self.get_axis(dim)\n x = norm(self.x, ord, axis)\n if self.ndim == 1:\n return x\n dims = self.dims[:axis] + self.dims[axis + 1:]\n return NDVar(x, dims, self.info.copy(), name or self.name)\n\n def ols(self, x, name=None):\n \"\"\"Sample-wise ordinary least squares regressions\n\n Parameters\n ----------\n x : Model | str\n Predictor or predictors. A Model to regress over cases, or a\n dimension name to regress against values of one of the\n ``NDVar``'s dimensions. A Model with multiple ``Var``s can be\n supplied as argument list of ``Var``.\n name : str\n Name of the output NDVar (default is the current name).\n\n Returns\n -------\n beta : NDVar\n Per sample beta weights. The case dimension reflects the predictor\n variables in the same order as the Model's effects.\n\n Notes\n -----\n The intercept is generated internally, and betas for the intercept are\n not returned. 
If you need access to more details of the results,\n consider using :class:`testnd.LM`.\n\n See Also\n --------\n .ols_t : T-values for regression coefficients\n \"\"\"\n from ._stats import stats\n\n info = self.info.copy()\n info.update(meas='beta', unit=None)\n if 'summary_info' in info:\n del info['summary_info']\n\n if isinstance(x, str):\n if x.startswith('.'):\n x = x[:]\n dimnames = self.get_dimnames((x,) + (None,) * (self.ndim - 1))\n dim = self.get_dim(x)\n values = dim._as_scalar_array()\n y = self.get_data(dimnames)\n betas = stats.betas(y, Var(values, x))[1]\n out_dims = self.get_dims(dimnames[1:])\n elif not self.has_case:\n raise DimensionMismatchError(\n \"Can only apply regression to NDVar with case dimension\")\n else:\n x = asmodel(x)\n if len(x) != len(self):\n raise DimensionMismatchError(\n \"Predictors do not have same number of cases (%i) as the \"\n \"dependent variable (%i)\" % (len(x), len(self)))\n\n betas = stats.betas(self.x, x)[1:] # drop intercept\n out_dims = (Case,) + self.dims[1:]\n return self._package_aggregated_output(betas, out_dims, info, name or self.name)\n\n def ols_t(self, x, name=None):\n \"\"\"\n Compute T-values for sample-wise ordinary least squares regressions\n\n Parameters\n ----------\n x : Model\n Predictor or predictors. Can also be supplied as argument that can\n be converted to a Model, for example ``Var`` or list of ``Var``.\n name : str\n Name of the output NDVar (default is the current name).\n\n Returns\n -------\n t : NDVar\n Per sample t-values. 
The case dimension reflects the predictor\n variables in the same order as the Model's effects.\n\n Notes\n -----\n Betas for the intercept are not returned.\n\n See Also\n --------\n .ols : Regression coefficients\n \"\"\"\n from ._stats import stats\n\n if not self.has_case:\n raise DimensionMismatchError(\n \"Can only apply regression to NDVar with case dimension\")\n\n x = asmodel(x)\n if len(x) != len(self):\n raise DimensionMismatchError(\n \"Predictors do not have same number of cases (%i) as the \"\n \"dependent variable (%i)\" % (len(x), len(self)))\n\n t = stats.lm_t(self.x, x._parametrize())[1:] # drop intercept\n return NDVar(t, ('case',) + self.dims[1:], self.info.copy(),\n name or self.name)\n\n @staticmethod\n def _package_aggregated_output(x, dims, info, name):\n ndims = len(dims)\n if ndims == 0:\n return x\n elif ndims == 1 and isinstance(dims[0], Case):\n return Var(x, name, info=info)\n else:\n return NDVar(x, dims, info, name)\n\n def repeat(self, repeats, name=None):\n \"\"\"Repeat slices of the NDVar along the case dimension\n\n Parameters\n ----------\n repeats : int | array of ints\n The number of repetitions for each element. `repeats` is\n broadcasted to fit the shape of the given dimension.\n name : str\n Name of the output NDVar (default is the current name).\n \"\"\"\n if self.has_case:\n x = self.x.repeat(repeats, axis=0)\n dims = self.dims\n else:\n x = self.x[newaxis].repeat(repeats, axis=0)\n dims = (Case(repeats),) + self.dims\n return NDVar(x, dims, self.info.copy(), name or self.name)\n\n def residuals(self, x, name=None):\n \"\"\"\n The residuals of sample-wise ordinary least squares regressions\n\n Parameters\n ----------\n x : Model\n Predictor or predictors. 
Can also be supplied as argument that can\n be converted to a Model, for example ``Var`` or list of ``Var``.\n name : str\n Name of the output NDVar (default is the current name).\n\n Returns\n -------\n residuals : NDVar\n Residual for each case and sample (same dimensions as data).\n \"\"\"\n if not self.has_case:\n raise DimensionMismatchError(\n \"Can only apply regression to NDVar with case dimension\")\n\n x = asmodel(x)\n if len(x) != len(self):\n raise DimensionMismatchError(\n \"Predictors do not have same number of cases (%i) as the \"\n \"dependent variable (%i)\" % (len(x), len(self)))\n\n from ._stats import stats\n res = stats.residuals(self.x, x)\n info = self.info.copy()\n return NDVar(res, self.dims, info, name or self.name)\n\n def rms(self, axis=(), **regions):\n \"\"\"Compute the root mean square over given dimensions\n\n Parameters\n ----------\n axis : str | tuple of str | boolean NDVar\n Dimensions over which to operate. A str is used to specify a single\n dimension, a tuple of str to specify several dimensions, None to\n compute the standard deviation over all values.\n An boolean NDVar with the same dimensions as the data can be used\n to compute the RMS in specific elements (if the data has a case\n dimension, the RMS is computed for each case).\n *regions*\n Regions over which to aggregate. For example, to get the RMS\n between time=0.1 and time=0.2, use ``ndvar.rms(time=(0.1, 0.2))``.\n name : str\n Name of the output NDVar (default is the current name).\n\n Returns\n -------\n rms : NDVar | Var | float\n The root mean square over specified dimensions. 
Return a Var if\n only the case dimension remains, and a float if the function\n collapses over all data.\n \"\"\"\n from ._stats.stats import rms\n return self._aggregate_over_dims(axis, regions, rms)\n\n def sign(self, name=None):\n \"\"\"Element-wise indication of the sign\n\n Parameters\n ----------\n name : str\n Name of the output NDVar (default is the current name).\n\n Returns\n -------\n sign : NDVar\n NDVar of same shape, ``-1 if x < 0, 0 if x==0, 1 if x > 0``.\n\n Notes\n -----\n Like :func:`numpy.sign`.\n \"\"\"\n return NDVar(np.sign(self.x), self.dims, self.info.copy(),\n name or self.name)\n\n def smooth(self, dim, window_size, window='hamming', mode='center',\n name=None):\n \"\"\"Smooth data by convolving it with a window\n\n Parameters\n ----------\n dim : str\n Dimension along which to smooth.\n window_size : scalar\n Size of the window (in dimension units, i.e., for time in\n seconds). For finite windows this is the full size of the window, \n for a gaussian window it is the standard deviation.\n window : str | tuple\n Window type, input to :func:`scipy.signal.get_window`. For example\n 'boxcar', 'triang', 'hamming' (default). 
For dimensions with\n irregular spacing, such as :class:`SourceSpace`, only ``gaussian``\n is implemented.\n mode : 'left' | 'center' | 'right'\n Alignment of the output to the input relative to the window:\n\n - ``left``: sample in the output corresponds to the left edge of\n the window.\n - ``center``: sample in the output corresponds to the center of\n the window.\n - ``right``: sample in the output corresponds to the right edge of\n the window.\n name : str\n Name for the smoothed NDVar.\n\n Returns\n -------\n smoothed_ndvar : NDVar\n NDVar with idential dimensions containing the smoothed data.\n\n Notes\n -----\n To perform Gaussian smoothing with a given full width at half maximum,\n the standard deviation can be calculated with the following conversion::\n\n >>> std = fwhm / (2 * (sqrt(2 * log(2))))\n \"\"\"\n axis = self.get_axis(dim)\n dim_object = self.get_dim(dim)\n if window == 'gaussian':\n if mode != 'center':\n raise ValueError(\"For gaussian smoothing, mode must be \"\n \"'center'; got mode=%r\" % (mode,))\n elif dim_object._connectivity_type == 'custom':\n m = gaussian_smoother(dim_object._distances(), window_size)\n else:\n raise NotImplementedError(\"Gaussian smoothing for %s \"\n \"dimension\" % (dim_object.name,))\n x = np.tensordot(m, self.x, (1, axis))\n if axis:\n x = x.swapaxes(0, axis)\n elif dim_object._connectivity_type == 'custom':\n raise ValueError(\"For non-regular dimensions window must be \"\n \"'gaussian', got %r\" % (window,))\n else:\n if dim == 'time':\n n = int(round(window_size / dim_object.tstep))\n else:\n raise NotImplementedError(\"dim=%r\" % (dim,))\n window = scipy.signal.get_window(window, n, False)\n if not window.size:\n raise ValueError(\"window_size=%r: window too small for the \"\n \"NDVar's sampling rate\" % (window_size,))\n window /= window.sum()\n window.shape = (1,) * axis + (n,) + (1,) * (self.ndim - axis - 1)\n if mode == 'center':\n x = scipy.signal.convolve(self.x, window, 'same')\n else:\n x = 
scipy.signal.convolve(self.x, window, 'full')\n index = FULL_AXIS_SLICE * axis\n if mode == 'left':\n x = x[index + (slice(self.shape[axis]),)]\n elif mode == 'right':\n x = x[index + (slice(-self.shape[axis], None),)]\n else:\n raise ValueError(\"mode=%r\" % (mode,))\n return NDVar(x, self.dims, self.info.copy(), name or self.name)\n\n def std(self, dims=(), **regions):\n \"\"\"Compute the standard deviation over given dimensions\n\n Parameters\n ----------\n dims : str | tuple of str | boolean NDVar\n Dimensions over which to operate. A str is used to specify a single\n dimension, a tuple of str to specify several dimensions, None to\n compute the standard deviation over all values.\n An boolean NDVar with the same dimensions as the data can be used\n to compute the standard deviation in specific elements (if the data\n has a case dimension, the standard deviation is computed for each\n case).\n *regions*\n Regions over which to aggregate. For example, to get the STD\n between time=0.1 and time=0.2, use ``ndvar.std(time=(0.1, 0.2))``.\n name : str\n Name of the output NDVar (default is the current name).\n\n Returns\n -------\n std : NDVar | Var | float\n The standard deviation over specified dimensions. Return a Var if\n only the case dimension remains, and a float if the function\n collapses over all data.\n \"\"\"\n return self._aggregate_over_dims(dims, regions, np.std)\n\n def summary(self, *dims, **regions):\n r\"\"\"Aggregate specified dimensions.\n\n .. warning::\n Data is collapsed over the different dimensions in turn using the\n provided function with an axis argument. For certain functions\n this is not equivalent to collapsing over several axes concurrently\n (e.g., np.var).\n\n dimension:\n A whole dimension is specified as string argument. This\n dimension is collapsed over the whole range.\n range:\n A range within a dimension is specified through a keyword-argument.\n Only the data in the specified range is included. 
    def sub(self, *args, **kwargs):
        """Retrieve a slice through the NDVar.

        Return a new NDVar with a slice of the current NDVar's data.
        The slice is specified using arguments and keyword arguments.

        Indexes for dimensions can be either specified as arguments in the
        order of the data axes, or with dimension names as keywords; for::

            >>> x = datasets.get_uts(True)['utsnd']
            >>> x.sub(time=0.1)

        ``x.sub(time=0.1)`` is equivalent to ``x.sub((), (), 0.1)`` and
        ``x[:, :, 0.1]``.

        Tuples are reserved for slicing and are treated like ``slice`` objects.
        Use lists for indexing arbitrary sequences of elements.

        The name of the new NDVar can be set with a ``name`` keyword
        (``x.sub(time=0.1, name="new_name")``). The default is the name of the
        current NDVar.
        """
        var_name = kwargs.pop('name', self.name)
        info = self.info.copy()
        dims = list(self.dims)
        n_axes = len(dims)
        # one index per axis; FULL_SLICE is the no-op default
        index = [FULL_SLICE] * n_axes
        # raw per-axis index arguments before dimension-specific conversion
        index_args = [None] * n_axes
        add_axis = False

        # sequence args: positional indexes, NDVar masks, or newaxis
        for i, arg in enumerate(args):
            if isinstance(arg, NDVar):
                # an NDVar index addresses the axis of its own first dimension
                if arg.has_case:
                    raise ValueError("NDVar with case dimension can not serve"
                                     "as NDVar index")
                dimax = self.get_axis(arg.dims[0].name)
                if index_args[dimax] is None:
                    index_args[dimax] = arg
                else:
                    raise IndexError("Index for %s dimension specified twice."
                                     % arg.dims[0].name)
            elif arg is newaxis:
                # newaxis can only prepend a case dimension
                if i > 0:
                    raise IndexError("newaxis must be in first index position")
                elif self.has_case:
                    raise IndexError("NDVar already has case dimension")
                add_axis = True
            else:
                index_args[i] = arg

        # sequence kwargs: indexes addressed by dimension name
        for dimname, arg in kwargs.items():
            dimax = self.get_axis(dimname)
            if index_args[dimax] is None:
                index_args[dimax] = arg
            else:
                raise RuntimeError("Index for %s dimension specified twice." % dimname)

        # process indexes: convert each dimension-level index to an array index
        for dimax, idx in enumerate(index_args):
            if idx is None:
                continue
            dim = self.dims[dimax]

            # find index (dimension translates e.g. a time value to a position)
            idx = dim._array_index(idx)
            index[dimax] = idx

            # find corresponding output dim; None marks a dropped axis
            if np.isscalar(idx):
                dims[dimax] = None
            elif dimax >= self.has_case:
                dims[dimax] = dim[idx]
            else:
                dims[dimax] = Case
        if add_axis:
            dims.insert(0, Case)

        # adjust index dimension: with more than one array index, numpy would
        # broadcast them together; reshape so each array index selects along
        # its own output axis (orthogonal indexing)
        if sum(isinstance(idx, np.ndarray) for idx in index) > 1:
            ndim_increment = 0
            for i in range(n_axes - 1, -1, -1):
                idx = index[i]
                if ndim_increment and isinstance(idx, (slice, np.ndarray)):
                    if isinstance(idx, slice):
                        idx = slice_to_arange(idx, self.x.shape[i])
                    elif idx.dtype.kind == 'b':
                        idx = np.flatnonzero(idx)
                    index[i] = idx[FULL_AXIS_SLICE + (None,) * ndim_increment]

                if isinstance(idx, np.ndarray):
                    ndim_increment += 1

        # create NDVar
        x = self.x[tuple(index)]
        if add_axis:
            x = np.expand_dims(x, 0)
        dims = tuple(dim for dim in dims if dim is not None)
        return self._package_aggregated_output(x, dims, info, var_name)
Return a Var if only the\n case dimension remains, and a float if the function collapses over\n all data.\n \"\"\"\n return self._aggregate_over_dims(dims, regions, np.sum)\n\n def threshold(self, v, tail=1, name=None):\n \"\"\"Set all values below a threshold to 0.\n\n Parameters\n ----------\n v : scalar\n Threshold value.\n tail : -1 | 0 | 1\n Tailedness.\n 1: set values below v to 0 (default);\n 0: set values between -v and v to 0;\n -1: set values above v to 0.\n name : str\n Name of the output NDVar (default is the current name).\n \"\"\"\n if tail == 0:\n v = abs(v)\n idx = self.x >= v\n np.logical_or(idx, self.x <= -v, idx)\n elif tail == 1:\n idx = self.x >= v\n elif tail == -1:\n idx = self.x <= v\n else:\n raise ValueError(\"Invalid value tail=%r; need -1, 0 or 1\" % (tail,))\n info = self.info.copy()\n return NDVar(np.where(idx, self.x, 0), self.dims, info,\n name or self.name)\n\n def var(self, dims=(), ddof=0, **regions):\n \"\"\"Compute the variance over given dimensions\n\n Parameters\n ----------\n dims : str | tuple of str | boolean NDVar\n Dimensions over which to operate. A str is used to specify a single\n dimension, a tuple of str to specify several dimensions, None to\n compute the sum over all dimensions.\n An boolean NDVar with the same dimensions as the data can be used\n to compute the variance in specific elements (if the data has a case\n dimension, the variance is computed for each case).\n ddof : int\n Degrees of freedom (default 0; see :func:`numpy.var`).\n name : str\n Name of the output NDVar (default is the current name).\n **regions\n Regions over which to aggregate. For example, to get the variance\n between time=0.1 and time=0.2, use ``ndvar.var(time=(0.1, 0.2))``.\n\n Returns\n -------\n var : NDVar | Var | float\n The variance over specified dimensions. 
def extrema(x, axis=None):
    """Extract the extreme values in x.

    For each position, return whichever of the maximum and minimum has the
    larger absolute value; ties go to the maximum.

    Parameters
    ----------
    x : array_like
        Input data.
    axis : int | None
        Axis along which to operate (default: over the flattened array).

    Returns
    -------
    extrema : scalar | array
        Signed extreme value(s).
    """
    # avoid shadowing the builtins max/min
    x_max = np.max(x, axis)
    x_min = np.min(x, axis)
    if np.isscalar(x_max):
        # use >= so ties resolve to the maximum, consistent with the
        # array branch below (previously the scalar path used > and
        # returned the minimum on a tie)
        return x_max if abs(x_max) >= abs(x_min) else x_min
    return np.where(np.abs(x_max) >= np.abs(x_min), x_max, x_min)
args += ', %s' % repr(self.name)\n if self._fmt != 'repr':\n args += ', fmt=%s' % repr(self._fmt)\n return \"Datalist(%s)\" % args\n\n def __str__(self):\n return \"[%s]\" % ', '.join(self._item_repr(i) for i in self)\n\n def _item_repr(self, item):\n if self._fmt == 'str':\n return str(item)\n elif self._fmt == 'repr':\n out = repr(item)\n if len(out) > 15:\n return out[:12] + '...'\n else:\n return out\n elif self._fmt == 'strlist':\n return \"[%s]\" % ', '.join(item)\n else:\n raise RuntimeError(\"Datalist._fmt=%s\" % repr(self._fmt))\n\n def __eq__(self, other):\n if len(self) != len(other):\n raise ValueError(\"Unequal length\")\n return np.array([s == o for s, o in zip(self, other)])\n\n def __ne__(self, other):\n if len(self) != len(other):\n raise ValueError(\"Unequal length\")\n return np.array([s != o for s, o in zip(self, other)])\n\n def __getitem__(self, index):\n if isinstance(index, Integral):\n return list.__getitem__(self, index)\n elif isinstance(index, slice):\n return Datalist(list.__getitem__(self, index), fmt=self._fmt)\n else:\n return Datalist(apply_numpy_index(self, index), fmt=self._fmt)\n\n def __setitem__(self, key, value):\n if isinstance(key, LIST_INDEX_TYPES):\n list.__setitem__(self, key, value)\n elif isinstance(key, np.ndarray):\n if key.dtype.kind == 'b':\n key = np.flatnonzero(key)\n elif key.dtype.kind != 'i':\n raise TypeError(\"Array index needs to be int or bool type\")\n\n if np.iterable(value):\n if len(key) != len(value):\n raise ValueError(\"Need one value per index when setting a \"\n \"range of entries in a Datalist.\")\n for k, v in zip(key, value):\n list.__setitem__(self, k, v)\n else:\n for k in key:\n list.__setitem__(self, k, value)\n else:\n raise NotImplementedError(\"Datalist indexing with %s\" % type(key))\n\n def __getslice__(self, i, j):\n return Datalist(list.__getslice__(self, i, j), fmt=self._fmt)\n\n def __add__(self, other):\n return Datalist(super(Datalist, self).__add__(other), fmt=self._fmt)\n\n def 
aggregate(self, x, merge='mean'):\n \"\"\"\n Summarize cases for each cell in x\n\n Parameters\n ----------\n x : categorial\n Cells which to aggregate.\n merge : str\n How to merge entries.\n ``'mean'``: sum elements and dividie by cell length\n \"\"\"\n if len(x) != len(self):\n err = \"Length mismatch: %i (Var) != %i (x)\" % (len(self), len(x))\n raise ValueError(err)\n\n x_out = []\n for cell in x.cells:\n x_cell = self[x == cell]\n n = len(x_cell)\n if n == 1:\n x.append(x_cell)\n elif n > 1:\n if merge == 'mean':\n xc = reduce(operator.add, x_cell)\n xc /= n\n else:\n raise ValueError(\"Invalid value for merge: %r\" % merge)\n x_out.append(xc)\n\n return Datalist(x_out, fmt=self._fmt)\n\n def __iadd__(self, other):\n return self + other\n\n def append(self, p_object):\n raise TypeError(\"Datalist has fixed length to conform to Dataset\")\n\n def insert(self, index, p_object):\n raise TypeError(\"Datalist has fixed length to conform to Dataset\")\n\n def pop(self, index=None):\n raise TypeError(\"Datalist has fixed length to conform to Dataset\")\n\n def remove(self, value):\n raise TypeError(\"Datalist has fixed length to conform to Dataset\")\n\n def _update_listlist(self, other):\n \"Update list elements from another list of lists\"\n if len(self) != len(other):\n raise ValueError(\"Unequal length\")\n for i in range(len(self)):\n if any(item not in self[i] for item in other[i]):\n self[i] = sorted(set(self[i]).union(other[i]))\n\n\nlegal_dataset_key_re = re.compile(\"[_A-Za-z][_a-zA-Z0-9]*$\")\n\n\ndef assert_is_legal_dataset_key(key):\n if iskeyword(key):\n msg = (\"%r is a reserved keyword and can not be used as variable name \"\n \"in a Dataset\" % key)\n raise ValueError(msg)\n elif not legal_dataset_key_re.match(key):\n msg = (\"%r is not a valid keyword and can not be used as variable name \"\n \"in a Dataset\" % key)\n raise ValueError(msg)\n\n\ndef as_legal_dataset_key(key):\n \"Convert str to a legal dataset key\"\n if iskeyword(key):\n return 
\"%s_\" % key\n elif legal_dataset_key_re.match(key):\n return key\n else:\n if ' ' in key:\n key = key.replace(' ', '_')\n for c in string.punctuation:\n if c in key:\n key = key.replace(c, '_')\n\n if key == '':\n key = '_'\n elif key[0].isdigit():\n key = \"_%s\" % key\n\n if legal_dataset_key_re.match(key):\n return key\n else:\n raise RuntimeError(\"Could not convert %r to legal dataset key\")\n\n\ndef cases_arg(cases, n_cases):\n \"Coerce cases argument to iterator\"\n if isinstance(cases, Integral):\n if cases < 1:\n cases = n_cases + cases\n if cases < 0:\n raise ValueError(\"Can't get table for fewer than 0 cases\")\n else:\n cases = min(cases, n_cases)\n if cases is not None:\n return range(cases)\n else:\n return cases\n\n\nclass Dataset(OrderedDict):\n \"\"\"\n Stores multiple variables pertaining to a common set of measurement cases\n\n Superclass: :class:`collections.OrderedDict`\n\n Parameters\n ----------\n items : iterator\n Items contained in the Dataset. Items can be either named\n data-objects or ``(name, data_object)`` tuples. The Dataset stores\n the input items themselves, without making a copy().\n name : str\n Name for the Dataset.\n caption : str\n Caption for the table.\n info : dict\n Info dictionary, can contain arbitrary entries and can be accessed\n as ``.info`` attribute after initialization. The Dataset makes a\n shallow copy.\n n_cases : int\n Specify the number of cases in the Dataset if no items are added\n upon initialization (by default the number is inferred when the\n fist item is added).\n\n\n Attributes\n ----------\n n_cases : None | int\n The number of cases in the Dataset (corresponding to the number of\n rows in the table representation). None if no variables have been\n added.\n n_items : int\n The number of items (variables) in the Dataset (corresponding to the\n number of columns in the table representation).\n\n\n Notes\n -----\n A Dataset represents a data table as a ``{variable_name: value_list}``\n dictionary. 
Each variable corresponds to a column, and each index in the\n value list corresponds to a row, or case.\n\n The Dataset class inherits most of its behavior from its superclass\n :py:class:`collections.OrderedDict`.\n Dictionary keys are enforced to be :py:class:`str` objects and should\n correspond to the variable names.\n As for a dictionary, The Dataset's length (``len(ds)``) reflects the number\n of variables in the Dataset (i.e., the number of rows).\n\n\n **Accessing Data**\n\n Standard indexing with :class:`str` is used to access the contained Var\n and Factor objects:\n\n - ``ds['var1']`` --> ``var1``.\n - ``ds['var1',]`` --> ``Dataset([var1])``.\n - ``ds['var1', 'var2']`` --> ``Dataset([var1, var2])``\n\n When indexing numerically, the first index defines cases (rows):\n\n - ``ds[1]`` --> row 1\n - ``ds[1:5]`` or ``ds[1,2,3,4]`` --> rows 1 through 4\n - ``ds[1, 5, 6, 9]`` or ``ds[[1, 5, 6, 9]]`` --> rows 1, 5, 6 and 9\n\n The second index accesses columns, so case indexing can be combined with\n column indexing:\n\n - ``ds[:4, :2]`` --> first 4 rows of first 2 columns\n\n Index a single case retrieves an individual case as ``{name: value}``\n dictionaries:\n\n - ``ds[1]`` --> ``{'var': 1, 'factor': 'value', ...}``\n\n The :meth:`.itercases` method can be used to iterate over cases as\n :class:`dict`.\n\n\n **Naming**\n\n While Var and Factor objects themselves need not be named, they need\n to be named when added to a Dataset. 
    def __init__(self, *args, **kwargs):
        # backwards compatibility: items used to be passed as separate
        # positional arguments; detect the legacy call shape
        # ``Dataset(('name', item), ...)`` and re-pack it as one sequence
        if args:
            if isinstance(args[0], tuple) and isinstance(args[0][0], str):
                items, name, caption, info, n_cases = self._args(args, **kwargs)
            else:
                items, name, caption, info, n_cases = self._args(*args, **kwargs)
        else:
            items, name, caption, info, n_cases = self._args(**kwargs)

        # unpack data-objects: bare data-objects must carry a name, which
        # becomes their key; (key, value) tuples pass through unchanged
        args = []
        for item in items:
            if isdataobject(item):
                if not item.name:
                    raise ValueError("items need to be named in a Dataset; use "
                                     "Dataset(('name', item), ...), or ds = "
                                     "Dataset(); ds['name'] = item")
                args.append((item.name, item))
            else:
                args.append(item)

        # set state; n_cases must be set before items are added so that
        # per-item length checks can compare against it
        self.n_cases = None if n_cases is None else int(n_cases)
        # uses __setitem__() which checks items and length:
        super(Dataset, self).__init__(args)
        self.name = name
        self.info = info.copy()
        self._caption = caption
    def __getitem__(self, index):
        """
        possible::

            >>> ds[9] (int) -> dictionary for one case
            >>> ds[9:12] (slice) -> subset with those cases
            >>> ds[[9, 10, 11]] (list) -> subset with those cases
            >>> ds['MEG1'] (strings) -> Var
            >>> ds['MEG1', 'MEG2'] (list of strings) -> list of vars; can be nested!

        """
        if isinstance(index, slice):
            # slice of cases (rows)
            return self.sub(index)
        elif isinstance(index, str):
            # single item (column) by name
            return super(Dataset, self).__getitem__(index)
        elif isinstance(index, Integral):
            # single case (row) as dict
            return self.get_case(index)
        elif not np.iterable(index):
            raise KeyError("Invalid index for Dataset: %r" % index)
        elif all(isinstance(item, str) for item in index):
            # sequence of keys -> sub-Dataset with those columns
            return self.sub(keys=index)
        elif isinstance(index, tuple):
            # two-component index: one component selects cases, the other
            # selects columns; order is normalized below
            if len(index) != 2:
                raise KeyError("Invalid index for Dataset: %s" % repr(index))

            i0, i1 = index
            if isinstance(i0, str):
                return self[i0][i1]
            elif isinstance(i1, str):
                return self[i1][i0]
            elif np.iterable(i0) and isinstance(i0[0], str):
                # keys came first; swap so keys are the second component
                return self[i1, i0]
            elif np.iterable(i1) and all(isinstance(item, str) for item in i1):
                keys = i1
            else:
                # numeric column index -> resolve to key name(s)
                keys = Datalist(self.keys())[i1]
                if isinstance(keys, str):
                    # a single column was selected
                    return self[i1][i0]
            return Dataset(((k, self[k][i0]) for k in keys), self.name,
                           self._caption, self.info)
        else:
            # e.g. array of case indices
            return self.sub(index)
'.join(items))\n\n rep_tmp = \"<%(class_name)s %(name)s%(N)s{%(items)s}>\"\n fmt = {'class_name': class_name}\n fmt['name'] = '%r ' % self.name if self.name else ''\n fmt['N'] = 'n_cases=%i ' % self.n_cases\n items = []\n for key in self:\n v = self[key]\n if isinstance(v, Var):\n lbl = 'V'\n elif isinstance(v, Factor):\n lbl = 'F'\n elif isinstance(v, NDVar):\n lbl = 'Vnd'\n else:\n lbl = type(v).__name__\n\n if isdataobject(v) and v.name != key:\n item = '%r:%s<%r>' % (key, lbl, v.name)\n else:\n item = '%r:%s' % (key, lbl)\n\n items.append(item)\n\n fmt['items'] = ', '.join(items)\n return rep_tmp % fmt\n\n def _repr_pretty_(self, p, cycle):\n if cycle:\n raise NotImplementedError\n p.text(self.__repr__())\n\n def __setitem__(self, index, item, overwrite=True):\n if isinstance(index, str):\n # test if name already exists\n if (not overwrite) and (index in self):\n raise KeyError(\"Dataset already contains variable of name %r\" % index)\n assert_is_legal_dataset_key(index)\n\n # coerce to data-object\n if isdataobject(item) or isinstance(object, Datalist):\n if (item.name is None or (item.name != index and\n item.name != as_legal_dataset_key(index))):\n item.name = index\n n = 0 if (isinstance(item, NDVar) and not item.has_case) else len(item)\n elif isinstance(item, (list, tuple)):\n item = Datalist(item, index)\n n = len(item)\n elif isinstance(item, np.ndarray):\n n = len(item)\n if item.ndim == 1:\n item = Var(item, index)\n else:\n try:\n n = len(item)\n except TypeError:\n raise TypeError(\"Only items with length can be assigned to \"\n \"a Dataset; got %r\" % (item,))\n\n # make sure the item has the right length\n if self.n_cases is None:\n self.n_cases = n\n elif self.n_cases != n:\n raise ValueError(\n \"Can not assign item to Dataset. 
The item`s length (%i) is \"\n \"different from the number of cases in the Dataset (%i).\" %\n (n, self.n_cases))\n\n super(Dataset, self).__setitem__(index, item)\n elif isinstance(index, tuple):\n if len(index) != 2:\n raise NotImplementedError(\n \"Dataset indexes can have at most two components; direct \"\n \"access to NDVars is not implemented\")\n idx, key = index\n if isinstance(idx, str):\n key, idx = idx, key\n elif not isinstance(key, str):\n raise TypeError(f\"Dataset key {key!r}; needs to be str\")\n\n if key in self:\n self[key][idx] = item\n elif isinstance(idx, slice):\n if idx.start is None and idx.stop is None:\n if isdataobject(item):\n self[key] = item\n elif self.n_cases is None:\n raise TypeError(\"Can't assign slice of empty Dataset\")\n elif isinstance(item, str):\n self[key] = Factor([item], repeat=self.n_cases)\n elif np.isscalar(item):\n self[key] = Var([item], repeat=self.n_cases)\n else:\n raise TypeError(\n f\"{item!r} is not supported for slice-assignment of \"\n f\"a new variable. Use a str for a new Factor or a \"\n f\"scalar for a new Var.\")\n else:\n raise NotImplementedError(\n \"When assigning a new item in a Dataset, all values \"\n \"need to be set (ds[:,'name'] = ...)\")\n else:\n raise NotImplementedError(\"Advanced Dataset indexing\")\n else:\n raise NotImplementedError(\"Advanced Dataset indexing\")\n\n def __str__(self):\n if sum(isuv(i) or isdatalist(i) for i in self.values()) == 0:\n return self.__repr__()\n\n maxn = preferences['dataset_str_n_cases']\n if self.n_cases > maxn:\n caption = \"... 
(use .as_table() method to see the whole Dataset)\"\n else:\n caption = None\n txt = self.as_table(maxn, '%.5g', midrule=True, caption=caption,\n lfmt=True)\n return str(txt)\n\n def _check_n_cases(self, x, empty_ok=True):\n \"\"\"Check that an input argument has the appropriate length.\n\n Also raise an error if empty_ok is False and the Dataset is empty.\n \"\"\"\n if self.n_cases is None:\n if not empty_ok:\n raise RuntimeError(\"Dataset is empty.\")\n elif self.n_cases != len(x):\n raise ValueError(\n f\"{dataobj_repr(x)} with length {len(x)}: The Dataset has a \"\n f\"different length ({self.n_cases})\")\n\n def add(self, item, replace=False):\n \"\"\"``ds.add(item)`` -> ``ds[item.name] = item``\n\n unless the Dataset already contains a variable named item.name, in\n which case a KeyError is raised. In order to replace existing\n variables, set ``replace`` to True::\n\n >>> ds.add(item, True)\n\n \"\"\"\n if not isdataobject(item):\n raise ValueError(\"Not a valid data-object: %r\" % item)\n elif item.name is None:\n raise ValueError(\"Dataset.add(obj) can only take named objects \"\n \"(obj.name can not be None)\")\n elif (item.name in self) and not replace:\n raise KeyError(\"Dataset already contains variable named %r\" % item.name)\n else:\n self[item.name] = item\n\n def add_empty_var(self, name, dtype=np.float64):\n \"\"\"Create an empty variable in the dataset\n\n Parameters\n ----------\n name : str\n Name for the new variable.\n dtype : numpy dtype\n Data type of the new variable (default is float64).\n\n Returns\n -------\n var : Var\n The new variable.\n \"\"\"\n if self.n_cases is None:\n err = \"Can't add variable to a Dataset without length\"\n raise RuntimeError(err)\n x = np.empty(self.n_cases, dtype=dtype)\n v = Var(x)\n self[name] = v\n return v\n\n def as_table(self, cases=0, fmt='%.6g', sfmt='%s', sort=False, header=True,\n midrule=False, count=False, title=None, caption=None,\n ifmt='%s', bfmt='%s', lfmt=False):\n r\"\"\"\n Create an 
    def as_table(self, cases=0, fmt='%.6g', sfmt='%s', sort=False, header=True,
                 midrule=False, count=False, title=None, caption=None,
                 ifmt='%s', bfmt='%s', lfmt=False):
        r"""
        Create an fmtxt.Table containing all Vars and Factors in the Dataset.

        Can be used for exporting in different formats such as csv.

        Parameters
        ----------
        cases : int | iterator of int
            Cases to include (int includes that many cases from the beginning,
            0 includes all; negative number works like negative indexing).
        fmt : str
            Format string for float variables (default ``'%.6g'``).
        sfmt : str | None
            Formatting for strings (None -> code; default ``'%s'``).
        sort : bool
            Sort the columns alphabetically.
        header : bool
            Include the variable names as a header row.
        midrule : bool
            print a midrule after table header.
        count : bool
            Add an initial column containing the case number.
        title : None | str
            Title for the table.
        caption : None | str
            Caption for the table (default is the Dataset's caption).
        ifmt : str
            Formatting for integers (default ``'%s'``).
        bfmt : str
            Formatting for booleans (default ``'%s'``).
        lfmt : bool
            Include Datalists.
        """
        cases = cases_arg(cases, self.n_cases)
        if cases is None:
            # empty Dataset -> empty table
            return fmtxt.Table('')
        # only univariate items (and, optionally, Datalists) become columns
        keys = [k for k, v in self.items() if isuv(v) or (lfmt and isdatalist(v))]
        if sort:
            keys = sorted(keys)

        if caption is None:
            caption = self._caption

        # pick a format per column based on its data type
        values = [self[key] for key in keys]
        fmts = []
        for v in values:
            if isinstance(v, Factor):
                fmts.append(sfmt)
            elif isintvar(v):
                fmts.append(ifmt)
            elif isboolvar(v):
                fmts.append(bfmt)
            elif isdatalist(v):
                # sentinel: rendered through the Datalist's own item repr
                fmts.append('dl')
            else:
                fmts.append(fmt)

        columns = 'l' * (len(keys) + count)
        table = fmtxt.Table(columns, True, title, caption)

        if header:
            if count:
                table.cell('#')
            for name in keys:
                table.cell(name)

        if midrule:
            table.midrule()

        for i in cases:
            if count:
                table.cell(i)

            for v, fmt_ in zip(values, fmts):
                if fmt_ is None:
                    # sfmt=None -> raw Factor codes
                    table.cell(v.x[i])
                elif fmt_ == 'dl':
                    table.cell(v._item_repr(v[i]))
                elif fmt_.endswith(('r', 's')):
                    table.cell(fmt_ % v[i])
                else:
                    table.cell(fmtxt.Number(v[i], fmt=fmt_))

        return table
fmt=fmt_))\n\n return table\n\n def _asfmtext(self):\n return self.as_table()\n\n def eval(self, expression):\n \"\"\"\n Evaluate an expression involving items stored in the Dataset.\n\n Parameters\n ----------\n expression : str\n Python expression to evaluate.\n\n Notes\n -----\n ``ds.eval(expression)`` is equivalent to\n ``eval(expression, globals, ds)`` with ``globals=numpy`` plus some\n Eelbrain functions.\n\n Examples\n --------\n In a Dataset containing factors 'A' and 'B'::\n\n >>> ds.eval('A % B')\n A % B\n\n \"\"\"\n if not isinstance(expression, str):\n raise TypeError(\"Eval needs expression of type unicode or str. Got \"\n \"%s\" % repr(expression))\n return eval(expression, EVAL_CONTEXT, self)\n\n @classmethod\n def from_caselist(cls, names, cases):\n \"\"\"Create a Dataset from a list of cases\n\n Parameters\n ----------\n names : sequence of str\n Names for the variables.\n cases : sequence of sequence of { str | scalar | NDVar }\n A sequence of cases, whereby each case is itself represented as a\n sequence of values (str or scalar). 
Variable type (Factor or Var)\n is inferred from whether values are str or not.\n \"\"\"\n if isinstance(names, Iterator):\n names = tuple(names)\n if isinstance(cases, Iterator):\n cases = tuple(cases)\n n_cases = set(map(len, cases))\n if len(n_cases) > 1:\n raise ValueError('not all cases have same length')\n n_cases = n_cases.pop()\n if len(names) != n_cases:\n raise ValueError('names=%r: %i names but %i cases' % (names, len(names), n_cases))\n\n ds = cls()\n for i, name in enumerate(names):\n ds[name] = combine(case[i] for case in cases)\n return ds\n\n @classmethod\n def from_r(cls, name):\n \"\"\"Create a Dataset from an R data frame through ``rpy2``\n\n Parameters\n ----------\n name : str\n Name of the dataframe in R.\n\n Examples\n --------\n Getting an example dataset from R:\n\n >>> from rpy2.robjects import r\n >>> r('data(sleep)')\n >>> ds = Dataset.from_r('sleep')\n >>> print(ds)\n extra group ID\n ------------------\n 0.7 1 1\n -1.6 1 2\n -0.2 1 3\n -1.2 1 4\n -0.1 1 5\n 3.4 1 6\n 3.7 1 7\n 0.8 1 8\n 0 1 9\n 2 1 10\n 1.9 2 1\n 0.8 2 2\n 1.1 2 3\n 0.1 2 4\n -0.1 2 5\n 4.4 2 6\n 5.5 2 7\n 1.6 2 8\n 4.6 2 9\n 3.4 2 10\n \"\"\"\n from rpy2 import robjects as ro\n df = ro.r[name]\n if not isinstance(df, ro.DataFrame):\n raise ValueError(\"R object %r is not a DataFrame\")\n ds = cls(name=name)\n for item_name, item in df.items():\n if isinstance(item, ro.FactorVector):\n x = np.array(item)\n labels = {i: l for i, l in enumerate(item.levels, 1)}\n ds[item_name] = Factor(x, labels=labels)\n elif isinstance(item, (ro.FloatVector, ro.IntVector)):\n x = np.array(item)\n ds[item_name] = Var(x)\n else:\n raise NotImplementedError(str(type(item)))\n return ds\n\n def get_case(self, i):\n \"The i'th case as a dictionary\"\n return {k: v[i] for k, v in self.items()}\n\n def get_subsets_by(self, x, exclude=(), name='{name}[{cell}]'):\n \"\"\"Split the Dataset by the cells of ``x``\n\n Parameters\n ----------\n x : categorial\n Model defining cells into which to 
split the dataset.\n exclude : sequence of str\n Cells of ``x`` which should be ignored.\n name : str\n Name for the new datasets (formatted with ``self.name`` and\n ``cell``).\n\n Returns\n -------\n sub_datasets : dict\n ``{cell: sub_dataset}`` dictionary.\n \"\"\"\n if isinstance(x, str):\n x = self.eval(x)\n return {cell: self.sub(x == cell, name.format(name=self.name, cell=cell)) for\n cell in x.cells if cell not in exclude}\n\n def aggregate(self, x=None, drop_empty=True, name='{name}', count='n',\n drop_bad=False, drop=(), equal_count=False, never_drop=()):\n \"\"\"\n Return a Dataset with one case for each cell in x.\n\n Parameters\n ----------\n x : None | str | categorial\n Model defining cells to which to reduce cases. By default (``None``)\n the Dataset is reduced to a single case.\n drop_empty : bool\n Drops empty cells in x from the Dataset. This is currently the only\n option.\n name : str\n Name of the new Dataset.\n count : None | str\n Add a variable with this name to the new Dataset, containing the\n number of cases in each cell in x.\n drop_bad : bool\n Drop bad items: silently drop any items for which compression\n raises an error. This concerns primarily factors with non-unique\n values for cells in x (if drop_bad is False, an error is raised\n when such a Factor is encountered)\n drop : sequence of str\n Additional data-objects to drop.\n equal_count : bool\n Make sure the same number of rows go into each average. First, the\n cell with the smallest number of rows is determined. 
Then, for each\n cell, rows beyond that number are dropped.\n never_drop : sequence of str\n If the drop_bad=True setting would lead to dropping a variable\n whose name is in never_drop, raise an error instead.\n\n Notes\n -----\n Handle mne Epoch objects by creating a list with an mne Evoked object\n for each cell.\n \"\"\"\n if not drop_empty:\n raise NotImplementedError('drop_empty = False')\n\n if x:\n if equal_count:\n self = self.equalize_counts(x)\n x = ascategorial(x, ds=self)\n else:\n x = Factor('a' * self.n_cases)\n\n ds = Dataset(name=name.format(name=self.name), info=self.info)\n\n if count:\n ds[count] = Var(filter(None, (np.sum(x == cell) for cell in x.cells)))\n\n for k, v in self.items():\n if k in drop:\n continue\n try:\n if hasattr(v, 'aggregate'):\n ds[k] = v.aggregate(x)\n elif isinstance(v, MNE_EPOCHS):\n evokeds = []\n for cell in x.cells:\n idx = (x == cell)\n if idx.sum():\n evokeds.append(v[idx].average())\n ds[k] = evokeds\n else:\n err = (\"Unsupported value type: %s\" % type(v))\n raise TypeError(err)\n except:\n if drop_bad and k not in never_drop:\n pass\n else:\n raise\n\n return ds\n\n def copy(self, name=None):\n \"\"\"Create a shallow copy of the dataset\n\n Parameters\n ----------\n name : str\n Name for the new dataset (default is ``self.name``).\n \"\"\"\n return Dataset(self.items(), name or self.name, self._caption,\n self.info, self.n_cases)\n\n def equalize_counts(self, x, n=None):\n \"\"\"Create a copy of the Dataset with equal counts in each cell of x\n\n Parameters\n ----------\n x : categorial\n Model which defines the cells in which to equalize the counts.\n n : int\n Number of cases per cell (the default is the maximum possible, i.e.\n the number of cases in the cell with the least number of cases).\n Negative numbers to subtract from maximum possible.\n\n Returns\n -------\n equalized_ds : Dataset\n Dataset with equal number of cases in each cell of x.\n\n Notes\n -----\n First, the cell with the smallest number of 
rows is determined (empty\n cells are ignored). Then, for each cell, rows beyond that number are\n dropped.\n \"\"\"\n x = ascategorial(x, ds=self)\n self._check_n_cases(x, empty_ok=False)\n indexes = np.array([x == cell for cell in x.cells])\n n_by_cell = indexes.sum(1)\n n_max = np.setdiff1d(n_by_cell, [0]).min()\n if n is None:\n n_ = n_max\n elif n < 0:\n n_ = n_max + n\n else:\n n_ = n\n\n if n_ < 0 or n_ > n_max:\n raise ValueError(\"Invalid value n=%i; the maximum numer of cases \"\n \"per cell is %i\" % (n, n_max))\n\n for index in indexes:\n np.logical_and(index, index.cumsum() <= n_, index)\n index = indexes.any(0)\n return self[index]\n\n def head(self, n=10):\n \"Table with the first n cases in the Dataset\"\n return self.as_table(n, '%.5g', midrule=True, lfmt=True)\n\n def index(self, name='index', start=0):\n \"\"\"Add an index to the Dataset (i.e., ``range(n_cases)``)\n\n Parameters\n ----------\n name : str\n Name of the new index variable.\n start : int\n Number at which to start the index.\n \"\"\"\n if not isinstance(name, str):\n raise TypeError(\"name=%r\" % (name,))\n self[name] = Var(np.arange(start, self.n_cases + start))\n\n def itercases(self, start=None, stop=None):\n \"Iterate through cases (each case represented as a dict)\"\n if start is None:\n start = 0\n\n if stop is None:\n stop = self.n_cases\n elif stop < 0:\n stop = self.n_cases - stop\n\n for i in range(start, stop):\n yield self.get_case(i)\n\n @property\n def n_items(self):\n return super(Dataset, self).__len__()\n\n def rename(self, old, new):\n \"\"\"Shortcut to rename a data-object in the Dataset.\n\n Parameters\n ----------\n old : str\n Current name of the data-object.\n new : str\n New name for the data-object.\n \"\"\"\n if old not in self:\n raise KeyError(\"No item named %r\" % old)\n if new in self:\n raise ValueError(\"Dataset already has variable named %r\" % new)\n assert_is_legal_dataset_key(new)\n self[new] = self.pop(old)\n\n def repeat(self, repeats, 
name='{name}'):\n \"\"\"\n Return a new Dataset with each row repeated ``n`` times.\n\n Parameters\n ----------\n repeats : int | array of int\n Number of repeats, either a constant or a different number for each\n element.\n name : str\n Name for the new Dataset.\n \"\"\"\n if self.n_cases is None:\n raise RuntimeError(\"Can't repeat Dataset with unspecified n_cases\")\n\n if isinstance(repeats, Integral):\n n_cases = self.n_cases * repeats\n else:\n n_cases = sum(repeats)\n\n return Dataset(((k, v.repeat(repeats)) for k, v in self.items()),\n name.format(name=self.name), self._caption, self.info,\n n_cases)\n\n @property\n def shape(self):\n return (self.n_cases, self.n_items)\n\n def sort(self, order, descending=False):\n \"\"\"Sort the Dataset in place.\n\n Parameters\n ----------\n order : str | data-object\n Data object (Var, Factor or interactions) according to whose values\n to sort the Dataset, or its name in the Dataset.\n descending : bool\n Sort in descending instead of an ascending order.\n\n See Also\n --------\n .sort_index : Create an index that could be used to sort the Dataset\n .sorted : Create a sorted copy of the Dataset\n \"\"\"\n idx = self.sort_index(order, descending)\n for k in self:\n self[k] = self[k][idx]\n\n def sort_index(self, order, descending=False):\n \"\"\"Create an index that could be used to sort the Dataset.\n\n Parameters\n ----------\n order : str | data-object\n Data object (Var, Factor or interactions) according to whose values\n to sort the Dataset, or its name in the Dataset.\n descending : bool\n Sort in descending instead of an ascending order.\n\n See Also\n --------\n .sort : sort the Dataset in place\n .sorted : Create a sorted copy of the Dataset\n \"\"\"\n if isinstance(order, str):\n order = self.eval(order)\n\n if not len(order) == self.n_cases:\n err = (\"Order must be of same length as Dataset; got length \"\n \"%i.\" % len(order))\n raise ValueError(err)\n\n return order.sort_index(descending=descending)\n\n 
    def save(self):
        """Shortcut to save the Dataset, will display a system file dialog

        Notes
        -----
        Use specific save methods for more options.

        See Also
        --------
        .save_pickled : Pickle the Dataset
        .save_txt : Save as text file
        .save_tex : Save as TeX table
        .as_table : Create a table with more control over formatting
        """
        title = "Save Dataset"
        if self.name:
            title += ' %s' % self.name
        msg = ""
        filetypes = [_pickled_ds_wildcard, _tsv_wildcard, _tex_wildcard]
        path = ui.ask_saveas(title, msg, filetypes, defaultFile=self.name)
        # dispatch on the extension the user chose in the dialog
        _, ext = os.path.splitext(path)
        if ext == '.pickled':
            self.save_pickled(path)
        elif ext == '.txt':
            self.save_txt(path)
        elif ext == '.tex':
            self.save_tex(path)
        else:
            err = ("Unrecognized extension: %r. Needs to be .pickled, .txt or "
                   ".tex." % ext)
            raise ValueError(err)

    def save_rtf(self, path=None, fmt='%.3g'):
        """Save the Dataset as RTF table.

        Parameters
        ----------
        path : None | str
            Target file name (if ``None`` is supplied, a save file dialog is
            displayed).
        fmt : format string
            Formatting for scalar values.
        """
        table = self.as_table(fmt=fmt)
        table.save_rtf(path)

    def save_tex(self, path=None, fmt='%.3g', header=True, midrule=True):
        """Save the Dataset as TeX table.

        Parameters
        ----------
        path : None | str
            Target file name (if ``None`` is supplied, a save file dialog is
            displayed). If no extension is specified, '.tex' is appended.
        fmt : format string
            Formatting for scalar values.
        header : bool
            Include the variable names as a header row.
        midrule : bool
            print a midrule after table header.
        """
        if not isinstance(path, str):
            title = "Save Dataset"
            if self.name:
                title += ' %s' % self.name
            title += " as TeX Table"
            msg = ""
            path = ui.ask_saveas(title, msg, [_tex_wildcard],
                                 defaultFile=self.name)

        _, ext = os.path.splitext(path)
        if not ext:
            path += '.tex'

        table = self.as_table(fmt=fmt, header=header, midrule=midrule)
        table.save_tex(path)

    def save_txt(self, path=None, fmt='%s', delim='\t', header=True):
        """Save the Dataset as text file.

        Parameters
        ----------
        path : str
            Target file name (by default, a Save As dialog is displayed). If
            ``path`` is missing an extension, ``'.txt'`` is appended.
        fmt : format string
            Formatting for scalar values.
        delim : str
            Column delimiter (default is tab).
        header : bool
            write the variables' names in the first line
        """
        if path is None:
            path = ui.ask_saveas(f"Save {self.name or 'Dataset'} as Text", "",
                                 [_tsv_wildcard], defaultFile=self.name)
        path = Path(path)
        if not path.suffix:
            path = path.with_suffix('.txt')

        table = self.as_table(fmt=fmt, header=header)
        table.save_tsv(path, fmt=fmt, delimiter=delim)

    def save_pickled(self, path=None):
        """Pickle the Dataset.

        Parameters
        ----------
        path : None | str
            Target file name (if ``None`` is supplied, a save file dialog is
            displayed). If no extension is specified, '.pickled' is appended.
        """
        if not isinstance(path, str):
            title = "Pickle Dataset"
            if self.name:
                title += ' %s' % self.name
            msg = ""
            path = ui.ask_saveas(title, msg, [_pickled_ds_wildcard],
                                 defaultFile=self.name)

        _, ext = os.path.splitext(path)
        if not ext:
            path += '.pickled'

        with open(path, 'wb') as fid:
            pickle.dump(self, fid, pickle.HIGHEST_PROTOCOL)

    def sorted(self, order, descending=False):
        """Create a sorted copy of the Dataset.

        Parameters
        ----------
        order : str | data-object
            Data object (Var, Factor or interactions) according to whose values
            to sort the Dataset, or its name in the Dataset.
        descending : bool
            Sort in descending instead of an ascending order.

        See Also
        --------
        .sort : sort the Dataset in place
        .sort_index : Create an index that could be used to sort the Dataset
        """
        idx = self.sort_index(order, descending)
        return self[idx]

    def sub(self, index=None, keys=None, name=None):
        """Access a subset of the data in the Dataset.

        Parameters
        ----------
        index : int | array | str
            Index for selecting a subset of cases. Can be a valid numpy index
            or a string (the name of a variable in Dataset, or an expression
            to be evaluated in the Dataset's namespace).
        keys : sequence of str | str
            Only include items with those keys (default all items). 
            Use a
            :class:`str` to retrieve a single item directly.
        name : str
            name for the new Dataset.

        Returns
        -------
        data : Dataset | data_object
            Either the :class:`Dataset` with cases restricted to ``index``, or,
            if ``key`` is a :class:`str`, a single item restricted to ``index``.

        Notes
        -----
        Index is passed on to numpy objects, which means that advanced indexing
        always returns a copy of the data, whereas basic slicing (using slices)
        returns a view.
        """
        if index is None:
            # no case restriction: copy or retrieve items directly
            if keys is None:
                return self.copy(name)
            elif isinstance(keys, str):
                return OrderedDict.__getitem__(self, keys)
            else:
                items = ((k, OrderedDict.__getitem__(self, k)) for k in keys)
        elif isinstance(index, Integral):
            # single case: return scalar values rather than a Dataset
            if keys is None:
                return self.get_case(index)
            elif isinstance(keys, str):
                return OrderedDict.__getitem__(self, keys)[index]
            else:
                return {k: OrderedDict.__getitem__(self, k)[index] for k in keys}
        else:
            # array-like or expression index
            if isinstance(index, str):
                index = self.eval(index)
            if keys is None:
                keys = self.keys()
            elif isinstance(keys, str):
                return OrderedDict.__getitem__(self, keys)[index]
            items = ((k, OrderedDict.__getitem__(self, k)[index]) for k in keys)

        return Dataset(items, name or self.name, self._caption, self.info)

    def tail(self, n=10):
        "Table with the last n cases in the Dataset"
        return self.as_table(range(-n, 0), '%.5g', midrule=True, lfmt=True)

    def tile(self, repeats, name=None):
        """Concatenate ``repeats`` copies of the dataset

        Parameters
        ----------
        repeats : int
            Number of repeats.
        name : str
            Name for the new dataset (default is ``self.name``).
        """
        return Dataset(
            ((name, item.tile(repeats)) for name, item in self.items()),
            name or self.name, self._caption, self.info, self.n_cases * repeats)

    def to_r(self, name=None):
        """Place the Dataset into R as dataframe using rpy2

        Parameters
        ----------
        name : str
            Name for the R dataframe (default is self.name).

        Examples
        --------
        >>> from rpy2.robjects import r
        >>> ds = datasets.get_uv()
        >>> print(ds[:6])
        A    B    rm     intvar   fltvar     fltvar2    index
        -----------------------------------------------------
        a1   b1   s000   13       0.25614    0.7428     True
        a1   b1   s001   8        -1.5174    -0.75498   True
        a1   b1   s002   11       -0.5071    -0.13828   True
        a1   b1   s003   11       2.1491     -2.1249    True
        a1   b1   s004   15       -0.19358   -1.03      True
        a1   b1   s005   17       2.141      -0.51745   True
        >>> ds.to_r('df')
        >>> print(r("head(df)"))
           A  B   rm intvar     fltvar    fltvar2 index
        1 a1 b1 s000     13  0.2561439  0.7427957  TRUE
        2 a1 b1 s001      8 -1.5174371 -0.7549815  TRUE
        3 a1 b1 s002     11 -0.5070960 -0.1382827  TRUE
        4 a1 b1 s003     11  2.1490761 -2.1249203  TRUE
        5 a1 b1 s004     15 -0.1935783 -1.0300188  TRUE
        6 a1 b1 s005     17  2.1410424 -0.5174519  TRUE

        """
        import rpy2.robjects as ro

        if name is None:
            name = self.name
            if name is None:
                raise TypeError('Need a valid name for the R data frame')

        items = OrderedDict()
        for k, v in self.items():
            # map each column to the corresponding R vector type;
            # items that are neither Var nor Factor are silently skipped
            if isinstance(v, Var):
                if v.x.dtype.kind == 'b':
                    item = ro.BoolVector(v.x)
                elif v.x.dtype.kind == 'i':
                    item = ro.IntVector(v.x)
                else:
                    item = ro.FloatVector(v.x)
            elif isinstance(v, Factor):
                x = ro.IntVector(v.x)
                codes = sorted(v._labels)
                levels = ro.IntVector(codes)
                labels = ro.StrVector(tuple(v._labels[c] for c in codes))
                item = ro.FactorVector(x, levels, labels)
            else:
                continue
            items[k] = item

        df = ro.DataFrame(items)
        ro.globalenv[name] = df

    def update(self, ds, replace=False, info=True):
        """Update the Dataset with all variables in ``ds``.

        Parameters
        ----------
        ds : Dataset | dict
            A Dataset or other dictionary-like object whose keys are strings
            and whose values are data-objects.
        replace : bool
            If a variable in ds is already present, replace it. 
            If False,
            duplicates raise a ValueError (unless they are equivalent).
        info : bool
            Also update the info dictionary.

        Notes
        -----
        By default, if a key is present in both Datasets, and the corresponding
        variables are not equal on all cases, a ValueError is raised. If all
        values are equal, the variable in ds is copied into the Dataset that is
        being updated (the expected behavior of .update()).
        """
        if isinstance(ds, Dataset):
            if ds.n_cases != self.n_cases:
                raise ValueError("Trying to update dataset with %i cases from "
                                 "dataset with %i cases" % (self.n_cases, ds.n_cases))

        if not replace:
            # collect all conflicting keys before raising, so the error
            # message reports every problem at once
            unequal = {}
            for key in set(self).intersection(ds):
                own = self[key]
                other = ds[key]
                if len(own) != len(other):
                    unequal[key] = 'unequal length'
                elif not np.all(own == other):
                    unequal[key] = "unequal values"
            if unequal:
                raise ValueError(
                    "The following variables are present twice but are not "
                    "equal: %s" % ', '.join('%r (%s)' % item for item in unequal.items()))

        super(Dataset, self).update(ds)

        if info and isinstance(ds, Dataset):
            self.info.update(ds.info)

    def zip(self, *variables):
        """Iterate through the values of multiple variables

        ``ds.zip('a', 'b')`` is equivalent to ``zip(ds['a'], ds['b'])``.
        """
        return zip(*map(self.eval, variables))


class Interaction(_Effect):
    """Represents an Interaction effect.

    Usually not initialized directly but through operations on Factors/Vars.

    Parameters
    ----------
    base : sequence
        List of data-objects that form the basis of the interaction.

    Attributes
    ----------
    base : list
        All effects.
    """
    def __init__(self, base):
        base_ = EffectList()
        # count continuous (Var-like) components; at most one is allowed
        n_vars = 0

        for b in base:
            if isuv(b):
                base_.append(b.copy())
                n_vars += isinstance(b, Var)
            elif isinstance(b, Interaction):
                # flatten nested interactions into a single base list
                base_.extend(b.base)
                n_vars += not b.is_categorial
            elif isinstance(b, NestedEffect):
                base_.append(b)
            else:
                raise TypeError("Invalid 
type for Interaction: %r" % type(b))

        if n_vars > 1:
            raise TypeError("No Interaction between two Var objects")

        if len(base_) < 2:
            raise ValueError("Interaction needs a base of at least two Factors "
                             "(got %s)" % repr(base))
        N = len(base_[0])
        if not all(len(f) == N for f in base_[1:]):
            raise ValueError("Interactions only between effects with the same "
                             "number of cases")
        # __setstate__ performs all derived-attribute initialization
        self.__setstate__({'base': base_, 'is_categorial': not bool(n_vars)})

    def __setstate__(self, state):
        self.base = state['base']
        self.is_categorial = state['is_categorial']
        # secondary attributes
        self._n_cases = len(self.base[0])
        self.nestedin = EffectList()
        for e in self.base:
            # collect distinct nesting bases of nested component effects
            if (isinstance(e, NestedEffect) and
                    not any(np.all(e.nestedin == ne) for ne in self.nestedin)):
                self.nestedin.append(e.nestedin)
        self.base_names = [str(f.name) for f in self.base]
        self.name = ' x '.join(self.base_names)
        self.random = False
        self.df = reduce(operator.mul, [f.df for f in self.base])
        # cells
        factors = EffectList(e for e in self.base if
                             isinstance(e, (Factor, NestedEffect)))
        self.cells = tuple(itertools.product(*(f.cells for f in factors)))
        self.cell_header = tuple(f.name for f in factors)
        # TODO: beta-labels
        self.beta_labels = ['?'] * self.df

    def __getstate__(self):
        return {'base': self.base, 'is_categorial': self.is_categorial}

    def __repr__(self):
        names = [UNNAMED if f.name is None else f.name for f in self.base]
        if preferences['short_repr']:
            return ' % '.join(names)
        else:
            return "Interaction({n})".format(n=', '.join(names))

    # container ---
    def __len__(self):
        return self._n_cases

    def __getitem__(self, index):
        if isinstance(index, Var):
            index = index.x

        out = tuple(f[index] for f in self.base)

        # 1d index -> new Interaction; scalar index -> tuple of cell values
        if index_ndim(index) == 1:
            return Interaction(out)
        else:
            return out

    def __contains__(self, item):
        # a tuple is interpreted as a cell; anything else as a base effect
        if isinstance(item, tuple):
            return item in self._value_set
        return self.base.__contains__(item)

    def __iter__(self):
        for i in range(len(self)):
            yield tuple(b[i] for b in self.base)

    # numeric ---
    def __eq__(self, other):
        # returns a boolean array over cases (not a scalar)
        if isinstance(other, Interaction) and len(other.base) == len(self.base):
            x = np.vstack((b == bo for b, bo in zip(self.base, other.base)))
            return np.all(x, 0)
        elif isinstance(other, tuple) and len(other) == len(self.base):
            x = np.vstack(factor == level for factor, level in zip(self.base, other))
            return np.all(x, 0)
        else:
            return np.zeros(len(self), bool)

    def __ne__(self, other):
        if isinstance(other, Interaction) and len(other.base) == len(self.base):
            x = np.vstack((b != bo for b, bo in zip(self.base, other.base)))
            return np.any(x, 0)
        elif isinstance(other, tuple) and len(other) == len(self.base):
            x = np.vstack(factor != level for factor, level in zip(self.base, other))
            return np.any(x, 0)
        return np.ones(len(self), bool)

    def as_factor(self, delim=' ', name=None):
        """Convert the Interaction to a factor

        Parameters
        ----------
        delim : str
            Delimiter to join factor cell values (default ``" "``).
        name : str
            Name for the Factor (default is None).

        Examples
        --------
        >>> print(ds[::20, 'A'])
        Factor(['a1', 'a1', 'a2', 'a2'], name='A')
        >>> print(ds[::20, 'B'])
        Factor(['b1', 'b2', 'b1', 'b2'], name='B')
        >>> i = ds.eval("A % B")
        >>> print(i.as_factor()[::20])
        Factor(['a1 b1', 'a1 b2', 'a2 b1', 'a2 b2'], name='AxB')
        >>> print(i.as_factor("_")[::20])
        Factor(['a1_b1', 'a1_b2', 'a2_b1', 'a2_b2'], name='AxB')
        """
        return Factor(self.as_labels(delim), name)

    def as_cells(self):
        """All values as a list of tuples."""
        return [case for case in self]

    @LazyProperty
    def as_dummy(self):
        codelist = [f.as_dummy for f in self.base]
        return reduce(_effect_interaction, codelist)

    @LazyProperty
    def as_effects(self):  # Effect coding
        codelist = [f.as_effects for f in self.base]
        return reduce(_effect_interaction, 
codelist)

    def _coefficient_names(self, method):
        if self.df == 1:
            return [self.name]
        return ["%s %i" % (self.name, i) for i in range(self.df)]

    def as_labels(self, delim=' '):
        """All values as a list of strings.

        Parameters
        ----------
        delim : str
            Delimiter with which to join the elements of cells.
        """
        return [delim.join(filter(None, map(str, case))) for case in self]

    def aggregate(self, x):
        return Interaction(f.aggregate(x) for f in self.base)

    def isin(self, cells):
        """An index that is true where the Interaction equals any of the cells.

        Parameters
        ----------
        cells : sequence of tuples
            Cells for which the index will be true. Cells described as tuples
            of strings.
        """
        is_v = [self == cell for cell in cells]
        return np.any(is_v, 0)

    @LazyProperty
    def _value_set(self):
        return set(self)


def box_cox_transform(x, p, name=None):
    """The Box-Cox transform of x as :class:`Var`

    With ``p=0``, this is the log of x; otherwise ``(x**p - 1) / p``

    Parameters
    ----------
    x : Var
        Source data.
    p : scalar
        Parameter for Box-Cox transform.
    name : str
        Name for the output Var.
    """
    if isinstance(x, Var):
        x = x.x

    if p == 0:
        y = np.log(x)
    else:
        y = (x ** p - 1) / p

    return Var(y, name)


class NestedEffect(_Effect):
    # An effect nested in a categorial base (e.g. subjects nested in groups).

    def __init__(self, effect, nestedin):
        if not isinstance(nestedin, (Factor, Interaction)):
            raise TypeError("Nested in %r; Effects can only be nested in Factor "
                            "or Interaction" % (dataobj_repr(nestedin),))
        elif not iscategorial(nestedin):
            raise ValueError("Effects can only be nested in categorial base")

        self.effect = effect
        self.nestedin = nestedin
        self.random = effect.random
        self.cells = effect.cells
        self._n_cases = len(effect)

        if isinstance(self.effect, Factor):
            e_name = self.effect.name
        else:
            e_name = '(%s)' % self.effect
        self.name = "%s(%s)" % (e_name, nestedin.name)

        if len(nestedin) != self._n_cases:
            err = ("Unequal lengths: effect %r len=%i, nestedin %r len=%i" %
                   (e_name, len(effect), nestedin.name, len(nestedin)))
            raise ValueError(err)

    def __repr__(self):
        return self.name

    def __iter__(self):
        return self.effect.__iter__()

    def __len__(self):
        return self._n_cases

    def __eq__(self, other):
        # delegates to the nested effect; returns an array over cases
        return self.effect == other

    def __getitem__(self, index):
        if isinstance(index, Integral):
            return self.effect[index]
        return NestedEffect(self.effect[index], self.nestedin[index])

    @property
    def df(self):
        return len(self.effect.cells) - len(self.nestedin.cells)

    @property
    def as_effects(self):
        "Effect codes"
        codes = np.zeros((self._n_cases, self.df))
        ix = 0
        # build a separate effect-coding block for each outer cell
        for outer_cell in self.nestedin.cells:
            outer_idx = (self.nestedin == outer_cell)
            inner_model = self.effect[outer_idx]
            n = len(inner_model.cells)
            inner_codes = _effect_eye(n)
            for i, cell in enumerate(inner_model.cells):
                codes[self.effect == cell, ix:ix + n - 1] = inner_codes[i]
            ix += n - 1

        return codes

    def _coefficient_names(self, method):
        return ["%s %i" % (self.name, i) for i in range(self.df)]


class NonbasicEffect(object):
    # Effect defined directly through a pre-computed effect-code matrix.
    # NOTE(review): the mutable default ``nestedin=[]`` is shared between
    # instances; it is never mutated here, but verify callers before changing.

    def __init__(self, effect_codes, factors, name, nestedin=[],
                 beta_labels=None):
        if beta_labels is not None and len(beta_labels) != effect_codes.shape[1]:
            raise ValueError("beta_labels need one entry per model column "
                             "(%s); got %s"
                             % (effect_codes.shape[1], repr(beta_labels)))
        self.nestedin = nestedin
        self.name = name
        self.random = False
        self.as_effects = effect_codes
        self._n_cases, self.df = effect_codes.shape
        self.factors = factors
        self.beta_labels = beta_labels

    def __repr__(self):
        # NOTE(review): the template is empty, so this always returns '' and
        # ``{n}`` is never substituted -- the format string looks like it was
        # lost (possibly stripped markup such as "<NonbasicEffect: {n}>");
        # confirm against upstream before relying on this repr.
        txt = ""
        return txt.format(n=self.name)

    # container ---
    def __len__(self):
        return self._n_cases

    def _coefficient_names(self, method):
        if self.beta_labels is None:
            return ["%s %i" % (self.name, i) for i in range(self.df)]
        else:
            return 
self.beta_labels


class Model(object):
    """A list of effects.

    Parameters
    ----------
    x : effect | iterator of effects
        Effects to be included in the model (Var, Factor, Interaction ,
        ...). Can also contain models, in which case all the model's
        effects will be added.

    Attributes
    ----------
    effects : list
        Effects included in the model (:class:`Var`, :class:`Factor`, etc. 
        objects)
    """
    def __init__(self, x):
        effects = EffectList()

        # find effects in input
        if iseffect(x):
            effects.append(x)
            n_cases = len(x)
        elif isinstance(x, Model):
            effects += x.effects
            n_cases = len(x)
        else:
            # NOTE(review): if x is an empty iterable, n_cases stays None and
            # the ``df > n_cases`` comparison below raises TypeError on
            # Python 3 -- confirm whether empty input should be rejected
            # explicitly.
            n_cases = None
            for e in x:
                # check n_cases
                if n_cases is None:
                    n_cases = len(e)
                elif len(e) != n_cases:
                    e0 = effects[0]
                    raise ValueError(
                        "All effects contained in a Model need to describe the "
                        "same number of cases. %s has %i cases, %s has %i "
                        "cases." %
                        (dataobj_repr(e0), len(e0), dataobj_repr(e), len(e)))

                # find effects
                if iseffect(e):
                    effects.append(e)
                elif isinstance(e, Model):
                    effects += e.effects
                else:
                    raise TypeError(
                        "Model needs to be initialized with effect (Var, "
                        "Factor, Interaction, ...) and/or Model objects (got "
                        "%s)" % type(e))

        # check dfs
        df = sum(e.df for e in effects) + 1  # intercept
        if df > n_cases:
            raise ValueError(
                "Model overspecified (%i cases for %i model df)" %
                (n_cases, df))

        # beta indices
        for e in effects:
            if isinstance(e, Factor) and len(e.cells) == 1:
                raise ValueError("The Factor %s has only one level (%s). The "
                                 "intercept is implicit in each model and "
                                 "should not be specified explicitly."
                                 % (dataobj_repr(e), e.cells[0]))

        self.effects = effects
        self.df = df
        self._init_secondary()

    def _init_secondary(self):
        # derived attributes shared between __init__ and __setstate__
        self.df_total = len(self.effects[0])
        self.df_error = self.df_total - self.df
        self.name = ' + '.join([str(e.name) for e in self.effects])

    def __setstate__(self, state):
        self.effects = state['effects']
        self.df = sum(e.df for e in self.effects) + 1  # intercept
        self._init_secondary()

    def __getstate__(self):
        return {'effects': self.effects}

    def __repr__(self):
        names = self.effects.names()
        if preferences['short_repr']:
            return ' + '.join(names)
        else:
            x = ', '.join(names)
            return "Model((%s))" % x

    def __str__(self):
        return str(self.as_table())

    def info(self):
        """A :class:`fmtxt.Table` with information about the model"""
        table = fmtxt.Table('rl')
        table.cells('Df', 'Term')
        table.midrule()
        for e in self.effects:
            table.cells(e.df, e.name, )
        if self.df_error:
            table.midrule()
            table.cells(self.df_error, 'Residuals')
        return table

    # container ---
    def __len__(self):
        return self.df_total

    def __getitem__(self, sub):
        # str -> effect by name; int -> one case; other -> sub-model
        if isinstance(sub, str):
            for e in self.effects:
                if e.name == sub:
                    return e
            raise KeyError(sub)
        elif isinstance(sub, INT_TYPES):
            return tuple(e[sub] for e in self.effects)
        else:
            return Model((x[sub] for x in self.effects))

    def __contains__(self, effect):
        # identity-based membership (the same object, not an equal one)
        return id(effect) in map(id, self.effects)

    def sorted(self):
        """Sorted copy of the Model, interactions last"""
        # NOTE(review): if any effect's len(e.factors) is skipped by the
        # increasing counter, this loop would not terminate -- assumes factor
        # counts are contiguous from 1; confirm.
        out = []
        i = 1
        while len(out) < len(self.effects):
            for e in self.effects:
                if len(e.factors) == i:
                    out.append(e)
            i += 1
        return Model(out)

    # numeric ---
    def __add__(self, other):
        return Model((self, other))

    def __mul__(self, other):
        return Model((self, other, self % other))

    def __mod__(self, other):
        out = []
        for e_self in 
self.effects:
            for e_other in Model(other).effects:
                # Var x Var -> product variable; categorial -> interaction,
                # skipping partially nested combinations
                if isinstance(e_self, Var) and isinstance(e_other, Var):
                    out.append(e_self * e_other)
                elif not partially_nested(e_self, e_other):
                    out.append(e_self % e_other)
        return Model(out)

    def __eq__(self, other):
        if not isinstance(other, Model):
            return False
        elif not len(self) == len(other):
            return False
        elif not len(self.effects) == len(other.effects):
            return False

        # element-wise comparison of corresponding effects
        for e, eo in zip(self.effects, other.effects):
            if not np.all(e == eo):
                return False
        return True

    def __ne__(self, other):
        return not self.__eq__(other)

    # repr ---
    @property
    def model_eq(self):
        return self.name

    # coding ---
    @LazyProperty
    def _effect_to_beta(self):
        """An array indicating for each effect which beta weights it occupies

        Returns
        -------
        effects_to_beta : np.ndarray (n_effects, 2)
            For each effect, indicating the first index in betas and df
        """
        out = np.empty((len(self.effects), 2), np.intp)
        beta_start = 1  # column 0 is the intercept
        for i, e in enumerate(self.effects):
            out[i, 0] = beta_start
            out[i, 1] = e.df
            beta_start += e.df
        return out

    def as_table(self, method='dummy', cases=0, group_terms=True):
        """Return a table with the model codes

        Parameters
        ----------
        method : 'effect' | 'dummy'
            Coding scheme: effect coding or dummy coding.
        cases : int | iterator of int
            Cases to include (int includes that many cases from the beginning,
            0 includes all; negative number works like negative indexing).
        group_terms : bool
            Group model columns that represent the same effect under one
            heading.

        Returns
        --------
        table : FMText Table
            The full model as a table.
        """
        cases = cases_arg(cases, self.df_total)
        p = self._parametrize(method)
        table = fmtxt.Table('l' * len(p.column_names))

        # Header
        if group_terms:
            for term in p.effect_names:
                index = p.terms[term]
                ncolumns = index.stop - index.start
                table.cell(term, width=ncolumns)
        else:
            for term in p.column_names:
                table.cell(term)
        table.midrule()

        # data
        for case in cases:
            for i in p.x[case]:
                table.cell('%g' % i)

        return table

    def head(self, n=10):
        "Table with the first n cases in the Model"
        return self.as_table(cases=n)

    # checking model properties
    def check(self, v=True):
        "Shortcut to check linear independence and orthogonality"
        return self.lin_indep(v) + self.orthogonal(v)

    def lin_indep(self, v=True):
        "Check the Model for linear independence of its factors"
        msg = []
        ne = len(self.effects)
        codes = [e.as_effects for e in self.effects]
        for i in range(ne):
            for j in range(i + 1, ne):
                e1 = self.effects[i]
                e2 = self.effects[j]
                # pairwise rank check of the stacked effect codes
                x = np.hstack((codes[i], codes[j]))
                if rank(x) < x.shape[1]:
                    if v:
                        errtxt = "Linear Dependence Warning: {0} and {1}"
                        msg.append(errtxt.format(e1.name, e2.name))
        return msg

    def orthogonal(self, v=True):
        "Check the Model for orthogonality of its factors"
        msg = []
        ne = len(self.effects)
        codes = [e.as_effects for e in self.effects]
        for i in range(ne):
            for j in range(i + 1, ne):
                ok = True
                e1 = self.effects[i]
                e2 = self.effects[j]
                e1e = codes[i]
                e2e = codes[j]
                # orthogonal iff every pair of code columns has zero dot product
                for i1 in range(e1.df):
                    for i2 in range(e2.df):
                        dotp = np.dot(e1e[:, i1], e2e[:, i2])
                        if dotp != 0:
                            ok = False
                if v and (not ok):
                    errtxt = "Not orthogonal: {0} and {1}"
                    msg.append(errtxt.format(e1.name, e2.name))
        return msg

    def _parametrize(self, method='effect'):
        "Create a design matrix"
        return Parametrization(self, method)

    def _incomplete_error(self, caller):
        df_table = self.info()
        df_table[-1, 1] = 'Unexplained'
        return IncompleteModel(
            "%s requires a fully specified model, but the model has only "
            "%i explained degrees of freedom for %i cases:\n%s" %
            (caller, self.df, self.df_total, df_table))

    def repeat(self, n):
        "Repeat each row of the Model ``n`` times"
        return Model(e.repeat(n) for e in self.effects)

    def tail(self, n=10):
        "Table with the last n cases in the Model"
        return self.as_table(cases=range(-n, 0))


class Parametrization(object):
    """Parametrization of a statistical model

    Parameters
    ----------
    model : Model
        Model to be parametrized.
    method : 'effect' | 'dummy'
        Coding scheme: effect coding or dummy coding.

    Attributes
    ----------
    model : Model
        The model that is parametrized.
    x : array (n_cases, n_coeffs)
        Design matrix.
    terms : {str: slice}
        Location of each term in x.
    column_names : list of str
        Name of each column.

    Notes
    -----
    A :class:`Model` is a list of effects. A :class:`Parametrization` contains
    a realization of those effects in a model matrix with named columns.
    """
    def __init__(self, model, method):
        model = asmodel(model)
        x = np.empty((model.df_total, model.df))
        x[:, 0] = 1  # intercept column
        column_names = ['intercept']
        effect_names = ['intercept']
        higher_level_effects = {}
        terms = {'intercept': slice(0, 1)}
        i = 1
        for e in model.effects:
            j = i + e.df
            if method == 'effect':
                x[:, i:j] = e.as_effects
            elif method == 'dummy':
                x[:, i:j] = e.as_dummy
            else:
                raise ValueError("method=%s" % repr(method))
            name = longname(e)
            if name in terms:
                raise KeyError("Duplicate term name: %s" % repr(name))
            terms[name] = slice(i, j)
            effect_names.append(name)
            col_names = e._coefficient_names(method)
            column_names.extend(col_names)
            # every individual column is also addressable as a term
            for col, col_name in enumerate(col_names, i):
                terms[col_name] = slice(col, col + 1)
            i = j

            # find comparison models
            higher_level_effects[name] = [
                e_ for e_ in model.effects if
                e_ is not e and is_higher_order_effect(e_, e)
            ]

        # check model
        if np.linalg.matrix_rank(x) < x.shape[1]:
            raise ValueError("Model is rank deficient: %r" % model)

        # model basics
        self.model = model
        self.method = method
        self.x = x
        self.terms = terms
        self.column_names = column_names
        self.effect_names = effect_names
self._higher_level_effects = higher_level_effects\n\n # projector\n x_t = x.T\n self.g = inv(x_t.dot(x))\n self.projector = self.g.dot(x_t)\n\n def reduced_model_index(self, term):\n \"Boolean index into model columns for model comparison\"\n out = np.ones(self.x.shape[1], bool)\n out[self.terms[term]] = False\n for e in self._higher_level_effects[term]:\n out[self.terms[e.name]] = False\n return out\n\n\n# ---NDVar dimensions---\n\ndef _subgraph_edges(connectivity, int_index):\n \"Extract connectivity for a subset of a graph\"\n idx = np.logical_and(np.in1d(connectivity[:, 0], int_index),\n np.in1d(connectivity[:, 1], int_index))\n if np.any(idx):\n new_c = connectivity[idx]\n\n # remap to new vertex indices\n if np.any(np.diff(int_index) < 1): # non-monotonic index\n argsort = np.argsort(int_index)\n flat_conn_ = np.digitize(new_c.ravel(), int_index[argsort], True)\n flat_conn = argsort[flat_conn_]\n else:\n flat_conn = np.digitize(new_c.ravel(), int_index, True)\n\n return flat_conn.reshape(new_c.shape).astype(np.uint32)\n else:\n return np.empty((0, 2), dtype=np.uint32)\n\n\nclass Dimension(object):\n \"\"\"Base class for dimensions.\n \n Parameters\n ----------\n name : str\n Dimension name.\n connectivity : 'grid' | 'none' | array of int, (n_edges, 2)\n Connectivity between elements. Set to ``\"none\"`` for no connections or \n ``\"grid\"`` to use adjacency in the sequence of elements as connection. \n Set to :class:`numpy.ndarray` to specify custom connectivity. The array\n should be of shape (n_edges, 2), and each row should specify one \n connection [i, j] with i < j, with rows sorted in ascending order. If\n the array's dtype is uint32, property checks are disabled to improve \n efficiency.\n\n Attributes\n ----------\n x : array_like\n Numerical values (e.g. for locating categories on an axis).\n values : sequence\n Meaningful point descriptions (e.g. 
time points, sensor names, ...).\n \"\"\"\n _CONNECTIVITY_TYPES = ('grid', 'none', 'custom', 'vector')\n _axis_unit = None\n _default_connectivity = 'none' # for loading old pickles\n\n def __init__(self, name, connectivity):\n # requires __len__ to work\n self.name = name\n if isinstance(connectivity, str):\n self._connectivity = None\n else:\n if not (isinstance(connectivity, np.ndarray) and connectivity.dtype == np.uint32):\n connectivity = np.asarray(connectivity)\n if connectivity.dtype.kind != 'i':\n raise TypeError(\"connectivity needs to be integer type, got\"\n \"dtype=%r\" % (connectivity.dtype,))\n elif connectivity.shape != (len(connectivity), 2):\n raise ValueError(\"connectivity requires shape (n_edges, 2), \"\n \"got array with shape %s\" %\n (connectivity.shape,))\n elif connectivity.min() < 0:\n raise ValueError(\"connectivity can not have negative values\")\n elif connectivity.max() >= len(self):\n raise ValueError(\"connectivity can not have negative values\")\n elif np.any(connectivity[:, 0] >= connectivity[:, 1]):\n raise ValueError(\"All edges [i, j] must have i < j\")\n elif np.any(np.diff(connectivity, axis=0) > 0):\n edges = list(map(tuple, connectivity))\n edges.sort()\n connectivity = np.array(edges, np.uint32)\n else:\n connectivity = connectivity.astype(np.uint32)\n self._connectivity = connectivity\n connectivity = 'custom'\n\n if not isinstance(connectivity, str):\n raise TypeError(\"connectivity=%r\" % (connectivity,))\n elif connectivity not in self._CONNECTIVITY_TYPES:\n raise ValueError(\"connectivity=%r\" % (connectivity,))\n self._connectivity_type = connectivity\n\n def __getstate__(self):\n return {'name': self.name, 'connectivity': self._connectivity,\n 'connectivity_type': self._connectivity_type}\n\n def __setstate__(self, state):\n self.name = state['name']\n self._connectivity = state.get('connectivity', None)\n self._connectivity_type = state.get('connectivity_type', self._default_connectivity)\n\n def __len__(self):\n 
raise NotImplementedError\n\n def __eq__(self, other):\n if isinstance(other, str):\n return False\n return self.name == other.name\n\n def __ne__(self, other):\n return not self == other\n\n def __getitem__(self, index):\n \"\"\"Array-like Indexing\n\n Possible indexes:\n\n - int -> label or value for that location\n - [int] -> Dimension object with 1 location\n - [int, ...] -> Dimension object\n \"\"\"\n raise NotImplementedError\n\n def _bin(self, start, stop, step, nbins):\n \"Divide Dimension into bins\"\n raise NotImplementedError(\n \"Binning for %s dimension\" % self.__class__.__name__)\n\n def _as_scalar_array(self):\n raise TypeError(\"%s dimension %r has no scalar representation\" %\n (self.__class__.__name__, self.name))\n\n def _as_uv(self):\n return Var(self._axis_data(), name=self.name)\n\n def _axis_data(self):\n \"x for plot command\"\n return np.arange(len(self))\n\n def _axis_im_extent(self):\n \"Extent for im plots; needs to extend beyond end point locations\"\n return -0.5, len(self) - 0.5\n\n def _axis_format(self, scalar, label):\n \"\"\"Find axis decoration parameters for this dimension\n\n Parameters\n ----------\n scalar : bool\n If True, the axis is scalar and labels should correspond to the axis\n value. 
If False, the axis represents categorial bins (e.g.,\n im-plots).\n label : bool | str\n Label (if True, return an appropriate axis-specific label).\n\n Returns\n -------\n formatter : matplotlib Formatter\n Axis tick formatter.\n locator : matplotlib Locator\n Tick-locator.\n label : str | None\n Return the default axis label if label==True, otherwise the label\n argument.\n \"\"\"\n raise NotImplementedError\n\n def _axis_label(self, label):\n if label is True:\n if self._axis_unit:\n return \"%s [%s]\" % (self.name.capitalize(), self._axis_unit)\n else:\n return self.name.capitalize()\n\n def dimindex(self, arg):\n \"Convert a dimension index to an array index\"\n # backwards compatibility\n return self._array_index(arg)\n\n def _array_index(self, arg):\n \"\"\"Convert a dimension-semantic index to an array-like index\n\n Subclasses need to handle dimension-specific cases\n\n args that are handled by the Dimension baseclass:\n - None\n - boolean array\n - boolean NDVars\n args handled recursively:\n - list\n - tuple -> slice(*tuple)\n - Var -> Var.x\n \"\"\"\n if arg is None:\n return None # pass through None, for example for slice\n elif isinstance(arg, NDVar):\n return self._array_index_for_ndvar(arg)\n elif isinstance(arg, Var):\n return self._array_index(arg.x)\n elif isinstance(arg, np.ndarray):\n if arg.dtype.kind != 'b':\n raise TypeError(\"array of type %r not supported as index for \"\n \"%s\" % (arg.dtype.kind, self._dimname()))\n elif arg.ndim != 1:\n raise IndexError(\"Boolean index for %s needs to be 1d, got \"\n \"array of shape %s\" %\n (self._dimname(), arg.shape))\n elif len(arg) != len(self):\n raise IndexError(\n \"Got boolean index of length %i for %s of length %i\" %\n (len(arg), self._dimname(), len(self)))\n return arg\n elif isinstance(arg, tuple):\n if len(arg) > 3:\n raise ValueError(\"Tuple indexes signify intervals and need to \"\n \"be of length 1, 2 or 3 (got %r for %s)\" %\n (arg, self._dimname()))\n return 
self._array_index_for_slice(*arg)\n elif isinstance(arg, list):\n if len(arg) == 0:\n return np.empty(0, np.intp)\n return np.array([self._array_index(a) for a in arg])\n elif isinstance(arg, slice):\n return self._array_index_for_slice(arg.start, arg.stop, arg.step)\n else:\n raise TypeError(\"Unknown index type for %s: %r\" %\n (self._dimname(), arg))\n\n def _array_index_for_ndvar(self, arg):\n if arg.x.dtype.kind != 'b':\n raise IndexError(f\"{arg}: only NDVars with boolean data can serve as indexes\")\n elif arg.ndim != 1:\n raise IndexError(f\"{arg}: only NDVars with ndim == 1 can serve as indexes\")\n dim = arg.dims[0]\n if not isinstance(dim, self.__class__):\n raise IndexError(f\"{arg}: must have {self.__class__} dimension\")\n elif dim == self:\n return arg.x\n index_to_arg = self._array_index_to(dim)\n return index_to_arg[arg.x]\n\n def _array_index_for_slice(self, start=None, stop=None, step=None):\n if step is not None and not isinstance(step, Integral):\n raise TypeError(\"Slice index step for %s must be int, not %r\" %\n (self._dimname(), step))\n\n if start is None:\n start_ = None\n else:\n start_ = self._array_index(start)\n if not isinstance(start_, int):\n raise TypeError(\"%r is not an unambiguous slice start for %s\" %\n (start, self._dimname()))\n\n if stop is None:\n stop_ = None\n else:\n stop_ = self._array_index(stop)\n if not isinstance(stop_, int):\n raise TypeError(\"%r is not an unambiguous slice start for %s\" %\n (stop, self._dimname()))\n\n return slice(start_, stop_, step)\n\n def _array_index_to(self, other):\n \"Int index to access data from self in an order consistent with other\"\n raise NotImplementedError(f\"Internal alignment for {self.__class__}\")\n\n def _dimname(self):\n if self.name.lower() == self.__class__.__name__.lower():\n return self.__class__.__name__ + ' dimension'\n else:\n return '%s dimension (%r)' % (self.__class__.__name__, self.name)\n\n def _dim_index(self, arg):\n \"Convert an array index to a dimension 
index\"\n if isinstance(arg, slice):\n return slice(None if arg.start is None else self._dim_index(arg.start),\n None if arg.stop is None else self._dim_index(arg.stop),\n arg.step)\n elif np.isscalar(arg):\n return arg\n else:\n return [self._dim_index(i) for i in index_to_int_array(arg, len(self))]\n\n def _distances(self):\n \"Distance matrix for dimension elements\"\n raise NotImplementedError(\"Distances for %s\" % self.__class__.__name__)\n\n def intersect(self, dim, check_dims=True):\n \"\"\"Create a Dimension that is the intersection with dim\n\n Parameters\n ----------\n dim : Dimension\n Dimension to intersect with.\n check_dims : bool\n Check dimensions for consistency (not applicaple).\n\n Returns\n -------\n intersection : Dimension\n The intersection with dim (returns itself if dim and self are\n equal).\n \"\"\"\n raise NotImplementedError\n\n def _union(self, other):\n \"\"\"Create a Dimension that is the union with dim\n\n Parameters\n ----------\n other : Dimension\n Dimension to form union with.\n\n Returns\n -------\n union : Dimension\n The union with dim (returns itself if dim and self are\n equal).\n \"\"\"\n raise NotImplementedError\n\n def _cluster_properties(self, x):\n \"\"\"Find cluster properties for this dimension\n\n Parameters\n ----------\n x : array of bool, (n_clusters, len(self))\n The cluster extents, with different clusters stacked along the\n first axis.\n\n Returns\n -------\n cluster_properties : None | Dataset\n A dataset with variables describing cluster properties.\n \"\"\"\n return None\n\n def connectivity(self):\n \"\"\"Retrieve the dimension's connectivity graph\n\n Returns\n -------\n connectivity : array of int, (n_pairs, 2)\n array of sorted ``[src, dst]`` pairs, with all ``src < dst``.\n\n See Also\n --------\n .set_connectivity() : define the connectivity\n .neighbors() : Neighboring sensors for each sensor in a dictionary.\n \"\"\"\n if self._connectivity is None:\n self._connectivity = 
self._generate_connectivity()\n return self._connectivity\n\n def _generate_connectivity(self):\n raise NotImplementedError(\"Connectivity for %s dimension.\" % self.name)\n\n def _subgraph(self, index=None):\n \"\"\"Connectivity parameter for new Dimension instance\n\n Parameters\n ----------\n index : array_like\n Index if the new dimension is a subset of the current dimension.\n \"\"\"\n if self._connectivity_type == 'custom':\n if self._connectivity is None:\n return 'custom'\n elif index is None:\n return self._connectivity\n return _subgraph_edges(self._connectivity,\n index_to_int_array(index, len(self)))\n return self._connectivity_type\n\n\nclass Case(Dimension):\n \"\"\"Case dimension\n \n Parameters\n ----------\n n : int\n Number of cases.\n connectivity : 'grid' | 'none' | array of int, (n_edges, 2)\n Connectivity between elements. Set to ``\"none\"`` for no connections or \n ``\"grid\"`` to use adjacency in the sequence of elements as connection. \n Set to :class:`numpy.ndarray` to specify custom connectivity. The array\n should be of shape (n_edges, 2), and each row should specify one \n connection [i, j] with i < j, with rows sorted in ascending order. 
If\n the array's dtype is uint32, property checks are disabled to improve \n efficiency.\n \n Examples\n --------\n When initializing an :class:`NDVar`, the case dimension can be speciied\n with the bare class and the number of cases will be inferred from the data:\n \n >>> NDVar([[1, 2], [3, 4]], (Case, Categorial('column', ['1', '2'])))\n \n \"\"\"\n _DIMINDEX_RAW_TYPES = INT_TYPES + (slice, list)\n\n def __init__(self, n, connectivity='none'):\n Dimension.__init__(self, 'case', connectivity)\n self.n = int(n)\n\n def __getstate__(self):\n out = Dimension.__getstate__(self)\n out['n'] = self.n\n return out\n\n def __setstate__(self, state):\n Dimension.__setstate__(self, state)\n self.n = state['n']\n\n def __repr__(self):\n if self._connectivity_type == 'none':\n return \"Case(%i)\" % self.n\n elif self._connectivity_type == 'grid':\n return \"Case(%i, 'grid')\" % self.n\n else:\n return \"Case(%i, )\" % self.n\n\n def __len__(self):\n return self.n\n\n def __eq__(self, other):\n return isinstance(other, Case) and other.n == self.n\n\n def __getitem__(self, item):\n if isinstance(item, Integral):\n if item < 0:\n if item < -self.n:\n raise IndexError(item)\n item += self.n\n elif item > self.n:\n raise IndexError(item)\n return item\n else:\n return Case(index_length(item, self.n), self._subgraph(item))\n\n def __iter__(self):\n return iter(range(self.n))\n\n def _as_scalar_array(self):\n return np.arange(self.n)\n\n def _axis_format(self, scalar, label):\n if scalar:\n fmt = FormatStrFormatter('%i')\n else:\n fmt = IndexFormatter(np.arange(self.n))\n return (fmt,\n None if scalar else FixedLocator(np.arange(len(self)), 10),\n self._axis_label(label))\n\n def _array_index(self, arg):\n if isinstance(arg, self._DIMINDEX_RAW_TYPES):\n return arg\n elif isinstance(arg, Var) and arg.x.dtype.kind in 'bi':\n return arg.x\n elif isinstance(arg, np.ndarray) and arg.dtype.kind in 'bi':\n return arg\n elif isinstance(arg, tuple):\n return slice(*arg) if arg else 
FULL_SLICE\n else:\n raise TypeError(f\"Index {arg} of type {type(arg)} for Case dimension\")\n\n def _dim_index(self, arg):\n return arg\n\n\nclass Space(Dimension):\n \"\"\"Represent multiple directions in space\n\n Parameters\n ----------\n directions : str\n A sequence of directions, each indicated by a single capitalized\n character, from the following set: [A]nterior, [P]osterior, [L]eft,\n [R]ight, [S]uperior and [I]nferior.\n name : str\n Dimension name.\n\n Notes\n -----\n Connectivity is set to ``'none'``, but :class:`Space` is not a valid\n dimension to treat as mass-univariate.\n \"\"\"\n\n _DIRECTIONS = {\n 'A': 'anterior',\n 'P': 'posterior',\n 'L': 'left',\n 'R': 'right',\n 'S': 'superior',\n 'I': 'inferior',\n }\n\n def __init__(self, directions, name='space'):\n if not isinstance(directions, str):\n raise TypeError(\"directions=%r\" % (directions,))\n n = len(directions)\n all_directions = set(directions)\n if len(all_directions) != n:\n raise ValueError(\"directions=%r contains duplicate direction\"\n % (directions,))\n invalid = all_directions.difference(self._DIRECTIONS)\n if invalid:\n raise ValueError(\"directions=%r contains invalid directions: %s\"\n % (directions, ', '.join(map(repr, invalid))))\n Dimension.__init__(self, name, 'vector')\n self._directions = directions\n\n def __getstate__(self):\n out = Dimension.__getstate__(self)\n out['directions'] = self._directions\n return out\n\n def __setstate__(self, state):\n Dimension.__setstate__(self, state)\n self._directions = state['directions']\n\n def __repr__(self):\n return \"Space(%r)\" % self._directions\n\n def __len__(self):\n return len(self._directions)\n\n def __eq__(self, other):\n return isinstance(other, Space) and other._directions == self._directions\n\n def __getitem__(self, item):\n if not all(i in self._directions for i in item):\n raise IndexError(item)\n return Space(item)\n\n def __iter__(self):\n return iter(self._directions)\n\n def _axis_format(self, scalar, 
                     label):
        # like Categorial
        return (IndexFormatter(self._directions),
                FixedLocator(np.arange(len(self._directions))),
                self._axis_label(label))

    def _array_index(self, arg):
        # single direction character -> int position; tuple/slice -> slice with
        # endpoints translated recursively; any other iterable -> list of int
        if isinstance(arg, str) and len(arg) == 1:
            return self._directions.index(arg)
        elif isinstance(arg, tuple):
            return slice(*map(self._array_index, arg)) if arg else FULL_SLICE
        elif isinstance(arg, slice):
            return slice(
                None if arg.start is None else self._array_index(arg.start),
                None if arg.stop is None else self._array_index(arg.stop),
                arg.step)
        else:
            return [self._directions.index(s) for s in arg]

    def _dim_index(self, arg):
        # int -> direction character; sequence of int -> direction string
        if isinstance(arg, Integral):
            return self._directions[arg]
        else:
            return ''.join(self._directions[i] for i in arg)

    def intersect(self, dim, check_dims=True):
        """Create a dimension object that is the intersection with dim

        Parameters
        ----------
        dim : Space
            Dimension to intersect with.
        check_dims : bool
            Check dimensions for consistency.

        Returns
        -------
        intersection : Space
            The intersection with ``dim`` (returns itself if ``dim`` and
            ``self`` are equal)
        """
        if self.name != dim.name:
            raise DimensionMismatchError("Dimensions don't match")
        elif self._directions == dim._directions:
            return self
        self_dirs = set(self._directions)
        dim_dirs = set(dim._directions)
        out_dirs = self_dirs.intersection(dim_dirs)
        if self_dirs == out_dirs:
            return self
        elif dim_dirs == out_dirs:
            return dim
        else:
            # keep self's ordering of the shared directions
            directions = ''.join(c for c in self._directions if c in dim._directions)
            return Space(directions, self.name)


class Categorial(Dimension):
    """Simple categorial dimension

    Parameters
    ----------
    name : str
        Dimension name.
    values : sequence of str
        Names of the entries.
    connectivity : 'grid' | 'none' | array of int, (n_edges, 2)
        Connectivity between elements. Set to ``"none"`` for no connections or
        ``"grid"`` to use adjacency in the sequence of elements as connection.
        Set to :class:`numpy.ndarray` to specify custom connectivity. The array
        should be of shape (n_edges, 2), and each row should specify one
        connection [i, j] with i < j, with rows sorted in ascending order. If
        the array's dtype is uint32, property checks are disabled to improve
        efficiency.
    """
    def __init__(self, name, values, connectivity='none'):
        self.values = tuple(values)
        if len(set(self.values)) < len(self.values):
            raise ValueError("Dimension can not have duplicate values")
        if not all(isinstance(x, str) for x in self.values):
            raise ValueError("All Categorial values must be strings; got %r." %
                             (self.values,))
        Dimension.__init__(self, name, connectivity)

    def __getstate__(self):
        out = Dimension.__getstate__(self)
        out['values'] = self.values
        return out

    def __setstate__(self, state):
        # backwards compatibility
        if 'connectivity' not in state:
            state['connectivity'] = None
            state['connectivity_type'] = 'none'
        self.values = state['values']
        if isinstance(self.values, np.ndarray):
            self.values = tuple(str(v) for v in self.values)
        # /backwards compatibility
        Dimension.__setstate__(self, state)

    def __repr__(self):
        args = (repr(self.name), str(self.values))
        return "%s(%s)" % (self.__class__.__name__, ', '.join(args))

    def __len__(self):
        return len(self.values)

    def __eq__(self, other):
        # equal when name (base class) and values match
        return Dimension.__eq__(self, other) and self.values == other.values

    def __getitem__(self, index):
        # int -> value label; other index -> new (sub-)dimension
        if isinstance(index, Integral):
            return self.values[index]
        else:
            return self.__class__(self.name,
                                  apply_numpy_index(self.values, index),
                                  self._subgraph(index))

    def _as_uv(self):
        return Factor(self.values, name=self.name)

    def _axis_format(self, scalar, label):
        return (IndexFormatter(self.values),
                FixedLocator(np.arange(len(self))),
                self._axis_label(label))

    def _array_index(self, arg):
        # value label -> int position; unknown labels raise IndexError
        if isinstance(arg, str):
            if arg in self.values:
                return self.values.index(arg)
            else:
raise IndexError(arg)\n elif isinstance(arg, self.__class__):\n return [self._array_index(v) for v in arg.values]\n else:\n return super(Categorial, self)._array_index(arg)\n\n def _dim_index(self, index):\n if isinstance(index, Integral):\n return self.values[index]\n else:\n return Dimension._dim_index(self, index)\n\n def intersect(self, dim, check_dims=False):\n \"\"\"Create a dimension object that is the intersection with dim\n\n Parameters\n ----------\n dim : type(self)\n Dimension to intersect with.\n check_dims : bool\n Check dimensions for consistency (not applicaple to this subclass).\n\n Returns\n -------\n intersection : type(self)\n The intersection with dim (returns itself if dim and self are\n equal)\n \"\"\"\n if self.name != dim.name:\n raise DimensionMismatchError(\"Dimensions don't match\")\n\n if self.values == dim.values:\n return self\n index = np.array([v in dim.values for v in self.values])\n if np.all(index):\n return self\n elif index.sum() == len(dim):\n return dim\n else:\n return self[index]\n\n\nclass Scalar(Dimension):\n \"\"\"Scalar dimension\n\n Parameters\n ----------\n name : str\n Name fo the dimension.\n values : array_like\n Scalar value for each sample of the dimension.\n unit : str (optional)\n Unit of the values.\n tick_format : str (optional)\n Format string for formatting axis tick labels ('%'-format, e.g. '%.2f').\n connectivity : 'grid' | 'none' | array of int, (n_edges, 2)\n Connectivity between elements. Set to ``\"none\"`` for no connections or \n ``\"grid\"`` to use adjacency in the sequence of elements as connection. \n Set to :class:`numpy.ndarray` to specify custom connectivity. The array\n should be of shape (n_edges, 2), and each row should specify one \n connection [i, j] with i < j, with rows sorted in ascending order. 
If\n the array's dtype is uint32, property checks are disabled to improve \n efficiency.\n \"\"\"\n _default_connectivity = 'grid'\n\n def __init__(self, name, values, unit=None, tick_format=None,\n connectivity='grid'):\n values = np.asarray(values)\n if values.ndim != 1:\n raise ValueError(\"values needs to be one-dimensional array, got \"\n \"array of shape %s\" % repr(values.shape))\n elif np.any(np.diff(values) <= 0):\n raise ValueError(\"Values for Scalar must increase monotonically\")\n elif tick_format and '%' not in tick_format:\n raise ValueError(\"tick_format needs to include '%%'; got %r\" %\n (tick_format,))\n self.values = values\n self.unit = unit\n self._axis_unit = unit\n self.tick_format = tick_format\n Dimension.__init__(self, name, connectivity)\n\n def __getstate__(self):\n out = Dimension.__getstate__(self)\n out.update(values=self.values, unit=self.unit,\n tick_format=self.tick_format)\n return out\n\n def __setstate__(self, state):\n # backwards compatibility\n if 'connectivity' not in state:\n state['connectivity'] = None\n state['connectivity_type'] = 'grid'\n Dimension.__setstate__(self, state)\n self.values = state['values']\n self.unit = self._axis_unit = state.get('unit')\n self.tick_format = state.get('tick_format')\n\n def __repr__(self):\n args = [repr(self.name), array_repr(self.values)]\n if self.unit is not None or self.tick_format is not None:\n args.append(repr(self.unit))\n if self.tick_format is not None:\n args.append(repr(self.tick_format))\n return \"%s(%s)\" % (self.__class__.__name__, ', '.join(args))\n\n def __len__(self):\n return len(self.values)\n\n def __eq__(self, other):\n return (Dimension.__eq__(self, other) and\n np.array_equal(self.values, other.values))\n\n def __getitem__(self, index):\n if isinstance(index, Integral):\n return self.values[index]\n return self.__class__(self.name, self.values[index], self.unit,\n self.tick_format, self._subgraph(index))\n\n def _as_scalar_array(self):\n return 
self.values\n\n def _axis_data(self):\n return self.values\n\n def _axis_format(self, scalar, label):\n if scalar:\n if self.tick_format:\n fmt = FormatStrFormatter(self.tick_format)\n else:\n fmt = None\n elif self.tick_format:\n fmt = IndexFormatter([self.tick_format % v for v in self.values])\n else:\n fmt = IndexFormatter(self.values)\n return (fmt,\n None if scalar else FixedLocator(np.arange(len(self)), 10),\n self._axis_label(label))\n\n def _bin(self, start, stop, step, nbins):\n if start is None:\n start = self.values[0]\n\n if nbins is not None:\n istop = len(self) if stop is None else self._array_index(stop)\n istart = 0 if start is None else self._array_index(start)\n n_source_steps = istop - istart\n if n_source_steps % nbins != 0:\n raise ValueError(\"length %i dimension %s can not be divided \"\n \"equally into %i bins\" %\n (n_source_steps, self.name, nbins))\n istep = int(n_source_steps / nbins)\n ilast = istep - 1\n out_values = [(self[i] + self[i + ilast]) / 2. for i in\n range(istart, istop, istep)]\n edges = list(self.values[istart:istop:istep])\n else:\n if stop is None:\n n_bins_fraction = (self[-1] - start) / step\n n_bins = int(ceil(n_bins_fraction))\n # if the last value would fall into a new bin\n if n_bins == n_bins_fraction:\n n_bins += 1\n else:\n n_bins = int(ceil((stop - start) / step))\n\n # new dimensions\n dim_start = start + step / 2\n dim_stop = dim_start + n_bins * step\n out_values = np.arange(dim_start, dim_stop, step)\n edges = [start + n * step for n in range(n_bins)]\n edges.append(stop)\n out_dim = Scalar(self.name, out_values, self.unit, self.tick_format)\n return edges, out_dim\n\n def _cluster_properties(self, x):\n \"\"\"Find cluster properties for this dimension\n\n Parameters\n ----------\n x : array of bool, (n_clusters, len(self))\n The cluster extents, with different clusters stacked along the\n first axis.\n\n Returns\n -------\n cluster_properties : None | Dataset\n A dataset with variables describing cluster 
properties.\n \"\"\"\n ds = Dataset()\n where = [np.flatnonzero(cluster) for cluster in x]\n ds['%s_min' % self.name] = Var([self.values[w[0]] for w in where])\n ds['%s_max' % self.name] = Var([self.values[w[-1]] for w in where])\n return ds\n\n @classmethod\n def _concatenate(cls, scalars):\n \"Concatenate multiple Scalar instances\"\n scalars = tuple(scalars)\n attrs = {}\n for attr in ('name', 'unit', 'tick_format'):\n values = {getattr(s, attr) for s in scalars}\n if len(values) > 1:\n raise DimensionMismatchError(\n \"Trying to concatenate %s dimensions with different %ss: \"\n \"%s\" % (cls.__name__, attr, values))\n attrs[attr] = values.pop()\n values = np.concatenate(tuple(s.values for s in scalars))\n return cls(attrs['name'], values, attrs['unit'], attrs['tick_format'])\n\n def _array_index(self, arg):\n if isinstance(arg, self.__class__):\n s_idx, a_idx = np.nonzero(self.values[:, None] == arg.values)\n return s_idx[np.argsort(a_idx)]\n elif np.isscalar(arg):\n try:\n return digitize_index(arg, self.values, 0.3)\n except IndexError as error:\n raise IndexError(\"Ambiguous index for %s: %s\" %\n (self._dimname(), error.args[0]))\n elif isinstance(arg, np.ndarray) and arg.dtype.kind == self.values.dtype.kind:\n if np.setdiff1d(arg, self.values):\n raise IndexError(\"Index %r includes values not in dimension: %s\" %\n (arg, np.setdiff1d(arg, self.values)))\n return np.digitize(arg, self.values, True)\n else:\n return Dimension._array_index(self, arg)\n\n def _array_index_for_slice(self, start, stop=None, step=None):\n if start is not None:\n start = digitize_slice_endpoint(start, self.values)\n if stop is not None:\n stop = digitize_slice_endpoint(stop, self.values)\n return slice(start, stop, step)\n\n def _dim_index(self, index):\n if np.isscalar(index):\n return self.values[index]\n else:\n return Dimension._dim_index(self, index)\n\n def intersect(self, dim, check_dims=False):\n \"\"\"Create a dimension object that is the intersection with dim\n\n 
Parameters\n ----------\n dim : type(self)\n Dimension to intersect with.\n check_dims : bool\n Check dimensions for consistency (not applicaple to this subclass).\n\n Returns\n -------\n intersection : type(self)\n The intersection with dim (returns itself if dim and self are\n equal)\n \"\"\"\n if self.name != dim.name:\n raise DimensionMismatchError(\"Dimensions don't match\")\n\n if np.all(self.values == dim.values):\n return self\n index = np.in1d(self.values, dim.values)\n if np.all(index):\n return self\n elif index.sum() == len(dim):\n return dim\n return self[index]\n\n\n# for unpickling backwards compatibility\nOrdered = Scalar\n\n\nclass Sensor(Dimension):\n \"\"\"Dimension class for representing sensor information\n\n Parameters\n ----------\n locs : array_like (n_sensor, 3)\n list of sensor locations in ALS coordinates, i.e., for each sensor a\n ``(anterior, left, superior)`` coordinate triplet.\n names : list of str\n Sensor names, same order as ``locs`` (default is ``['0', '1', '2',\n ...]``).\n sysname : str\n Name of the sensor system.\n proj2d : str\n Default 2d projection (default is ``'z-root'``; for options see notes\n below).\n connectivity : 'grid' | 'none' | array of int, (n_edges, 2)\n Connectivity between elements. Set to ``\"none\"`` for no connections or \n ``\"grid\"`` to use adjacency in the sequence of elements as connection. \n Set to :class:`numpy.ndarray` to specify custom connectivity. The array\n should be of shape (n_edges, 2), and each row should specify one \n connection [i, j] with i < j, with rows sorted in ascending order. 
If\n the array's dtype is uint32, property checks are disabled to improve \n efficiency.\n\n Attributes\n ----------\n channel_idx : dict\n Dictionary mapping channel names to indexes.\n locs : array (n_sensors, 3)\n Spatial position of all sensors.\n names : list of str\n Ordered list of sensor names.\n x, y, z : array (n_sensors,)\n Sensor position x, y and z coordinates.\n\n Notes\n -----\n The following are possible 2d-projections:\n\n ``'z root'``:\n the radius of each sensor is set to equal the root of the vertical\n distance from the top of the net.\n ``'cone'``:\n derive x/y coordinate from height based on a cone transformation.\n ``'lower cone'``:\n only use cone for sensors with z < 0.\n Axis and sign :\n For example, ``x+`` for anterior, ``x-`` for posterior.\n\n Examples\n --------\n >>> locs = [(0, 0, 0),\n ... (0, -.25, -.45)]\n >>> sensor_dim = Sensor(locs, names=[\"Cz\", \"Pz\"])\n \"\"\"\n _default_connectivity = 'custom'\n _proj_aliases = {'left': 'x-', 'right': 'x+', 'back': 'y-', 'front': 'y+',\n 'top': 'z+', 'bottom': 'z-'}\n\n def __init__(self, locs, names=None, sysname=None, proj2d='z root',\n connectivity='custom'):\n # 'z root' transformation fails with 32-bit floats\n self.locs = locs = np.asarray(locs, dtype=np.float64)\n n = len(locs)\n if locs.shape != (n, 3):\n raise ValueError(\"locs needs to have shape (n_sensors, 3), got \"\n \"array of shape %s\" % (locs.shape,))\n self.sysname = sysname\n self.default_proj2d = self._interpret_proj(proj2d)\n\n if names is None:\n names = [str(i) for i in range(n)]\n elif len(names) != n:\n raise ValueError(\"Length mismatch: got %i locs but %i names\" %\n (n, len(names)))\n self.names = Datalist(names)\n Dimension.__init__(self, 'sensor', connectivity)\n self._init_secondary()\n\n def _init_secondary(self):\n self.x = self.locs[:, 0]\n self.y = self.locs[:, 1]\n self.z = self.locs[:, 2]\n\n self.channel_idx = {name: i for i, name in enumerate(self.names)}\n # short names\n prefix = 
os.path.commonprefix(self.names)\n if prefix:\n n_pf = len(prefix)\n self.channel_idx.update({name[n_pf:]: i for i, name in\n enumerate(self.names)})\n\n # cache for transformed locations\n self._transformed = {}\n\n def __getstate__(self):\n out = Dimension.__getstate__(self)\n out.update(proj2d=self.default_proj2d, locs=self.locs, names=self.names,\n sysname=self.sysname)\n return out\n\n def __setstate__(self, state):\n if 'name' not in state:\n state['name'] = 'sensor'\n state['connectivity_type'] = 'custom'\n Dimension.__setstate__(self, state)\n self.locs = state['locs']\n self.names = state['names']\n self.sysname = state['sysname']\n self.default_proj2d = state['proj2d']\n self._init_secondary()\n\n def __repr__(self):\n return \"\" % (len(self), self.sysname)\n\n def __len__(self):\n return len(self.locs)\n\n def __eq__(self, other): # Based on equality of sensor names\n return (Dimension.__eq__(self, other) and len(self) == len(other) and\n all(n == no for n, no in zip(self.names, other.names)))\n\n def __getitem__(self, index):\n if np.isscalar(index):\n return self.names[index]\n else:\n return Sensor(self.locs[index], self.names[index], self.sysname,\n self.default_proj2d, self._subgraph(index))\n\n def _as_uv(self):\n return Factor(self.names, name=self.name)\n\n def _axis_format(self, scalar, label):\n return (IndexFormatter(self.names),\n FixedLocator(np.arange(len(self)), 10),\n self._axis_label(label))\n\n def _cluster_properties(self, x):\n \"\"\"Find cluster properties for this dimension\n\n Parameters\n ----------\n x : array of bool (n_clusters, n_sensors)\n The cluster extents, with different clusters stacked along the\n first axis.\n\n Returns\n -------\n cluster_properties : None | Dataset\n A dataset with variables describing cluster properties.\n \"\"\"\n return Dataset(('n_sensors', Var(x.sum(1))))\n\n def _array_index(self, arg):\n \"Convert a dimension-semantic index to an array-like index\"\n if isinstance(arg, str):\n return 
self.channel_idx[arg]\n elif isinstance(arg, Sensor):\n return np.array([self.names.index(name) for name in arg.names])\n elif isinstance(arg, Integral) or (isinstance(arg, np.ndarray) and\n arg.dtype.kind == 'i'):\n return arg\n else:\n return super(Sensor, self)._array_index(arg)\n\n def _array_index_to(self, other):\n \"Int index to access data from self in an order consistent with other\"\n try:\n return np.array([self.names.index(name) for name in other.names])\n except ValueError:\n missing = (name for name in other.names if name not in self.names)\n raise IndexError(f\"{other}: contains different sensors {', '.join(missing)}\")\n\n def _dim_index(self, index):\n if np.isscalar(index):\n return self.names[index]\n else:\n return Dimension._dim_index(self, index)\n\n def _generate_connectivity(self):\n raise RuntimeError(\"Sensor connectivity is not defined. Use \"\n \"Sensor.set_connectivity().\")\n\n @classmethod\n def from_xyz(cls, path=None, **kwargs):\n \"\"\"Create a Sensor instance from a text file with xyz coordinates\"\"\"\n locs = []\n names = []\n with open(path) as f:\n l1 = f.readline()\n n = int(l1.split()[0])\n for line in f:\n elements = line.split()\n if len(elements) == 4:\n x, y, z, name = elements\n x = float(x)\n y = float(y)\n z = float(z)\n locs.append((x, y, z))\n names.append(name)\n assert len(names) == n\n return cls(locs, names, **kwargs)\n\n @classmethod\n def from_sfp(cls, path=None, **kwargs):\n \"\"\"Create a Sensor instance from an sfp file\"\"\"\n locs = []\n names = []\n for line in open(path):\n elements = line.split()\n if len(elements) == 4:\n name, x, y, z = elements\n x = float(x)\n y = float(y)\n z = float(z)\n locs.append((x, y, z))\n names.append(name)\n return cls(locs, names, **kwargs)\n\n @classmethod\n def from_lout(cls, path=None, transform_2d=None, **kwargs):\n \"\"\"Create a Sensor instance from a ``*.lout`` file\"\"\"\n kwargs['transform_2d'] = transform_2d\n locs = []\n names = []\n with open(path) as 
fileobj:\n fileobj.readline()\n for line in fileobj:\n w, x, y, t, f, name = line.split('\\t')\n x = float(x)\n y = float(y)\n locs.append((x, y, 0))\n names.append(name)\n return cls(locs, names, **kwargs)\n\n def _interpret_proj(self, proj):\n if proj == 'default':\n return self.default_proj2d\n elif proj in self._proj_aliases:\n return self._proj_aliases[proj]\n elif proj is None:\n return 'z+'\n else:\n return proj\n\n def get_locs_2d(self, proj='default', extent=1, frame=0, invisible=True):\n \"\"\"Compute a 2 dimensional projection of the sensor locations\n\n Parameters\n ----------\n proj : str\n How to transform 3d coordinates into a 2d map; see class\n documentation for options.\n extent : int\n coordinates will be scaled with minimum value 0 and maximum value\n defined by the value of ``extent``.\n frame : scalar\n Distance of the outermost points from 0 and ``extent`` (default 0).\n invisible : bool\n Return invisible sensors (sensors that would be hidden behind the\n head; default True).\n\n Returns\n -------\n locs_2d : array (n_sensor, 2)\n Sensor position 2d projection in x, y coordinates.\n \"\"\"\n proj = self._interpret_proj(proj)\n\n index = (proj, extent, frame)\n if index in self._transformed:\n locs2d = self._transformed[index]\n else:\n locs2d = self._make_locs_2d(proj, extent, frame)\n self._transformed[index] = locs2d\n\n if not invisible:\n visible = self._visible_sensors(proj)\n if visible is not None:\n return locs2d[visible]\n return locs2d\n\n @LazyProperty\n def _sphere_fit(self):\n \"\"\"Fit the 3d sensor locations to a sphere\n\n Returns\n -------\n params : tuple\n Radius and center (r, cx, cy, cz).\n \"\"\"\n locs = self.locs\n\n # error function\n def err(params):\n # params: [r, cx, cy, cz]\n out = np.sum((locs - params[1:]) ** 2, 1)\n out -= params[0] ** 2\n return out\n\n # initial guess of sphere parameters (radius and center)\n center_0 = np.mean(locs, 0)\n r_0 = np.mean(np.sqrt(np.sum((locs - center_0) ** 2, axis=1)))\n 
start_params = np.hstack((r_0, center_0))\n # do fit\n estimate, _ = leastsq(err, start_params)\n return tuple(estimate)\n\n def _make_locs_2d(self, proj, extent, frame):\n if proj in ('cone', 'lower cone', 'z root'):\n r, cx, cy, cz = self._sphere_fit\n\n # center the sensor locations based on the sphere and scale to\n # radius 1\n sphere_center = np.array((cx, cy, cz))\n locs3d = self.locs - sphere_center\n locs3d /= r\n\n # implement projection\n locs2d = np.copy(locs3d[:, :2])\n\n if proj == 'cone':\n locs2d[:, [0, 1]] *= (1 - locs3d[:, [2]])\n elif proj == 'lower cone':\n lower_half = locs3d[:, 2] < 0\n if any(lower_half):\n locs2d[lower_half] *= (1 - locs3d[lower_half][:, [2]])\n elif proj == 'z root':\n z = locs3d[:, 2]\n z_dist = (z.max() + 0.01) - z # distance form top (add a small\n # buffer so that multiple points at z-max don't get stuck\n # together)\n r = np.sqrt(z_dist) # desired 2d radius\n r_xy = np.sqrt(np.sum(locs3d[:, :2] ** 2, 1)) # current radius in xy\n idx = (r_xy != 0) # avoid zero division\n F = r[idx] / r_xy[idx] # stretching Factor accounting for current r\n locs2d[idx, :] *= F[:, None]\n else:\n match = re.match('([xyz])([+-])', proj)\n if match:\n ax, sign = match.groups()\n if ax == 'x':\n locs2d = np.copy(self.locs[:, 1:])\n if sign == '-':\n locs2d[:, 0] *= -1\n elif ax == 'y':\n locs2d = np.copy(self.locs[:, [0, 2]])\n if sign == '+':\n locs2d[:, 0] *= -1\n elif ax == 'z':\n locs2d = np.copy(self.locs[:, :2])\n if sign == '-':\n locs2d[:, 1] *= -1\n else:\n raise ValueError(\"invalid proj kwarg: %r\" % proj)\n\n # correct extent\n if extent:\n locs2d -= np.min(locs2d, axis=0) # move to bottom left\n locs2d /= (np.max(locs2d) / extent) # scale to extent\n locs2d += (extent - np.max(locs2d, axis=0)) / 2 # center\n if frame:\n locs2d *= (1 - 2 * frame)\n locs2d += frame\n\n return locs2d\n\n def _topomap_outlines(self, proj):\n \"Outline argument for mne-python topomaps\"\n proj = self._interpret_proj(proj)\n if proj in ('cone', 
'lower cone', 'z root', 'z+'):\n return 'top'\n else:\n return None\n\n def _visible_sensors(self, proj):\n \"Create an index for sensors that are visible under a given proj\"\n proj = self._interpret_proj(proj)\n match = re.match('([xyz])([+-])', proj)\n if match:\n # logger.debug(\"Computing sensors visibility for %s\" % proj)\n ax, sign = match.groups()\n\n # depth: + = closer\n depth = self.locs[:, 'xyz'.index(ax)]\n if sign == '-':\n depth = -depth\n\n locs2d = self.get_locs_2d(proj)\n\n n_vertices = len(locs2d)\n all_vertices = np.arange(n_vertices)\n out = np.ones(n_vertices, bool)\n\n # find duplicate points\n # TODO OPT: use pairwise distance\n x, y = np.where(cdist(locs2d, locs2d) == 0)\n duplicate_vertices = ((v1, v2) for v1, v2 in zip(x, y) if v1 < v2)\n for v1, v2 in duplicate_vertices:\n if depth[v1] > depth[v2]:\n out[v2] = False\n # logger.debug(\"%s is hidden behind %s\" % (self.names[v2], self.names[v1]))\n else:\n out[v1] = False\n # logger.debug(\"%s is hidden behind %s\" % (self.names[v1], self.names[v2]))\n use_vertices = all_vertices[out] # use for hull check\n\n hull = ConvexHull(locs2d[use_vertices])\n hull_vertices = use_vertices[hull.vertices]\n\n # for each point:\n # find the closest point on the hull\n # determine whether it's in front or behind\n non_hull_vertices = np.setdiff1d(use_vertices, hull_vertices, True)\n\n hull_locs = locs2d[hull_vertices]\n non_hull_locs = locs2d[non_hull_vertices]\n dists = cdist(non_hull_locs, hull_locs)\n\n closest = np.argmin(dists, 1)\n hide_non_hull_vertices = depth[non_hull_vertices] < depth[hull_vertices][closest]\n hide_vertices = non_hull_vertices[hide_non_hull_vertices]\n # logger.debug(\"%s are hidden behind convex hull\" % ' '.join(self.names[hide_vertices]))\n out[hide_vertices] = False\n return out\n else:\n return None\n\n def index(self, exclude=None, names=False):\n \"\"\"Construct an index for specified sensors\n\n Parameters\n ----------\n exclude : None | list of str, int\n Sensors to 
exclude (by name or index).\n names : bool\n Return channel names instead of index array (default False).\n\n Returns\n -------\n index : array of int (if ``names==False``)\n Numpy index indexing good channels.\n names : Datalist of str (if ``names==True``)\n List of channel names.\n \"\"\"\n if exclude is None:\n return FULL_SLICE\n\n index = np.ones(len(self), dtype=bool)\n for ch in exclude:\n try:\n index[self.channel_idx[ch]] = False\n except KeyError:\n raise ValueError(\"Invalid channel name: %s\" % repr(ch))\n\n if names:\n return self.names[index]\n else:\n return index\n\n def _normalize_sensor_names(self, names, missing='raise'):\n \"Process a user-input list of sensor names\"\n valid_chs = set()\n missing_chs = set()\n for name in names:\n if isinstance(name, Integral):\n name = '%03i' % name\n\n if name.isdigit():\n if name in self.names:\n valid_chs.add(name)\n continue\n else:\n name = 'MEG %s' % name\n\n if name in self.names:\n valid_chs.add(name)\n else:\n missing_chs.add(name)\n\n if missing == 'raise':\n if missing_chs:\n msg = (\"The following channels are not in the raw data: \"\n \"%s\" % ', '.join(sorted(missing_chs)))\n raise ValueError(msg)\n return sorted(valid_chs)\n elif missing == 'return':\n return sorted(valid_chs), missing_chs\n else:\n raise ValueError(\"missing=%s\" % repr(missing))\n\n def intersect(self, dim, check_dims=True):\n \"\"\"Create a Sensor dimension that is the intersection with dim\n\n Parameters\n ----------\n dim : Sensor\n Sensor dimension to intersect with.\n check_dims : bool\n Check dimensions for consistency (e.g., channel locations). Default\n is ``True``. 
Set to ``False`` to intersect channels based on names\n only and ignore mismatch between locations for channels with the\n same name.\n\n Returns\n -------\n sensor : Sensor\n The intersection with dim (returns itself if dim and self are\n equal)\n \"\"\"\n if self.name != dim.name:\n raise DimensionMismatchError(\"Dimensions don't match\")\n\n n_self = len(self)\n names = set(self.names)\n names.intersection_update(dim.names)\n n_intersection = len(names)\n if n_intersection == n_self:\n return self\n elif n_intersection == len(dim.names):\n return dim\n\n index = np.array([name in names for name in self.names])\n if check_dims:\n other_index = np.array([name in names for name in dim.names])\n if not np.all(self.locs[index] == dim.locs[other_index]):\n raise ValueError(\"Sensor locations don't match between \"\n \"dimension objects\")\n return self[index]\n\n def neighbors(self, connect_dist):\n \"\"\"Find neighboring sensors.\n\n Parameters\n ----------\n connect_dist : scalar\n For each sensor, neighbors are defined as those sensors within\n ``connect_dist`` times the distance of the closest neighbor.\n\n Returns\n -------\n neighbors : dict\n Dictionaries whose keys are sensor indices, and whose values are\n lists of neighbors represented as sensor indices.\n \"\"\"\n nb = {}\n pd = pdist(self.locs)\n pd = squareform(pd)\n n = len(self)\n for i in range(n):\n d = pd[i, np.arange(n)]\n d[i] = d.max()\n idx = np.nonzero(d < d.min() * connect_dist)[0]\n nb[i] = idx\n\n return nb\n\n def set_connectivity(self, neighbors=None, connect_dist=None):\n \"\"\"Define the sensor connectivity through neighbors or distance\n\n Parameters\n ----------\n neighbors : sequence of (str, str)\n A list of connections, all assumed to be bidirectional.\n connect_dist : None | scalar\n For each sensor, neighbors are defined as those sensors within\n ``connect_dist`` times the distance of the closest neighbor.\n e.g., 1.75 or 1.6\n \"\"\"\n pairs = set()\n if neighbors is not None and 
connect_dist is not None:\n raise TypeError(\"Can only specify either neighbors or connect_dist\")\n elif connect_dist is None:\n for src, dst in neighbors:\n a = self.names.index(src)\n b = self.names.index(dst)\n if a < b:\n pairs.add((a, b))\n else:\n pairs.add((b, a))\n else:\n nb = self.neighbors(connect_dist)\n for k, vals in nb.items():\n for v in vals:\n if k < v:\n pairs.add((k, v))\n else:\n pairs.add((v, k))\n\n self._connectivity = np.array(sorted(pairs), np.uint32)\n self._connectivity_type = 'custom'\n\n def set_sensor_positions(self, pos, names=None):\n \"\"\"Set the sensor positions\n\n Parameters\n ----------\n pos : array (n_locations, 3) | MNE Montage\n Array with 3 columns describing sensor locations (x, y, and z), or\n an MNE Montage object describing the sensor layout.\n names : None | list of str\n If locations is an array, names should specify a name\n corresponding to each entry.\n \"\"\"\n # MNE Montage\n if hasattr(pos, 'pos') and hasattr(pos, 'ch_names'):\n if names is not None:\n raise TypeError(\"Can't specify names parameter with Montage\")\n names = pos.ch_names\n pos = pos.pos\n elif names is not None and len(names) != len(pos):\n raise ValueError(\"Mismatch between number of locations (%i) and \"\n \"number of names (%i)\" % (len(pos), len(names)))\n\n if names is not None:\n missing = [name for name in self.names if name not in names]\n if missing:\n raise ValueError(\"The following sensors are missing: %r\" % missing)\n index = np.array([names.index(name) for name in self.names])\n pos = pos[index]\n elif len(pos) != len(self.locs):\n raise ValueError(\"If names are not specified pos must specify \"\n \"exactly one position per channel\")\n self.locs[:] = pos\n\n @property\n def values(self):\n return self.names\n\n\ndef as_sensor(obj):\n \"Coerce to Sensor instance\"\n if isinstance(obj, Sensor):\n return obj\n elif isinstance(obj, NDVar) and obj.has_dim('sensor'):\n return obj.sensor\n elif hasattr(obj, 'pos') and hasattr(obj, 
'ch_names') and hasattr(obj, 'kind'):\n return Sensor(obj.pos, obj.ch_names, obj.kind)\n else:\n raise TypeError(\"Can't get sensors from %r\" % (obj,))\n\n\ndef _point_graph(coords, dist_threshold):\n \"Connectivity graph for points based on distance\"\n n = len(coords)\n dist = pdist(coords)\n\n # construct vertex pairs corresponding to dist\n graph = np.empty((len(dist), 2), np.uint32)\n i0 = 0\n for vert, di in enumerate(range(n - 1, 0, -1)):\n i1 = i0 + di\n graph[i0:i1, 0] = vert\n graph[i0:i1, 1] = np.arange(vert + 1, n)\n i0 = i1\n\n return graph[dist < dist_threshold]\n\n\ndef _matrix_graph(matrix):\n \"Create connectivity from matrix\"\n coo = matrix.tocoo()\n assert np.all(coo.data)\n edges = {(min(a, b), max(a, b)) for a, b in zip(coo.col, coo.row) if a != b}\n return np.array(sorted(edges), np.uint32)\n\n\ndef _tri_graph(tris):\n \"\"\"Create connectivity graph from triangles\n\n Parameters\n ----------\n tris : array_like, (n_tris, 3)\n Triangles.\n\n Returns\n -------\n edges : array (n_edges, 2)\n All edges between vertices of tris.\n \"\"\"\n pairs = set()\n for tri in tris:\n a, b, c = sorted(tri)\n pairs.add((a, b))\n pairs.add((a, c))\n pairs.add((b, c))\n return np.array(sorted(pairs), np.uint32)\n\n\ndef _mne_tri_soure_space_graph(source_space, vertices_list):\n \"Connectivity graph for a triangulated mne source space\"\n i = 0\n graphs = []\n for ss, verts in zip(source_space, vertices_list):\n if len(verts) == 0:\n continue\n\n tris = ss['use_tris']\n if tris is None:\n raise ValueError(\"Connectivity unavailable. 
The source space does \"\n \"not seem to be an ico source space.\")\n\n # graph for the whole source space\n src_vertices = ss['vertno']\n graph = _tri_graph(tris)\n\n # select relevant edges\n if not np.array_equal(verts, src_vertices):\n if not np.all(np.in1d(verts, src_vertices)):\n raise RuntimeError(\"Not all vertices are in the source space\")\n edge_in_use = np.logical_and(np.in1d(graph[:, 0], verts),\n np.in1d(graph[:, 1], verts))\n graph = graph[edge_in_use]\n\n # reassign vertex ids based on present vertices\n if len(verts) != verts.max() + 1:\n graph = (np.digitize(graph.ravel(), verts, True)\n .reshape(graph.shape).astype(np.uint32))\n\n # account for index of previous source spaces\n if i > 0:\n graph += i\n i += len(verts)\n\n graphs.append(graph)\n return np.vstack(graphs)\n\n\nclass SourceSpaceBase(Dimension):\n kind = None\n _default_connectivity = 'custom'\n _SRC_PATH = os.path.join(\n '{subjects_dir}', '{subject}', 'bem', '{subject}-{src}-src.fif')\n _ANNOT_PATH = os.path.join(\n '{subjects_dir}', '{subject}', 'label', '{hemi}.{parc}.annot')\n\n _vertex_re = re.compile('([RL])(\\d+)')\n\n def __init__(self, vertices, subject, src, subjects_dir, parc, connectivity, name):\n self.vertices = vertices\n self.subject = subject\n self.src = src\n self._subjects_dir = subjects_dir\n self._init_secondary()\n Dimension.__init__(self, name, connectivity)\n\n # parc\n if parc is None or parc is False:\n self.parc = None\n elif isinstance(parc, Factor):\n if len(parc) != len(self):\n raise ValueError(\"parc has wrong length (%i) for SourceSpace \"\n \"with %i vertices\" % (len(parc), self._n_vert))\n self.parc = parc\n elif isinstance(parc, str):\n self.parc = self._read_parc(parc)\n else:\n raise TypeError(\"Parc needs to be Factor or string, got %r\" % (parc,))\n\n def _read_parc(self, parc):\n raise NotImplementedError(\n f\"parc={parc!r}: can't set parcellation from annotation files for \"\n f\"{self.__class__.__name__}. 
Consider using a Factor instead.\")\n\n def _init_secondary(self):\n self._n_vert = sum(len(v) for v in self.vertices)\n # The source-space type is needed to determine connectivity\n m = SRC_RE.match(self.src)\n if not m:\n raise ValueError(f\"src={self.src!r}; needs to be '{self.kind}-i' where i is an integer\")\n kind, grade, suffix = m.groups()\n if kind != self.kind:\n raise ValueError(f'src={self.src!r}: {self.__class__.__name__} is wrong class')\n self.grade = int(grade)\n\n @classmethod\n def from_file(cls, subjects_dir, subject, src, parc=None):\n \"\"\"SourceSpace dimension from MNE source space file\"\"\"\n if parc is None and cls is SourceSpace:\n parc = 'aparc'\n filename = cls._SRC_PATH.format(subjects_dir=subjects_dir,\n subject=subject, src=src)\n source_spaces = mne.read_source_spaces(filename)\n return cls.from_mne_source_spaces(source_spaces, src, subjects_dir, parc)\n\n @classmethod\n def from_mne_source_spaces(cls, source_spaces, src, subjects_dir,\n parc='aparc', label=None):\n \"\"\"SourceSpace dimension from MNE SourceSpaces object\"\"\"\n if label is None:\n vertices = [ss['vertno'] for ss in source_spaces]\n else:\n vertices, _ = label_src_vertno_sel(label, source_spaces)\n\n return cls(vertices, source_spaces[0]['subject_his_id'], src,\n subjects_dir, parc)\n\n @LazyProperty\n def subjects_dir(self):\n try:\n return mne.utils.get_subjects_dir(self._subjects_dir, True)\n except KeyError:\n raise TypeError(\"subjects_dir was neither specified on SourceSpace \"\n \"dimension nor as environment variable\")\n\n def __getstate__(self):\n state = Dimension.__getstate__(self)\n state.update(vertno=self.vertices, subject=self.subject, src=self.src,\n subjects_dir=self._subjects_dir, parc=self.parc)\n return state\n\n def __setstate__(self, state):\n if 'name' not in state:\n state['name'] = 'source'\n state['connectivity_type'] = 'custom'\n Dimension.__setstate__(self, state)\n self.vertices = state['vertno']\n self.subject = state['subject']\n 
self.src = state['src']\n self._subjects_dir = state['subjects_dir']\n self.parc = state['parc']\n self._init_secondary()\n\n def __repr__(self):\n out = \"<\" + self.__class__.__name__\n if self.name != 'source':\n out += ' ' + self.name\n vert_repr = ', '.join(str(len(v)) for v in self.vertices)\n out += \" [%s], %r\" % (vert_repr, self.subject)\n if self.src is not None:\n out += ', %r' % self.src\n if self.parc is not None:\n out += ', parc=%s' % self.parc.name\n return out + '>'\n\n def __len__(self):\n return self._n_vert\n\n def __eq__(self, other):\n return (\n Dimension.__eq__(self, other) and\n self.subject == other.subject and\n self.src == other.src and\n len(self) == len(other) and\n all(np.array_equal(s, o) for s, o in zip(self.vertices, other.vertices))\n )\n\n def _assert_same_base(self, other):\n \"Assert that ``other`` is based on the same source space\"\n if self.subject != other.subject:\n raise IndexError(f\"Source spaces can not be compared because they are defined on different MRI subjects ({self.subject} vs {other.subject}). 
Consider using eelbrain.morph_source_space().\")\n elif self.src != other.src:\n raise IndexError(f\"Source spaces of different types ({self.src} vs {other.src})\")\n elif self.subjects_dir != other.subjects_dir:\n raise IndexError(f\"Source spaces have differing subjects_dir:\\n{self.subjects_dir}\\n{other.subjects_dir}\")\n\n def __getitem__(self, index):\n raise NotImplementedError\n\n def _axis_format(self, scalar, label):\n return (FormatStrFormatter('%i'),\n FixedLocator(np.arange(len(self)), 10),\n self._axis_label(label))\n\n def _cluster_properties(self, x):\n \"\"\"Find cluster properties for this dimension\n\n Parameters\n ----------\n x : array of bool, (n_clusters, len(self))\n The cluster extents, with different clusters stacked along the\n first axis.\n\n Returns\n -------\n cluster_properties : Dataset\n A dataset with variables describing cluster properties along this\n dimension: \"n_sources\".\n \"\"\"\n if np.any(np.sum(x, 1) == 0):\n raise ValueError(\"Empty cluster\")\n\n ds = Dataset()\n\n # no clusters\n if len(x) == 0:\n ds['n_sources'] = Var([])\n ds['hemi'] = Factor([])\n if self.parc is not None:\n ds['location'] = Factor([])\n return ds\n\n # n sources\n ds['n_sources'] = Var(x.sum(1))\n\n # location\n if self.parc is not None:\n locations = []\n for x_ in x:\n parc_entries = self.parc[x_]\n argmax = np.argmax(np.bincount(parc_entries.x))\n location = parc_entries[argmax]\n locations.append(location)\n ds['location'] = Factor(locations)\n\n return ds\n\n def _distances(self):\n \"Surface distances between source space vertices\"\n dist = -np.ones((self._n_vert, self._n_vert))\n sss = self.get_source_space()\n i0 = 0\n for vertices, ss in zip(self.vertices, sss):\n if ss['dist'] is None:\n path = self._SRC_PATH.format(\n subjects_dir=self.subjects_dir, subject=self.subject,\n src=self.src)\n raise RuntimeError(\n f\"Source space does not contain source distance \"\n f\"information. 
    def connectivity(self, disconnect_parc=False):
        """Create source space connectivity

        Parameters
        ----------
        disconnect_parc : bool
            Reduce connectivity to label-internal connections.

        Returns
        -------
        connectivity : array of int, (n_pairs, 2)
            Array of sorted ``[src, dst]`` pairs, with all ``src < dst``.
        """
        if self._n_vert == 0:
            # empty source space: no pairs
            return np.empty((0, 2), np.uint32)
        elif self._connectivity is None:
            # compute lazily and cache on the instance; requires access to the
            # MNE source space file (located via src/subject/subjects_dir)
            if self.src is None or self.subject is None or self.subjects_dir is None:
                raise ValueError(
                    "In order for a SourceSpace dimension to provide "
                    "connectivity information it needs to be initialized with "
                    "src, subject and subjects_dir parameters")

            self._connectivity = connectivity = self._compute_connectivity()
            assert connectivity.max() < len(self)
        else:
            connectivity = self._connectivity

        if disconnect_parc:
            # keep only edges whose endpoints share a parcellation label
            parc = self.parc
            if parc is None:
                raise RuntimeError("SourceSpace has no parcellation (use "
                                   ".set_parc())")
            idx = np.array([parc[s] == parc[d] for s, d in connectivity])
            connectivity = connectivity[idx]

        return connectivity
np.atleast_2d(seeds)\n dist = cdist(self.coordinates, seeds)\n mindist = np.min(dist, 1)\n x = mindist < extent\n dims = (self,)\n info = {'seeds': seeds, 'extent': extent}\n return NDVar(x, dims, info, name)\n\n @LazyProperty\n def coordinates(self):\n sss = self.get_source_space()\n coords = (ss['rr'][v] for ss, v in zip(sss, self.vertices))\n return np.vstack(coords)\n\n @LazyProperty\n def normals(self):\n sss = self.get_source_space()\n normals = (ss['nn'][v] for ss, v in zip(sss, self.vertices))\n return np.vstack(normals)\n\n def _array_index(self, arg):\n if isinstance(arg, SourceSpace):\n sv = self.vertices\n ov = arg.vertices\n if all(np.array_equal(s, o) for s, o in zip(sv, ov)):\n return FULL_SLICE\n elif any(any(np.setdiff1d(o, s)) for o, s in zip(ov, sv)):\n raise IndexError(\"Index contains unknown sources\")\n else:\n return np.hstack([np.in1d(s, o, True) for s, o in zip(sv, ov)])\n elif isinstance(arg, Integral) or (isinstance(arg, np.ndarray) and\n arg.dtype.kind == 'i'):\n return arg\n elif isinstance(arg, Sequence) and all(isinstance(label, str) for\n label in arg):\n if self.parc is not None and all(a in self.parc.cells for a in arg):\n return self.parc.isin(arg)\n else:\n return [self._array_index(a) for a in arg]\n else:\n return Dimension._array_index(self, arg)\n\n def _array_index_label(self, label):\n if isinstance(label, str):\n if self.parc is None:\n raise RuntimeError(\"SourceSpace has no parcellation\")\n elif label not in self.parc:\n raise KeyError(\"SourceSpace parcellation has no label called \"\n \"%r\" % label)\n idx = self.parc == label\n elif label.hemi == 'both':\n lh_idx = self._array_index_hemilabel(label.lh)\n rh_idx = self._array_index_hemilabel(label.rh)\n idx = np.hstack((lh_idx, rh_idx))\n else:\n idx = np.zeros(len(self), dtype=np.bool8)\n idx_part = self._array_index_hemilabel(label)\n if label.hemi == 'lh':\n idx[:self.lh_n] = idx_part\n elif label.hemi == 'rh':\n idx[self.lh_n:] = idx_part\n else:\n err = 
\"Unknown value for label.hemi: %s\" % repr(label.hemi)\n raise ValueError(err)\n\n return idx\n\n def _array_index_hemilabel(self, label):\n stc_vertices = self.vertices[label.hemi == 'rh']\n idx = np.in1d(stc_vertices, label.vertices, True)\n return idx\n\n def _array_index_to(self, other):\n \"Int index to access data from self in an order consistent with other\"\n self._assert_same_base(other)\n if any(np.any(np.setdiff1d(o, s, True)) for s, o in zip(self.vertices, other.vertices)):\n raise IndexError(f\"{other}: contains sources not in {self}\")\n bool_index = np.hstack(np.in1d(s, o) for s, o in zip(self.vertices, other.vertices))\n return np.flatnonzero(bool_index)\n\n def get_source_space(self, subjects_dir=None):\n \"Read the corresponding MNE source space\"\n if self.src is None:\n raise TypeError(\"Unknown source-space. Specify the src parameter \"\n \"when initializing SourceSpace.\")\n path = self._SRC_PATH.format(\n subjects_dir=subjects_dir or self.subjects_dir,\n subject=self.subject, src=self.src)\n if not os.path.exists(path):\n raise IOError(\n f\"Can't load source space because {path} does not exist; if \"\n f\"the MRI files for {self.subject} were moved, use \"\n f\"eelbrain.load.update_subjects_dir()\")\n return mne.read_source_spaces(path)\n\n def index_for_label(self, label):\n \"\"\"Return the index for a label\n\n Parameters\n ----------\n label : str | Label | BiHemiLabel\n The name of a region in the current parcellation, or a Label object\n (as created for example by mne.read_label). 
If the label does not\n match any sources in the SourceEstimate, a ValueError is raised.\n\n Returns\n -------\n index : NDVar of bool\n Index into the source space dim that corresponds to the label.\n \"\"\"\n idx = self._array_index_label(label)\n if isinstance(label, str):\n name = label\n else:\n name = label.name\n return NDVar(idx, (self,), {}, name)\n\n def intersect(self, other, check_dims=True):\n \"\"\"Create a Source dimension that is the intersection with dim\n\n Parameters\n ----------\n dim : Source\n Dimension to intersect with.\n check_dims : bool\n Check dimensions for consistency (not applicaple to this subclass).\n\n Returns\n -------\n intersection : Source\n The intersection with dim (returns itself if dim and self are\n equal)\n \"\"\"\n self._assert_same_base(other)\n index = np.hstack(np.in1d(s, o) for s, o in zip(self.vertices, other.vertices))\n return self[index]\n\n @property\n def values(self):\n raise NotImplementedError\n\n\nclass SourceSpace(SourceSpaceBase):\n \"\"\"MNE surface-based source space\n\n Parameters\n ----------\n vertices : list of 2 int arrays\n The vertex identities of the dipoles in the source space (left and\n right hemisphere separately).\n subject : str\n The mri-subject name.\n src : str\n The kind of source space used (e.g., 'ico-4'; only ``ico`` is currently\n supported.\n subjects_dir : str\n The path to the subjects_dir (needed to locate the source space\n file).\n parc : None | str\n Add a parcellation to the source space to identify vertex location.\n Only applies to ico source spaces, default is 'aparc'.\n connectivity : 'grid' | 'none' | array of int, (n_edges, 2)\n Connectivity between elements. Set to ``\"none\"`` for no connections or\n ``\"grid\"`` to use adjacency in the sequence of elements as connection.\n Set to :class:`numpy.ndarray` to specify custom connectivity. 
The array\n should be of shape (n_edges, 2), and each row should specify one\n connection [i, j] with i < j, with rows sorted in ascending order. If\n the array's dtype is uint32, property checks are disabled to improve\n efficiency.\n name : str\n Dimension name (default ``\"source\"``).\n\n Attributes\n ----------\n coordinates : array (n_sources, 3)\n Spatial coordinate for each source.\n normals : array (n_sources, 3)\n Orientation (direction) of each source.\n parc : Factor\n Parcellation (one label for each source).\n\n See Also\n --------\n VolumeSourceSpace : volume source space\n\n Notes\n -----\n besides numpy indexing, the following indexes are possible:\n\n - mne Label objects\n - 'lh' or 'rh' to select an entire hemisphere\n\n \"\"\"\n kind = 'ico'\n\n def __init__(self, vertices, subject=None, src=None, subjects_dir=None,\n parc='aparc', connectivity='custom', name='source'):\n SourceSpaceBase.__init__(self, vertices, subject, src, subjects_dir, parc, connectivity, name)\n\n def _init_secondary(self):\n SourceSpaceBase._init_secondary(self)\n assert len(self.vertices) == 2, \"ico-based SourceSpaces need \" \\\n \"exactly two vertices arrays\"\n self.lh_vertices = self.vertices[0]\n self.rh_vertices = self.vertices[1]\n self.lh_n = len(self.lh_vertices)\n self.rh_n = len(self.rh_vertices)\n\n def _read_parc(self, parc):\n fname = self._ANNOT_PATH.format(\n subjects_dir=self.subjects_dir, subject=self.subject,\n hemi='%s', parc=parc)\n labels_lh, _, names_lh = read_annot(fname % 'lh')\n labels_rh, _, names_rh = read_annot(fname % 'rh')\n x_lh = labels_lh[self.lh_vertices]\n x_lh[x_lh == -1] = -2\n x_rh = labels_rh[self.rh_vertices]\n x_rh[x_rh >= 0] += len(names_lh)\n names = chain(('unknown-lh', 'unknown-rh'),\n (name.decode() + '-lh' for name in names_lh),\n (name.decode() + '-rh' for name in names_rh))\n return Factor(np.hstack((x_lh, x_rh)), parc,\n labels={i: name for i, name in enumerate(names, -2)})\n\n def __iter__(self):\n return (temp % v for 
temp, vertices in\n zip(('L%i', 'R%i'), self.vertices) for v in vertices)\n\n def __getitem__(self, index):\n if isinstance(index, Integral):\n if index < self.lh_n:\n return 'L%i' % self.lh_vertices[index]\n elif index < self._n_vert:\n return 'R%i' % self.rh_vertices[index - self.lh_n]\n else:\n raise IndexError(\"SourceSpace Index out of range: %i\" % index)\n\n int_index = index_to_int_array(index, self._n_vert)\n bool_index = np.bincount(int_index, minlength=self._n_vert).astype(bool)\n\n # vertices\n boundaries = np.cumsum(tuple(chain((0,), (len(v) for v in self.vertices))))\n vertices = [v[bool_index[boundaries[i]:boundaries[i + 1]]]\n for i, v in enumerate(self.vertices)]\n\n # parc\n parc = None if self.parc is None else self.parc[index]\n\n return SourceSpace(vertices, self.subject, self.src, self.subjects_dir,\n parc, self._subgraph(int_index), self.name)\n\n def _as_uv(self):\n return Factor(('%s%i' % (hemi, i) for hemi, vertices in\n zip(('L', 'R'), self.vertices) for i in vertices),\n name=self.name)\n\n def _cluster_properties(self, x):\n ds = SourceSpaceBase._cluster_properties(self, x)\n # hemi\n hemis = []\n for x_ in x:\n where = np.flatnonzero(x_)\n src_in_lh = (where < self.lh_n)\n if np.all(src_in_lh):\n hemis.append('lh')\n elif np.any(src_in_lh):\n hemis.append('bh')\n else:\n hemis.append('rh')\n ds['hemi'] = Factor(hemis)\n return ds\n\n def _link_midline(self, maxdist=0.015):\n \"\"\"Link sources in the left and right hemispheres\n\n Link each source to the nearest source in the opposing hemisphere if\n that source is closer than ``maxdist``.\n\n Parameters\n ----------\n maxdist : scalar [m]\n Add an interhemispheric connection between any two vertices whose\n distance is less than this number (in meters; default 0.015).\n \"\"\"\n if self.kind != 'ico':\n raise ValueError(\"Can only link hemispheres in 'ico' source \"\n \"spaces, not in %s\" % repr(self.kind))\n old_con = self.connectivity()\n\n # find vertices to connect\n coords_lh = 
self.coordinates[:self.lh_n]\n coords_rh = self.coordinates[self.lh_n:]\n dists = cdist(coords_lh, coords_rh)\n close_lh, close_rh = np.nonzero(dists < maxdist)\n unique_close_lh = np.unique(close_lh)\n unique_close_rh = np.unique(close_rh)\n new_con = {(lh, np.argmin(dists[lh]) + self.lh_n) for lh in\n unique_close_lh}\n new_con.update((np.argmin(dists[:, rh]), rh + self.lh_n) for rh in\n unique_close_rh)\n new_con = np.array(sorted(new_con), np.uint32)\n self._connectivity = np.vstack((old_con, new_con))\n\n def _compute_connectivity(self):\n src = self.get_source_space()\n return _mne_tri_soure_space_graph(src, self.vertices)\n\n def _array_index(self, arg):\n if isinstance(arg, MNE_LABEL):\n return self._array_index_label(arg)\n elif isinstance(arg, str):\n if arg == 'lh':\n return slice(self.lh_n)\n elif arg == 'rh':\n if self.rh_n:\n return slice(self.lh_n, None)\n else:\n return slice(0, 0)\n else:\n m = self._vertex_re.match(arg)\n if m is None:\n return self._array_index_label(arg)\n else:\n hemi, vertex = m.groups()\n vertex = int(vertex)\n vertices = self.vertices[hemi == 'R']\n i = int(np.searchsorted(vertices, vertex))\n if vertices[i] == vertex:\n if hemi == 'R':\n return i + self.lh_n\n else:\n return i\n else:\n raise IndexError(\"SourceSpace does not contain vertex \"\n \"%r\" % (arg,))\n return SourceSpaceBase._array_index(self, arg)\n\n def _dim_index(self, index):\n if np.isscalar(index):\n if index >= self.lh_n:\n return 'R%i' % (self.rh_vertices[index - self.lh_n])\n else:\n return 'L%i' % (self.lh_vertices[index])\n else:\n return SourceSpaceBase._dim_index(self, index)\n\n def _label(self, vertices, name, color, subjects_dir=None, sss=None):\n lh_vertices, rh_vertices = vertices\n if sss is None:\n sss = self.get_source_space(subjects_dir)\n\n if len(lh_vertices):\n lh = mne.Label(lh_vertices, hemi='lh', color=color).fill(sss, name + '-lh')\n else:\n lh = None\n\n if len(rh_vertices):\n rh = mne.Label(rh_vertices, hemi='rh', 
color=color).fill(sss, name + '-rh')\n else:\n rh = None\n\n return lh, rh\n\n def _mask_label(self, subjects_dir=None):\n \"Create a Label that masks the areas not covered in this SourceSpace\"\n sss = self.get_source_space(subjects_dir)\n if self.lh_n:\n lh_verts = np.setdiff1d(sss[0]['vertno'], self.lh_vertices)\n else:\n lh_verts = ()\n\n if self.rh_n:\n rh_verts = np.setdiff1d(sss[1]['vertno'], self.rh_vertices)\n else:\n rh_verts = ()\n\n return self._label((lh_verts, rh_verts), 'mask', (0, 0, 0),\n subjects_dir, sss)\n\n def _mask_ndvar(self, subjects_dir=None):\n if subjects_dir is None:\n subjects_dir = self.subjects_dir\n sss = self.get_source_space(subjects_dir)\n vertices = [sss[0]['vertno'], sss[1]['vertno']]\n data = [np.in1d(vert, self_vert) for vert, self_vert in\n zip(vertices, self.vertices)]\n source = SourceSpace(vertices, self.subject, self.src, subjects_dir,\n self.parc.name, name=self.name)\n return NDVar(np.concatenate(data), (source,))\n\n def surface_coordinates(self, surf='white'):\n \"\"\"Load surface coordinates for any FreeSurfer surface\n\n Parameters\n ----------\n surf : str\n Name of the FreeSurfer surface.\n\n Returns\n -------\n coords : array (n_sources, 3)\n Coordinates for each source contained in the source space.\n \"\"\"\n out = []\n for hemi, vertices in zip(('lh', 'rh'), self.vertices):\n if len(vertices) == 0:\n continue\n path = Path(f'{self.subjects_dir}/{self.subject}/surf/{hemi}.{surf}')\n coords, tris = mne.read_surface(str(path))\n out.append(coords[vertices])\n\n if len(out) == 1:\n return out[0]\n else:\n return np.vstack(out)\n\n\nclass VolumeSourceSpace(SourceSpaceBase):\n \"\"\"MNE volume source space\n\n Parameters\n ----------\n vertices : list of 2 int arrays\n The vertex identities of the dipoles in the source space (left and\n right hemisphere separately).\n subject : str\n The mri-subject name.\n src : str\n The kind of source space used (e.g., 'ico-4'; only ``ico`` is currently\n supported.\n 
subjects_dir : str\n The path to the subjects_dir (needed to locate the source space\n file).\n parc : None | str\n Add a parcellation to the source space to identify vertex location.\n Only applies to ico source spaces, default is 'aparc'.\n connectivity : 'grid' | 'none' | array of int, (n_edges, 2)\n Connectivity between elements. Set to ``\"none\"`` for no connections or\n ``\"grid\"`` to use adjacency in the sequence of elements as connection.\n Set to :class:`numpy.ndarray` to specify custom connectivity. The array\n should be of shape (n_edges, 2), and each row should specify one\n connection [i, j] with i < j, with rows sorted in ascending order. If\n the array's dtype is uint32, property checks are disabled to improve\n efficiency.\n name : str\n Dimension name (default ``\"source\"``).\n\n See Also\n --------\n SourceSpace : surface-based source space\n \"\"\"\n kind = 'vol'\n\n def __init__(self, vertices, subject=None, src=None, subjects_dir=None,\n parc=None, connectivity='custom', name='source'):\n if isinstance(parc, str):\n raise NotImplementedError(f\"parc={parc!r}: specify parcellation as Factor\")\n if isinstance(vertices, np.ndarray):\n vertices = [vertices]\n SourceSpaceBase.__init__(self, vertices, subject, src, subjects_dir, parc, connectivity, name)\n\n def _init_secondary(self):\n SourceSpaceBase._init_secondary(self)\n if len(self.vertices) != 1:\n raise ValueError(\"A VolumeSourceSpace needs exactly one vertices \"\n \"array\")\n\n def __iter__(self):\n return iter(self.vertices[0])\n\n def __getitem__(self, index):\n if isinstance(index, Integral):\n try:\n return str(self.vertices[0][index])\n except IndexError:\n raise IndexError(\"VolumeSourceSpace Index out of range: %i\" % index)\n else:\n parc = None if self.parc is None else self.parc[index]\n return VolumeSourceSpace(\n [self.vertices[0][index]], self.subject, self.src,\n self.subjects_dir, parc, self._subgraph(index), self.name)\n\n def _as_uv(self):\n return 
Factor(self.vertices[0], name=self.name)\n\n def _compute_connectivity(self):\n src = self.get_source_space()\n coords = src[0]['rr'][self.vertices[0]]\n dist_threshold = self.grade * 0.0011\n return _point_graph(coords, dist_threshold)\n\n def _distances(self):\n sss = self.get_source_space()\n coords = sss[0]['rr'][self.vertices[0]]\n return squareform(pdist(coords))\n\n def _array_index(self, arg):\n if isinstance(arg, str):\n m = re.match('\\d+$', arg)\n if m:\n return np.searchsorted(self.vertices[0], int(m.groups(1)))\n return SourceSpaceBase._array_index(self, arg)\n\n def _dim_index(self, index):\n if np.isscalar(index):\n return str(self.vertices[0][index])\n else:\n return SourceSpaceBase._dim_index(self, index)\n\n\nclass UTS(Dimension):\n \"\"\"Dimension object for representing uniform time series\n\n Parameters\n ----------\n tmin : float\n First time point (inclusive).\n tstep : float\n Time step between samples.\n nsamples : int\n Number of samples.\n\n Notes\n -----\n Special indexing:\n\n (tstart, tstop) : tuple\n Restrict the time to the indicated window (either end-point can be\n None).\n\n \"\"\"\n _default_connectivity = 'grid'\n unit = 's'\n _tol = 0.000001 # tolerance for deciding if time values are equal\n\n def __init__(self, tmin, tstep, nsamples):\n Dimension.__init__(self, 'time', 'grid')\n self.tmin = float(tmin) # Python float has superior precision\n self.tstep = float(tstep)\n self.nsamples = int(nsamples)\n self._init_secondary()\n\n def _init_secondary(self):\n self.tmax = self.tmin + self.tstep * (self.nsamples - 1)\n self.tstop = self.tmin + self.tstep * self.nsamples\n self._times = None\n self._n_decimals = max(n_decimals(self.tmin), n_decimals(self.tstep))\n\n @property # not a LazyProperty because ithas to change after .set_time()\n def times(self):\n if self._times is None:\n self._times = self.tmin + np.arange(self.nsamples) * self.tstep\n return self._times\n\n @classmethod\n def from_int(cls, first, last, sfreq):\n 
\"\"\"Create a UTS dimension from sample index and sampling frequency\n\n Parameters\n ----------\n first : int\n Index of the first sample, relative to 0.\n last : int\n Index of the last sample, relative to 0.\n sfreq : scalar\n Sampling frequency, in Hz.\n \"\"\"\n tmin = first / sfreq\n nsamples = last - first + 1\n tstep = 1. / sfreq\n return cls(tmin, tstep, nsamples)\n\n def __getstate__(self):\n out = Dimension.__getstate__(self)\n out.update(tmin=self.tmin, tstep=self.tstep, nsamples=self.nsamples)\n return out\n\n def __setstate__(self, state):\n if 'name' not in state:\n state['name'] = 'time'\n state['connectivity'] = None\n state['connectivity_type'] = 'grid'\n Dimension.__setstate__(self, state)\n self.tmin = state['tmin']\n self.tstep = state['tstep']\n self.nsamples = state['nsamples']\n self._init_secondary()\n\n def __repr__(self):\n return \"UTS(%s, %s, %s)\" % (self.tmin, self.tstep, self.nsamples)\n\n def _as_scalar_array(self):\n return self.times\n\n def _axis_data(self):\n return self.times\n\n def _axis_im_extent(self):\n return self.tmin - 0.5 * self.tstep, self.tmax + 0.5 * self.tstep\n\n def _axis_format(self, scalar, label):\n use_s = max(self.tmax, -self.tmin) >= 10.\n if label is True:\n label = \"Time [%s]\" % ('s' if use_s else 'ms')\n\n if use_s:\n if scalar:\n fmt = FuncFormatter(lambda x, pos: '%.5g' % x)\n else:\n fmt = FuncFormatter(lambda x, pos: '%.5g' % self.times[x])\n elif scalar:\n fmt = FuncFormatter(lambda x, pos: '%i' % round(1e3 * x))\n else:\n fmt = FuncFormatter(lambda x, pos:\n '%i' % round(1e3 * self.times[int(round(x))]))\n return fmt, None, label\n\n def _bin(self, start, stop, step, nbins):\n if nbins is not None:\n raise NotImplementedError(\"nbins for UTS dimension\")\n\n if start is None:\n start = self.tmin\n\n if stop is None:\n stop = self.tstop\n\n n_bins = int(ceil(round((stop - start) / step, 2)))\n edges = [start + n * step for n in range(n_bins)]\n edges.append(stop)\n out_dim = UTS(start + step / 2, 
step, n_bins)\n return edges, out_dim\n\n def __len__(self):\n return self.nsamples\n\n def __eq__(self, other):\n return (Dimension.__eq__(self, other) and\n self.nsamples == other.nsamples and\n abs(self.tmin - other.tmin) < self._tol and\n abs(self.tstep - other.tstep) < self._tol)\n\n def __contains__(self, index):\n return self.tmin - self.tstep / 2 < index < self.tstop - self.tstep / 2\n\n def __getitem__(self, index):\n if isinstance(index, Integral):\n return self.times[index]\n elif not isinstance(index, slice):\n # convert index to slice\n int_index = index_to_int_array(index, self.nsamples)\n start = int_index[0]\n steps = np.unique(np.diff(int_index))\n if len(steps) > 1:\n raise NotImplementedError(\"non-uniform time series\")\n step = steps[0]\n stop = int_index[-1] + step\n index = slice(start, stop, step)\n\n start = 0 if index.start is None else index.start\n if start < 0:\n start += self.nsamples\n stop = self.nsamples if index.stop is None else index.stop\n if stop < 0:\n stop += self.nsamples\n\n tmin = self.times[start]\n nsamples = stop - start\n if nsamples < 0:\n raise IndexError(\"Time index out of range: %s.\" % repr(index))\n\n if index.step is None or index.step == 1:\n tstep = self.tstep\n else:\n tstep = self.tstep * index.step\n nsamples = int(ceil(nsamples / index.step))\n\n return UTS(tmin, tstep, nsamples)\n\n def _cluster_bounds(self, x):\n \"\"\"Cluster start and stop in samples\n\n Parameters\n ----------\n x : array of bool, (n_clusters, len(self))\n The cluster extents, with different clusters stacked along the\n first axis.\n \"\"\"\n # find indices of cluster extent\n row, col = np.nonzero(x)\n try:\n ts = [col[row == i][[0, -1]] for i in range(len(x))]\n except IndexError:\n raise ValueError(\"Empty cluster\")\n ts = np.array(ts)\n return ts\n\n def _cluster_properties(self, x):\n \"\"\"Find cluster properties for this dimension\n\n Parameters\n ----------\n x : array of bool, (n_clusters, len(self))\n The cluster extents, 
with different clusters stacked along the\n first axis.\n\n Returns\n -------\n cluster_properties : Dataset\n A dataset with variables describing cluster properties along this\n dimension: \"tstart\", \"tstop\", \"duration\".\n \"\"\"\n ds = Dataset()\n\n # no clusters\n if len(x) == 0:\n ds['tstart'] = Var([])\n ds['tstop'] = Var([])\n ds['duration'] = Var([])\n return ds\n\n # create time values\n bounds = self._cluster_bounds(x)\n tmin = self.times[bounds[:, 0]]\n tmax = self.times[bounds[:, 1]]\n ds['tstart'] = Var(tmin)\n ds['tstop'] = Var(tmax + self.tstep)\n ds['duration'] = ds.eval(\"tstop - tstart\")\n return ds\n\n def _array_index(self, arg):\n if np.isscalar(arg):\n i = int(round((arg - self.tmin) / self.tstep))\n if 0 <= i < self.nsamples:\n return i\n else:\n raise ValueError(\"Time index %s out of range (%s, %s)\"\n % (arg, self.tmin, self.tmax))\n elif isinstance(arg, UTS):\n if self.tmin == arg.tmin:\n start = None\n stop = arg.nsamples\n elif arg.tmin < self.tmin:\n err = (\"The index time dimension starts before the reference \"\n \"time dimension\")\n raise DimensionMismatchError(err)\n else:\n start_float = (arg.tmin - self.tmin) / self.tstep\n start = int(round(start_float))\n if abs(start_float - start) > self._tol:\n err = (\"The index time dimension contains values not \"\n \"contained in the reference time dimension\")\n raise DimensionMismatchError(err)\n stop = start + arg.nsamples\n\n if self.tstep == arg.tstep:\n step = None\n elif self.tstep > arg.tstep:\n err = (\"The index time dimension has a higher sampling rate \"\n \"than the reference time dimension\")\n raise DimensionMismatchError(err)\n else:\n step_float = arg.tstep / self.tstep\n step = int(round(step_float))\n if abs(step_float - step) > self._tol:\n err = (\"The index time dimension contains values not \"\n \"contained in the reference time dimension\")\n raise DimensionMismatchError(err)\n\n if stop == self.nsamples:\n stop = None\n\n return slice(start, stop, step)\n 
elif isinstance(arg, np.ndarray) and arg.dtype.kind in 'fi':\n return np.array([self._array_index(i) for i in arg])\n else:\n return super(UTS, self)._array_index(arg)\n\n def _array_index_for_slice(self, start, stop=None, step=None):\n \"Create a slice into the time axis\"\n if (start is not None) and (stop is not None) and (start >= stop):\n raise ValueError(\"tstart must be smaller than tstop\")\n\n if start is None:\n start_ = None\n elif start <= self.tmin - self.tstep:\n raise IndexError(\"Time index slice out of range: start=%s\" % start)\n else:\n start_float = (start - self.tmin) / self.tstep\n start_ = int(start_float)\n if start_float - start_ > 0.000001:\n start_ += 1\n\n if stop is None:\n stop_ = None\n elif stop - self.tstop > self._tol:\n raise ValueError(\"Time index slice out of range: stop=%s\" % stop)\n else:\n stop_float = (stop - self.tmin) / self.tstep\n stop_ = int(stop_float)\n if stop_float - stop_ > 0.000001:\n stop_ += 1\n\n if step is None:\n step_ = None\n else:\n step_float = step / self.tstep\n step_ = int(round(step_float))\n if step_ != round(step_float, 4):\n raise ValueError(\"Time index slice step needs to be a multiple \"\n \"of the data time step (%s), got %s\" %\n (self.tstep, step))\n\n return slice(start_, stop_, step_)\n\n def _dim_index(self, arg):\n if isinstance(arg, slice):\n return slice(None if arg.start is None else self._dim_index(arg.start),\n None if arg.stop is None else self._dim_index(arg.stop),\n None if arg.step is None else arg.step * self.tstep)\n elif np.isscalar(arg):\n return round(self.tmin + arg * self.tstep, self._n_decimals)\n else:\n return Dimension._dim_index(self, arg)\n\n def intersect(self, dim, check_dims=True):\n \"\"\"Create a UTS dimension that is the intersection with ``dim``\n\n Parameters\n ----------\n dim : UTS\n Dimension to intersect with.\n check_dims : bool\n Check dimensions for consistency (not applicaple to this subclass).\n\n Returns\n -------\n intersection : UTS\n The 
intersection with dim (returns itself if dim and self are\n equal)\n \"\"\"\n if self == dim:\n return self\n elif self.tstep != dim.tstep:\n raise NotImplementedError(\"Intersection of UTS with unequal tstep :(\")\n\n tstep = self.tstep\n tmin_diff = abs(self.tmin - dim.tmin) / tstep\n if abs(tmin_diff - round(tmin_diff)) > self._tol:\n raise DimensionMismatchError(\"UTS dimensions have different times\")\n tmin = max(self.tmin, dim.tmin)\n\n tmax = min(self.tmax, dim.tmax)\n nsamples = int(round((tmax - tmin) / tstep)) + 1\n if nsamples <= 0:\n raise DimensionMismatchError(\"UTS dimensions don't overlap\")\n\n return UTS(tmin, tstep, nsamples)\n\n def _union(self, other):\n # sloppy implementation\n if self == other:\n return self\n tstep = min(self.tstep, other.tstep)\n tmin = min(self.tmin, other.tmin)\n n_samples = int(round((max(self.tstop, other.tstop) - tmin) / tstep))\n return UTS(tmin, tstep, n_samples)\n\n\ndef intersect_dims(dims1, dims2, check_dims=True):\n \"\"\"Find the intersection between two multidimensional spaces\n\n Parameters\n ----------\n dims1, dims2 : tuple of dimension objects\n Two spaces involving the same dimensions with overlapping values.\n check_dims : bool\n Check dimensions for consistency (e.g., channel locations in a Sensor\n dimension). Default is ``True``. Set to ``False`` to ignore non-fatal\n mismatches.\n\n Returns\n -------\n dims : tuple of Dimension objects\n Intersection of dims1 and dims2.\n \"\"\"\n return tuple(d1.intersect(d2, check_dims=check_dims) for d1, d2 in zip(dims1, dims2))\n\n\nEVAL_CONTEXT.update(Var=Var, Factor=Factor, extrema=extrema)\n","sub_path":"eelbrain/_data_obj.py","file_name":"_data_obj.py","file_ext":"py","file_size_in_byte":333768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"616269749","text":"from __future__ import annotations\n\nimport importlib\nimport os\nimport sys\nfrom datetime import datetime, timezone\nfrom functools import lru_cache\nfrom textwrap import dedent as _dedent\n\nimport tomli_w\n\nfrom hatch.config.user import RootConfig\nfrom hatch.env.utils import add_verbosity_flag\nfrom hatch.utils.toml import load_toml_file\n\n\ndef dedent(text):\n return _dedent(text[1:])\n\n\ndef remove_trailing_spaces(text):\n return ''.join(f'{line.rstrip()}\\n' for line in text.splitlines(True))\n\n\ndef extract_requirements(lines):\n for line in lines:\n line = line.rstrip()\n if line and not line.startswith('#'):\n yield line\n\n\ndef get_current_timestamp():\n return datetime.now(timezone.utc).timestamp()\n\n\ndef assert_plugin_installation(subprocess_run, dependencies: list[str], *, verbosity=0):\n command = [\n sys.executable,\n '-u',\n '-m',\n 'pip',\n 'install',\n '--disable-pip-version-check',\n '--no-python-version-warning',\n ]\n add_verbosity_flag(command, verbosity, adjustment=-1)\n command.extend(dependencies)\n\n subprocess_run.assert_called_once_with(command, shell=False)\n\n\ndef assert_files(directory, expected_files, *, check_contents=True):\n start = str(directory)\n expected_relative_files = {str(f.path): f.contents for f in expected_files}\n seen_relative_file_paths = set()\n\n for root, _, files in os.walk(directory):\n relative_path = os.path.relpath(root, start)\n\n # First iteration\n if relative_path == '.':\n relative_path = ''\n\n for file_name in files:\n relative_file_path = os.path.join(relative_path, file_name)\n seen_relative_file_paths.add(relative_file_path)\n\n if check_contents and relative_file_path in expected_relative_files:\n with open(os.path.join(start, relative_file_path), encoding='utf-8') as f:\n assert f.read() == expected_relative_files[relative_file_path], relative_file_path\n else: # no cov\n pass\n\n expected_relative_file_paths = set(expected_relative_files)\n\n 
missing_files = expected_relative_file_paths - seen_relative_file_paths\n assert not missing_files, f'Missing files: {\", \".join(sorted(missing_files))}'\n\n extra_files = seen_relative_file_paths - expected_relative_file_paths\n assert not extra_files, f'Extra files: {\", \".join(sorted(extra_files))}'\n\n\ndef get_template_files(template_name, project_name, **kwargs):\n kwargs['project_name'] = project_name\n kwargs['project_name_normalized'] = project_name.lower().replace('.', '-')\n kwargs['package_name'] = kwargs['project_name_normalized'].replace('-', '_')\n\n config = RootConfig({})\n kwargs.setdefault('author', config.template.name)\n kwargs.setdefault('email', config.template.email)\n kwargs.setdefault('year', str(datetime.now(timezone.utc).year))\n\n return __load_template_module(template_name)(**kwargs)\n\n\n@lru_cache\ndef __load_template_module(template_name):\n template = importlib.import_module(f'..templates.{template_name}', __name__)\n return template.get_files\n\n\ndef update_project_environment(project, name, config):\n project_file = project.root / 'pyproject.toml'\n raw_config = load_toml_file(str(project_file))\n\n env_config = raw_config.setdefault('tool', {}).setdefault('hatch', {}).setdefault('envs', {}).setdefault(name, {})\n env_config.update(config)\n\n project.config.envs[name] = project.config.envs.get(name, project.config.envs['default']).copy()\n project.config.envs[name].update(env_config)\n\n with open(str(project_file), 'w', encoding='utf-8') as f:\n f.write(tomli_w.dumps(raw_config))\n","sub_path":"tests/helpers/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":3704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"338141801","text":"from tensorflow.keras.layers import Dense, Activation, Flatten, SimpleRNN\r\nfrom tensorflow.keras.models import Sequential\r\nfrom tensorflow.keras.utils import plot_model\r\nfrom tensorflow.keras.regularizers import l2\r\nimport tensorflow as tf\r\nimport preprocess\r\nfrom tensorflow.keras.callbacks import TensorBoard\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\nimport time\r\nimport os\r\nimport noise\r\n\r\n\r\n#一些基本设定\r\ncheckpoint_flag = int(input(\"是否进行断点续训?是:1;否:0\")) #询问是否使用以往最优的参数进行测试\r\nsnr=0 #目标领域添加高斯白噪音后的信噪比\r\n\r\n\r\n# 训练参数\r\nbatch_size = 512\r\nepochs = 1\r\nnum_classes = 10\r\nlength = 2048\r\nBatchNorm = True # 是否批量归一化\r\nnumber = 1000 # 每类样本的数量\r\nnormal = True # 是否标准化\r\nrate = [0.7,0.2,0.1] # 测试集验证集划分比例\r\n\r\n\r\npath = r'data\\0HP' #训练和源领域测试的数据集路径\r\ntarget_path = r'data\\1HP' #使用马力=n的数据集作为目标测试领域,检验模型泛用性\r\n\r\nx_train, y_train, x_valid, y_valid, x_test, y_test = preprocess.prepro(d_path=path,length=length,\r\n number=number,\r\n normal=normal,\r\n rate=rate,\r\n enc=True, enc_step=28)\r\n#插入新维度,方便卷积网络输入\r\nx_train, x_valid, x_test = x_train[:,:,np.newaxis], x_valid[:,:,np.newaxis], x_test[:,:,np.newaxis]\r\n# 输入数据的维度\r\ninput_shape =x_train.shape[1:]\r\n\r\nprint('训练样本维度:', x_train.shape)\r\nprint(x_train.shape[0], '训练样本个数')\r\nprint('验证样本的维度', x_valid.shape)\r\nprint(x_valid.shape[0], '验证样本个数')\r\nprint('测试样本的维度', x_test.shape)\r\nprint(x_test.shape[0], '测试样本个数')\r\n\r\nmodel_name = \"1-RNN\"\r\n\r\n# 实例化一个Sequential\r\nmodel = Sequential()\r\n#第一层RNN\r\nmodel.add(SimpleRNN(16, activation='tanh', kernel_initializer='glorot_uniform',\r\n recurrent_initializer='orthogonal', bias_initializer='zeros', return_sequences=True))\r\n\r\n#第二层RNN\r\nmodel.add(SimpleRNN(16, activation='tanh', kernel_initializer='glorot_uniform',\r\n recurrent_initializer='orthogonal', bias_initializer='zeros', return_sequences=True))\r\n\r\n#第三层RNN\r\nmodel.add(SimpleRNN(16, activation='tanh', 
kernel_initializer='glorot_uniform',\r\n recurrent_initializer='orthogonal', bias_initializer='zeros', return_sequences=True))\r\n\r\n#拉直\r\nmodel.add(Flatten())\r\n\r\n# 添加全连接层1\r\nmodel.add(Dense(100))\r\nmodel.add(Activation(\"relu\"))\r\n\r\n# 增加输出层,共num_classes个单元\r\n#model.add(Dense(units=num_classes, activation='softmax', kernel_regularizer=l2(1e-4)))\r\nmodel.add(Dense(units=num_classes, activation='softmax'))\r\n\r\n# 编译模型\r\nmodel.compile(optimizer=tf.keras.optimizers.Adam(lr=0.001), loss='categorical_crossentropy',\r\n metrics=['accuracy'])\r\n\r\nlogdir = os.path.join('.\\logs\\RNN-1_logs')\r\nsummary = tf.keras.callbacks.TensorBoard(log_dir=logdir, histogram_freq=1)\r\n\r\nnow_time = time.time() #记录训练开始时间\r\nif checkpoint_flag :\r\n print('开始断点续训')\r\n checkpoint_save_path = \"./checkpoint/3-RNN.ckpt\"\r\n if os.path.exists(checkpoint_save_path + '.index'):\r\n print('-------------load the model-----------------')\r\n model.load_weights(checkpoint_save_path)\r\n cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_save_path,\r\n save_weights_only=True,\r\n save_best_only=True)\r\n # 开始模型训练\r\n history = model.fit(x=x_train, y=y_train, batch_size=batch_size, epochs=epochs,\r\n verbose=1, validation_data=(x_valid, y_valid), shuffle=True,\r\n callbacks=[cp_callback,summary])\r\nelse :#开始模型训练\r\n print('未进行断点续训')\r\n history = model.fit(x=x_train, y=y_train, batch_size=batch_size, epochs=epochs,\r\n verbose=1, validation_data=(x_valid, y_valid), shuffle=True,\r\n callbacks=[summary])\r\n\r\ntotal_time = time.time() - now_time #记录训练总时间\r\n\r\n# 评估模型\r\nnow_time = time.time() #所有测试集-开始测试时间\r\nscore = model.evaluate(x=x_test, y=y_test, verbose=0)\r\ntest_time = time.time() - now_time #测试总时间\r\nprint(\"测试集上的损失率:\", score[0])\r\nprint(\"测试集上的准确率:\", score[1])\r\n\r\n\r\n#测试当目标领域分布与源分布差异较大时,模型的自适应情况\r\nt_x_train, t_y_train, t_x_valid, t_y_valid, t_x_test, t_y_test = preprocess.prepro(d_path=target_path,length=length,\r\n number=number,\r\n 
normal=normal,\r\n rate=[0.2,0.3,0.5],\r\n enc=True, enc_step=28) #最后俩个:是否数据增强,数据增强的顺延间隔\r\n\r\nx_test=noise.wgn(t_x_test,snr) #给目标领域信号加入高斯白噪音,其中snr表示信噪比\r\n#插入新维度,方便卷积网络输入\r\nt_x_train, t_x_valid, t_x_test = t_x_train[:,:,np.newaxis], t_x_valid[:,:,np.newaxis], t_x_test[:,:,np.newaxis]\r\n\r\n\r\n#目标领域数据集测试\r\nt_score = model.evaluate(x=t_x_test, y=t_y_test, verbose=0)\r\nprint(\"目标领域的损失率:\", t_score[0])\r\nprint(\"目标领域的准确率:\", t_score[1])\r\n\r\n#打印训练、测试耗时\r\nprint(\"训练总耗时/s:\", total_time) #打印训练总耗时\r\nprint(\"测试总耗时/s:\", test_time/x_test.shape[0]) #打印测试总耗时\r\n\r\n\r\n############################################## show #####################################\r\n# 显示训练集和验证集的acc和loss曲线\r\nacc = history.history['accuracy'] #训练集准确率\r\nval_acc = history.history['val_accuracy'] #测试集准确率\r\nloss = history.history['loss'] #训练集损失函数\r\nval_loss = history.history['val_loss'] #测试集损失函数\r\n\r\n#将行表转换为列数组,便于保存与处理\r\ns_acc=np.array(acc)\r\ns_acc = s_acc.reshape(s_acc.shape[0],1)\r\ns_val_acc=np.array(val_acc)\r\ns_val_acc = s_acc.reshape(s_val_acc.shape[0],1)\r\ns_loss=np.array(loss)\r\ns_loss= s_loss.reshape(s_loss.shape[0],1)\r\ns_val_loss=np.array(val_loss)\r\ns_val_loss= s_val_loss.reshape(s_val_loss.shape[0],1)\r\n\r\nplt.subplot(1, 2, 1)\r\nplt.plot(acc, label='Training Accuracy')\r\nplt.plot(val_acc, label='Validation Accuracy')\r\nplt.title('Training and Validation Accuracy')\r\nplt.legend()\r\n\r\nplt.subplot(1, 2, 2)\r\nplt.plot(loss, label='Training Loss')\r\nplt.plot(val_loss, label='Validation Loss')\r\nplt.title('Training and Validation Loss')\r\nplt.legend()\r\nplt.show()\r\n\r\nfile = open(r'save_txt\\weights.txt', 'w')\r\nfor v in model.trainable_variables:\r\n file.write(str(v.name) + '\\n')\r\n file.write(str(v.shape) + '\\n')\r\n file.write(str(v.numpy()) + 
'\\n')\r\nfile.close()\r\n\r\n#保存acc,loss数据\r\nnp.savetxt(r'save_txt\\acc.txt',s_acc)\r\nnp.savetxt(r'save_txt\\val_acc.txt',s_val_acc)\r\nnp.savetxt(r'save_txt\\loss.txt',s_loss)\r\nnp.savetxt(r'save_txt\\cal_loss.txt',s_val_loss)\r\n","sub_path":"1-RNN.py","file_name":"1-RNN.py","file_ext":"py","file_size_in_byte":7426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"99373511","text":"import os\r\nfrom functools import wraps\r\n\r\nfrom flask import redirect\r\nfrom flask import session\r\nfrom flask import url_for\r\n\r\n\r\n\r\ndef getImagePath(userID):\r\n currentPath = os.getcwd()\r\n imagePath = os.path.join(currentPath, \"static\", \"Image\", \"Users\", str(userID))\r\n return imagePath\r\n\r\n\r\ndef formatSex(gender):\r\n if gender == 1:\r\n sex = '女'\r\n elif gender == 0:\r\n sex = '男'\r\n else:\r\n sex = '不明'\r\n return sex\r\n\r\n\r\ndef listUser(users):\r\n ret = []\r\n for user in users:\r\n ret.append(user['userName'])\r\n return ret\r\n\r\n\r\ndef isLogin():\r\n if 'user' in session:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\ndef formatRights(rights):\r\n return \"管理员\" if rights == 1 else \"普通用户\"\r\n\r\n\r\ndef formatRoles(roles):\r\n if roles:\r\n result = \"\"\r\n for role in roles:\r\n result += role + \"、\"\r\n return result\r\n else:\r\n return \"无\"\r\n\r\n\r\ndef loginRequired(f):\r\n @wraps(f)\r\n def decorated_function(*args, **kwargs):\r\n if 'user' not in session:\r\n return redirect(url_for('users.login', returnUrl=\"/\"))\r\n return f(*args, **kwargs)\r\n return decorated_function\r\n","sub_path":"Helper/UserHelper.py","file_name":"UserHelper.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"257503847","text":"from transformers import XLMRobertaModel\nimport torch.nn as nn\n\nclass XLMRoBERTaNER(nn.Module):\n def __init__(self, num_classes):\n super(XLMRoBERTaNER, self).__init__()\n self.embedding_dim = 768\n self.num_classes = num_classes\n\n self.RoBERTa = XLMRobertaModel.from_pretrained(\"xlm-roberta-base\")\n self.linear = nn.Linear(self.embedding_dim, self.num_classes)\n\n def forward(self, tokens):\n embeddings = self.RoBERTa(tokens)[0]\n predictions = self.linear(embeddings)\n\n return predictions","sub_path":"multilingual_ner/XLM/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"63155555","text":"# -*- coding: utf-8 -*-\n\nfrom PIL import Image, ImageDraw, ImageFont\nimport aircv as ac\nimport cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n#在图片上写文字\ndef write():\n txt = \"你这个死宅说话\"\n txt2 = \" 还挺搞笑的\"\n font_img = Image.open(\"a2afcc6134a85edfdd475f6845540923df5475d5.jpg\")\n draw = ImageDraw.Draw(font_img)\n ttfront = ImageFont.truetype('./simhei.ttf',20)\n draw.text((40,60),txt, fill=(0,0,0), font=ttfront)\n draw.text((60,130),txt2, fill=(0,0,0), font=ttfront)\n font_img.save(\"./out.jpg\")\n \n#裁剪\ndef imgChuli():\n im = Image.open(\"a2afcc6134a85edfdd475f6845540923df5475d5.jpg\")\n box = (10,10,100,100)\n region = im.crop(box)\n region.save(\"cutting.jpg\")\n\n#图片的拼合\ndef paste():\n img = Image.open(\"a2afcc6134a85edfdd475f6845540923df5475d5.jpg\")\n jgz = Image.open(\"cutting.jpg\")\n img.paste(jgz,(196,139))\n img.save(\"./out.jpg\") \n\n#查找图片在原始图片上的坐标点\n#https://www.cnblogs.com/meitian/p/7417582.html\ndef matchImg(imgsrc,imgobj,confidence=0.5):#imgsrc=原始图像,imgobj=待查找的图片\n imsrc = ac.imread(imgsrc)\n imobj = ac.imread(imgobj)\n \n match_result = ac.find_template(imsrc,imobj,confidence) # {'confidence': 0.5435812473297119, 'rectangle': ((394, 384), (394, 416), (450, 384), (450, 416)), 'result': (422.0, 400.0)}\n if match_result is not None:\n match_result['shape']=(imsrc.shape[1],imsrc.shape[0])#0为高,1为宽\n\n return match_result\npaste()\nim1 = \"1f77a900213fb80e4ac8c09c3ad12f2eb83894fb.jpg\"\nim2 = \"cutting.jpg\"\nresult = matchImg(im1,im2)\nprint(result)\n\n#print( ac.find_all_sift(im1, im2, 2))\n \n#https://blog.csdn.net/firemicrocosm/article/details/48374979\n#Python+OpenCV学习(7)---模板匹配 \ndef test(): \n img = cv2.imread(\"a2afcc6134a85edfdd475f6845540923df5475d5.jpg\",0)\n img2 = img.copy()\n template = cv2.imread(\"cutting.png\",0)\n w,h = template.shape[::-1]\n \n # 6 中匹配效果对比算法\n methods = ['cv2.TM_CCOEFF', 'cv2.TM_CCOEFF_NORMED', 'cv2.TM_CCORR',\n 'cv2.TM_CCORR_NORMED', 'cv2.TM_SQDIFF', 
'cv2.TM_SQDIFF_NORMED']\n \n for meth in methods:\n img = img2.copy()\n \n method = eval(meth)\n \n res = cv2.matchTemplate(img,template,method)\n min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)\n \n if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:\n top_left = min_loc\n else:\n top_left = max_loc\n bottom_right = (top_left[0] + w, top_left[1] + h)\n \n cv2.rectangle(img,top_left, bottom_right, 255, 2)\n \n print (meth)\n plt.subplot(221), plt.imshow(img2,cmap= \"gray\")\n plt.title('Original Image'), plt.xticks([]),plt.yticks([])\n plt.subplot(222), plt.imshow(template,cmap= \"gray\")\n plt.title('template Image'),plt.xticks([]),plt.yticks([])\n plt.subplot(223), plt.imshow(res,cmap= \"gray\")\n plt.title('Matching Result'), plt.xticks([]),plt.yticks([])\n plt.subplot(224), plt.imshow(img,cmap= \"gray\")\n plt.title('Detected Point'),plt.xticks([]),plt.yticks([])\n plt.show()\n \n","sub_path":"image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":3176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"352106553","text":"name = []\ndata = \"\"\n\nwhile data != 'q':\n print('Please enter your name : ')\n data = input()\n name.append(data)\nname.remove(data)\n\nprint('Please enter any one letter')\nlettersearch=input()\n\nwhile len(lettersearch) != 1:\n print('Please enter correct search')\n print('Please enter any one letter Again')\n lettersearch = input()\n\ncount = 0\nfor v in range(len(name)):\n for i in range(len(name[v])):\n if lettersearch.lower() in name[v][i] or lettersearch.upper() in name[v][i]:\n count += 1\nprint(lettersearch,'appeared for', count, 'times')\n","sub_path":"Count_Letter_in_Listnames.py","file_name":"Count_Letter_in_Listnames.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"454686469","text":"'''\nEscreva os textos a seguir capitalizados, exceto as preposições 'da', 'de', 'di', 'do', 'du', 'para'.\n\n'joaquim josé da silva xavier'\n'pedro de souza'\n'fui para são paulo'\n'''\n\nimport unittest\n\ntext1 = 'joaquim josé da silva xavier'\ntext2 = 'pedro de souza'\ntext3 = 'fui para são paulo'\n\n\ndef upper(text):\n preprositions = ['da', 'de', 'di', 'do', 'du', 'para']\n new_text = text.split(' ')\n final_text = ''\n for word in new_text:\n if word not in preprositions:\n word = word.capitalize()\n final_text = final_text + word + ' '\n else:\n final_text = final_text + word + ' '\n return final_text.strip()\nprint(upper(text1))\nprint(upper(text2))\nprint(upper(text3))\n\n\nclass CapitalizeTest(unittest.TestCase):\n\n def test_joaquim(self):\n self.assertEqual(upper(text1), 'Joaquim José da Silva Xavier')\n\n def test_pedro(self):\n self.assertEqual(upper(text2), 'Pedro de Souza')\n\n def test_sao_paulo(self):\n self.assertEqual(upper(text3), 'Fui para São Paulo')\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"exercises/ex02.py","file_name":"ex02.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"170237485","text":"from django.conf import settings\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.template.loader import render_to_string\n\n\ndef send_email_confirm(to: str, url_email_confirm: str):\n \"\"\"\n Отправка ссылки для подтверждения емейл адреса\n\n :param to: Адрес электронной почты получателя\n :param url_email_confirm: url, который будет отправлен на почту, для ее подтверждения\n \"\"\"\n\n context_data = {\n 'url_email_confirm': url_email_confirm,\n 'confirm_text': 'Подтвердить электронную почту'\n }\n\n body_message = render_to_string('email/send_confirm_email.html', context_data)\n\n subject = 'Подтверждение email'\n from_email = settings.EMAIL_HOST_USER\n recipient_list = [to]\n\n msg = EmailMultiAlternatives(subject, body_message, from_email, recipient_list)\n msg.attach_alternative(body_message, 'text/html')\n return msg.send()\n","sub_path":"src/blog_backend/notifications/email/send.py","file_name":"send.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"289936860","text":"import urllib\nimport psycopg2\n\nconn = psycopg2.connect(\n database='d3v993qatlo7q6',\n user='wchwxpeyviwuks',\n password='NmYj0QqRCGKYG-V4PsqwiGFmLq',\n host='ec2-54-235-65-139.compute-1.amazonaws.com',\n port=5432\n)\n\nticker_symbols = raw_input(\"Enter Ticker Symbols separated by commas: \")\n\nurl = \"http://finance.yahoo.com/d/quotes.csv?s=%s&f=sd1t1aba5b6\" % ticker_symbols.upper()\nf = urllib.urlopen(url)\nlines = f.readlines()\n\ncur = conn.cursor()\n\nfor line in lines:\n q = line.split(',')\n data = (q[0].replace('\"', ''), q[1] + ' ' + q[2], q[3], q[4], q[5], q[6])\n sql = \"\"\"INSERT INTO stock_quotes (\n ticker, ts, bid, ask, bid_size, ask_size)\n VALUES (\n %s, timestamp %s, %s, %s, %s, %s)\"\"\"\n cur.execute(sql, data)\n conn.commit()\n\ncur.close()\nconn.close()\n","sub_path":"get_quotes.py","file_name":"get_quotes.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"42859879","text":"import os\nimport playsound\nimport speech_recognition as sr\nfrom gtts import gTTS\n\n\ndef speak(text):\n tts = gTTS(text=text, lang=\"en\")\n filename = \"voice.mp3\"\n tts.save(filename)\n playsound.playsound(filename)\n\n\nended = False\n\nwhile not ended:\n print(\"Type: \")\n text = input()\n text = text.lower()\n if text == \"end\":\n ended = True\n elif text != \"\":\n speak(text)\n os.remove('voice.mp3')\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"459645222","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.15-x86_64/egg/easyweb3/easyweb3.py\n# Compiled at: 2020-01-18 08:10:27\n# Size of source mod 2**32: 10602 bytes\nfrom web3 import Web3, HTTPProvider\nfrom web3.middleware import geth_poa_middleware\nfrom eth_account.messages import defunct_hash_message\nimport json, time, logging\nfrom hexbytes import HexBytes\nfrom easysolc import Solc\nfrom math import ceil\n\nclass EasyWeb3:\n DEFAULT_GAS = int(4000000.0)\n WAIT_LOOP_SECONDS = 0.1\n WAIT_LOG_LOOP_SECONDS = 10\n DEFAULT_CONNECTION_TIMEOUT = 10\n\n @classmethod\n def init_class(cls):\n cls.web3 = Web3()\n\n @staticmethod\n def read(contract, method, parameters=None):\n if parameters is None:\n parameters = []\n return (getattr(contract.functions, method))(*parameters).call()\n\n @staticmethod\n def get_rsv_from_signature(signature):\n if signature[0] == '0':\n if signature[1] == 'x':\n signature = signature[2:]\n r = signature[:64]\n s = signature[64:128]\n v = signature[128:]\n return (r, s, int(v, 16))\n\n @classmethod\n def recover_address(cls, text, signature):\n if not hasattr(cls, 'eth'):\n cls.eth = Web3().eth\n prefixed_hash = defunct_hash_message(text=text)\n return cls.eth.account.recoverHash(prefixed_hash, signature=signature)\n\n @staticmethod\n def keccak256(item):\n return Web3.sha3(text=(str(item))).hex()[2:]\n\n @staticmethod\n def hash(item):\n return EasyWeb3.keccak256(item)\n\n def __init__(self, account=None, password='', http_provider=None, http_providers=None, http_providers_file=None, proof_of_authority=False, timeout=None):\n self.proof_of_authority = proof_of_authority\n self.http_providers = None\n self.web3 = None\n self.account = None\n if timeout is None:\n self.timeout = EasyWeb3.DEFAULT_CONNECTION_TIMEOUT\n else:\n self.timeout = timeout\n if http_providers or 
http_providers_file:\n self.http_provider_index = -1\n if http_providers:\n if type(http_providers) == str:\n http_providers = http_providers.replace(' ', '').split(',')\n else:\n if type(http_providers) != list:\n raise ValueError\n self.http_providers = http_providers\n else:\n if http_providers_file:\n self.set_http_providers_from_file(http_providers_file)\n else:\n raise ValueError\n self.next_http_provider()\n else:\n if http_provider:\n self.set_http_provider(http_provider)\n else:\n self.web3 = Web3()\n self.eth = self.web3.eth\n if account:\n if isinstance(account, dict):\n self.set_account_from_dict(account, password)\n else:\n self.set_account_from_file(account, password)\n logging.info(f\"loaded account: {self.account.address}\")\n\n def set_http_provider(self, http_provider):\n self.web3 = Web3(HTTPProvider(http_provider, request_kwargs={'timeout': self.timeout}))\n if self.proof_of_authority:\n self.web3.middleware_stack.inject(geth_poa_middleware, layer=0)\n logging.info(f\"trying to connect to {http_provider}\")\n if not self.web3.isConnected():\n raise ConnectionError\n\n def set_account_from_dict(self, keystore, password):\n private_key = self.eth.account.decrypt(keystore, password)\n self.account = self.eth.account.privateKeyToAccount(private_key)\n\n def set_account_from_file(self, filename, password):\n try:\n with open(filename, 'r') as (keystore_file):\n self.set_account_from_dict(next(keystore_file), password)\n except FileNotFoundError:\n logging.exception('')\n\n def set_http_providers_from_file(self, http_providers_file):\n if not http_providers_file:\n raise ValueError\n with open(http_providers_file, 'r') as (json_file):\n self.http_providers = json.load(json_file)['nodes']\n\n def next_http_provider(self):\n self.http_provider_index = (self.http_provider_index + 1) % len(self.http_providers)\n http_provider = self.http_providers[self.http_provider_index]\n try:\n self.set_http_provider(http_provider)\n except Exception:\n 
self.next_http_provider()\n\n def get_tx(self, to, value=0, data=None, nonce=None, gas=None, gas_price=None, gas_price_multiplier=1.0, pending=True):\n if nonce is None:\n nonce = self._get_nonce(pending)\n tx_dict = {'from':self.account.address, 'to':to, 'nonce':nonce, 'value':value}\n if data is not None:\n tx_dict.update({'data': data})\n self._update_tx_dict_gas_params(tx_dict, gas, gas_price, gas_price_multiplier)\n return tx_dict\n\n def get_contract_tx(self, contract, method='constructor', parameters=None, nonce=None, gas=None, gas_price=None, gas_price_multiplier=1.0, pending=True):\n if parameters is None:\n parameters = []\n elif method == 'constructor':\n invocation = (contract.constructor)(*parameters)\n else:\n invocation = (getattr(contract.functions, method))(*parameters)\n if nonce is None:\n nonce = self._get_nonce(pending)\n tx_dict = invocation.buildTransaction({'from':self.account.address, 'nonce':nonce, 'gas':0, 'gasPrice':0})\n self._update_tx_dict_gas_params(tx_dict, gas, gas_price, gas_price_multiplier)\n return tx_dict\n\n def sign_tx(self, tx):\n return self.account.signTransaction(tx)\n\n def transact(self, tx=None, signed_tx=None, asynchronous=False):\n if tx is None:\n if signed_tx is None or tx is not None:\n if signed_tx is not None:\n raise AttributeError\n else:\n if tx is not None:\n signed_tx = self.sign_tx(tx)\n if type(signed_tx) is not HexBytes:\n raw_tx = signed_tx.rawTransaction\n else:\n raw_tx = signed_tx\n tx_hash = self.eth.sendRawTransaction(raw_tx)\n if asynchronous:\n return {'transactionHash': tx_hash}\n receipt = None\n attempts = 0\n while not receipt:\n elapsed_seconds = attempts * EasyWeb3.WAIT_LOOP_SECONDS\n if elapsed_seconds % EasyWeb3.WAIT_LOG_LOOP_SECONDS == 0:\n logging.info(f\"waiting to be included in a block ({int(elapsed_seconds)} elapsed seconds)\")\n receipt = self.eth.getTransactionReceipt(tx_hash)\n attempts += 1\n time.sleep(EasyWeb3.WAIT_LOOP_SECONDS)\n\n logging.info(f\"transaction {tx_hash.hex()} 
included in block #{receipt['blockNumber']}\")\n return receipt\n\n def write(self, *args, **kwargs):\n return (self._build_tx_and_transact)(*args, **kwargs)\n\n def deploy(self, *args, **kwargs):\n kwargs['method'] = 'constructor'\n return (self._build_tx_and_transact)(*args, **kwargs)\n\n def get_contract(self, contract_dict=None, source=None, contract_name=None, address=None, abi_file=None, bytecode_file=None):\n contract = None\n if source:\n if contract_name:\n if not hasattr(self, 'solc'):\n self.solc = Solc()\n contract_dict = self.solc.compile(source=source)[contract_name]\n if contract_dict:\n contract = self.eth.contract(abi=(contract_dict['abi']), bytecode=(contract_dict['bytecode']), address=address)\n else:\n if abi_file:\n with open(abi_file, 'r') as (abi_file):\n abi = json.loads(abi_file.read())\n if address:\n contract = self.eth.contract(abi=abi, address=address)\n else:\n if bytecode_file:\n bytecode = None\n if bytecode_file:\n with open(bytecode_file, 'r') as (bytecode_file):\n bytecode = bytecode_file.read()\n contract = self.eth.contract(abi=abi, bytecode=bytecode)\n else:\n raise ValueError('The bytecode or the address must be provided')\n return contract\n\n def sign(self, text):\n prefixed_hash = defunct_hash_message(text=text)\n signature = self.account.signHash(prefixed_hash)['signature'].hex()[2:]\n return signature\n\n def _get_nonce(self, pending=True):\n if pending:\n return self.eth.getTransactionCount(self.account.address, 'pending')\n return self.eth.getTransactionCount(self.account.address)\n\n def _get_gas_price(self, gas_price, multiplier):\n if gas_price is None:\n gas_price = self.eth.gasPrice\n return ceil(multiplier * gas_price)\n\n def _get_gas_limit(self, tx_dict, gas=None):\n if gas is None:\n try:\n gas = self.eth.estimateGas(tx_dict)\n except Exception:\n gas = int(EasyWeb3.DEFAULT_GAS)\n logging.warn('failed to estimate gas, using default.')\n\n else:\n gas = int(gas)\n if gas >= self.eth.getBlock('latest').gasLimit 
or gas == 0:\n raise ValueError(f\"gas limit not valid: {gas}\")\n return gas\n\n def _update_tx_dict_gas_params(self, tx_dict, gas, gas_price, gas_price_multiplier):\n gas = self._get_gas_limit(tx_dict, gas=gas)\n logging.info(f\"gas limit: {gas:,}\")\n tx_dict.update({'gas': gas})\n gas_price = self._get_gas_price(gas_price, gas_price_multiplier)\n tx_dict.update({'gasPrice': gas_price})\n logging.info(f\"network gas price: {self.web3.fromWei(self.eth.gasPrice, 'gwei')} Gwei; using {self.web3.fromWei(gas_price, 'gwei')} Gwei (x{gas_price_multiplier})\")\n\n def _build_tx_and_transact(self, *args, **kwargs):\n tx = (self.get_contract_tx)(*args, **kwargs)\n asynchronous = False\n if 'asynchronous' in kwargs:\n asynchronous = kwargs['asynchronous']\n return self.transact(tx=tx, asynchronous=asynchronous)\n\n\nEasyWeb3.init_class()","sub_path":"pycfiles/easyweb3-0.3.0-py3.7/easyweb3.cpython-37.py","file_name":"easyweb3.cpython-37.py","file_ext":"py","file_size_in_byte":10499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"803767","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Apr 28 17:16:22 2018\r\n\r\n@author: lsjws2008\r\n\"\"\"\r\n\r\nfrom tools.NMS import com_loc\r\nfrom tools.PAFs import coms\r\nimport numpy as np\r\n\r\ndef nms(cmap, score_threshold=0.65, score_rate_threshold=0.65):\r\n\r\n points = []\r\n cmap[cmap< score_threshold] = 0\r\n for i in range(cmap.shape[2]):\r\n cm = cmap[:, :, i]\r\n cm[cm < np.amax(cm)*score_rate_threshold] = 0\r\n point = com_loc(cm)\r\n points.append(point)\r\n\r\n return points","sub_path":"realtime_multi_person_coco_without_save/tools/paf_nms.py","file_name":"paf_nms.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"473183729","text":"import numpy as np\r\nimport random\r\nimport os\r\n\r\nclass Adeline(object):\r\n def __init__(self,alfa,treshold,size):\r\n # self.arrData = arrData\r\n self.alfa = alfa\r\n self.treshold = treshold\r\n self.bobot = [random.random() for _ in range(0,size)]\r\n self.bias = random.random()\r\n # self.target = target\r\n\r\n def bacaFile(self, directory):\r\n # directoryTraining = \"E:/JST/train\"\r\n # directoryTraining = \"E:/JST/datacontoh\"\r\n iterasi = 0\r\n # arrbobot = []\r\n isiFile = []\r\n self.huruf = []\r\n for file in os.listdir(directory): #Perulangan untuk mencari setiap file di direktori\r\n iterasi += 1\r\n if file.lower().endswith('.txt') and not file.startswith('._'): #Jika file berupa .txt maka dijalankan\r\n Openfile = open(directory + '/' + file, 'r', encoding=\"ISO-8859-1\") #membuka file\r\n isiFile.append(Openfile.read()) #membaca semua isi file\r\n Openfile.close() #menutup file agak terhapus dari memori\r\n # print (IsiFile)\r\n self.huruf.append(file[0:1])\r\n return isiFile\r\n \r\n def membuatArray(self, dataTxt):\r\n arr = []\r\n for i in range(len(dataTxt)) :\r\n dataTxt[i]=dataTxt[i].replace(\"#\",\"1,\").replace(\".\",\"-1,\").replace(\"\\n\",'')\r\n arr.append(np.fromstring(dataTxt[i][:],dtype=int,sep=','))\r\n return arr\r\n \r\n def getTarget(self):\r\n target = []\r\n for i in range(len(self.huruf)):\r\n # print(str(len(self.huruf)))\r\n # print(\"coba \" + str(i))\r\n if (self.huruf[i] == \"x\"):\r\n # print(\"coba2 \" + str(self.huruf[i]))\r\n target.append(1)\r\n # target = -1\r\n else :\r\n target.append(-1) \r\n # target = 1\r\n return target\r\n\r\n def hitungOutput(self,arrData):\r\n # print (\"arr Data\" + str(arrData))\r\n # print (\"bobot\" + str(self.bobot))\r\n # print (\"bias\" + str(self.bias))\r\n self.y = np.matmul(arrData, self.bobot)+self.bias\r\n # print (np.matmul(arrData, self.bobot))\r\n return self.y\r\n \r\n def updateBobot(self,arrData,target):\r\n # print(\"bobotLama = 
\"+ str(self.bobot))\r\n bobotBaru = self.bobot + self.alfa * (target-self.y) * arrData\r\n biasBaru = self.bias + self.alfa * (target-self.y)\r\n return (bobotBaru,biasBaru)\r\n\r\n def fungsiyTest(self,v):\r\n if(v>=0):\r\n return 1\r\n else:\r\n return -1\r\n\r\n def testing(self,arrDataTesting):\r\n print(\"TESTING\")\r\n print(\"bobot = \"+str(self.bobot))\r\n print(\"bias = \"+str(self.bias))\r\n target = self.getTarget()\r\n true = 0\r\n false = 0\r\n print (\"-------------------------------\")\r\n for i in range (len(arrDataTesting)) :\r\n print(\"Hasil Numeric Data Latih Huruf \" + self.huruf[i] + \" : \")\r\n print(\"data : \"+ str(arrDataTesting[i]))\r\n print(\"target : \" + str(target[i]))\r\n v = self.hitungOutput(arrDataTesting[i])\r\n print(\"v = \" + str(v))\r\n yTest = self.fungsiyTest(v)\r\n print(\"yTest = \" + str(yTest))\r\n print(\"Apakah y=target? \")\r\n if(target[i]==yTest):\r\n print(\"Sesuai\")\r\n true +=1\r\n\r\n else:\r\n print(\"Tidak Sesuai\")\r\n false +=1\r\n # persamaan = self.menyamakan(yTest,arrDataTesting)\r\n # print(str(persamaan))\r\n print (\"-------------------------------\")\r\n print (\"-------------------------------\")\r\n return(true,false)\r\n\r\n def akurasi(self,true,false):\r\n akurasi = (true/(true+false))*100\r\n return \"hasil akurasi : \"+str(akurasi)+\" %\"\r\n\r\n\r\n def training(self, arrData):\r\n maxBobot = 1\r\n epoch = 0\r\n # while maxBobot > self.treshold and epoch <= 1 :\r\n while maxBobot > self.treshold :\r\n epoch += 1\r\n # directoryTraining = \"E:/JST/train\"\r\n # directoryTraining = \"E:/JST/datacontoh\"\r\n iterasi = 0\r\n arrbobot = []\r\n arrbobot.append(self.bobot)\r\n target = self.getTarget()\r\n # print(str(target))\r\n print(\"alfa = \" + str(self.alfa))\r\n print(\"treshold = \" + str(self.treshold))\r\n for i in range (len(arrData)) :\r\n # print (str(len(dataTxt)))\r\n # print(str(len(target)))\r\n print(\"Hasil Numeric Data Latih Huruf \" + self.huruf[i] + \" : \")\r\n 
print(\"data : \"+ str(arrData[i]))\r\n print(\"target : \" + str(target[i]))\r\n # if (iterasi == 1 & epoch = 1):\r\n # alfa, treshold, bobot, bias = inisialisasi(x)\r\n # arrbobot.append(bobot)\r\n # if (epoch > 1):\r\n # bobot\r\n # print(\"target = \" + str(target))\r\n # print(\"alfa = \" + str(alfa))\r\n print(\"bobot = \" + str(self.bobot)) \r\n print(\"bias = \" + str(self.bias))\r\n y=self.hitungOutput(arrData[i])\r\n print(\"y = \" + str(y))\r\n self.bobot,self.bias = self.updateBobot(arrData[i],target[i])\r\n arrbobot.append(self.bobot)\r\n print(\"bobot = \" + str(self.bobot))\r\n print(\"bias = \" + str(self.bias))\r\n print (\"-------------------------------\")\r\n print (\"-------------------------------\")\r\n # print (str(arrbobot))\r\n\r\n deltabobot = []\r\n \r\n for i in range(len(arrbobot)-1):\r\n # print(str(arrbobot[i]))\r\n arr = np.subtract(arrbobot[i+1],arrbobot[i])\r\n deltabobot.append(abs(arr[0]))\r\n print(str(deltabobot))\r\n maxBobot = np.amax(deltabobot)\r\n print('Max bobot epoch ke '+ str(epoch) + ' : ', maxBobot)\r\n # print(str(maxBobot > self.treshold))\r\n # print(str(epoch <= 10))\r\n\r\nclass Main():\r\n alfa = 0.1\r\n treshold = 0.2\r\n directoryTraining = \"E:/JST/train\"\r\n # file = open(\"E:/JST/o.txt\", 'r')\r\n# f = file.read()\r\n# file.close()\r\n# Adeline.bacaFile(directoryTraining)\r\n\r\n# x = membuatArray(f)\r\n# print(\"Hasil Numeric Data Latih Huruf O: \")\r\n# print(x)\r\n adln = Adeline(alfa,treshold,25)\r\n dataTxt = adln.bacaFile(directoryTraining)\r\n arrData = adln.membuatArray(dataTxt)\r\n adln.training(arrData)\r\n\r\n\r\n directoryTesting = \"E:/JST/test\"\r\n dataTesting = adln.bacaFile(directoryTesting)\r\n arrDataTesting =adln.membuatArray(dataTesting)\r\n true,false = adln.testing(arrDataTesting)\r\n print(adln.akurasi(true,false))\r\n\r\n # for i in range(len(dataTxt)):\r\n \r\n # 
adeline1.__init__(alfa,treshold)","sub_path":"coba.py","file_name":"coba.py","file_ext":"py","file_size_in_byte":6828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}