""" byceps.blueprints.site.ticketing.views ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :Copyright: 2006-2021 <NAME> :License: Revised BSD (see `LICENSE` file for details) """ from flask import abort, g, request from flask_babel import gettext from ....services.orga_team import service as orga_team_service from ....services.party import service as party_service from ....services.ticketing import ( barcode_service, category_service as ticket_category_service, ticket_service, ticket_seat_management_service, ticket_user_management_service, ) from ....util.framework.blueprint import create_blueprint from ....util.framework.flash import flash_error, flash_success from ....util.iterables import find from ....util.framework.templating import templated from ....util.views import login_required, redirect_to, respond_no_content from .forms import SpecifyUserForm from . import notification_service blueprint = create_blueprint('ticketing', __name__) @blueprint.get('/mine') @login_required @templated def index_mine(): """List tickets related to the current user.""" if g.party_id is None: # No party is configured for the current site. abort(404) party = party_service.get_party(g.party_id) user = g.user tickets = ticket_service.find_tickets_related_to_user_for_party( user.id, party.id ) tickets = [ticket for ticket in tickets if not ticket.revoked] ticket_user_ids = {ticket.used_by_id for ticket in tickets} orga_ids = orga_team_service.select_orgas_for_party( ticket_user_ids, g.party_id ) current_user_uses_any_ticket = find( tickets, lambda t: t.used_by_id == user.id ) return { 'party_title': party.title, 'tickets': tickets, 'orga_ids': orga_ids, 'current_user_uses_any_ticket': current_user_uses_any_ticket, 'is_user_allowed_to_print_ticket': _is_user_allowed_to_print_ticket, 'ticket_management_enabled': _is_ticket_management_enabled(), } @blueprint.get('/tickets/<uuid:ticket_id>/printable.html') @login_required @templated def view_printable_html(ticket_id): """Show a form to select a user to appoint for the ticket.""" ticket = _get_ticket_or_404(ticket_id) if not _is_user_allowed_to_print_ticket(ticket, g.user.id): # Hide ticket ID validity rather than openly denying access. abort(404) ticket_category = ticket_category_service.get_category(ticket.category_id) party = party_service.get_party(ticket_category.party_id) barcode_svg = barcode_service.render_svg(ticket.code) # Encode SVG to be used inline as part of a data URI. # Replacements are not complete, but sufficient for this case. # # See https://codepen.io/tigt/post/optimizing-svgs-in-data-uris # for details. 
barcode_svg_inline = barcode_svg \ .replace('\n', '%0A') \ .replace('#', '%23') \ .replace('<', '%3C') \ .replace('>', '%3E') \ .replace('"', '\'') return { 'party_title': party.title, 'ticket_code': ticket.code, 'ticket_category_title': ticket_category.title, 'ticket_owner': ticket.owned_by, 'ticket_user': ticket.used_by, 'occupied_seat': ticket.occupied_seat, 'barcode_svg_inline': barcode_svg_inline, } # -------------------------------------------------------------------- # # user @blueprint.get('/tickets/<uuid:ticket_id>/appoint_user') @login_required @templated def appoint_user_form(ticket_id, erroneous_form=None): """Show a form to select a user to appoint for the ticket.""" _abort_if_ticket_management_disabled() ticket = _get_ticket_or_404(ticket_id) _abort_if_ticket_user_checked_in(ticket) manager = g.user if not ticket.is_user_managed_by(manager.id): abort(403) form = erroneous_form if erroneous_form else SpecifyUserForm() return { 'ticket': ticket, 'form': form, } @blueprint.post('/tickets/<uuid:ticket_id>/user') def appoint_user(ticket_id): """Appoint a user for the ticket.""" _abort_if_ticket_management_disabled() ticket = _get_ticket_or_404(ticket_id) _abort_if_ticket_user_checked_in(ticket) form = SpecifyUserForm(request.form) if not form.validate(): return appoint_user_form(ticket_id, form) manager = g.user if not ticket.is_user_managed_by(manager.id): abort(403) previous_user = ticket.used_by if ticket.used_by_id != g.user.id else None new_user = form.user.data ticket_user_management_service.appoint_user( ticket.id, new_user.id, manager.id ) flash_success( gettext( '%(screen_name)s has been assigned as user of ticket %(ticket_code)s.', screen_name=new_user.screen_name, ticket_code=ticket.code, ) ) if previous_user: notification_service.notify_withdrawn_user( ticket, previous_user, manager ) notification_service.notify_appointed_user(ticket, new_user, manager) return redirect_to('.index_mine') @blueprint.delete('/tickets/<uuid:ticket_id>/user') @respond_no_content def withdraw_user(ticket_id): """Withdraw the ticket's user and appoint its owner instead.""" _abort_if_ticket_management_disabled() ticket = _get_ticket_or_404(ticket_id) _abort_if_ticket_user_checked_in(ticket) manager = g.user if not ticket.is_user_managed_by(manager.id): abort(403) previous_user = ticket.used_by if ticket.used_by_id != g.user.id else None ticket_user_management_service.appoint_user( ticket.id, manager.id, manager.id ) flash_success( gettext( 'You have been assigned as user of ticket %(ticket_code)s.', ticket_code=ticket.code, ) ) if previous_user: notification_service.notify_withdrawn_user( ticket, previous_user, manager ) # -------------------------------------------------------------------- # # user manager @blueprint.get('/tickets/<uuid:ticket_id>/appoint_user_manager') @login_required @templated def appoint_user_manager_form(ticket_id, erroneous_form=None): """Show a form to select a user to appoint as user manager for the ticket.""" _abort_if_ticket_management_disabled() ticket = _get_ticket_or_404(ticket_id) _abort_if_ticket_user_checked_in(ticket) manager = g.user if not ticket.is_owned_by(manager.id): abort(403) form = erroneous_form if erroneous_form else SpecifyUserForm() return { 'ticket': ticket, 'form': form, } @blueprint.post('/tickets/<uuid:ticket_id>/user_manager') def appoint_user_manager(ticket_id): """Appoint a user manager for the ticket.""" _abort_if_ticket_management_disabled() ticket = _get_ticket_or_404(ticket_id) _abort_if_ticket_user_checked_in(ticket) form = 
SpecifyUserForm(request.form) if not form.validate(): return appoint_user_manager_form(ticket_id, form) manager = g.user if not ticket.is_owned_by(manager.id): abort(403) user = form.user.data ticket_user_management_service.appoint_user_manager( ticket.id, user.id, manager.id ) flash_success( gettext( '%(screen_name)s has been assigned as user manager ' 'of ticket %(ticket_code)s.', screen_name=user.screen_name, ticket_code=ticket.code, ) ) notification_service.notify_appointed_user_manager(ticket, user, manager) return redirect_to('.index_mine') @blueprint.delete('/tickets/<uuid:ticket_id>/user_manager') @respond_no_content def withdraw_user_manager(ticket_id): """Withdraw the ticket's user manager.""" _abort_if_ticket_management_disabled() ticket = _get_ticket_or_404(ticket_id) _abort_if_ticket_user_checked_in(ticket) manager = g.user if not ticket.is_owned_by(manager.id): abort(403) user = ticket.user_managed_by ticket_user_management_service.withdraw_user_manager(ticket.id, manager.id) flash_success( gettext( 'User manager of ticket %(ticket_code)s has been removed.', ticket_code=ticket.code, ) ) notification_service.notify_withdrawn_user_manager(ticket, user, manager) # -------------------------------------------------------------------- # # seat manager @blueprint.get('/tickets/<uuid:ticket_id>/appoint_seat_manager') @login_required @templated def appoint_seat_manager_form(ticket_id, erroneous_form=None): """Show a form to select a user to appoint as seat manager for the ticket.""" _abort_if_ticket_management_disabled() ticket = _get_ticket_or_404(ticket_id) manager = g.user if not ticket.is_owned_by(manager.id): abort(403) form = erroneous_form if erroneous_form else SpecifyUserForm() return { 'ticket': ticket, 'form': form, } @blueprint.post('/tickets/<uuid:ticket_id>/seat_manager') def appoint_seat_manager(ticket_id): """Appoint a seat manager for the ticket.""" _abort_if_ticket_management_disabled() form = SpecifyUserForm(request.form) if not form.validate(): return appoint_seat_manager_form(ticket_id, form) ticket = _get_ticket_or_404(ticket_id) manager = g.user if not ticket.is_owned_by(manager.id): abort(403) user = form.user.data ticket_seat_management_service.appoint_seat_manager( ticket.id, user.id, manager.id ) flash_success( gettext( '%(screen_name)s has been assigned as seat manager ' 'of ticket %(ticket_code)s.', screen_name=user.screen_name, ticket_code=ticket.code, ) ) notification_service.notify_appointed_seat_manager(ticket, user, manager) return redirect_to('.index_mine') @blueprint.delete('/tickets/<uuid:ticket_id>/seat_manager') @respond_no_content def withdraw_seat_manager(ticket_id): """Withdraw the ticket's seat manager.""" _abort_if_ticket_management_disabled() ticket = _get_ticket_or_404(ticket_id) manager = g.user if not ticket.is_owned_by(manager.id): abort(403) user = ticket.seat_managed_by ticket_seat_management_service.withdraw_seat_manager(ticket.id, manager.id) flash_success( gettext( 'Seat manager of ticket %(ticket_code)s has been removed.', ticket_code=ticket.code, ) ) notification_service.notify_withdrawn_seat_manager(ticket, user, manager) # -------------------------------------------------------------------- # def _abort_if_ticket_management_disabled(): if not _is_ticket_management_enabled(): flash_error(gettext('Tickets cannot be updated at this time.')) abort(403) def _is_ticket_management_enabled(): if not g.user.authenticated: return False if g.party_id is None: return False party = party_service.get_party(g.party_id) return 
party.ticket_management_enabled def _get_ticket_or_404(ticket_id): ticket = ticket_service.find_ticket(ticket_id) if (ticket is None) or ticket.revoked: abort(404) return ticket def _abort_if_ticket_user_checked_in(ticket): if ticket.user_checked_in: flash_error( gettext('Somebody has already been checked in with this ticket.') ) abort(403) def _is_user_allowed_to_print_ticket(ticket, user_id): """Return `True` only if the user is allowed to print the ticket.""" return ( ticket.is_owned_by(user_id) or ticket.is_managed_by(user_id) or ticket.used_by_id == user_id )
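A note on the barcode encoding above: the replace chain produces a string that can be dropped into an SVG data URI. Here is a self-contained sketch of that technique; the sample SVG markup and the resulting img tag are illustrative stand-ins, not taken from the byceps templates.

# Standalone sketch of the SVG-in-data-URI technique used in
# view_printable_html(); the sample markup below is illustrative only.
barcode_svg = '<svg xmlns="http://www.w3.org/2000/svg"><rect width="2" height="40"/></svg>'
barcode_svg_inline = (
    barcode_svg
    .replace('\n', '%0A')
    .replace('#', '%23')
    .replace('<', '%3C')
    .replace('>', '%3E')
    .replace('"', '\'')
)
img_tag = '<img src="data:image/svg+xml,{}"/>'.format(barcode_svg_inline)
print(img_tag)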
Disgruntled customers, empty store shelves, long supermarket lines. These are the images that mainstream U.S. media typically feature in their coverage of Venezuela’s ongoing food crisis. U.S. media outlets publish stories blaming Venezuela’s food crisis on the socialist government almost daily. Today isn’t any different. These images are usually accompanied by sarcastic headlines like Forbes’ “Venezuela Discovers the Perfect Weight Loss Diet” and the Cato Institute’s “Hunger Is in Retreat, But Not in Socialist Venezuela.”

A new study released by researchers from three Venezuelan universities reported that nearly 75 percent of the population lost an average of 19 pounds in 2016 for lack of food. The report, titled “2016 Living Conditions Survey,” added that about 32.5 percent of Venezuelans eat only once or twice a day, compared to 11.3 percent last year. Moreover, 93.3 percent told the researchers that their income was not enough to cover their food needs.

The facts are clear: Venezuela does have a food crisis. Mainstream U.S. media, however, blame the socialist government that has radically improved the country’s standard of living instead of the right-wing, U.S.-backed opposition forces intentionally sabotaging the economy.

Since the early 2000s, supermarket owners affiliated with Venezuela’s opposition have been purposefully hoarding food products so they can resell them at higher prices and make large profits. Food importing companies owned by the country’s wealthy right-wing elite are also manipulating import figures to raise prices. In 2013, former Venezuelan Central Bank chief Edmee Betancourt reported that the country lost between US$15 billion and US$20 billion the previous year through such fraudulent import deals.

It doesn’t stop there. Last year, over 750 opposition-controlled offshore companies linked to the Panama Papers scandal were accused of purposely redirecting Venezuelan imports of raw food materials from the government to the private sector. Many of these companies sell their products to private companies in Colombia, which resell them to Venezuelans living close to Colombia. Reuters admitted in 2014 that Venezuelan opposition members living in border states were shipping low-cost foodstuffs provided by the Venezuelan government into Colombia for profit.

“Selling contraband is a serious problem. People here are taking large quantities of products meant for Venezuelans and selling them in Colombia,” Valencia resident Francisco Luzon told Al Jazeera in a 2014 interview.

Overall, Venezuela’s millionaire opposition is profiting handsomely from the country’s food crisis while blaming it on the socialist government that’s trying to eliminate it.
The Third Generation Partnership Project (3GPP) unites six telecommunications standards bodies, known as “Organizational Partners,” and provides their members with a stable environment to produce the highly successful Reports and Specifications that define 3GPP technologies.

A mobile device, also called a User Equipment (UE), may operate in a wireless communication network that provides high-speed data and/or voice communications. The wireless communication networks may implement circuit-switched (CS) and/or packet-switched (PS) communication protocols to provide various services. For example, the UE may operate in accordance with one or more of Code Division Multiple Access (CDMA) networks, Time Division Multiple Access (TDMA) networks, Frequency Division Multiple Access (FDMA) networks, Orthogonal FDMA (OFDMA) networks, Single-Carrier FDMA (SC-FDMA) networks, etc. The terms “network” and “system” are often used interchangeably. A CDMA network may implement a radio technology such as Universal Terrestrial Radio Access (UTRA), cdma2000, etc. UTRA includes Wideband-CDMA (W-CDMA) and Low Chip Rate (LCR). cdma2000 covers the IS-2000, IS-95 and IS-856 standards. A TDMA network may implement a radio technology such as Global System for Mobile Communications (GSM). An OFDMA network may implement a radio technology such as Evolved UTRA (E-UTRA), IEEE 802.11, IEEE 802.16, IEEE 802.20, Flash-OFDM®, etc. UTRA, E-UTRA, and GSM are part of the Universal Mobile Telecommunication System (UMTS). Long Term Evolution (LTE) is an upcoming release of UMTS that uses E-UTRA. UTRA, E-UTRA, GSM, UMTS and LTE are described in documents from an organization named “3rd Generation Partnership Project” (3GPP). cdma2000 is described in documents from an organization named “3rd Generation Partnership Project 2” (3GPP2). These various radio technologies and standards are known in the art.

LTE is a standard for wireless communication of high-speed data for mobile phones and data terminals. It is based on the GSM/EDGE and UMTS/HSPA (Universal Mobile Telecommunication System/High Speed Packet Access) network technologies, increasing capacity and speed using new modulation techniques. The IP-based LTE network architecture, called the Evolved Packet Core (EPC), supports seamless handovers for both voice and data to cell towers with older network technology such as GSM, UMTS and CDMA2000. The LTE technology is adapted for a smooth evolution from earlier 3GPP systems.

In LTE networking, technologies such as inter-cell interference coordination (ICIC) in the frequency domain and enhanced ICIC (eICIC) in the time domain have been developed for the new heterogeneous network topology of LTE-Advanced. Heterogeneous networks arose due to the rapidly increasing number of mobile subscribers and demand for bandwidth, and the inadequacy of traditional macro base stations to meet subscriber requirements. Homogeneous networks consisting solely of traditional macro base stations may have blind spots in coverage that adversely impact user experience. With the introduction of lower-power base stations, including pico cells, femtocells, and relay nodes, the LTE network topology becomes a heterogeneous network (HetNet) that is able to deliver more complete coverage. In a HetNet as defined in 3GPP Release 10, low power nodes (LPNs), such as RRU/RRH, pico eNB (Enhanced Node B), home eNB, and relay nodes, are deployed inside the coverage cell of a macro base station or enhanced node B.
The concept of an Almost Blank Subframe (ABS) was introduced in eICIC to address control channel interference between a macro eNB and a smaller base station, such as a pico eNB, in the time domain. Almost blank subframes are transmitted at low power and contain only limited signals. The interfering base station is configured to include ABSs in its transmission so that those subframes can be used by the interfered cell to serve User Equipment that previously experienced strong interference. By coordinating the transmissions of the macro eNB and the pico eNB using ABS, inter-cell interference is avoided.
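To make the time-domain coordination concrete, here is a small simulation sketch of the idea: a macro eNB mutes data transmission in the subframes flagged by an ABS pattern, and the pico cell schedules its interference-prone users precisely in those protected subframes. The 1-in-8 pattern, the power figures, and all names are illustrative assumptions; real ABS patterns are signaled between eNBs over the X2 interface rather than fixed values.

# Illustrative sketch of eICIC time-domain coordination with Almost
# Blank Subframes; the pattern and power values are assumptions.
ABS_PATTERN = [1, 0, 0, 0, 1, 0, 0, 0]  # 1 = macro sends an almost blank subframe

def macro_tx_power_dbm(subframe, full_power_dbm=46.0):
    """The macro eNB backs its data transmission down to near zero in ABS subframes."""
    return 0.0 if ABS_PATTERN[subframe % len(ABS_PATTERN)] else full_power_dbm

def pico_schedules_edge_user(subframe):
    """The pico cell serves UEs that suffer strong macro interference
    only while the macro is almost blank."""
    return ABS_PATTERN[subframe % len(ABS_PATTERN)] == 1

for sf in range(8):
    print(sf, macro_tx_power_dbm(sf), pico_schedules_edge_user(sf))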
package com.yz.common.payment.trade.pay.params;

/**
 * Parameters for a WeChat JSAPI trade payment.
 *
 * @author yangzhao
 * @date 2019/7/10 19:00
 */
public class WXTradeJsApiPayParams extends WXTradePayParams {

    private String openId;

    public String getOpenId() {
        return openId;
    }

    public void setOpenId(String openId) {
        this.openId = openId;
    }
}
#
# This file is part of pyasn1-modules software.
#
# Created by Russ Housley.
#
# Copyright (c) 2019, Vigil Security, LLC
# License: http://snmplabs.com/pyasn1/license.html
#
# NSA's CMS Key Management Attributes
#
# ASN.1 source from:
# https://www.rfc-editor.org/rfc/rfc7906.txt
# https://www.rfc-editor.org/errata/eid5850
#

from pyasn1.type import char
from pyasn1.type import constraint
from pyasn1.type import namedtype
from pyasn1.type import namedval
from pyasn1.type import tag
from pyasn1.type import univ

from pyasn1_modules import rfc2634
from pyasn1_modules import rfc4108
from pyasn1_modules import rfc5280
from pyasn1_modules import rfc5652
from pyasn1_modules import rfc6010
from pyasn1_modules import rfc6019
from pyasn1_modules import rfc7191

MAX = float('inf')


# Imports From RFC 2634

id_aa_contentHint = rfc2634.id_aa_contentHint
ContentHints = rfc2634.ContentHints
id_aa_securityLabel = rfc2634.id_aa_securityLabel
SecurityPolicyIdentifier = rfc2634.SecurityPolicyIdentifier
SecurityClassification = rfc2634.SecurityClassification
ESSPrivacyMark = rfc2634.ESSPrivacyMark
SecurityCategories = rfc2634.SecurityCategories
ESSSecurityLabel = rfc2634.ESSSecurityLabel


# Imports From RFC 4108

id_aa_communityIdentifiers = rfc4108.id_aa_communityIdentifiers
CommunityIdentifier = rfc4108.CommunityIdentifier
CommunityIdentifiers = rfc4108.CommunityIdentifiers


# Imports From RFC 5280

AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
Name = rfc5280.Name
Certificate = rfc5280.Certificate
GeneralNames = rfc5280.GeneralNames
GeneralName = rfc5280.GeneralName
SubjectInfoAccessSyntax = rfc5280.SubjectInfoAccessSyntax
id_pkix = rfc5280.id_pkix
id_pe = rfc5280.id_pe
id_pe_subjectInfoAccess = rfc5280.id_pe_subjectInfoAccess


# Imports From RFC 6010

CMSContentConstraints = rfc6010.CMSContentConstraints


# Imports From RFC 6019

BinaryTime = rfc6019.BinaryTime
id_aa_binarySigningTime = rfc6019.id_aa_binarySigningTime
BinarySigningTime = rfc6019.BinarySigningTime


# Imports From RFC 5652

Attribute = rfc5652.Attribute
CertificateSet = rfc5652.CertificateSet
CertificateChoices = rfc5652.CertificateChoices
id_contentType = rfc5652.id_contentType
ContentType = rfc5652.ContentType
id_messageDigest = rfc5652.id_messageDigest
MessageDigest = rfc5652.MessageDigest


# Imports From RFC 7191

SIREntityName = rfc7191.SIREntityName
id_aa_KP_keyPkgIdAndReceiptReq = rfc7191.id_aa_KP_keyPkgIdAndReceiptReq
KeyPkgIdentifierAndReceiptReq = rfc7191.KeyPkgIdentifierAndReceiptReq


# Key Province Attribute

id_aa_KP_keyProvinceV2 = univ.ObjectIdentifier('2.16.840.1.101.2.1.5.71')


class KeyProvinceV2(univ.ObjectIdentifier):
    pass


aa_keyProvince_v2 = Attribute()
aa_keyProvince_v2['attrType'] = id_aa_KP_keyProvinceV2
aa_keyProvince_v2['attrValues'][0] = KeyProvinceV2()


# Manifest Attribute

id_aa_KP_manifest = univ.ObjectIdentifier('2.16.840.1.101.2.1.5.72')


class ShortTitle(char.PrintableString):
    pass


class Manifest(univ.SequenceOf):
    pass

Manifest.componentType = ShortTitle()
Manifest.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)


aa_manifest = Attribute()
aa_manifest['attrType'] = id_aa_KP_manifest
aa_manifest['attrValues'][0] = Manifest()


# Key Algorithm Attribute

id_kma_keyAlgorithm = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.1')


class KeyAlgorithm(univ.Sequence):
    pass

KeyAlgorithm.componentType = namedtype.NamedTypes(
    namedtype.NamedType('keyAlg', univ.ObjectIdentifier()),
    namedtype.OptionalNamedType('checkWordAlg', univ.ObjectIdentifier().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
    namedtype.OptionalNamedType('crcAlg', univ.ObjectIdentifier().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
)


aa_keyAlgorithm = Attribute()
aa_keyAlgorithm['attrType'] = id_kma_keyAlgorithm
aa_keyAlgorithm['attrValues'][0] = KeyAlgorithm()


# User Certificate Attribute

id_at_userCertificate = univ.ObjectIdentifier('2.5.4.36')

aa_userCertificate = Attribute()
aa_userCertificate['attrType'] = id_at_userCertificate
aa_userCertificate['attrValues'][0] = Certificate()


# Key Package Receivers Attribute

id_kma_keyPkgReceiversV2 = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.16')


class KeyPkgReceiver(univ.Choice):
    pass

KeyPkgReceiver.componentType = namedtype.NamedTypes(
    namedtype.NamedType('sirEntity', SIREntityName().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
    namedtype.NamedType('community', CommunityIdentifier().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)


class KeyPkgReceiversV2(univ.SequenceOf):
    pass

KeyPkgReceiversV2.componentType = KeyPkgReceiver()
KeyPkgReceiversV2.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)


aa_keyPackageReceivers_v2 = Attribute()
aa_keyPackageReceivers_v2['attrType'] = id_kma_keyPkgReceiversV2
aa_keyPackageReceivers_v2['attrValues'][0] = KeyPkgReceiversV2()


# TSEC Nomenclature Attribute

id_kma_TSECNomenclature = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.3')


class CharEdition(char.PrintableString):
    pass


class CharEditionRange(univ.Sequence):
    pass

CharEditionRange.componentType = namedtype.NamedTypes(
    namedtype.NamedType('firstCharEdition', CharEdition()),
    namedtype.NamedType('lastCharEdition', CharEdition())
)


class NumEdition(univ.Integer):
    pass

NumEdition.subtypeSpec = constraint.ValueRangeConstraint(0, 308915776)


class NumEditionRange(univ.Sequence):
    pass

NumEditionRange.componentType = namedtype.NamedTypes(
    namedtype.NamedType('firstNumEdition', NumEdition()),
    namedtype.NamedType('lastNumEdition', NumEdition())
)


class EditionID(univ.Choice):
    pass

EditionID.componentType = namedtype.NamedTypes(
    namedtype.NamedType('char', univ.Choice(componentType=namedtype.NamedTypes(
        namedtype.NamedType('charEdition', CharEdition().subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
        namedtype.NamedType('charEditionRange', CharEditionRange().subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)))
    ))),
    namedtype.NamedType('num', univ.Choice(componentType=namedtype.NamedTypes(
        namedtype.NamedType('numEdition', NumEdition().subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
        namedtype.NamedType('numEditionRange', NumEditionRange().subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4)))
    )))
)


class Register(univ.Integer):
    pass

Register.subtypeSpec = constraint.ValueRangeConstraint(0, 2147483647)


class RegisterRange(univ.Sequence):
    pass

RegisterRange.componentType = namedtype.NamedTypes(
    namedtype.NamedType('firstRegister', Register()),
    namedtype.NamedType('lastRegister', Register())
)


class RegisterID(univ.Choice):
    pass

RegisterID.componentType = namedtype.NamedTypes(
    namedtype.NamedType('register', Register().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5))),
    namedtype.NamedType('registerRange', RegisterRange().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 6)))
)


class SegmentNumber(univ.Integer):
    pass

SegmentNumber.subtypeSpec = constraint.ValueRangeConstraint(1, 127)


class SegmentRange(univ.Sequence):
    pass

SegmentRange.componentType = namedtype.NamedTypes(
    namedtype.NamedType('firstSegment', SegmentNumber()),
    namedtype.NamedType('lastSegment', SegmentNumber())
)


class SegmentID(univ.Choice):
    pass

SegmentID.componentType = namedtype.NamedTypes(
    namedtype.NamedType('segmentNumber', SegmentNumber().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))),
    namedtype.NamedType('segmentRange', SegmentRange().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 8)))
)


class TSECNomenclature(univ.Sequence):
    pass

TSECNomenclature.componentType = namedtype.NamedTypes(
    namedtype.NamedType('shortTitle', ShortTitle()),
    namedtype.OptionalNamedType('editionID', EditionID()),
    namedtype.OptionalNamedType('registerID', RegisterID()),
    namedtype.OptionalNamedType('segmentID', SegmentID())
)


aa_tsecNomenclature = Attribute()
aa_tsecNomenclature['attrType'] = id_kma_TSECNomenclature
aa_tsecNomenclature['attrValues'][0] = TSECNomenclature()


# Key Purpose Attribute

id_kma_keyPurpose = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.13')


class KeyPurpose(univ.Enumerated):
    pass

KeyPurpose.namedValues = namedval.NamedValues(
    ('n-a', 0),
    ('a', 65),
    ('b', 66),
    ('l', 76),
    ('m', 77),
    ('r', 82),
    ('s', 83),
    ('t', 84),
    ('v', 86),
    ('x', 88),
    ('z', 90)
)


aa_keyPurpose = Attribute()
aa_keyPurpose['attrType'] = id_kma_keyPurpose
aa_keyPurpose['attrValues'][0] = KeyPurpose()


# Key Use Attribute

id_kma_keyUse = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.14')


class KeyUse(univ.Enumerated):
    pass

KeyUse.namedValues = namedval.NamedValues(
    ('n-a', 0),
    ('ffk', 1),
    ('kek', 2),
    ('kpk', 3),
    ('msk', 4),
    ('qkek', 5),
    ('tek', 6),
    ('tsk', 7),
    ('trkek', 8),
    ('nfk', 9),
    ('effk', 10),
    ('ebfk', 11),
    ('aek', 12),
    ('wod', 13),
    ('kesk', 246),
    ('eik', 247),
    ('ask', 248),
    ('kmk', 249),
    ('rsk', 250),
    ('csk', 251),
    ('sak', 252),
    ('rgk', 253),
    ('cek', 254),
    ('exk', 255)
)


aa_keyUse = Attribute()
aa_keyUse['attrType'] = id_kma_keyUse
aa_keyUse['attrValues'][0] = KeyUse()


# Transport Key Attribute

id_kma_transportKey = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.15')


class TransOp(univ.Enumerated):
    pass

TransOp.namedValues = namedval.NamedValues(
    ('transport', 1),
    ('operational', 2)
)


aa_transportKey = Attribute()
aa_transportKey['attrType'] = id_kma_transportKey
aa_transportKey['attrValues'][0] = TransOp()


# Key Distribution Period Attribute

id_kma_keyDistPeriod = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.5')


class KeyDistPeriod(univ.Sequence):
    pass

KeyDistPeriod.componentType = namedtype.NamedTypes(
    namedtype.OptionalNamedType('doNotDistBefore', BinaryTime().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
    namedtype.NamedType('doNotDistAfter', BinaryTime())
)


aa_keyDistributionPeriod = Attribute()
aa_keyDistributionPeriod['attrType'] = id_kma_keyDistPeriod
aa_keyDistributionPeriod['attrValues'][0] = KeyDistPeriod()


# Key Validity Period Attribute

id_kma_keyValidityPeriod = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.6')


class KeyValidityPeriod(univ.Sequence):
    pass

KeyValidityPeriod.componentType = namedtype.NamedTypes(
    namedtype.NamedType('doNotUseBefore', BinaryTime()),
    namedtype.OptionalNamedType('doNotUseAfter', BinaryTime())
)


aa_keyValidityPeriod = Attribute()
aa_keyValidityPeriod['attrType'] = id_kma_keyValidityPeriod
aa_keyValidityPeriod['attrValues'][0] = KeyValidityPeriod()


# Key Duration Attribute

id_kma_keyDuration = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.7')

ub_KeyDuration_months = univ.Integer(72)
ub_KeyDuration_hours = univ.Integer(96)
ub_KeyDuration_days = univ.Integer(732)
ub_KeyDuration_weeks = univ.Integer(104)
ub_KeyDuration_years = univ.Integer(100)


class KeyDuration(univ.Choice):
    pass

KeyDuration.componentType = namedtype.NamedTypes(
    namedtype.NamedType('hours', univ.Integer().subtype(
        subtypeSpec=constraint.ValueRangeConstraint(1, ub_KeyDuration_hours)).subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
    namedtype.NamedType('days', univ.Integer().subtype(
        subtypeSpec=constraint.ValueRangeConstraint(1, ub_KeyDuration_days))),
    namedtype.NamedType('weeks', univ.Integer().subtype(
        subtypeSpec=constraint.ValueRangeConstraint(1, ub_KeyDuration_weeks)).subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
    namedtype.NamedType('months', univ.Integer().subtype(
        subtypeSpec=constraint.ValueRangeConstraint(1, ub_KeyDuration_months)).subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
    namedtype.NamedType('years', univ.Integer().subtype(
        subtypeSpec=constraint.ValueRangeConstraint(1, ub_KeyDuration_years)).subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
)


aa_keyDurationPeriod = Attribute()
aa_keyDurationPeriod['attrType'] = id_kma_keyDuration
aa_keyDurationPeriod['attrValues'][0] = KeyDuration()


# Classification Attribute

id_aa_KP_classification = univ.ObjectIdentifier(id_aa_securityLabel)

id_enumeratedPermissiveAttributes = univ.ObjectIdentifier('2.16.840.1.101.2.1.8.3.1')
id_enumeratedRestrictiveAttributes = univ.ObjectIdentifier('2.16.840.1.101.2.1.8.3.4')
id_informativeAttributes = univ.ObjectIdentifier('2.16.840.1.101.2.1.8.3.3')


class SecurityAttribute(univ.Integer):
    pass

SecurityAttribute.subtypeSpec = constraint.ValueRangeConstraint(0, MAX)


class EnumeratedTag(univ.Sequence):
    pass

EnumeratedTag.componentType = namedtype.NamedTypes(
    namedtype.NamedType('tagName', univ.ObjectIdentifier()),
    namedtype.NamedType('attributeList', univ.SetOf(componentType=SecurityAttribute()))
)


class FreeFormField(univ.Choice):
    pass

FreeFormField.componentType = namedtype.NamedTypes(
    namedtype.NamedType('bitSetAttributes', univ.BitString()),  # Not permitted in RFC 7906
    namedtype.NamedType('securityAttributes', univ.SetOf(componentType=SecurityAttribute()))
)


class InformativeTag(univ.Sequence):
    pass

InformativeTag.componentType = namedtype.NamedTypes(
    namedtype.NamedType('tagName', univ.ObjectIdentifier()),
    namedtype.NamedType('attributes', FreeFormField())
)


class Classification(ESSSecurityLabel):
    pass


aa_classification = Attribute()
aa_classification['attrType'] = id_aa_KP_classification
aa_classification['attrValues'][0] = Classification()


# Split Identifier Attribute

id_kma_splitID = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.11')


class SplitID(univ.Sequence):
    pass

SplitID.componentType = namedtype.NamedTypes(
    namedtype.NamedType('half', univ.Enumerated(
        namedValues=namedval.NamedValues(('a', 0), ('b', 1)))),
    namedtype.OptionalNamedType('combineAlg', AlgorithmIdentifier())
)


aa_splitIdentifier = Attribute()
aa_splitIdentifier['attrType'] = id_kma_splitID
aa_splitIdentifier['attrValues'][0] = SplitID()


# Key Package Type Attribute

id_kma_keyPkgType = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.12')


class KeyPkgType(univ.ObjectIdentifier):
    pass


aa_keyPackageType = Attribute()
aa_keyPackageType['attrType'] = id_kma_keyPkgType
aa_keyPackageType['attrValues'][0] = KeyPkgType()


# Signature Usage Attribute

id_kma_sigUsageV3 = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.22')


class SignatureUsage(CMSContentConstraints):
    pass


aa_signatureUsage_v3 = Attribute()
aa_signatureUsage_v3['attrType'] = id_kma_sigUsageV3
aa_signatureUsage_v3['attrValues'][0] = SignatureUsage()


# Other Certificate Format Attribute

id_kma_otherCertFormats = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.19')

aa_otherCertificateFormats = Attribute()
aa_otherCertificateFormats['attrType'] = id_kma_otherCertFormats
aa_otherCertificateFormats['attrValues'][0] = CertificateChoices()


# PKI Path Attribute

id_at_pkiPath = univ.ObjectIdentifier('2.5.4.70')


class PkiPath(univ.SequenceOf):
    pass

PkiPath.componentType = Certificate()
PkiPath.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)


aa_pkiPath = Attribute()
aa_pkiPath['attrType'] = id_at_pkiPath
aa_pkiPath['attrValues'][0] = PkiPath()


# Useful Certificates Attribute

id_kma_usefulCerts = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.20')

aa_usefulCertificates = Attribute()
aa_usefulCertificates['attrType'] = id_kma_usefulCerts
aa_usefulCertificates['attrValues'][0] = CertificateSet()


# Key Wrap Attribute

id_kma_keyWrapAlgorithm = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.21')

aa_keyWrapAlgorithm = Attribute()
aa_keyWrapAlgorithm['attrType'] = id_kma_keyWrapAlgorithm
aa_keyWrapAlgorithm['attrValues'][0] = AlgorithmIdentifier()


# Content Decryption Key Identifier Attribute

id_aa_KP_contentDecryptKeyID = univ.ObjectIdentifier('2.16.840.1.101.2.1.5.66')


class ContentDecryptKeyID(univ.OctetString):
    pass


aa_contentDecryptKeyIdentifier = Attribute()
aa_contentDecryptKeyIdentifier['attrType'] = id_aa_KP_contentDecryptKeyID
aa_contentDecryptKeyIdentifier['attrValues'][0] = ContentDecryptKeyID()


# Certificate Pointers Attribute

aa_certificatePointers = Attribute()
aa_certificatePointers['attrType'] = id_pe_subjectInfoAccess
aa_certificatePointers['attrValues'][0] = SubjectInfoAccessSyntax()


# CRL Pointers Attribute

id_aa_KP_crlPointers = univ.ObjectIdentifier('2.16.840.1.101.2.1.5.70')

aa_cRLDistributionPoints = Attribute()
aa_cRLDistributionPoints['attrType'] = id_aa_KP_crlPointers
aa_cRLDistributionPoints['attrValues'][0] = GeneralNames()


# Extended Error Codes

id_errorCodes = univ.ObjectIdentifier('2.16.840.1.101.2.1.22')

id_missingKeyType = univ.ObjectIdentifier('2.16.840.1.101.2.1.22.1')
id_privacyMarkTooLong = univ.ObjectIdentifier('2.16.840.1.101.2.1.22.2')
id_unrecognizedSecurityPolicy = univ.ObjectIdentifier('2.16.840.1.101.2.1.22.3')


# Map of Attribute Type OIDs to Attributes added to the
# ones that are in rfc5652.py

_cmsAttributesMapUpdate = {
    id_aa_contentHint: ContentHints(),
    id_aa_communityIdentifiers: CommunityIdentifiers(),
    id_aa_binarySigningTime: BinarySigningTime(),
    id_contentType: ContentType(),
    id_messageDigest: MessageDigest(),
    id_aa_KP_keyPkgIdAndReceiptReq: KeyPkgIdentifierAndReceiptReq(),
    id_aa_KP_keyProvinceV2: KeyProvinceV2(),
    id_aa_KP_manifest: Manifest(),
    id_kma_keyAlgorithm: KeyAlgorithm(),
    id_at_userCertificate: Certificate(),
    id_kma_keyPkgReceiversV2: KeyPkgReceiversV2(),
    id_kma_TSECNomenclature: TSECNomenclature(),
    id_kma_keyPurpose: KeyPurpose(),
    id_kma_keyUse: KeyUse(),
    id_kma_transportKey: TransOp(),
    id_kma_keyDistPeriod: KeyDistPeriod(),
    id_kma_keyValidityPeriod: KeyValidityPeriod(),
    id_kma_keyDuration: KeyDuration(),
    id_aa_KP_classification: Classification(),
    id_kma_splitID: SplitID(),
    id_kma_keyPkgType: KeyPkgType(),
    id_kma_sigUsageV3: SignatureUsage(),
    id_kma_otherCertFormats: CertificateChoices(),
    id_at_pkiPath: PkiPath(),
    id_kma_usefulCerts: CertificateSet(),
    id_kma_keyWrapAlgorithm: AlgorithmIdentifier(),
    id_aa_KP_contentDecryptKeyID: ContentDecryptKeyID(),
    id_pe_subjectInfoAccess: SubjectInfoAccessSyntax(),
    id_aa_KP_crlPointers: GeneralNames(),
}

rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
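As a quick sanity check of the map update at the end of the module, the following round-trips one of the attributes defined above through the DER codec; decodeOpenTypes resolves the Any-typed attrValues via the extended rfc5652.cmsAttributesMap. This is only a usage sketch: it assumes the module is importable as pyasn1_modules.rfc7906, and the key-package-type OID value is a made-up placeholder.

# Usage sketch, assuming this module is available as pyasn1_modules.rfc7906.
from pyasn1.codec.der.decoder import decode as der_decode
from pyasn1.codec.der.encoder import encode as der_encode
from pyasn1.type import univ
from pyasn1_modules import rfc5652, rfc7906

attr = rfc5652.Attribute()
attr['attrType'] = rfc7906.id_kma_keyPkgType
# The OID below is a placeholder value, not an assigned key package type.
attr['attrValues'][0] = univ.Any(der_encode(rfc7906.KeyPkgType('2.16.840.1.101.2.1.2.78')))

substrate = der_encode(attr)

# decodeOpenTypes consults rfc5652.cmsAttributesMap, which the module
# extended above, so attrValues comes back with its proper schema.
decoded, rest = der_decode(substrate, asn1Spec=rfc5652.Attribute(), decodeOpenTypes=True)
assert not rest
assert decoded['attrType'] == rfc7906.id_kma_keyPkgType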
import mastermind.rules as r

ruleset = [{'url': 'http://localhost:8000/',
            'request': {'skip': True},
            'name': 'foo',
            'response': {'body': 'ok200.json',
                         'headers': {'add': {'X-ustwo-intercepted': 'Yes'}}}}]

rule = {'url': 'http://localhost:8000/',
        'request': {'skip': True},
        'name': 'foo',
        'response': {'body': 'ok200.json',
                     'headers': {'add': {'X-ustwo-intercepted': 'Yes'}}}}


def test_url():
    assert r.url(rule) == 'http://localhost:8000/'


def test_skip_true():
    assert r.skip(rule) is True


def test_skip_false():
    assert r.skip({'request': {'skip': False}}) is False
    assert r.skip({'request': {}}) is False
    assert r.skip({}) is False


def test_delay():
    assert r.delay({'response': {}}) is None
    assert r.delay({}) is None
    assert r.delay({'response': {'delay': 10}}) == 10


def test_status_code():
    assert r.status_code({'response': {'code': 500}}) == 500
    assert r.status_code({'response': {}}) is None
    assert r.status_code({'response': {'code': '500'}}) == 500


def test_body_filename_exists():
    assert r.body_filename(rule) == 'ok200.json'


def test_body_filename_not_exists():
    assert r.body_filename({'url': 'http://foo'}) is None


def test_method():
    assert r.method({"url": "http://example.org"}) is None
    assert r.method({"url": "http://example.org", "method": "GET"}) == "GET"
    assert r.method({"url": "http://example.org", "method": "post"}) == "POST"


def test_match_rule():
    expected_generic = {"url": "http://example.org"}
    expected_get = {"url": "http://example.org", "method": "GET"}
    expected_delete = {"url": "http://example.org", "method": "delete"}

    assert r.match_rule("GET", "http://example.org")(expected_generic)
    assert not r.match_rule("GET", "http://example.org/")(expected_generic)
    assert r.match_rule("GET", "http://example.org")(expected_get)
    assert r.match_rule("DELETE", "http://example.org")(expected_delete)
    assert not r.match_rule("PUT", "http://example.org")(expected_delete)


def test_select():
    rule_get = {"url": "http://example.org", "method": "GET"}
    rule_post = {"url": "http://example.org", "method": "POST"}

    assert r.select("GET", "http://example.org", []) == []
    assert r.select("GET", "http://example.org", [rule_get]) == [rule_get]
    assert r.select("GET", "http://example.org", [rule_get, rule_post]) == [rule_get]
    assert r.select("GET", "http://example.org/foo", [rule_get]) == []
    assert r.select("POST", "http://example.org", [rule_get]) == []
Micron Technology (NASDAQ: MU) edged out beaten-down expectations when it reported its fiscal second-quarter results, but that's small consolation for investors. Prices for both DRAM and NAND chips declined more than the company expected, and elevated customer inventory levels remain a problem. Revenue and earnings plunged, and the company's guidance calls for an even bigger drop in the third quarter.

Price drops coupled with declining bit volumes wreaked havoc on Micron's results. In the PC market, Micron gave no indication when it expects growth to return. Revenue dropped 25% from the first quarter, driven by weaker pricing, inventory drawdowns, and shortages of certain CPUs. The company is focused on cost competitiveness, so don't expect pricing to recover anytime soon.

Micron does expect DRAM bit shipments to begin to grow sequentially in the third quarter, although the company gave no indication when prices will stop falling so quickly. Rising bit volumes don't necessarily mean the third quarter will mark the bottom of this downturn, since prices could continue to plunge fast enough to drive revenue lower for multiple quarters.

In response to weaker than expected demand, Micron is idling about 5% of its DRAM wafer starts and reducing its NAND wafer starts by 5%. The company lowered its capex estimate for 2019 to $9 billion, down from a range of $9 billion to $9.5 billion, and it's evaluating its capex for fiscal 2020.

If Micron is right that much of the excess customer inventory will be resolved by mid-year, the company's results may not get much worse after the third quarter. But that's a big if. So far, the downturn has been worse than Micron's management has expected. Take the company's outlook for a second-half recovery with a grain of salt.
An Improved Clonal Selection Algorithm Based Optimization Method for Iterative Learning Control Systems

Abstract
In this paper an improved Clonal Selection Algorithm (CSA) is proposed as a method to implement optimality-based Iterative Learning Control algorithms. The strength of the proposed method is that it can not only cope with non-minimum phase plants and nonlinear plants but can also deal with constraints on the input conveniently, by means of a specially designed mutation operator. In addition, because more a priori information is used to decrease the size of the search space, the probability of the clonal selection algorithm converging rapidly to a global optimum is increased considerably. Simulations show that the convergence speed is satisfactory regardless of the nature of the plant.
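The paper's exact operators are not reproduced in the abstract, so the following is only a generic clonal-selection loop in the spirit it describes: candidates are cloned and mutated with a magnitude that shrinks for better-ranked parents, and every mutated clone is clipped back into the admissible input box, which is the role the abstract assigns to its specially designed mutation operator. All names and parameter values are illustrative assumptions.

import numpy as np

rng = np.random.default_rng(0)

def clonal_selection(fitness, lo, hi, dim, pop=20, clones=5, gens=200):
    """Generic clonal selection; `fitness` is minimized over [lo, hi]^dim.

    np.clip() enforces the input constraints, standing in for the
    paper's constraint-handling mutation operator."""
    X = rng.uniform(lo, hi, size=(pop, dim))
    for _ in range(gens):
        f = np.array([fitness(x) for x in X])
        order = np.argsort(f)            # best candidates first
        X, f = X[order], f[order]
        for i in range(pop // 2):        # clone and mutate the better half
            scale = (hi - lo) * 0.1 * (i + 1) / pop   # worse parents mutate more
            C = np.clip(X[i] + rng.normal(0.0, scale, size=(clones, dim)), lo, hi)
            fc = np.array([fitness(c) for c in C])
            j = int(fc.argmin())
            if fc[j] < f[i]:
                X[i], f[i] = C[j], fc[j]
        X[-1] = rng.uniform(lo, hi, size=dim)  # receptor editing: refresh the worst
    f = np.array([fitness(x) for x in X])
    return X[int(np.argmin(f))]

# Toy use: choose a constrained input sequence minimizing a quadratic tracking error.
best = clonal_selection(lambda u: float(np.sum((u - 0.3) ** 2)), lo=-1.0, hi=1.0, dim=8)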
/**
 * Evaluates an instance to predict its class.
 *
 * @param example Instance evaluated.
 * @param trainData Training dataset.
 * @param nClasses Number of classes.
 * @param trainOutput Classes of the examples in the training dataset.
 * @param k Number of nearest neighbours considered.
 * @return Class predicted.
 */
public static int evaluate (double example[], double trainData[][], int nClasses, int trainOutput[], int k) {

    double minDist[];
    int nearestN[];
    int selectedClasses[];
    double dist;
    int prediction;
    int predictionValue;
    boolean stop;

    nearestN = new int[k];
    minDist = new double[k];

    for (int i=0; i<k; i++) {
        nearestN[i] = 0;
        minDist[i] = Double.MAX_VALUE;
    }

    // Find the k nearest neighbours, keeping minDist/nearestN sorted
    // by ascending distance.
    for (int i=0; i<trainData.length; i++) {
        dist = distance(trainData[i], example);

        // A distance of exactly 0.0 (the instance itself) is skipped.
        if (dist > 0.0) {
            stop = false;
            for (int j=0; j<k && !stop; j++) {
                if (dist < minDist[j]) {
                    // Shift worse neighbours down to make room.
                    for (int l = k - 1; l >= j+1; l--) {
                        minDist[l] = minDist[l - 1];
                        nearestN[l] = nearestN[l - 1];
                    }
                    minDist[j] = dist;
                    nearestN[j] = i;
                    stop = true;
                }
            }
        }
    }

    // Vote among the classes of the k nearest neighbours.
    selectedClasses = new int[nClasses];
    for (int i=0; i<nClasses; i++) {
        selectedClasses[i] = 0;
    }
    for (int i=0; i<k; i++) {
        selectedClasses[trainOutput[nearestN[i]]] += 1;
    }

    // Return the majority class.
    prediction = 0;
    predictionValue = selectedClasses[0];
    for (int i=1; i<nClasses; i++) {
        if (predictionValue < selectedClasses[i]) {
            predictionValue = selectedClasses[i];
            prediction = i;
        }
    }

    return prediction;
}
/*
 * Copyright (C) 2021-2021 Huawei Technologies Co., Ltd. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.huawei.test.configelement.service.impl;

import com.huawei.test.configelement.service.ExecuteTimesInfo;

import org.junit.Assert;
import org.junit.Test;

public class ProcessModeCountServiceTest {

    @Test
    public void doIncrement() {
        ExecuteTimesInfo executeTimesInfo = new ExecuteTimesInfo.Builder()
            .setAgentCount(2)
            .setProcessCount(2)
            .setThreadCount(2)
            .setAgentNumber(1)
            .setProcessNumber(1)
            .setThreadNumber(1)
            .setRunNumber(3) // the value fetched on the 4th execution should be 7
            .build();
        Assert.assertEquals(7, new ProcessModeCountService().doIncrement(executeTimesInfo));
    }
}
A new official Star Wars timeline released by Random House/Del Rey has plotted out when exactly Solo: A Star Wars Story takes place - and how it fits in relation to Rogue One, the prequel trilogy, and even pre-A New Hope prose novels. Solo is set to take place roughly 11 to 14 years before A New Hope's Battle of Yavin (BBY for short). Rogue One primarily took place days before A New Hope, although the early flashback with Galen Erso and his family is 13 BBY. Another new addition to the timeline is the upcoming prose novel Star Wars: Last Shot, set immediately prior to Solo. Scheduled for release April 17, Last Shot's description reveals that the pilots of the Millennium Falcon are Lando Calrissian and L3-37, but lists Han Solo and Chewbacca as merely crewmates. Solo: A Star Wars Story opens in theaters May 25.
In the name of security, Microsoft Corp. says it will give national governments and international organizations access to the code underlying current versions, beta releases and service packs of Windows 2000, Windows XP, Windows Server 2003 and Windows CE.

Microsoft says it has signed about 10 agreements under the new Government Security Program (GSP), including contracts with the Russian government and NATO. Approximately 60 countries are eligible to participate in the program. However, countries subject to U.S. trade embargoes, such as Cuba and Iraq, are ineligible.

Salah Dandan, the Redmond, Wash.-based manager of the worldwide government security program at Microsoft, said it is up to each individual country to announce its involvement in the program. “We respect the privacy interests of the national governments that we’re dealing with, and we don’t disclose who is participating unless we have consent. What I can tell you is that the list is definitely growing,” he said.

A spokesperson for Public Works and Government Services Canada said he was unable to comment on the program or on any particular supplier to the Canadian government.

The push behind this program is Microsoft’s acknowledgement that governments have been and will continue to turn to Linux, said Paul DeGroot, a lead analyst in sales and support strategies at analyst group Directions on Microsoft in Kirkland, Wash. “Last fall, the U.S. National Security Agency produced a secure version of Linux by modifying the Linux kernel. This is exactly the kind of thing that the new Microsoft program is intended to do – allow a government to do the same thing with Windows that they could with Linux. Whether or not they will is another question,” he said.

DeGroot expects that some governments and international agencies will resist the program because, while it allows them to build systems, they will not be allowed to make modifications to the code or compile the source code into the Windows programs themselves. This requires officials to actually visit the Microsoft campus in Washington. “This doesn’t have the flexibility that downloading the latest Linux kernel off the Internet does, but possibly it wouldn’t be a huge objection for a government serious about security to send someone to Redmond to look at their code,” he said.

Dandan described this aspect of the agreement as an opportunity rather than a challenge, as it lets officials test and validate code with the same people who have written it and designed the security. “It’s a chance to interact and explore opportunities for collaboration,” he said.

The program currently excludes all levels of government except national because the focus is on security and not on product support, Dandan said. However, he said that this doesn’t mean that Microsoft is unwilling to share with other levels of government. According to DeGroot, Microsoft is limiting the program to national governments in order to keep better tabs on its code. “Microsoft has to draw a line someplace. They’re letting people have access to very valuable intellectual property,” he said.

While DeGroot called this a good PR move for Microsoft, he said that there could be potential issues with the program. For instance, each time Microsoft issues a patch, there’s a chance it could affect a government’s customized systems. “There’s always the risk that you could screw up,” DeGroot said. “Or take a scenario where you change the code and it doesn’t work. Microsoft says they can’t help you until they see the code, which destroys the premise of the whole thing – you’ve made changes to the kernel to provide high security, and now for the purpose of support you’ve got to tell Microsoft what changes you’ve made.

“I doubt very much that you’ll see governments using it broadly,” he said.
# integration/send_packet.py
# UsbSerial test: Sending single packet
# args:
#   port (eg /dev/ttyUSB0)
#   size in bytes (eg 1024)
#   speed in bauds (eg 115200)

import os
import sys

import serial

port = sys.argv[1]
size = sys.argv[2]
speed = sys.argv[3]

comm = serial.Serial(port, int(speed))

data_tx = os.urandom(int(size))
bytes_sent = comm.write(data_tx)

print(str(bytes_sent))
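Only the sender is shown here; a receiving-side counterpart is sketched below under the assumption that it mirrors send_packet.py's argument order and uses the same pyserial API. The file name and the 5-second timeout are arbitrary choices.

# receive_packet.py (hypothetical counterpart): read back `size` bytes
# and report how many actually arrived before the timeout.
import sys

import serial

port = sys.argv[1]
size = int(sys.argv[2])
speed = int(sys.argv[3])

comm = serial.Serial(port, speed, timeout=5.0)  # timeout so the test cannot hang

data_rx = comm.read(size)
print(str(len(data_rx)))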
package org.apache.commons.digester3;

/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

/**
 * <p>
 * Simple regex pattern matching algorithm.
 * </p>
 * <p>
 * This uses just two wildcards:
 * <ul>
 * <li>{@code *} matches any sequence of none, one or more characters
 * <li>{@code ?} matches any one character
 * </ul>
 * Escaping these wildcards is not supported.
 * </p>
 *
 * @since 1.5
 */
public class SimpleRegexMatcher
    extends RegexMatcher
{

    // --------------------------------------------------------- Fields

    /** Default log (class wide) */
    private static final Log BASE_LOG = LogFactory.getLog( SimpleRegexMatcher.class );

    /** Custom log (can be set per object) */
    private Log log = BASE_LOG;

    // --------------------------------------------------------- Properties

    /**
     * Gets the {@code Log} implementation.
     *
     * @return the {@code Log} implementation.
     */
    public Log getLog()
    {
        return log;
    }

    /**
     * Sets the current {@code Log} implementation used by this class.
     *
     * @param log the current {@code Log} implementation used by this class.
     */
    public void setLog( final Log log )
    {
        this.log = log;
    }

    // --------------------------------------------------------- Public Methods

    /**
     * {@inheritDoc}
     */
    @Override
    public boolean match( final String basePattern, final String regexPattern )
    {
        // check for nulls
        if ( basePattern == null || regexPattern == null )
        {
            return false;
        }
        return match( basePattern, regexPattern, 0, 0 );
    }

    // --------------------------------------------------------- Implementation Methods

    /**
     * Implementation of the regex matching algorithm. This calls itself recursively.
     *
     * @param basePattern the standard digester path representing the element
     * @param regexPattern the regex pattern the path will be tested against
     * @param baseAt the current position within {@code basePattern}
     * @param regexAt the current position within {@code regexPattern}
     */
    private boolean match( final String basePattern, final String regexPattern, int baseAt, int regexAt )
    {
        if ( log.isTraceEnabled() )
        {
            log.trace( "Base: " + basePattern );
            log.trace( "Regex: " + regexPattern );
            log.trace( "Base@" + baseAt );
            log.trace( "Regex@" + regexAt );
        }

        // check bounds
        if ( regexAt >= regexPattern.length() )
        {
            // maybe we've got a match
            if ( baseAt >= basePattern.length() )
            {
                // ok!
                return true;
            }
            // run out early
            return false;
        }
        if ( baseAt >= basePattern.length() )
        {
            // run out early
            return false;
        }

        // ok both within bounds
        final char regexCurrent = regexPattern.charAt( regexAt );
        switch ( regexCurrent )
        {
            case '*':
                // this is the tricky case
                // check for terminal
                if ( ++regexAt >= regexPattern.length() )
                {
                    // a terminal '*' matches anything - so return true
                    return true;
                }
                // go through every subsequent appearance of the next character
                // and see if the rest of the regex matches
                final char nextRegex = regexPattern.charAt( regexAt );
                if ( log.isTraceEnabled() )
                {
                    log.trace( "Searching for next '" + nextRegex + "' char" );
                }
                int nextMatch = basePattern.indexOf( nextRegex, baseAt );
                while ( nextMatch != -1 )
                {
                    if ( log.isTraceEnabled() )
                    {
                        log.trace( "Trying '*' match@" + nextMatch );
                    }
                    if ( match( basePattern, regexPattern, nextMatch, regexAt ) )
                    {
                        return true;
                    }
                    nextMatch = basePattern.indexOf( nextRegex, nextMatch + 1 );
                }
                log.trace( "No matches found." );
                return false;

            case '?':
                // this matches any one character
                return match( basePattern, regexPattern, ++baseAt, ++regexAt );

            default:
                if ( log.isTraceEnabled() )
                {
                    log.trace( "Comparing " + regexCurrent + " to " + basePattern.charAt( baseAt ) );
                }
                if ( regexCurrent == basePattern.charAt( baseAt ) )
                {
                    // still got more to go
                    return match( basePattern, regexPattern, ++baseAt, ++regexAt );
                }
                return false;
        }
    }

}
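For experimenting with the recursion outside Java, here is a behavior-equivalent sketch in Python; the function name is mine and it is not part of Digester.

def simple_match(base, pattern):
    """Two-wildcard matcher mirroring SimpleRegexMatcher.match():
    '*' matches any run of characters, '?' matches exactly one."""
    if not pattern:
        return not base
    head, rest = pattern[0], pattern[1:]
    if head == '*':
        if not rest:
            return True          # a terminal '*' matches anything
        nxt = rest[0]            # try each occurrence of the char after '*'
        i = base.find(nxt)
        while i != -1:
            if simple_match(base[i:], rest):
                return True
            i = base.find(nxt, i + 1)
        return False
    if head == '?':
        return bool(base) and simple_match(base[1:], rest)
    return bool(base) and base[0] == head and simple_match(base[1:], rest)

assert simple_match('a/b/c', 'a/*/c')
assert not simple_match('a/b', 'a/?/c')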
package net.officefloor.performance.entities;

import javax.persistence.Entity;
import javax.persistence.Id;

import lombok.Data;

/**
 * World entity.
 *
 * @author <NAME>
 */
@Data
@Entity
public class World {

    @Id
    private int id;

    private int randomNumber;
}
#include "cjitConv.hpp" #include "dllFileAux.hpp" // strings for declarations, paramString #include <string> using namespace std; using namespace cprog; #define CONST1(var) >>("#define " #var " "+asDec(var)) #define FREE1(var) >>("#undef " #var) #ifndef VEL_BUG /** 1 means use extra ve_lvl as workaround for clang bug */ #define VEL_BUG 0 #endif #define VRW_INDUCE 1 /*def 1*/ #define OUTINDEX 0 /*def 0*/ /** based on a very short (slow) direct_default3.c * NEW: playing with blocking (from innermost loop side) */ //std::string cjitConvolutionForward00( vednnConvolutionParam_t const* const p ) //std::string cjitConvolutionForward00( vednnConvolutionParam_t const* const p ) DllFile cjitConvolutionForward1( struct param const* const p ) { string const impl = "cjitConvFwd1"; int const verbose=0; #if 0 // vednn.h PUBLIC API vednnError_t vednnConvolutionForward( const vednnTensorParam_t *pParamIn, const void *pDataIn, const vednnFilterParam_t *pParamKernel, const void *pDataKernel, const vednnTensorParam_t *pParamOut, void *pDataOut, const vednnConvolutionParam_t *pParamConv, vednnConvolutionAlgorithm_t algo ) ; // but include/C/vednnConvolutionForward.h: IMPL API typedef vednnError_t (*vednnConvForward_t)( const vednnTensorParam_t * restrict pParamIn, const void * restrict pDataIn, const vednnFilterParam_t * restrict pParamKernel, const void * restrict pDataKernel, const vednnConvolutionParam_t * restrict pParamConv, const vednnTensorParam_t * restrict pParamOut, void * restrict pDataOut) ; #endif DllFile df; // return value //DllFileAux dfx("Convolution","Forward"); std::string parmstr = paramString(p); df.basename = impl+"_"+parmstr; cout<<impl<<" : df.basename = "<<df.basename<<endl; // we use intrinsics. suffix matches build recipe in "bin.mk" df.suffix = "-vi.c"; Cunit pr("program"); pr.v = verbose; // default is quite lengthy! 
auto& includes = pr["includes"]<<Endl; includes >>CSTR(#include "vednn.h") >>CSTR(#if __has_include("vednnx.h")) // an old clang directive >>CSTR(#include "vednnx.h") >>CSTR(#endif) #if VEL_BUG >>CSTR(#include "veintrin.h") #endif >>CSTR(#include "velintrin.h") >>"#include <stdio.h>" >>"#include <stdlib.h>" >>"#include <assert.h>" >>"#include <stdint.h>" ; pr["macros"]<<"\n" >>"" >>"#if 0" >>"static void err_print(char const* file, int const line, char const* what, int const requirement){" >>" if(!requirement){" >>" "<<CSTR(printf(" Error %s:%d failed CHK: %s\n",file,line,what);) >>" }else{" >>" "<<"//"<<CSTR(printf(" OK %s:%d\n",file,line);) >>" }" >>" fflush(stdout);" >>"}" >>"#define CHK(REQUIREMENT) err_print(__FILE__,__LINE__,#REQUIREMENT,(REQUIREMENT));" >>"#else" >>"#define CHK(REQUIREMENT) do {;}while(0)" >>"#endif" >>"" >>"#if "<<asDec(VEL_BUG) >>"// sometimes enabling this can fix 'wrong result'" >>"// Simple test case: jitconv -p mb64ih3ic1oc1_kh3ph0" >>"#define NO_SET_VLEN( VLEN ) _ve_lvl(VLEN)" >>"" >>"#else // but pure vel intrinsics should do nothing" >>"#define NO_SET_VLEN( VLEN ) do{}while(0)" >>"#endif" ; #if 0 // vednn.h **public** API and low-level impl call signature includes<<CSTR(#include "vednn.h"); std::string fn_declare; { std::string funcname(df.basename); std::ostringstream oss; oss<<"vednnError_t "<<funcname<<"(" <<"\n const vednnTensorParam_t * restrict pParamIn," <<"\n const void * restrict pDataIn," <<"\n const vednnFilterParam_t * restrict pParamKernel," <<"\n const void * restrict pDataKernel," <<"\n const vednnConvolutionParam_t * restrict pParamConv," <<"\n const vednnTensorParam_t * restrict pParamOut," <<"\n void * restrict pDataOut" <<"\n )"; fn_declare = oss.str(); } #elif 0 // or vednnx.h and typedefs (publicized from vednn **low-level** API) includes>>CSTR(#include "vednnx.h"); std::string fn_declare(CONVX_FWD_DECL(+df.basename+)); cout<<fn_declare<<endl; prefix_lines(cout,fn_declare,"--prefixed-- ")<<"\n"; #else // more macro approach std::string fn_declare = "vednnError_t "+df.basename+"(\n "+ multiReplace(",",",\n ", CSTR(CONVX_FWD_ORDER( VEDNN_PARAMS_CONV_FORWARD, VEDNN_DATARG_CONV_FORWARD))) +"\n)" ; #endif df.syms.push_back(SymbolDecl(df.basename, "vednn ConvolutionForward "+paramString(p), fn_declare)); //auto & fns = mk_extern_c(pr,"extern_C").after(pr["/macros"])["body"]; auto & fns = mk_extern_c(pr,"extern_C")["body"]; //auto & fns = mk_extern_c(pr,"extern_C")["body/.."]; auto& fn = mk_func(pr,"fn",fn_declare).after(fns)["body"]; // get the vars here first. 
const int64_t batch = p->batchNum; const int64_t group = p->group; const int64_t inChannel = p->inChannel; const int64_t inHeight = p->inHeight; const int64_t inWidth = p->inWidth; const int64_t outChannel = p->outChannel; const int64_t outHeight = p->outHeight; const int64_t outWidth = p->outWidth; const int64_t kernHeight = p->kernHeight; const int64_t kernWidth = p->kernWidth; const int64_t strideHeight = p->strideHeight; const int64_t strideWidth = p->strideWidth; const int64_t padHeight = p->padHeight; const int64_t padWidth = p->padWidth; const int64_t dilationHeight = p->dilationHeight; const int64_t dilationWidth = p->dilationWidth; assert( outWidth > 0 ); const int64_t inChannelGroup = inChannel / group; // equal to pDataKernel->inChannel const int64_t outChannelGroup = outChannel / group; // equal to pDataKernel->outChannel const int64_t inHW = inHeight * inWidth; const int64_t kernHW = kernHeight * kernWidth; const int64_t outHW = outHeight * outWidth; int64_t const vl_x_init = ve_vlen_suggest( outWidth ); #define DEF(VAR) def(#VAR, VAR) fn.DEF(batch).DEF(group).DEF(inChannel).DEF(inHeight).DEF(inWidth); fn.DEF(outChannel).DEF(outHeight).DEF(outWidth).DEF(kernHeight).DEF(kernWidth); fn.DEF(strideHeight).DEF(strideWidth).DEF(padHeight).DEF(padWidth).DEF(dilationHeight); fn.DEF(dilationWidth).DEF(inChannelGroup).DEF(outChannelGroup); fn.DEF(inHW).DEF(kernHW).DEF(outHW).DEF(vl_x_init); auto& fn_ptrs = fn["ptrs"]; fn_ptrs>>"float const * restrict pIn = pDataIn;" >>"float const * restrict pKernel = pDataKernel;" #if OUTINDEX >>"float * restrict const pOut = pDataOut;" #else >>"float * restrict pOut = pDataOut;" #endif ; auto& fn_vec_init = fn["vec_init"] //>>"NO_SET_VLEN(vl_x_init);" >>"const __vr vzeros = _vel_vbrds_vsl(0.0f, vl_x_init );" >>"const __vr vrseq = _vel_vseq_vl(vl_x_init);" >>"int64_t vl = vl_x_init;" //>>"NO_SET_VLEN(vl);" ; vrj_init(fn_vec_init); CBLOCK_SCOPE(loop_n,"for(int64_t n=0; n<batch; ++n)",pr,fn); CBLOCK_SCOPE(loop_g,"for(int64_t g=0; g<group; ++g)",pr,loop_n); // OK sub-tree loop_g >>"const int64_t outGroupOffset = g * outChannelGroup * outHW;" >>"const int64_t inGroupOffset = g * inChannelGroup * inHW;" >>"const int64_t kernGroupOffset = g * outChannelGroup * inChannelGroup * kernHW;" >>"const float *pIn_0 = pIn + inGroupOffset + (n * inChannel + 0) * inHW;" ; //loop_g>>"#pragma clang unroll(8)"; still 4.50 ms for ve_cmpconv default CBLOCK_SCOPE(loop_k,"for(int64_t k=0 ; k<outChannelGroup; ++k)",pr,loop_g); // Debug: loop_k #if OUTINDEX >>"int64_t outIndex = outGroupOffset + (n * outChannel + k) * outHeight*outWidth;" #else //>>"int64_t outIndex = outGroupOffset + (n * outChannel + k) * outHeight*outWidth;" //>>"CHK( pOut == (float *)pDataOut + outIndex );" >>"CHK( pOut == (float *)pDataOut + (outGroupOffset + (n*outChannel+k) * outHW) );" //>>CSTR(printf("k%u pOut=%p pDataOut+outIndex=%p delta %lld\n",(unsigned)k,(void*)pOut,(void*)((float*)pDataOut+outIndex), (long long)(pOut - ((float*)pDataOut+outIndex)));) //>>"pOut = (float * restrict)pDataOut + outIndex; // fix??" #endif >>"const float * restrict pKern_gk = pKernel + kernGroupOffset" >>" + (k * inChannelGroup + 0/*c*/) * kernHW;" >>"//int64_t kIndex_0 = kernGroupOffset + (k * inChannelGroup + 0) * kernHW;" ; CBLOCK_SCOPE(loop_y,"for(int64_t y=0 ; y<outHeight; ++y)",pr,loop_k); loop_y >>"const int64_t i = y * strideHeight - padHeight;" >>"" >>"int64_t kh_end=0;" >>"const int64_t kh_tmp = dilationHeight-i-1;" >>"const int64_t kh_beg= (i>=0? 
0: kh_tmp / dilationHeight);" >>"if (i < inHeight){" >>" kh_end = (inHeight + kh_tmp) / dilationHeight;" >>" if (kh_end >= kernHeight) kh_end = kernHeight;" >>"}" ; CBLOCK_SCOPE(loop_x0,"for(int64_t x0=0 ; x0<outWidth; x0+=vl_x_init)",pr,loop_y); loop_x0[".."] >>"int64_t vl = vl_x_init;" >>"NO_SET_VLEN(vl);"; loop_x0 >>"vl = (outWidth - x0 < vl_x_init ? outWidth - x0: vl_x_init);" >>"NO_SET_VLEN(vl);" >>"__vr vrsum = vzeros;"; vrj_induce(loop_x0); loop_x0["last"] #if OUTINDEX >>"_vel_vstu_vssl(vrsum, 4, pOut+outIndex, vl);" >>"outIndex += vl;"; #else >>"_vel_vstu_vssl(vrsum, 4, pOut, vl) ;" >>"pOut += vl; // visible speedup cf. outIndex+=vl" #endif ; CBLOCK_SCOPE(loop_r,"for (int64_t r = kh_beg; r < kh_end; ++r)",pr,loop_x0); CBLOCK_SCOPE(loop_s,"for (int64_t s = 0; s < kernWidth; ++s)",pr,loop_r); #if VRW_INDUCE loop_s[".."]>>"__vr vrw = _vel_vor_vsvl(0,vrj,vl);"; loop_s["last"]>>"vrw = _vel_vaddsl_vsvl(dilationWidth, vrw, vl);"; #else loop_s>>"__vr const vrw = _vel_vaddsl_vsvl(s*dilationWidth, vrj, vl);"; #endif loop_s>>"__vm256 vm23 = " VEL_VFMK_mvs_0_TO(vrw,inWidth,vl) ";"; #if 1 // no strided read for kernel values CBLOCK_SCOPE(loop_c,"for (int64_t c = 0; c < inChannelGroup; ++c)",pr,loop_s); loop_c >>"const float *pIn = pIn_0 + c*inHW + (i+r*dilationHeight)*inWidth" >>" + x0*strideWidth-padWidth + s*dilationWidth;" >>"const float *pKerValue = pKern_gk + c*kernHW + r*kernWidth +s;" //>>"const float *pKerValue = pKernel + kernGroupOffset + ((k * inChannelGroup + c) * kernHeight + r) * kernWidth + s;" >>"__vr vrin = _vel_vldu_vssl(4*strideWidth,pIn, vl);" #if 0 // orig >>"vrin = _vel_vmrg_vvvml(_ve_vbrdu_vs_f32(0.0f, vl), vrin, vm23);" >>"vrsum = _vel_vfmads_vvsvl(vrsum, *pKerValue, vrin, vl);" #else >>"vrsum = _vel_vfmads_vvsvmvl(vrsum, *pKerValue, vrin, vm23, vrsum, vl);" #endif ; #else int64_t c = 0; #if 0 // here is the optional new code... THIS IS EXTREMELY SLOW! int const cBy = (inChannelGroup > 256? 256: inChannelGroup); loop_s>>"int64_t c = 0;"; fn["const"] CONST1(cBy); CBLOCK_SCOPE(loop_cB,"for ( ; c < inChannelGroup/cBy*cBy; c+=cBy)",pr,loop_s); loop_cB >>"const float *pKerValue = pKern_gk + c*kernHW + r*kernWidth +s;" >>"NO_SET_VLEN(cBy);" >>"__vr vKern_cBy =_vel_vldu_vssl(4*kernHW,pKerValue, vl); // vKern_cBy[0..cBy) are the kerValues" >>"NO_SET_VLEN(vl);" ; CBLOCK_SCOPE(loop_cc,"for (int64_t cc=0 ; cc < cBy; ++cc)",pr,loop_cB); loop_cc >>"const float *pIn = pIn_0 + (c+cc)*inHW + (i+r*dilationHeight)*inWidth" >>" + x0*strideWidth-padWidth + s*dilationWidth;" >>"__vr vrin = _vel_vldu_vssl(4*strideWidth,pIn, vl);" >>"float kerValue = _vel_lvs_svs_f32l( vKern_cBy, cc , vl);" >>"vrsum = _vel_vfmads_vvsvmvl(vrsum, kerValue, vrin, vm23, vrsum, vl);" ; c = inChannelGroup/cBy*cBy; // where do we end up? #elif 0 // here is the optional new code... faster, but still 10x slower for large ic int const cBy = (inChannelGroup > 256? 
256: inChannelGroup); loop_s>>"int64_t c = 0;"; fn["const"] CONST1(cBy); fn["const"]>>"float * const kerMem = (void*)alloca(4*cBy);"; CBLOCK_SCOPE(loop_cB,"for ( ; c < inChannelGroup/cBy*cBy; c+=cBy)",pr,loop_s); loop_cB >>"const float *pKerValue = pKern_gk + c*kernHW + r*kernWidth +s;" >>"NO_SET_VLEN(cBy);" >>"__vr vKern_cBy =_vel_vldu_vssl(4*kernHW,pKerValue, vl); // vKern_cBy[0..cBy) are the kerValues" >>"_vel_vstu_vssl(vKern_cBy, 4, kerMem, vl);" >>"NO_SET_VLEN(vl);" ; CBLOCK_SCOPE(loop_cc,"for (int64_t cc=0 ; cc < cBy; ++cc)",pr,loop_cB); loop_cc >>"const float *pIn = pIn_0 + (c+cc)*inHW + (i+r*dilationHeight)*inWidth" >>" + x0*strideWidth-padWidth + s*dilationWidth;" >>"__vr vrin = _vel_vldu_vssl(4*strideWidth,pIn, vl);" >>"vrsum = _vel_vfmads_vvsvmvl(vrsum, kerMem[cc], vrin, vm23, vrsum, vl);" ; c = inChannelGroup/cBy*cBy; // where do we end up? // conclusion: blocking only to pre-read kernel values is not good. #elif 1 // now try simple blocking by 2 and switching to packed ops // // this code block should be enabled -- but can be disabled for debug // J cjitConvFwd1 | 1x 18.490 ms ~34185103.0654 50.88G conv2 // cjitConvFwd1_mb1_ic3ih256oc96oh258kh5_ph3 // int const cBy = 2; loop_s.def("cBy",cBy); loop_s>>"int64_t c = 0;"; fn["const"] CONST1(cBy); if( inChannelGroup >= cBy ){ CBLOCK_SCOPE(loop_cB,"for ( ; c < inChannelGroup/cBy*cBy; c+=cBy)",pr,loop_s); loop_cB >>"const float *pIn = pIn_0 + c*inHW + (i+r*dilationHeight)*inWidth" >>" + x0*strideWidth-padWidth + s*dilationWidth;" >>"__vr vrin = _vel_vldu_vssl(4*strideWidth,pIn, vl);" >>"__vr vrin2 = _vel_vldu_vssl(4*strideWidth,pIn +inHW , vl);" >>"const float *pKerValue = pKern_gk + c*kernHW + r*kernWidth +s;" >>"const uint64_t kerP = _vel_pack_f32p(pKerValue, pKerValue+kernHW);" >>"/*P*/ __vr vrinP = _vel_vshf_vvvsl(vrin, vrin2, VE_VSHUFFLE_YUZU, vl);" >>"vrPsum = _vel_pvfmad_vvsvMvl(vrPsum, kerP, vrinP, vmP, vrPsum, vl);" ; loop_s>>VEL_DECL_VM512(vmP, vm23,vm23, vl); // declare packed mask loop_x0>>"__vr vrPsum = vzeros;"; // introduce new summer loop_x0["induce+write"] // and how to fold new summer into vrsum >>"vrsum = _vel_vfadds_vvvl(vrsum,vrPsum, vl);" >>"__vr vrswap = _vel_vshf_vvvsl(vrPsum,vzeros,VE_VSHUFFLE_YLZL, vl);" >>"vrsum = _vel_vfadds_vvvl(vrsum,vrswap, vl);" ; c = inChannelGroup/cBy*cBy; // where do we end up? } #else //loop_s>>"int64_t c = 0;"; // <--- which loop_c ? #endif if( c < inChannelGroup ){ //CBLOCK_SCOPE(loop_c,"for ( ; c < inChannelGroup; ++c)",pr,loop_s); CBLOCK_SCOPE(loop_c,"for (int64_t c=0 ; c < inChannelGroup; ++c)",pr,loop_s); loop_c >>"const float *pIn = pIn_0 + c*inHW + (i+r*dilationHeight)*inWidth" >>" + x0*strideWidth-padWidth + s*dilationWidth;" >>"const float *pKerValue = pKern_gk + c*kernHW + r*kernWidth +s;" >>"__vr vrin = _vel_vldu_vssl(4*strideWidth,pIn, vl);" >>"vrsum = _vel_vfmads_vvsvmvl(vrsum, *pKerValue, vrin, vm23, vrsum, vl);" ; } #endif fn["exit"]>>"return VEDNN_SUCCESS;" ; #if 0 // // To do iteration, we NEED vednnx iterator API. // If we call existing functions, dlopen REQUIRES // - shared libvednnx...so // - or whole-archive libvednnx // to resolve symbols // // Currently, shared library is foobar, so we must whole-archive vednnx // NEW: it works with ncc 2+ (glibc variant, with C files) // // DO THIS LATER XXX -- single-use approach is fine for now. 
//
    std::string fn_ok_declare = "\n\nvednnError_t "+df.basename+"_ok(\n "
        +multiReplace(",",",\n ", CSTR(VEDNN_PARAMS_CONV_FORWARD))
        +"\n)";
    df.syms.push_back(SymbolDecl(df.basename+"_ok",
                "vednn ConvolutionForward ok (param check) "+ paramString(p),
                fn_ok_declare));
    auto& fn_ok = mk_func(pr,"fn",fn_ok_declare).after(fns)["body"];
    fn_ok>>"return vednnConvolutionForward_direct_default_ok(\n "
        CSTR(VEDNN_PARAMS_CONV_FORWARD_LIST) " );";
#endif
    pr["end-of-file"]>>"// vim: ts=4 sw=4 et cindent cino=^=l0,\\:.5s,=-.5s,N-s,g.5s,b1 cinkeys=0{,0},0),\\:,0#,!^F,o,O,e,0=break";

    pr.v = 0; // set Cunit (root) back to non-verbose
    if(verbose){ // dump to cout (debug)
        // Note: 'write' currently has side-effect of "emptying" the tree. Subject to change!
        //cout<<string(80,'-')<<endl;
        //pr.write(cout);
        //cout<<string(80,'-')<<endl;
        //pr.dump(cout);
        //cout<<endl;
        if(verbose>=1) cout<<string(80,'-')<<pr.str() <<string(80,'-')<<endl;
        if(verbose>=2) cout<<string(80,'-')<<pr.tree()<<string(80,'-')<<endl;
    }
    df.code = pr.str();
    return df;
}
// vim: ts=4 sw=4 et cindent cino=^=l0,\:.5s,=-.5s,N-s,g.5s,b1 cinkeys=0{,0},0),\:,0#,!^F,o,O,e,0=break
A machine-learning-based tool for last closed-flux surface reconstruction on tokamaks

Nuclear fusion represents one of the best alternatives for a sustainable source of clean energy. Tokamaks confine fusion plasma with magnetic fields, and one of the main challenges in the control of the magnetic configuration is the prediction/reconstruction of the Last Closed-Flux Surface (LCFS). The evolution in time of the LCFS is determined by the interaction between the actuator coils and the internal tokamak plasma. This task requires real-time-capable tools able to deal with high-dimensional data as well as with high resolution in time, where the interaction between a wide range of actuator coil inputs and internal plasma state responses adds an additional layer of complexity. In this work, we present the application of a novel, state-of-the-art machine learning model to LCFS reconstruction in the Experimental Advanced Superconducting Tokamak (EAST) that learns automatically from EAST experimental data. This architecture allows not only offline simulation and testing of a particular control strategy, but can also be embedded in the real-time control system for online magnetic equilibrium reconstruction and prediction. In the real-time modeling test, our approach achieves very high accuracy, with over 99% average similarity in LCFS reconstruction over the entire discharge process.

Introduction

Thermonuclear fusion power is one of the ideal forms of clean and sustainable energy that has the potential to meet our future energy needs, while being inherently safe and potentially a limitless source of energy. A tokamak is a leading magnetic confinement fusion device for generating controlled thermonuclear fusion power. One core research topic in tokamak physics is the control of the magnetic field distribution, which is needed to keep the plasma confined. Magnetic control is not trivial, in particular for advanced configurations, since the resulting distribution of the magnetic fields is determined by the interaction of complex, sometimes unpredictable plasma state evolution with a wide range of actuator inputs. Therefore, tools capable of reconstructing efficiently and reliably the evolution of the magnetic fields are of paramount importance both for the design of experiments and for developing robust control strategies. The conventional approach to this time-varying, non-linear, high-dimensional task is to solve an inverse problem to pre-compute a set of actuator coil (poloidal field coils, etc.) currents and voltages. Then, the real-time estimate of the tokamak plasma equilibrium through a simulation code allows modulating the actuator coil voltages to achieve the desired target. Although these physical simulation codes are usually effective, they require substantial effort and expertise by physicists to adapt a model whenever the tokamak magnetic configuration is changed. To overcome these bottlenecks, the fusion community has recently started investigating machine learning (ML) and artificial intelligence (AI) capabilities to reduce the complexity of models and numerical codes. Full tokamak discharge modeling is a critical task also from a computational point of view. The typical workflow required for tokamak modeling, known as "Integrated Modeling", is computationally very expensive. For instance, a discharge process of a few seconds generally takes hours to days of computation for high-fidelity simulations.
Moreover, the integration of the many physics processes required to describe the evolution of the plasma state adds an even further layer of complexity. In this context, a common approach is to replace high-fidelity simulation codes with ML-based surrogate models. This makes it possible to accomplish the same task while significantly reducing computation time and preserving a reasonable level of accuracy. Modeling the entire tokamak discharge process by leveraging machine learning approaches is challenging both from a technical and a computational point of view. The duration of a plasma discharge in EAST can be of the order of thousands of seconds, with a resulting sequence length that exceeds 1×10^6 at a sampling rate of 1 kHz. There are different classes of machine learning models for sequence problems: RNNs, Transformers based on the attention mechanism, and several variants of each. For the traditional RNN algorithm, training and inference on long sequences are usually slow. The sequential nature of RNN models in general prevents a high level of parallelization in computation. From a machine learning perspective, the processing of long time sequences characterized by short- and long-term dependencies is still an outstanding challenge. In the plethora of deep learning models, transformers are a novel architecture that overcomes some of the aforementioned issues thanks to the multi-head attention mechanism. Nevertheless, the use of transformers for modeling long sequences also presents some limitations due to their computational complexity O(n^2·d), where n is the sequence length and d is the sequence dimension. In practice, when the sequence length is of the order of thousands of samples and we are dealing with high-dimensional data, training and inference times start to become unacceptable for most applications. Magnetic field reconstruction has two research paradigms: physics-driven and data-driven approaches. Physics-driven approaches to magnetic field reconstruction have been studied for decades, resulting in the development of various simulation codes, such as Equilibrium Fitting (EFIT), LIUQE, and RAPTOR. The adaptation of these codes to new target plasma configurations or to new machines requires a non-negligible effort. This aspect, together with computational efficiency, has recently brought the fusion community to leverage more and more data-driven methods to solve tasks at different levels of complexity. However, magnetic reconstruction lags behind other ML applications in fusion: to the best of our knowledge, only a few works have actually been deployed and successfully tested in a real environment. In this paper, two different variants of the 1D Shifted Windows Transformer model (1D-SWIN Transformer) are proposed for, respectively, real-time and offline magnetic reconstruction of the LCFS. For the 1D-SWIN Transformer, the model's computational complexity depends linearly on the sequence length n. Moreover, these models can take advantage of a high level of parallelization thanks to the attention mechanism and the non-sequential nature of the algorithm. The models presented in this work are trained only on experimental data and can be used for the estimation of the magnetic field evolution for the entire length of the tokamak discharge, including the ramp-up and ramp-down phases of the plasma current. As far as the real-time estimation of the magnetic field evolution is concerned, the model is not directly used to control the magnetic field.
It is able to predict the evolution of the magnetic field one step ahead in the future, allowing the design of more effective feedback control strategies. The real-time model can be integrated within the plasma control system (PCS) to assist robust magnetic control by predicting the magnetic field at the subsequent time step. The offline model remarkably reduces the execution time required to simulate the evolution of the magnetic field for the entire discharge. Moreover, when coupled to other ML-based surrogate models for the prediction of 0D quantities, it allows simulating the evolution of various quantities of interest, supporting experimental design as well as the optimization of target plasmas. Compared to the model described in earlier work, ours does not rely on a physics simulation code, whose computational complexity cannot be ignored. Additionally, given the regression task, the training of our model is in general more efficient than the training of a model based on reinforcement learning. Another non-negligible aspect, of increasing importance in fusion as in many other fields of science, is that transformers have become particularly successful when used in the context of transfer learning. The key concept is that the model has the capability to learn the underlying dynamics characterizing the evolution of the magnetic field in a tokamak, encoding this knowledge in a reduced latent-space representation that can be "easily" adapted to new devices. Such a perspective is extremely attractive and would allow significant optimization of the exploitation of fusion devices for more and more advanced studies. According to the main quantities required for magnetic field control, the data used to build the machine learning model are mainly magnetic signals and references for control, namely magnetic surface probe data, in-vessel currents, poloidal field coil data, flux loop data, plasma current, and shape references. For the real-time version of the model, the average similarity is over 99%, and the inference time is 0.7 ms (<1 ms, in accordance with the typical control system requirements). For the offline version of the model, the average similarity is over 93%, and the average inference time is ~0.22 s for a sequence length of 1×10^6; the similarity is lower than that of the real-time model because of different settings, as will be discussed in the following sections. Our contributions are summarized as follows: (i) We propose a generalized 1D shifted windows transformer architecture that can process long time series. (ii) One of the models can be integrated into tokamak control for estimating the magnetic field in real time, one step in advance. (iii) One of the models can also be combined with a 0D proposal estimation model to give a complete prediction of experimental proposal results. (iv) The validity of the proposed models is demonstrated on a large experimental data set of the EAST tokamak.

Results

We trained, validated, and tested real-time and offline versions of the proposed transformer-based model on a dataset from the 2016-2020 EAST campaigns, with shot numbers in the range #52804-88283; the input and output signals are described in section 4.2. The similarity metric used to test the model is sketched after this paragraph. Figure 1 shows our offline model's prediction of the Last Closed-Flux Surface (LCFS) for EAST shot #73678. The duration of this shot is longer than 70 s, with a sequence length of ~7×10^4, a typical long-sequence modeling problem.
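A plausible form of this similarity metric, written here as a sketch assuming a normalized mean-absolute-error definition over the M output signals and T time steps of a shot (the symbols and the normalization are assumptions, not necessarily the authors' published formula), is:

$$ \mathrm{similarity} = 1 - \frac{\sum_{m=1}^{M} \sum_{t=1}^{T} \left| y^{\mathrm{pred}}_{m,t} - y^{\mathrm{true}}_{m,t} \right|}{\sum_{m=1}^{M} \sum_{t=1}^{T} \left( \left| y^{\mathrm{pred}}_{m,t} \right| + \left| y^{\mathrm{true}}_{m,t} \right| \right)} $$

Any metric of this family lies in [0, 1] and reaches 1 only when the predicted and measured signals coincide, which is consistent with the percentage similarities reported below.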
The LCFS shown in the figure is generated through the equilibrium reconstruction code EFIT, by feeding the magnetic quantities predicted by the model into EFIT. Equilibrium reconstruction is a broad topic in tokamak research, extensively discussed in various papers and in the main plasma physics books, and therefore it is not addressed in this paper. Figure 1 shows that the model has reconstructed the LCFS with high accuracy not only during the flat-top phase of the plasma current, but also in the ramp-up and ramp-down phases, which are non-stationary. The model is able to reproduce the magnetic configuration during the various discharge phases, from the tokamak start-up "cycle", through the formation of the "single null" shape, to the characteristic shapes of the shut-down "cycle".

Offline model results

The performance of the model has been evaluated with the same similarity indicator discussed above. The average similarity on the test set for the offline version of the model, as shown in figure 2, is 93.2%. Most of the shots are concentrated around 95%, with the bulk of the distribution above 90%. The test set for this work consists of experiments in the shot range #82651-88283, for a total of 1677 shots, some of which have a very long duration (see details in section 4). Note that the similarity is computed on raw signal data instead of the reconstructed LCFS. As far as experiments with similarity less than 0.85 are concerned, there are 98 shots: 89 are disruptions, whereas 9 are shots with a regular termination. A disruption is an unexpected termination of the discharge in which the plasma abruptly loses its thermal and magnetic confinement, involving huge electromagnetic forces and thermal loads that can potentially damage the machine. Apart from experiments dedicated to the study of disruption physics and to the assessment of engineering limits during these violent transients, the design of the discharge itself, together with robust real-time control strategies, aims to avoid disruptions. Nevertheless, when operating close to stability limits, various sequences of events can potentially lead to disruption, strongly affecting the magnetic equilibrium and making it unavoidably deviate from offline modeling. The operational space characterizing disruptions is extremely complex and wide, making its coverage within the input domain unfeasible. The 9 regular terminations with relatively high error are not well estimated, probably because of inherent limitations in the model or inaccuracies in the measurements, but they correspond to only 0.5% of the test set.

Real-time model results

The real-time model differs from the offline model both in terms of input quantities and inference time requirements (discussed in detail in section 4.1). Figure 3 shows the reconstruction results of the real-time model for shot #73678. In the real-time setting, the actual measurement of the magnetic field probes at the previous step is fed as input to simulate the real tokamak control feedback process. The similarity of the real-time model on the test set is shown in figure 4, for the same test set as the offline model. Although there is almost no difference between the modeling results for shot #73678 in figure 1 and figure 3, comparing figure 2 and figure 4 shows that the real-time model performs slightly better than the offline model.
A possible reason is that the plasma magnetic field is not a rapidly time-varying process, and the system output at the current time step is a good "guide" for forecasting the evolution of the system at the subsequent time step. The offline model, however, has no knowledge of the actual tokamak output, so even if bigger and more computationally demanding models are used for the offline task, the results are a bit less accurate compared to the real-time model.

Discussion

In the current work, we propose a 1D shifted windows transformer model that can work with long sequences (up to a sequence length of 1×10^6 for LCFS reconstruction in this work), which reduces the computational complexity of the original model from a quadratic to a linear dependence on the sequence length. The proposed model can form a general sequence-processing backbone network for both real-time and offline sequence modeling. Thanks to the reduced computational complexity, the model can be efficiently used for very long sequences, exceeding a sequence length of 1×10^6, as we demonstrate in this study. To the best of our knowledge, we have achieved the first data-driven modeling of the LCFS for the whole tokamak discharge, including the ramp-up and ramp-down phases of the plasma current. Being dynamic phases, ramp-up and ramp-down are in general more difficult to model, and as such they are often not taken into account in data-driven applications. The inference time for the real-time task (one-step-ahead forecasting) is ~0.7 ms with an average similarity of >99%, while the average inference time for the offline modeling (entire discharge process) is 0.22 s with an average similarity of >93%.

[Table 1. Computational complexity and number of sequential operations by model type (CNN, RNN, Transformer, 1D SWIN transformer); the 1D SWIN transformer entry lists O(n/w) sequential operations. Here k is the kernel size of the CNN, d is the sequence dimension, n is the sequence length, and w is the window size of the 1D SWIN transformer.]

From the machine learning point of view, to the best of our knowledge, this work is also the first to propose an attention-based mechanism for successfully modeling long time sequences. From the point of view of tokamak physics research, we have achieved accurate and fast tokamak magnetic field modeling, which can be used for critical applications such as real-time control or offline validation of a tokamak's experimental proposals. If integrated with other existing data-driven discharge modeling frameworks, the proposed approach can represent an extremely valuable tool for advancing the development of robust and high-performance tokamak scenarios. A first important milestone for the future will be the actual integration of the real-time model within the plasma control system, which is of paramount importance to understand how reliable these systems are when operating routinely in a real environment. Another exciting future perspective triggered by the achievements documented in this work is the validation of the full modeling of the plasma discharge, integrating magnetic reconstruction with the prediction of key 0-D physics quantities commonly describing the outcome of a plasma discharge. Finally, extending and testing the 1D shifted windows transformer in other general areas of machine learning, such as NLP, is also an exciting direction for future research.

Machine learning model

The general architecture of our machine learning models is shown in figure 5. Our architecture uses a customized 1D shifting-window attention mechanism inspired by the Swin transformer to model long-term dependencies and interactions between inputs and outputs.
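To make the shifted-window mechanism concrete, here is a minimal sketch of 1D windowed self-attention, assuming PyTorch; the class and argument names (Window1DAttention, window_size, shift) are illustrative and not the authors' actual implementation, which also includes embeddings, residual connections, and normalization.

```python
import torch
import torch.nn as nn

class Window1DAttention(nn.Module):
    """Self-attention restricted to non-overlapping windows along time.

    With window size w, each of the n/w windows attends only within its
    own w steps, so the cost is O(n*w*d) rather than the O(n^2*d) of
    full attention over a length-n sequence of dimension d.
    """
    def __init__(self, dim, window_size, num_heads=4):
        super().__init__()
        self.w = window_size
        self.attn = nn.MultiheadAttention(dim, num_heads, batch_first=True)

    def forward(self, x, shift=False):
        # x: (batch, seq_len, dim); seq_len assumed divisible by window_size.
        b, n, d = x.shape
        if shift:
            # Shifted windows let information cross window borders when
            # plain and shifted blocks are alternated, Swin-style.
            x = torch.roll(x, shifts=-self.w // 2, dims=1)
        xw = x.reshape(b * (n // self.w), self.w, d)  # fold windows into the batch
        out, _ = self.attn(xw, xw, xw)                # attention within each window
        out = out.reshape(b, n, d)
        if shift:
            out = torch.roll(out, shifts=self.w // 2, dims=1)
        return out

# Usage: alternate a plain block and a shifted block.
block = Window1DAttention(dim=64, window_size=8)
seq = torch.randn(2, 1024, 64)       # (batch, sequence length, channels)
y = block(block(seq), shift=True)    # shape preserved: (2, 1024, 64)
```

As discussed below, setting window_size = 1 removes time-axis mixing entirely, matching the real-time setting in which attention is computed only along the channel axis.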
We stack self-attention blocks to build the machine learning model. In the framework of deep learning, there are four main candidate architectures for modeling such long time sequences: convolutional neural networks (CNN), recurrent neural networks (RNN) such as long short-term memory (LSTM) and gated recurrent units (GRU), the Transformer, and our customized 1D SWIN transformer. In addition, some critical quantitative criteria should be taken into account for modeling tokamak magnetic probe data: computational complexity, number of sequential operations, and maximum path length. From table 1, 1D shifting-window attention has roughly the same number of sequential operations and the same computational complexity as a CNN. Generally, the attention mechanism can achieve superior performance with respect to CNNs in numerous time sequence tasks, such as natural language processing.

[Figure 5. Our machine learning model architecture. In the figure, "L" is the sequence length, "E" is the embedded dimension, "C" is the number of input sequence channels, and "O" is the number of output sequence channels.]

Generally speaking, there should be some differences between the real-time and offline model-building strategies. The real-time model requires that single-step inference be fast enough: the one-step inference time of the model should be less than the response time required by the control system, and the actual system output of the previous step can be fed back as input to the model. According to the requirements of the EAST magnetic control system, the model inference time should be less than 1 ms. For a typical transformer model, single-step inference is expensive: if the preset control commands are modified, the whole sequence needs to be recalculated, which makes the inference time exceed the control system requirements. In our work, we set the window size to 1, which makes our model compute attention only along the channel axis, so single-step inference becomes much cheaper. This design results in a one-step inference time of ~0.7 ms, which satisfies the real-time constraints. For the offline model, the actual system output from the previous step should not be fed back as input unless the model is trained with teacher forcing.

[Figure 5, continued. b: The learner reads the measurements and targets from the HDF5 data store, then computes the loss between the predicted magnetic field and the target magnetic field; this loss is the criterion used to train the learner. c: Online usage for tokamak control. Our model predicts the tokamak Last Closed-Flux Surface (LCFS); the controller reads the estimate to generate the next control action sent to the magnetic coils.]

The time requirement of the offline mode can be relaxed, but it should generally be within one hour; otherwise, the advantage of the machine learning model over the integrated modeling approach is diminished. If we use teacher forcing, we have to recompute all the past sequences step by step, so the inference time for the entire sequence would be on the order of 1×10^5 s because of the computational complexity. This paper's offline model does not use teacher forcing, keeping its inference time much shorter than one hour.

Dataset

In this paper, a total of 16609 shots of the EAST tokamak (discharge range #56804-96915) were selected to construct the total dataset. The training set, validation set, and test set are divided in chronological order.
The training set has 14732 shots, the validation set has 200 shots, and the test set has 1677 shots. In the experimental range #56804-96915, there are only 30 long-discharge shots (discharge time >50 s); 10 of them are included in the training set, and the remaining 20 shots are included in the test set. The validation set is relatively small because the model does not update parameters during the validation phase, and a relatively small validation set speeds up model training. As shown in table 2, we selected the plasma current reference, the in-vessel current IC1, the poloidal field coil currents, the poloidal field coil references, and the shape references as the input signals; the output signals include all magnetic probe signals of the magnetic field. Since the in-vessel current IC1 cannot be obtained in advance at the experimental proposal stage, the input signals of the offline model did not include IC1, and, for efficiency reasons, the previous-step output signals were not fed to the offline model. All data were uniformly sampled at 1 kHz for the entire length of the discharge, and all time axes were aligned to the same time base. Data were saved to HDF5 files shot by shot; for fast and robust training, each discharge experiment was saved as a separate HDF5 file, for 209 gigabytes (GB) of original data.

Model training

Before the model is trained, each signal's mean, variance, and presence flag are calculated for each shot, and the results are stored in a MongoDB database. The data are then normalized shot by shot and finally fed into the machine learning model for training. The input set differs between the offline model and the real-time model. As analyzed in section 4.1, the real-time model input dimension is 130, which includes the system output at the previous step and the current IC1 signal; we can use teacher forcing for training, and IC1 can be obtained in a real-time experiment. For the offline model, the input dimension is 56, since IC1 and the system output at the previous step are not used. Both versions of the model were trained on CentOS 7 with 8 P100 GPU cards. During training we used a custom masked mean square error (MSE) loss function (MaskedMSELoss; see the sketch below):

$$ l(x, y) = L = \frac{1}{N} \sum_{i=1}^{N} l_i, \qquad l_i = f_i \sum_{j=0}^{\mathrm{len}} \left( x_{ij} - y_{ij} \right)^2, $$

where x is the batch of experimental sequence data, y is the batch of predicted sequence results, and x_ij, y_ij are the jth point values of the ith experimental and predicted sequences. f_i is the signal-existence flag of the ith experimental sequence: f_i equals 1 when the sequence exists and 0 otherwise, and is used to mask a signal that has no original data. The summation limit len is another mask, over the invalid length of the sequence; this term prevents training on the zero padding of the sequence. The use of existence masks and length masks prevents the model from being trained on sequences without actual target values and on meaningless zero-padded tails. This improves the accuracy and speed of the training process. We used the bucketing algorithm for training acceleration and the Tree-structured Parzen Estimator algorithm for the architectural hyperparameter search. We also tried various optimizers and regularizers, and finally obtained the optimal set of hyperparameters shown in table 3.

Data availability

The data that support the findings of this study belong to the EAST team and are available from the corresponding author upon reasonable request.
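A minimal sketch of the masked MSE idea described in the Model training section, assuming PyTorch; the function name and the exact tensor layout are assumptions, not the authors' released code.

```python
import torch

def masked_mse(pred, target, exists, lengths):
    """MSE averaged only over valid points.

    pred, target: (batch, seq_len, channels) predicted and measured signals.
    exists:       (batch, channels) 1.0 where the signal was recorded, else 0.0.
    lengths:      (batch,) number of valid (non-padded) time steps per shot.
    """
    b, n, c = pred.shape
    t = torch.arange(n, device=pred.device)
    time_mask = (t[None, :] < lengths[:, None]).float()   # (batch, seq_len)
    mask = time_mask[:, :, None] * exists[:, None, :]     # (batch, seq_len, channels)
    se = (pred - target) ** 2 * mask                      # zero out missing signals and padding
    return se.sum() / mask.sum().clamp(min=1.0)           # mean over valid points only
```

Masking both the missing signals and the zero-padded tails keeps the gradient free of contributions from points that carry no information, which is the stated purpose of the existence and length masks.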
Code availability

The model code is open-source and can be found on GitHub: https://github.com/chgwan/1DSwin. The other code, for model training, data acquisition, and figure generation, belongs to the EAST team and is available from the corresponding author upon reasonable request.
/**
 * Turn on the LED lights on the brick.
 *
 * @param color of the light
 * @param blinkMode of the light
 */
public void ledOn(BrickLedColor color, BlinkMode blinkMode) {
    switch ( color ) {
        case GREEN:
            handleBlinkMode(blinkMode, 0); // 0 = green
            break;
        case RED:
            handleBlinkMode(blinkMode, 1); // 1 = red
            break;
        case ORANGE:
            handleBlinkMode(blinkMode, 2); // 2 = orange
            break;
    }
}
Efficient cytoplasmic delivery of a fluorescent dye by pH-sensitive immunoliposomes.

We previously showed that liposomes composed of dioleoylphosphatidylethanolamine and palmitoyl-homocysteine (8:2) are highly fusion competent when exposed to an acidic environment of pH less than 6.5 (Connor, J., M. B. Yatvin, and L. Huang, 1984, Proc. Natl. Acad. Sci. USA. 81:1715-1718). Palmitoyl anti-H2Kk was incorporated into these pH-sensitive liposomes by a modified reverse-phase evaporation method. Mouse L929 cells (k haplotype) treated with immunoliposomes composed of dioleoylphosphatidylethanolamine/palmitoyl-homocysteine (8:2) with an entrapped fluorescent dye, calcein, showed diffused fluorescence throughout the cytoplasm. Measurements with a microscope-associated photometer gave an approximate value of 50 µM for the cytoplasmic calcein concentration. This concentration represents an efficient delivery of the aqueous content of the immunoliposomes. Cells treated with immunoliposomes composed of dioleoylphosphatidylcholine (pH-insensitive liposomes) showed only punctate fluorescence. The cytoplasmic delivery of calcein by the pH-sensitive immunoliposomes could be inhibited by chloroquine or by incubation at 20°C. These results suggest that the efficient cytoplasmic delivery involves the endocytic pathway, particularly the acidic organelles such as the endosomes and/or lysosomes. One possibility is that the immunoliposomes fuse with the endosome membranes from within the endosomes, thus releasing their contents into the cytoplasm. This nontoxic method should be widely applicable to the intracellular delivery of biomolecules into living cells.

The development of an effective, nontoxic method for the delivery of macromolecules to the cytoplasm of living cells is important for studies of the control mechanisms of cellular processes. The versatility of liposomes, in their composition, size, and ability to encapsulate many different macromolecules, makes them attractive carriers for cellular delivery. Work in this lab, involving a model system for studying membrane fusion, led to the development of a liposome composition of dioleoylphosphatidylethanolamine/palmitoyl homocysteine (DOPE/PHC) (8:2) that undergoes fusion at acidic pH's. Concurrent with this model study, our laboratory established a reproducible cell targeting system, which involves the incorporation of a fatty-acid-derivatized monoclonal antibody into liposomes. This allows specific binding of the immunoliposomes to target cells. Upon binding, the immunoliposome is endocytosed and eventually delivered to the lysosomes. The processing pathway includes an intermediate step at which the immunoliposomes encounter an acidic environment in the endosomes, with a pH that varies from 4 to 6. In the work presented in this paper we have combined the established monoclonal antibody targeting system with the pH-sensitive liposomes to generate pH-sensitive immunoliposomes that become fusion competent at acidic pH's. Thus, if the liposomes were endocytosed and proceeded into the endosome vesicles, they would encounter an acidic environment and become fusion competent. If a fusion reaction occurred between the liposome and the endosome membranes, the contents of the liposome would be released into the cell cytoplasm. The experiments in this paper were designed to test this hypothesis.

Abbreviations used in this paper: DOPC, dioleoylphosphatidylcholine; DOPE, dioleoylphosphatidylethanolamine; PHC, palmitoyl homocysteine.
MATERIALS AND METHODS

Materials: PHC was synthesized and purified as described. DOPE

Antibody Preparation: Anti-H2Kk antibody from the mouse hybridoma cell line 11-4.1 was purified, labeled with 125I, and derivatized with the N-hydroxysuccinimide ester of palmitic acid, as described by Huang et al.

Liposome Preparation: Reverse-phase evaporation vesicles were prepared as follows. Solvent-free lipid films containing DOPE/PHC (8:2) or DOPC were suspended in phosphate-buffered saline (PBS) containing 60 mM calcein. A trace amount of hexadecyl cholestanyl ether was included in the lipid mixture to facilitate the monitoring of the lipid. A critical ratio of 65 µl buffer per 5 µmol lipid must be maintained in order to form a stable emulsion with the organic phase. The lipid suspension was sonicated at room temperature for 10 min with a bath sonicator (Laboratory Supplies Co. Inc., Hicksville, NY) and the pH was adjusted to 7.6. The sonicated liposomes were transferred to a 25-ml round-bottom flask, and 4 ml of a 3:1 (vol/vol) mixture of chloroform/ethyl ether was added. The mixture was briefly sonicated (~30 s) to form a stable emulsion. The emulsion mixture was rotary-evaporated at 30°C with a water aspirator using a Buchi Rotavapor-R (Buchi Laboratoriums Technik AG, Switzerland) until all of the organic solvent was removed. The resulting reverse-phase evaporation vesicles were incubated in a fume hood for 1 h to remove any residual organic solvent. PBS was then added to the lipid suspension to bring the final concentration to 10 mM.

Antibody Incorporation: Immunoliposomes were prepared by a modification of the methods developed by Shen et al. Palmitoyl antibody, in PBS containing 0.15% deoxycholate at an antibody concentration of 1 mg/ml, was mixed with reverse-phase evaporation vesicles at a lipid-to-antibody weight ratio of 10. Ethyl ether, 5% (vol/vol), was added to the vortexing antibody-liposome mixture; the final solution was dialyzed against three changes of 4 L PBS to remove ethyl ether, deoxycholate, and untrapped calcein. The resulting immunoliposomes were run on a 5-ml 5-20% linear sucrose gradient spun at 46,000 rpm for 5 h to evaluate the efficiency of incorporation. Sizing of immunoliposomes was done by measurements obtained from electron micrographs. The immunoliposomes were negatively stained with 0.5% aqueous uranyl acetate and viewed in a Hitachi 600 electron microscope (Hitachi Ltd., Tokyo) at 75 kV. Size histograms were produced from micrographs taken from various preparations of liposomes.

Cell Incubations: Mouse L929 cells (k haplotype) and A31 cells (d haplotype) were grown on glass coverslips that had been pretreated with a 1% solution of gelatin. The medium used for the incubation experiments consisted of PBS containing 1 mM Ca2+, 1 mM Mg2+, and 16 mM D-glucose. Immunoliposomes (DOPE/PHC or DOPC) at a lipid concentration of 50 µg/ml were incubated with both types of cell at 4°C for 1.5 h. The cells were washed three times with medium and incubated in fresh medium for 30 min more at 4°C to reduce nonspecific binding. After the second 4°C incubation the cells were again washed three times with buffer and then observed under a Leitz Orthoplan epiluminescence microscope equipped with an Orthomat-W camera. Both phase-contrast and fluorescent pictures were taken; all of the fluorescent pictures were taken with the same exposure time (1.5 min).
In parallel experiments, cells that had been treated with liposomes at 4°C as above were then incubated at either 20 or 37°C for 2 h, washed, and then photographed under the fluorescence microscope. In another series of experiments the cells were incubated with 50 µM chloroquine before the 4°C binding step, and after all washing steps the fresh incubation buffer was also supplemented with 50 µM chloroquine. After incubation with immunoliposomes at 4°C these cells were then incubated at 37°C for 2 h in the presence of chloroquine, washed, and then photographed under the fluorescence microscope.

Quantitation of Cytoplasmic Fluorescence Intensity: Calcein in concentrations from 1 µM to 1 mM was entrapped in large (>20 µm) multilamellar liposomes composed of DOPC. A calibration curve was established using a Leitz Wetzlar MPV-2 microscope photometer, by measuring the fluorescence intensity from an area of 4 µm² of the flattened liposomes that contained varying concentrations of calcein. At least two different areas of 25 liposomes were measured for each calcein concentration. The results showed a linear calibration curve over the concentration range of 1 µM to 1 mM (data not shown). The fluorescence intensity from a spot of the same surface area in the cytoplasm of the cells incubated at 37°C with pH-sensitive immunoliposomes was measured for two different areas of 15 cells. The cytoplasmic dye concentration was calculated from the calibration curve under the assumption that the thickness of the cytoplasm is about the same as that of the flattened liposomes. In reality the cytoplasm was probably thinner than the liposomes; therefore, the estimated dye concentration in the cytoplasm represents a lower limit of the actual value. Fluorescence intensity determination of untreated cells showed no measurable autofluorescence. The cytoplasmic dye concentrations of the control experiments were not measured, since the calcein fluorescence appeared in granules and was not evenly distributed in the cytoplasm. The fluorescence intensity of the punctate granules did not fall along the established calibration curve.

RESULTS

Immunoliposomes

The average size of the immunoliposomes composed of DOPE/PHC (8:2), as determined by negative-stain electron microscopy, was 1,400 ± 400 Å. The results of the sucrose gradient centrifugation showed a co-migration of the liposomes and the monoclonal antibody, which appeared at the top of the gradient. Radioactive marker counting indicated an 80% incorporation of the derivatized antibody into the liposomes.

4°C Binding

Previous work of this laboratory demonstrated that there was specific targeting of immunoliposomes with the anti-H2Kk antibody to k-haplotype target cells, whereas the d-haplotype cells showed no specific binding (Fig. 1).

37°C Incubation

In these experiments both DOPE/PHC and DOPC immunoliposomes were bound to target cells at 4°C and then incubated at 37°C to allow endocytosis of the bound immunoliposomes. Figure 3A shows photographs of cells treated with the pH-sensitive DOPE/PHC liposomes. The diffused fluorescence observed in these cells clearly indicates a cytoplasmic delivery of calcein. Cells also showed a dark nucleus shadow, which indicates that calcein did not penetrate the nuclear membrane. The calcein released into the cytoplasm would still maintain its charged nature and should therefore be unable to permeate the nuclear membrane. The calcein may also associate with cytoplasmic macromolecules, which could block its entrance into the nucleus.
Using the microscope photometer, we measured the relative fluorescent emission of the cytoplasmic calcein for a number of target cells. Based on the fluorescence intensity calibration curve, the concentration of the calcein in the cellular cytoplasm was 50 ± 20 µM (n = 30). The average volume of the cell cytoplasm was ~10^3 µm^3, and that of the immunoliposomes was ~1 × 10^-3 µm^3. The calcein concentration in the immunoliposomes was 60 mM, which is about 1,000-fold higher than the concentration found in the cytoplasm. This result indicates that about one thousand liposomes had released their entrapped calcein into the cytoplasm of a cell: the delivered amount, (50 µM)(10^3 µm^3), divided by the content of a single liposome, (60 mM)(10^-3 µm^3), gives roughly 8 × 10^2, i.e., on the order of 10^3 liposomes per cell. Cells treated with the pH-insensitive DOPC liposomes did not show any release of dye (Fig. 3B). Only punctate fluorescence was observed with these cells, indicating that the calcein was retained in the endosome/lysosome system. Cells treated with DOPC immunoliposomes did display capping of fluorescence, which had been observed in earlier work involving targeting of these liposomes. Paranuclear fluorescence was also seen in some of the cells treated with DOPC immunoliposomes, indicating that delivery of the liposome and its contents to the lysosomes had occurred. Cells incubated with free calcein (0.3 mM) at 37°C showed some punctate but no diffused fluorescence, probably as a result of pinocytosis.

Incubation with Chloroquine

Chloroquine is a weak base that is readily taken up by cells; it partitions into acidic organelles and raises their pH. It has been demonstrated that chloroquine does not interfere with the receptor-mediated internalization of ligands. Because incubation of target cells with chloroquine blocks the acidification of the endosomes and lysosomes, upon endocytosis the immunoliposomes should encounter a less acidic pH. Immunoliposomes were bound at 4°C to chloroquine-treated cells, and the incubation temperature was then raised to 37°C to activate endocytosis of the liposomes. Fig. 4, A and B, shows cells that received such treatment. Similarly to the 20°C incubation, the fluorescence appeared punctate both on the periphery of the plasma membrane and inside the cells, which indicates that there was little or no release of the liposome-entrapped calcein. This is expected if the calcein delivery is dependent upon an acid-induced fusion of the immunoliposomes with the endosome membrane. The loss of cytoplasmic delivery with the neutralization of the endosome clearly supports this hypothesis. The cells treated with DOPC immunoliposomes also displayed internal punctate fluorescence similar to that seen with the 20°C incubation.

[FIGURE 4. Effect of chloroquine on the calcein delivery by immunoliposomes. Immunoliposomes were incubated with L929 cells that had been constantly exposed to 50 µM chloroquine. Immunoliposomes were incubated at 4°C for 1.5 h, washed, and then incubated at 37°C for 2.0 h as described in Materials and Methods. A, DOPE/PHC (8:2) immunoliposomes; B, DOPC immunoliposomes. Bar, 10 µm. × 1,000.]

DISCUSSION

We previously showed that liposomes composed of DOPE/PHC (8:2) become highly fusion active at acidic pH's. The pH for half-maximal fusion lies at ~6.4, whereas almost 100% fusion occurs at pH 4.8 or lower. Although the mechanism of liposome fusion is not known, it probably involves the formation of the hexagonal phase of DOPE when PHC is protonated at the low pH.
Since the pH range at which the liposomes become fusion competent falls in the same range as the endosome and lysosome pH, the liposomes may become fusion active if they are delivered to these organelles. We previously showed that palmitoyl antibody can be incorporated into liposome membranes and that the immunoliposomes are rapidly endocytosed by the target cells by a process that resembles receptor-mediated endocytosis. If fusion of the liposome with the endosome membrane occurs, one would expect to see the release of the liposome content into the cytoplasm of cells. We have used a fluorescent dye, calcein, for this purpose. It is a water-soluble and self-quenching dye. It does not permeate the cell membrane, owing to its high charge content. Immunoliposomes were bound to target cells at 4°C, washed, and then incubated at 37°C so that normal endocytosis and cellular processing, including endosome acidification, would occur. Cells incubated with liposomes composed of DOPE/PHC, which become fusion competent at acidic pH's, display diffused fluorescence throughout the cytoplasm, indicating a release of the entrapped calcein (Fig. 3A). Since the calcein cannot penetrate through the endosome membrane, the dye must have been released directly into the cytoplasm, probably by a fusion reaction between the liposome and the endosome membranes. The inability of calcein to cross membranes can also be seen in the lack of fluorescence in the nucleus. The pH-insensitive DOPC liposomes showed little or no diffused fluorescence after the 37°C incubation. Some cells showed a high degree of paranuclear fluorescence, indicating that this type of liposome is delivered to the lysosomes. The cytoplasmic delivery of calcein by the pH-sensitive liposomes could be blocked by chloroquine (Fig. 4A). Since chloroquine effectively raises the pH of the endosomes/lysosomes, this result strongly indicates the importance of the acidic environment for calcein release. Incubation at 20°C also prevents the dye release by the pH-sensitive liposomes, but the liposomes appear to be intracellular (Fig. 2A). Studies with mutants of Semliki Forest virus have shown that at 20°C cells actively endocytose but that the fusion of endosomes with lysosomes is blocked; concurrent with this blocking effect is a large degree of heterogeneity of the endosome pH. The lack of dye release from the pH-sensitive liposomes may indicate that the liposomes are located in those endosomes whose pH's are not sufficiently acidic. Alternatively, the dye release may take place in the lysosomes, which would be blocked if the endosome-lysosome fusion did not occur at 20°C. It is not clear from the present study if the liposomes actually fused with endosome or lysosome membranes. We also do not know the fate of the liposomes after the dye release. In any case, the cells appeared to be morphologically normal and maintained a normal doubling time after the treatment with the pH-sensitive immunoliposomes. If the liposomes fuse with the endosome or lysosome membrane as we suspect, the situation is very similar to the infection pathway of enveloped viruses such as the Semliki Forest, influenza, and vesicular stomatitis viruses. The viral membrane fuses with the endosome membrane in response to the acidic pH. The difference here is that the driving force for fusion in liposomes is a weakly acidic lipid such as PHC, whereas the driving force for the viruses is the viral glycoproteins.
The efficiency of this pH-sensitive immunoliposome delivery system is apparent from the high number of liposomes (~10^3 liposomes/cell) that released their contents to the cytoplasm of the cell. This high level of delivery should prove very useful in future studies. For example, knowing that immunoliposomes encapsulating a monoclonal antibody at 10 mg/ml could deliver ~10^4 antibody molecules to each cell would obviously be very helpful in studying various cellular functions. There are a great many potential uses for this pH-sensitive immunoliposome system, depending only upon the ability to trap the desired molecule. In conclusion, we have designed a liposome system that efficiently delivers its contents to the cytoplasm of cells in a target-specific manner. Since water-soluble molecules can be easily encapsulated in liposomes, this system should be very useful for the cytoplasmic delivery of drugs, enzymes, antibodies, nucleic acids, and other biologically active molecules into living cells. An article that appeared after this paper was submitted for publication described cytoplasmic delivery of calcein and fluorescently labeled dextran by pH-sensitive liposomes free of antibody.
/** * Setup Wizard's version of ChooseLockPattern screen. It inherits the logic and basic structure * from ChooseLockPattern class, and should remain similar to that behaviorally. This class should * only overload base methods for minor theme and behavior differences specific to Setup Wizard. * Other changes should be done to ChooseLockPattern class instead and let this class inherit * those changes. */ public class SetupChooseLockPattern extends ChooseLockPattern { public static Intent modifyIntentForSetup(Context context, Intent chooseLockPatternIntent) { chooseLockPatternIntent.setClass(context, SetupChooseLockPattern.class); return chooseLockPatternIntent; } @Override protected boolean isValidFragment(String fragmentName) { return SetupChooseLockPatternFragment.class.getName().equals(fragmentName); } @Override /* package */ Class<? extends Fragment> getFragmentClass() { return SetupChooseLockPatternFragment.class; } public static class SetupChooseLockPatternFragment extends ChooseLockPatternFragment { @Override protected Intent getRedactionInterstitialIntent(Context context) { // Setup wizard's redaction interstitial is deferred to optional step. Enable that // optional step if the lock screen was set up. SetupRedactionInterstitial.setEnabled(context, true); return null; } } }
Refrigeration challenges in the next generation of computer, telecommunications and military electronic equipment The application of vapor compression refrigeration to high performance, next generation electronic equipment cooling is ripe with challenge. Some challenges are specific to vapor compression refrigeration while others are challenges faced by many active cooling methods. Conversely, vapor compression refrigeration addresses many of the thermal challenges of high performance electronics particularly well. Obvious challenges include the ease of integration of the technology, the reliability, availability, cost and efficiency of the technology and for spot cooling, the design, operating range and thermal stability of the cold plate. An attempt will be made in this presentation to give an overview of these challenges associated with the application of vapor compression refrigeration to spot cooling of electronics and characterize the current state-of-the-art.
//
//  RBTViewRecorderHeader.h
//  RobotViewRecord
//
//  Created by Caffrey on 2020/4/22.
//  Copyright © 2020 robot. All rights reserved.
//

#ifndef RBTViewRecorderHeader_h
#define RBTViewRecorderHeader_h

#if TARGET_OS_IPHONE

#ifndef RBT_COLOR_CLASS
#define RBT_COLOR_CLASS UIColor
#endif

#ifndef RBT_IMAGE_CLASS
#define RBT_IMAGE_CLASS UIImage
#endif

#ifndef RBT_VIEW_CLASS
#define RBT_VIEW_CLASS UIView
#endif

#elif TARGET_OS_MAC

#ifndef RBT_COLOR_CLASS
#define RBT_COLOR_CLASS NSColor
#endif

#ifndef RBT_IMAGE_CLASS
#define RBT_IMAGE_CLASS NSImage
#endif

#ifndef RBT_VIEW_CLASS
#define RBT_VIEW_CLASS NSView
#endif

#endif

/// Frame rate for view recording
typedef NS_ENUM(NSInteger, RBTViewRecordFrameRate) {
    /// 5 frames per second
    RBTViewRecordFrameRate5 = 5,
    /// 10 frames per second (on the simulator, anything above this value causes video
    /// frame writes to fail because the Mac runs it very slowly; real devices can go higher.)
    RBTViewRecordFrameRate10 = 10,
    /// 15 frames per second
    RBTViewRecordFrameRate15 = 15,
    /// 20 frames per second
    RBTViewRecordFrameRate20 = 20,
    /// 25 frames per second
    RBTViewRecordFrameRate25 = 25,
    /// 30 frames per second
    RBTViewRecordFrameRate30 = 30,
};

/// Error codes for starting a recording
typedef NS_ENUM(NSUInteger, RBTVRStartErrorCode) {
    /// Wrong path
    RBTVRStartErrorCodeWrongPath = 10000,
    /// Directory does not exist
    RBTVRStartErrorCodeInexistentDir = 10001,
    /// File already exists
    RBTVRStartErrorCodeExistentFile = 10002,
    /// Recording already in progress
    RBTVRStartErrorCodeStarted = 20000,
    /// Video width/height is zero
    RBTVRStartErrorCodeWrongVideoSize = 30000,
    /// Audio recording initialization error
    RBTVRStartErrorCodeAudioInitErr = 40000,
    /// No permission to record audio
    RBTVRStartErrorCodeNoAudioAuthorization = 40001,
};

#endif /* RBTViewRecorderHeader_h */
Pullman, Chicago

Beginnings

Historic Pullman was built in the 1880s by George Pullman as workers' housing for employees of his eponymous railroad car company, the Pullman Palace Car Company. He established behavioral standards that workers had to meet to live in the area and charged them rent. Pullman's architect, Solon Spencer Beman, was said to be extremely proud that he had met all the workers' needs within the neighborhood he designed. The distinctive rowhouses were comfortable by the standards of the day, and contained such amenities as indoor plumbing, gas, and sewers.

Pullman Strike

During the depression that followed the Panic of 1893, demand for Pullman cars slackened. The Pullman company laid off hundreds of workers and switched many more to pay-per-piece work. This work, while paying more per hour, reduced total worker income. Despite these cutbacks, the company did not reduce rents for workers who lived in the town of Pullman. Pullman, despite the depression, paid his shareholders their dividends, which upset workers whose wages he had just cut. Workers initiated the Pullman Strike in 1894; it lasted for two months, eventually leading to intervention by the US government and military. The Strike Commission, set up in 1894, ruled that the aesthetic features admired by visitors had little monetary value for employees.

Incorporation into Chicago

The town and other major portions of the South Side had been annexed by the city of Chicago in 1889. After George Pullman died in 1897, the Illinois Supreme Court required the company to sell the town because operating it was outside the company's charter. Within ten years, the houses were sold to their occupants. After the strike, Pullman gradually was absorbed as a regular Chicago neighborhood, defined by its distinguishing Victorian architecture, but the fortunes of the neighborhood continued to rise and fall with the Pullman Company for many years.

Deindustrialization

With industrial and railroad restructuring beginning in the 1950s, many jobs were lost in the city. The neighborhood gradually declined along with work opportunities and income, and people began to move to newer housing in the suburbs. In 1960 the original Town of Pullman, approximately between 103rd and 115th Streets, was threatened with total demolition for an industrial park. Forming the Pullman Civic Organization, the residents lobbied the city and saved their community. The neighborhood reached its peak of population in 1970.

Revival

By 1972 the Pullman Historic District had obtained national, state, and city landmark status to protect the original 900 rowhouses and public buildings built by George Pullman. (It was designated a National Historic Landmark District in 1969 and listed on the National Register of Historic Places; in 1970 it was designated a State landmark by the Illinois Historic Preservation Agency; and in 1972, South Pullman was declared a City of Chicago Landmark.) To protect the character of the historic districts, the city has established guidelines for new building and renovation, administered by the City of Chicago. These are explained in the Beman Committee's Homeowner's Guide (the committee is named after Pullman's original architect, Solon Spencer Beman). The district was designated the Pullman National Monument under President Obama in February 2015.

Media and entertainment

Pullman has been featured in several major motion pictures.
Road to Perdition (starring Tom Hanks and Paul Newman) was filmed in historic Pullman, with scenes featuring the factory as it "once was", populated with workers, as well as many other scenes of the neighborhood. The 1993 film The Fugitive had several key scenes in Pullman: Harrison Ford was filmed in a local bar, then running down an alley and over the tops of several Pullman rowhouses. In April 2007, Universal Studios filmed The Express: The Ernie Davis Story, which also featured several scenes in Pullman. The North Pole scenes in the animated film The Polar Express were based on Pullman architecture: Santa Claus emerges from a building based on the Pullman Company Administration Building, and other buildings are based on the architectural style of Pullman. Robert Zemeckis, who directed the movie, grew up in the Roseland neighborhood near Pullman. On November 12, 2006, historic Pullman was the topic of the HGTV television show National Open House, which featured a Pullman house at 112th Street and Langley.

Politics

Pullman is a stronghold of the Democratic Party. In the 2016 presidential election, Pullman cast 3,123 votes for Hillary Clinton and 100 votes for Donald Trump. Despite winning 94.92% of the vote, this was only Clinton's 25th largest share of the vote by percentage among the 76 community areas she won in heavily Democratic Chicago. In the 2012 presidential election, Pullman cast 3,521 votes for Barack Obama and 77 votes for Mitt Romney. Despite winning 97.43% of the vote, this was only Obama's 25th largest share of the vote by percentage among the 76 community areas he won in heavily Democratic Chicago. At the local level, Pullman is located in Chicago's 8th and 9th wards, represented by Democratic Alderwoman Michelle Harris and Democratic Alderman Anthony Beale, respectively.

Transportation

Pullman is served by two Metra Electric Line stations: Kensington/115th Street and Pullman/111th Street. Most Metra suburban express trains passing through the area stop at the 115th Street station, and only local trains stop at the 111th Street station.

Education

Pullman is located in City of Chicago School District #299 and City Colleges of Chicago District #508. Pullman is zoned to the following elementary schools: Schmid Elementary School, Wendell Smith Elementary School, Edgar Allan Poe Classical School, and George M. Pullman School. The majority of Pullman is zoned to Corliss High School, located in Pullman, while some of the northeastern area is zoned to Harlan Community Academy High School in nearby Chatham. The main campus of Olive-Harvey College, part of the City Colleges of Chicago system, is located in Pullman.
Clinical significance of CCNE1 copy number gain in acral melanoma patients

Supplemental Digital Content is available in the text. Copy number variations are frequently observed in cell cycle-related genes in acral melanoma. However, the clinical significance of copy number gain of CCNE1 in acral melanoma has not been fully elucidated. In this study, 490 acral melanoma samples were examined for CCNE1 copy number using the QuantiGenePlex DNA Assay. The correlation between CCNE1 copy number and acral melanoma patients' clinicopathologic features was analyzed using the chi-squared test. The impact of CCNE1 copy number on patients' progression-free survival (PFS) and overall survival (OS) probabilities was analyzed using Kaplan-Meier analysis. The impact of CCNE1 copy number on patients' median PFS after receiving chemotherapy was also evaluated. The results showed that CCNE1 copy number gain was observed in 28.30% of patients, with 3.16% of patients carrying both CCNE1 copy number gain and a BRAF mutation and 4.34% of patients carrying both CCNE1 copy number gain and an NRAS mutation. The median PFS time for patients with CCNE1 copy number gain was shorter than that of patients without CCNE1 copy number gain (17.0 vs. 27.0 months, P = 0.002). In the cohort that received chemotherapy (n = 82), the median PFS time for patients with CCNE1 copy number gain was shorter than that of patients without CCNE1 copy number gain (4.8 vs. 7.4 months, P = 0.006). CCNE1 copy number gain was an independent prognostic marker for acral melanoma patients' PFS. Our study indicates that CCNE1 copy number gain is frequent in acral melanoma and may be a biomarker to predict acral melanoma patients' outcomes after receiving chemotherapy.
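To illustrate the kind of survival comparison described above, here is a minimal, hypothetical sketch using the lifelines library. The durations, event flags, and group sizes are made-up stand-ins for the structure of such an analysis, not the study's data.

import numpy as np
from lifelines import KaplanMeierFitter
from lifelines.statistics import logrank_test

rng = np.random.default_rng(0)

# Hypothetical PFS durations (months) for patients with and without CCNE1 gain;
# exponential draws are illustrative only, not the reported distributions.
pfs_gain = rng.exponential(17.0, size=140)
pfs_no_gain = rng.exponential(27.0, size=350)
event_gain = rng.random(140) < 0.8      # True = progression observed (not censored)
event_no_gain = rng.random(350) < 0.8

# Kaplan-Meier estimate of the PFS curve for the gain group
kmf = KaplanMeierFitter()
kmf.fit(pfs_gain, event_observed=event_gain, label="CCNE1 gain")
print("median PFS (gain):", kmf.median_survival_time_)

# Log-rank test comparing the two groups' PFS curves
result = logrank_test(pfs_gain, pfs_no_gain,
                      event_observed_A=event_gain,
                      event_observed_B=event_no_gain)
print("log-rank p-value:", result.p_value)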
import jax.numpy as jnp import numpy as np import netket def v1(x): return jnp.sum(jnp.exp(-(x ** 2))) def v2(x): return jnp.sum(2.0 * jnp.exp(-(x ** 2))) hilb = netket.hilbert.Particle(N=1, L=jnp.inf, pbc=False) hilb2 = netket.hilbert.Particle(N=2, L=5.0, pbc=True) # potential operators pot1 = netket.operator.PotentialEnergy(hilb, v1) pot2 = netket.operator.PotentialEnergy(hilb, v2) pot3 = netket.operator.PotentialEnergy(hilb2, v1) # sum of potential operators pottot = pot1 + pot2 pot10p52 = pot1 + 0.5 * pot2 # kinetic operators kin1 = netket.operator.KineticEnergy(hilb, mass=20.0) kin2 = netket.operator.KineticEnergy(hilb, mass=2.0) # sum of kinetic operators kintot = kin1 + kin2 kin10p52 = kin1 + 0.5 * kin2 # sum of potential and kinetic operators etot = pottot + kintot model1 = lambda p, x: 1.0 model2 = lambda p, x: jnp.sum(x ** 3) kinexact = lambda x: -0.5 * jnp.sum((3 * x ** 2) ** 2 + 6 * x) def test_potential_energy(): x = jnp.array([0]) energy1 = pot1._expect_kernel(model1, 0.0, x, pot1._pack_arguments()) energy2 = pot2._expect_kernel(model1, 0.0, x, pot2._pack_arguments()) np.testing.assert_allclose(energy1, v1(x)) np.testing.assert_allclose(energy2, v2(x)) with np.testing.assert_raises(NotImplementedError): pot1 + pot3 def test_kinetic_energy(): x = jnp.array([1, 2, 3.0]) energy1 = kin1._expect_kernel( model2, 0.0, jnp.array([1, 2, 3.0]), kin1._pack_arguments() ) kinen1 = jnp.sum(kinexact(x) / 20.0) np.testing.assert_allclose(energy1, kinen1) np.testing.assert_allclose(kin1.mass * kin1._pack_arguments(), 1.0) np.testing.assert_equal("KineticEnergy(m=20.0)", repr(kin1)) def test_sumoperator(): x = jnp.array([1, 2, 3.0]) potenergy = pottot._expect_kernel(model2, 0.0, x, pottot._pack_arguments()) energy10p52 = pot10p52._expect_kernel(model2, 0.0, x, pot10p52._pack_arguments()) np.testing.assert_allclose(potenergy, v1(x) + v2(x)) np.testing.assert_allclose(energy10p52, v1(x) + 0.5 * v2(x)) kinenergy = kintot._expect_kernel(model2, 0.0, x, kintot._pack_arguments()) kinenergyex = jnp.sum(kinexact(x) / 20.0) + jnp.sum(kinexact(x) / 2.0) np.testing.assert_allclose(kinenergy, kinenergyex) kinen10p52 = kin10p52._expect_kernel(model2, 0.0, x, kin10p52._pack_arguments()) kinenergy10p52ex = jnp.sum(kinexact(x) / 20.0) + 0.5 * jnp.sum(kinexact(x) / 2.0) np.testing.assert_allclose(kinen10p52, kinenergy10p52ex) enertot = etot._expect_kernel(model2, 0.0, x, etot._pack_arguments()) enerexact = v1(x) + v2(x) + jnp.sum(kinexact(x) / 20.0) + jnp.sum(kinexact(x) / 2.0) np.testing.assert_allclose(enertot, enerexact)
New Seed Varieties and the Small Farm

That there is a relationship between adoption of the new varieties and size of farm is generally accepted. In this article, the variability that existed within this relationship by crops, seasons, regions and years in the period 1966-67 to 1968-69 is examined. Also examined are a number of factors which may underlie the relationship. The authors conclude that, although credit appears to be important, uncertainty may be the critical constraint to adoption among small farmers. Credit availability and uncertainty are interrelated factors, but depending on which is taken to be the crux of the problem, policies undertaken to facilitate adoption among small farmers are bound to be different. It is also noted that, for some crops, small farmers who adopt the new varieties plant a higher proportion of acreage with them than do large farmers. This may be due to a labour constraint on the larger farms. That possibility, and its policy implications, are examined.
Exploiting information extraction and the semantic web at Yahoo search

From the perspective of a search engine, Information Extraction and the Semantic Web are complementary ways toward achieving the same goal: helping the search engine understand the content on the Web and the possible intents of the users trying to retrieve it. In this talk, we will discuss some of the ways in which methods of Information Extraction, as well as explicit metadata on the Web, are exploited at Yahoo Search.
"Provide tech companies with funding or tax breaks to fund training" "The biggest issue our community faces is the difficulty recruiting highly skilled developers. Tech companies' success often depends on the skills of their development team. This is why both tech startups and large technology-dependent enterprises are desperate to find experienced, highly skilled software professionals. Every day, we are asked by tech startups how and where they can find good people. Our jobs board at Skills Matter is absolutely packed with vacancies advertising 'cool' projects and teams, begging people to join them. "The government can help in a number of ways. Provide tech companies with funding or tax breaks to fund training in advanced technology. And relax immigration rules for the highly skilled. In the meantime, we should provide the next generation with actual programming skills (as opposed to MS Office skills) very early on – by supporting initiatives such as Computing at School and Apps for Good, so that in 10 years' time, we'll be in a better position." "Make all of London Tech City" "The government focuses on publicising Old Street and the Olympic Park – but so far the winners are all over London. Betfair are in Hammersmith, Spotify are in Soho, Wonga are located in Camden and Skype in Fitzrovia. Whether it's promoting London or east London [as a tech hub], from a PR point of view, wouldn't make that much difference. If you look at what's going on around Old Street, while there are certainly some success stories, you have companies at an earlier stage with a higher probability of failure. "Why wouldn't you just claim the winners, wherever they are in London? History may prove me wrong when Crossrail is finished and maybe the Olympic Park [will] become a really popular place. But until that happens, I can't imagine you'll have many people who'll move into that space of their own free will, without the government twisting their arms. "Broadband provisioning is also a major issue. It's amazing to me, because you would just think that market forces would solve this – but it doesn't seem to be happening. If I start a company tomorrow and ring up to get a dedicated line in, it's going to take 100 days. And that's if you're lucky." "I'm very impressed with the way the government has positioned London as a hub for entrepreneurship. I think lionising entrepreneurs is a good thing – having educators and officials who recognise the value and importance of entrepreneurs in society. There are parts of the world where entrepreneurs are looked up to much more than here. I read about an entrepreneur here who said when he tells people he's an entrepreneur, they feel sorry for him, as if he can't get a real job. That shouldn't be the case. "[Tech] is the next industrial revolution and these people are the pioneers. This is what the country needs. You can't continue to rely on natural resources, traditional goods and insurance companies for growth and job creation. Schools and universities need to do more to promote entrepreneurship as a lifestyle, a career and an ethic. They need to make sure that all children are given access to great education in maths and science and engineering so we don't end up with a lopsided employment pool. We invested in a company called Code Academy. Their thesis is that coding is the new English – if you don't program you'll be programmed. The government putting their weight behind that would be great." 
"Make New York the model, not Silicon Valley" "[Attempting to rival Silicon Valley] is boneheaded. The way to think of London is the way you think of financial services markets in offshoot countries. Singapore and Hong Kong are vibrant markets in Asia. However, they are markets that are subservient to the two global markets that really matter in finance – New York and London. [In tech terms] London has a very good shot at becoming an offshoot market. Really what the government should be focused on is how we work in harmony with Silicon Valley. A lot of that is bridge-building between those two economies and they've not done a good job there so far. "The best parallel for success, if you take this thesis, is New York. New York was by and large a nonexistent tech centre, and then a few things happened that made a material difference. One was Google set up an engineering centre. One of the best ways to bridge is exactly what New York did – get a company that is out on the west coast to open up not a sales office but an engineering centre here. The government needs to spend a lot of energy doing this, because without that depth of managerial and engineering talent, you will not get big companies in your backyard." "Develop the next generation of coders" "We all heard the prime minister's speech, but the reality is I don't know what Tech City is. I've been in this industry for 25 years and as a leader of a big [technology] company in the UK, and while I do accept that our focus isn't working with internet startups, I would have expected Tech City to have hit my radar on a regular basis. But it's just not happening. I would have also expected [the government] to come to me to see how I could help incubate some of those skills. "We could help some of these young companies with things like mentoring and, possibly, invest in them. But no one's asked that question of me or any colleagues in other [similar] organisations. Also, I don't see anything in the school curriculum that is going to help us create the next Google in this country in terms of coding capability. My kids aren't allowed to take their phones to school [to build apps], and while they do take ICT classes, it's all based around Windows rather than coding and social media. We're going to have a 10-year skills deficit here, when it comes to building the [tech] superstars of the future, unless we start building coding into the curriculum." "Do more to boost London tech flotations" "As all of our companies grow and mature, where you look to float for the second stage of growth, including larger-scale later investment, is crucial – and London is not geared well for that. Traditionally a lot of companies head to Silicon Valley for the large, later-stage investment, because that investment is there. Equally, when you want to float you still head to New York. Two things need to be done. The first is happening slowly anyway, which is educating the market itself. Second, the rules and regulations around floating are considerably more onerous and difficult to go through here. "I've seen several companies abandon float plans in London and shift them to the Nasdaq [in New York] because of it. The government is working on that, but more needs to be done. Much more also needs to be done with government procurement. When I spoke at the launch [of Tech City], I talked about trying to build a booking system for sport. 
It took two years and we're finally working very closely with Sport England – and they wouldn't have worked with us until the government said: 'You have to work with some startups.' I've spoken to a lot of companies who are still quite frustrated when it comes to dealing with potential [public-sector] tender processes." "Change the rules for the enterprise investment scheme" "When I first started talking to Tracy [Doree] and Vivienne [Bearman, co-founders of the design retail site that started life as Llustre], one of the many things I loved was the location of their office. It is located just west of Silicon Roundabout – an area sprinkled with fashion boutiques, up-and-coming and more established designers, and unique cafes selling vintage clothing and strong espresso to budding tech entrepreneurs. "That inspiring setting is partly because of the government's effort in encouraging large digital companies such as Google, entrepreneurs and investors to set up shop in Tech City. The government was smart to see that as one of the keys to the success of Silicon Valley, but they should be looking at this as just one initiative among many to make London a global centre of tech. Additional priorities include education programmes that encourage young Brits to pursue careers as entrepreneurs, engineers and designers. Organisations such as the Prince's Trust are getting this kickstarted, but they could use help. The UK needs a group of angel investors who are eager to invest their time and their millions in helping businesses get off the ground. This could be helped by the government changing the rules for the enterprise investment scheme (EIS)."
// DESIRE-Engine/src/Engine/Core/FS/FileSystemWatcher_linux.cpp
#include "Engine/stdafx.h"
#include "Engine/Core/FS/FileSystemWatcher.h"

#if DESIRE_PLATFORM_LINUX

#include <sys/inotify.h>
#include <sys/select.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <stdio.h>
#include <map>

static int32_t s_inotifyFD = -1;	// inotify file descriptor
static std::map<int32_t, FileSystemWatcher*> s_watchers;	// maps watch descriptors to their watchers

// --------------------------------------------------------------------------------------------------------------------
//	FileSystemWatcherImpl
// --------------------------------------------------------------------------------------------------------------------

class FileSystemWatcherImpl
{
public:
	int32_t wd = -1;
};

// --------------------------------------------------------------------------------------------------------------------
//	FileSystemWatcher
// --------------------------------------------------------------------------------------------------------------------

FileSystemWatcher::FileSystemWatcher(const String& directory, std::function<void(FileSystemWatcher::EAction action, const String& filename)> actionCallback)
	: m_actionCallback(actionCallback)
	, m_spImpl(std::make_unique<FileSystemWatcherImpl>())
{
	if(s_inotifyFD < 0)
	{
		s_inotifyFD = inotify_init();
		if(s_inotifyFD < 0)
		{
			return;
		}
	}

	const int32_t wd = inotify_add_watch(s_inotifyFD, directory.Str(), IN_CLOSE_WRITE | IN_MOVED_FROM | IN_MOVED_TO | IN_CREATE | IN_DELETE);
	if(wd < 0)
	{
		LOG_ERROR("FileSystemWatcher error: %s", strerror(errno));
		return;
	}

	m_spImpl->wd = wd;
	// Register so UpdateAll() can route incoming events back to this watcher
	s_watchers[wd] = this;
}

FileSystemWatcher::~FileSystemWatcher()
{
	if(m_spImpl->wd >= 0)
	{
		s_watchers.erase(m_spImpl->wd);
		inotify_rm_watch(s_inotifyFD, m_spImpl->wd);
	}
}

void FileSystemWatcher::UpdateAll()
{
	if(s_inotifyFD < 0)
	{
		return;
	}

	// Poll the inotify descriptor without blocking (zero timeout)
	fd_set readSet;
	struct timeval timeout;
	timeout.tv_sec = 0;
	timeout.tv_usec = 0;

	FD_ZERO(&readSet);
	FD_SET(s_inotifyFD, &readSet);

	const int32_t ret = select(s_inotifyFD + 1, &readSet, nullptr, nullptr, &timeout);
	if(ret < 0)
	{
		LOG_ERROR("Error calling select() in FileSystemWatcher");
		return;
	}

	if(FD_ISSET(s_inotifyFD, &readSet))
	{
		char buffer[1024 * (sizeof(inotify_event) + FILENAME_MAX)] = {};
		const ssize_t numRead = read(s_inotifyFD, buffer, sizeof(buffer));

		ssize_t offset = 0;
		while(offset < numRead)
		{
			inotify_event* event = reinterpret_cast<inotify_event*>(buffer + offset);
			auto it = s_watchers.find(event->wd);
			if(it != s_watchers.end())
			{
				FileSystemWatcher* watcher = it->second;
				const String filename(event->name, event->len);
				if(event->mask & (IN_MOVED_TO | IN_CREATE))
				{
					watcher->m_actionCallback(FileSystemWatcher::EAction::ADDED, filename);
				}
				else if(event->mask & (IN_MOVED_FROM | IN_DELETE))
				{
					watcher->m_actionCallback(FileSystemWatcher::EAction::DELETED, filename);
				}
				else if(event->mask & IN_CLOSE_WRITE)
				{
					watcher->m_actionCallback(FileSystemWatcher::EAction::MODIFIED, filename);
				}
			}

			offset += sizeof(inotify_event) + event->len;
		}
	}
}

#endif	// #if DESIRE_PLATFORM_LINUX
# -*- coding: utf-8 -*-
"""
Copyright (C) 2013 TopCoder Inc., All Rights Reserved.

This is the module that wraps the Data Conversion functionality.
It defines a function to convert a data file. Note that only the function
declaration is designed; the actual implementation will be determined later.

This module resides in Python source file dataconversion.py

Thread Safety:
The implementation should be thread safe.

@author: TCSASSEMBLER
@version: 1.0
"""
import os

from logginghelper import method_enter
from logginghelper import method_exit
from conversion.converter import csv2xml
from validationhelper import check_string
from errors import DataConversionError


def convert_data(file_type, input_file_name, output_file_name):
    """
    This function is used to convert a data file.

    @param file_type: the file type - it is supposed to be a str, not None/empty. Required.
    @param input_file_name: the input file name (including full path); this function will
    assume the file exists - it is supposed to be a str, not None/empty. Required.
    @param output_file_name: the output file name (including full path); this function will
    assume the file exists, hence it will not create the file - it is supposed to be a str,
    not None/empty. Required.
    @throws TypeError: if any argument isn't of the right type
    @throws ValueError: if any argument isn't valid (refer to the argument documentation)
    @throws DataConversionError: if any other error occurred during the operation
    """
    signature = 'hfppnetwork.partner.httpservices.dataconversion.convert_data'
    method_enter(signature, {
        'file_type': file_type,
        'input_file_name': input_file_name,
        'output_file_name': output_file_name
    })
    # Acceptable file types
    types_mapping = {
        'beneficiary': 'BeneficiarySummary',
        'carrier': 'CarrierClaim',
        'inpatient': 'InpatientClaim',
        'outpatient': 'OutpatientClaim',
        'prescription': 'PrescriptionEvent'}
    check_string('file_type', file_type)
    if file_type not in types_mapping:
        raise ValueError('File type "' + file_type + '" is not acceptable. Use '
                         + str(types_mapping))
    check_string('input_file_name', input_file_name)
    check_string('output_file_name', output_file_name)
    if not os.path.exists(input_file_name):
        raise ValueError('input_file_name should be a valid file path')
    if not os.path.exists(output_file_name):
        raise ValueError('output_file_name should be a valid file path')
    try:
        csv2xml(input_file_name, output_file_name, types_mapping[file_type])
    except Exception as e:
        # Chain the original exception so the root cause is not swallowed
        raise DataConversionError('Data conversion internal error.') from e
    method_exit(signature)
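A minimal usage sketch follows; the file paths are hypothetical, and since convert_data validates that both files already exist (it never creates the output file), the caller pre-creates an empty output file first.

# Hypothetical paths for illustration; convert_data checks both exist.
input_path = '/data/in/beneficiary.csv'
output_path = '/data/out/beneficiary.xml'

open(output_path, 'w').close()  # pre-create the (empty) output file

try:
    convert_data('beneficiary', input_path, output_path)
except (TypeError, ValueError) as e:
    print('Invalid arguments:', e)
except DataConversionError as e:
    print('Conversion failed:', e)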
1. Field of the Invention

The present invention relates to computer security and, more particularly, to identifying unauthorized and potentially malicious code running in the operating system's kernel.

2. Description of the Related Art

Computer security is a top priority for an enterprise. With the ubiquitous nature of computing systems and the ever increasing number of computer applications, computer users are constantly confronted with the need to protect their computers from malicious code and/or existing code malfunction. Malicious code has plagued common computer users and large enterprises alike. The damages incurred by users/enterprises include system downtime, identity theft and loss of other sensitive information. The most common way malicious code makes its way into a computer system is by taking advantage of weaknesses in software running on the system. In addition to malicious code, some of the software loaded on the computer system may become corrupt and might not provide the same functionality as was originally designed. The corrupted software code resides in the system and executes whenever the system boots up or when an application associated with the corrupted code executes. In order to prevent malicious code from making its way into the computer system, enterprises have developed their own anti-virus solutions, or installed anti-virus solutions developed by others, to ensure that malfunctioning/malicious code does not execute on the computer system, compromising the secure and sensitive information contained therein. Some of the solutions detect and remove the problem code, while others detect and repair the malfunctioning code. In the case where the solutions detect and remove the problem code, the solutions are typically reactive in nature, wherein the solutions are designed and executed after a malicious/malfunctioning code has already executed on the computer. These security solutions have to be updated constantly to address newly developed malicious code so that adequate protection is provided for the computer system. Preventing malicious code from running in a computer system may involve a hardware solution wherein access control bits are set for pages in memory in order to prevent the code on a given page of memory from executing. The problem with the hardware solution is that there is no guarantee that the data within the page itself is not corrupted. Further, if the code within the operating system components is itself corrupted by malicious code, the setting of the access control bits will be affected, which, in turn, affects the security of those pages. Additionally, most of the solutions are reactive and do not guarantee the validity of currently executing code, or of code that bypasses a segment, jumps to a new memory region and begins executing. The bypassing of a segment essentially allows circumnavigating any security provided at specific pages in memory, making these solutions ineffective. Further, these solutions are operating system dependent and reside and execute on each guest virtual machine (GVM or guest). This means that the solutions are distributed. The solutions running on each guest try to intercept viruses at each of the guests as files are accessed on the guest. However, in-guest viruses may compromise the security within the guest, allowing malicious code access to the sensitive data.
Also, every time a specific guest's operating system (OS) is updated, or a service pack or software patch is installed, the solutions running on that guest may have to be updated so that they support the guest's OS updates, leading to non-centralized solution updates. It is in this context that embodiments of the invention arise.
/**
 * Copyright 2008 Institute of Web Science, Southeast University, PR China
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package iws.falcon.model.coordination.rule;

import iws.falcon.model.coordination.Coordinator;
import java.util.HashMap;

/**
 * @author <NAME> & <NAME>
 */
public abstract class Equivalent implements Coordinator
{
    HashMap allValues = new HashMap();

    public boolean isEmpty()
    {
        if (allValues != null) {
            return allValues.isEmpty();
        }
        return true;
    }

    public abstract boolean isEquivalent(Object o);

    public abstract Object getEquivalent(Object left);

    public class SameNode
    {
        public static final int MaxInteger = 5000000;
        public Object value = null;
        public int setNum = MaxInteger;

        public SameNode(Object v)
        {
            value = v;
        }
    }
}
Congenital short tendo calcaneus is seen in children as partial or complete walking on the toes, and may represent a major disturbance to normal motor development and coordination. This clinical finding may indicate a more serious underlying disease (cerebral paresis, childhood psychosis or a neuromuscular disorder). If the patient, apart from walking on the toes, exhibits normal clinical findings, the diagnosis of congenital short tendo calcaneus may be justified. The disease may be inherited. The diagnosis should be made by an orthopaedist, and the treatment is either conservative, with aggressive physical therapy, or surgical. The prognosis is good after early diagnosis and special treatment. A review of the literature, with emphasis on diagnosis and treatment, is presented, together with a brief review of five patients operated on by the authors.
Undoing Incest: A Meditation on "Daughters and Fathers"

In speaking to one of the editors of Daughters and Fathers at a professional conference I made a crucial slip. I had seen a notice of the book and asked whether it was in print, referring to it by what I thought was its title: Fathers and Daughters. When the editor graciously corrected me, I realized the significance of my error. In placing fathers before daughters I was contributing to the very phenomenon the book seeks to examine: the subordination and effacement of the daughter within Western family structures and traditions. By reversing the terms of this relationship, the editors hope not only to highlight the position of the daughter within it but also to assist a process of cultural transformation, with the aim of altering the network of systems that ensure the ideological reproduction of the family. This is an ambitious project, fittingly served by the large design and ample proportions of the collection of essays gathered in its name. It would be an impossible task to comment on the entire contents of this book, so I will concentrate on what I take to be its most consistent concerns as well as its overall impact on me as a reader. I feel invited to do this by the editors themselves, who in their introduction state that their book is meant to evoke a process of reflection in which each reader will question her own socialization process and
// Copyright 2019 The LUCI Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package protoutil import ( "fmt" "sort" "strconv" "strings" "google.golang.org/protobuf/proto" "go.chromium.org/luci/common/data/strpair" pb "go.chromium.org/luci/buildbucket/proto" ) // StringPairs converts a strpair.Map to a slice of StringPair messages. func StringPairs(m strpair.Map) []*pb.StringPair { ret := make([]*pb.StringPair, 0, len(m)) for k, vs := range m { for _, v := range vs { ret = append(ret, &pb.StringPair{Key: k, Value: v}) } } SortStringPairs(ret) return ret } // SortStringPairs sorts string pairs. func SortStringPairs(pairs []*pb.StringPair) { sort.Slice(pairs, func(i, j int) bool { switch { case pairs[i].Key < pairs[j].Key: return true case pairs[i].Key > pairs[j].Key: return false default: return pairs[i].Value < pairs[j].Value } }) } // StringPairMap converts a slice of StringPair messages to a strpair.Map. func StringPairMap(pairs []*pb.StringPair) strpair.Map { m := make(strpair.Map) for _, pair := range pairs { m.Add(pair.Key, pair.Value) } return m } // BuildSets returns all of the buildsets of the build. func BuildSets(b *pb.Build) []string { var result []string for _, tag := range b.Tags { if tag.Key == "buildset" { result = append(result, tag.Value) } } return result } // GerritBuildSet returns a buildset representation of c, // e.g. "patch/gerrit/chromium-review.googlesource.com/677784/5" func GerritBuildSet(c *pb.GerritChange) string { return fmt.Sprintf("patch/gerrit/%s/%d/%d", c.Host, c.Change, c.Patchset) } // GerritChangeURL returns URL of the change. func GerritChangeURL(c *pb.GerritChange) string { return fmt.Sprintf("https://%s/c/%d/%d", c.Host, c.Change, c.Patchset) } // GitilesBuildSet returns a buildset representation of c. // e.g. "commit/gitiles/chromium.googlesource.com/infra/luci/luci-go/+/b7a757f457487cd5cfe2dae83f65c5bc10e288b7" // // Returns an empty string if not all of {Host, Project, Id} are set. func GitilesBuildSet(c *pb.GitilesCommit) string { if c.Host == "" || c.Project == "" || c.Id == "" { return "" } return fmt.Sprintf("commit/gitiles/%s/%s/+/%s", c.Host, c.Project, c.Id) } // GitilesRepoURL returns the URL for the gitiles repo. // e.g. "https://chromium.googlesource.com/chromium/src" func GitilesRepoURL(c *pb.GitilesCommit) string { return fmt.Sprintf("https://%s/%s", c.Host, c.Project) } // GitilesCommitURL returns the URL for the gitiles commit. // e.g. "https://chromium.googlesource.com/chromium/src/+/b7a757f457487cd5cfe2dae83f65c5bc10e288b7" // or "https://chromium.googlesource.com/chromium/src/+/refs/heads/master" // if id is not available. func GitilesCommitURL(c *pb.GitilesCommit) string { suffix := c.Id if suffix == "" { suffix = c.Ref } return fmt.Sprintf("%s/+/%s", GitilesRepoURL(c), suffix) } // ParseBuildSet tries to parse buildset as one of the known formats. // May return *pb.GerritChange, *pb.GitilesCommit // or nil. 
func ParseBuildSet(buildSet string) proto.Message { // fmt.Sscanf cannot be used for this parsing because // var a, b string // fmt.Scanf("a/b", "%s/%s", &a, &b) // a == "a/b", b == "" p := strings.Split(buildSet, "/") for _, c := range p { if c == "" { return nil } } n := len(p) switch { case n == 5 && p[0] == "patch" && p[1] == "gerrit": gerrit := &pb.GerritChange{ Host: p[2], } var err error if gerrit.Change, err = strconv.ParseInt(p[3], 10, 64); err != nil { return nil } if gerrit.Patchset, err = strconv.ParseInt(p[4], 10, 64); err != nil { return nil } return gerrit case n >= 5 && p[0] == "commit" && p[1] == "gitiles": if p[n-2] != "+" || !looksLikeSha1(p[n-1]) { return nil } return &pb.GitilesCommit{ Host: p[2], Project: strings.Join(p[3:n-2], "/"), // exclude plus Id: p[n-1], } default: return nil } } func looksLikeSha1(s string) bool { if len(s) != 40 { return false } for _, c := range s { switch { case '0' <= c && c <= '9': case 'a' <= c && c <= 'f': default: return false } } return true }
package jnesulator.core.nes.video; import java.awt.image.BufferedImage; import java.awt.image.DataBufferInt; import java.awt.image.WritableRaster; public abstract class Renderer { int frame_width; /* * there's stuff involving this variable that's much uglier than it needs to * be because of me not really remembering how abstract classes work */ int clip = 8; int height = 240 - 2 * clip; BufferedImage[] imgs = { null, null, null, null }; int imgctr = 0; public BufferedImage getBufferedImage(int[] frame) { BufferedImage image = imgs[++imgctr % imgs.length]; WritableRaster raster = image.getRaster(); int[] pixels = ((DataBufferInt) raster.getDataBuffer()).getData(); System.arraycopy(frame, frame_width * clip, pixels, 0, frame_width * height); return image; } protected void init_images() { for (int i = 0; i < imgs.length; ++i) { imgs[i] = new BufferedImage(frame_width, height, BufferedImage.TYPE_INT_ARGB_PRE); } } public abstract BufferedImage render(int[] nespixels, int[] bgcolors, boolean dotcrawl); public void setClip(int i) { // how many lines to clip from top + bottom clip = i; height = 240 - 2 * clip; } }
Mixed-layer Heat Budget in Western and Eastern Tropical Pacific Ocean during El Niño Event in 2015/2016

Temporal variation of the mixed-layer heat budget at two contrasting locations, namely the western Pacific (warm water pool) and the eastern Pacific (cold tongue), during the extreme El Niño phenomenon in 2015/2016 is evaluated. Oceanic and atmospheric datasets, including sea surface temperature (SST), wind stress, shortwave radiation (SWR), longwave radiation, latent heat flux (LHF), and sensible heat flux, are analyzed. A slight warming occurred in the eastern tropical Pacific associated with a positive SST anomaly, which reflected the weakening or reversal of the trade winds. Meanwhile, the western tropical Pacific exhibited a cooling tendency during the development phase of El Niño. Analysis of the mixed-layer heat budget shows that the net heat flux due to SWR and LHF contributes significantly to the warming of the eastern tropical Pacific. The contribution from horizontal advection was extremely small on both sides. The analysis shows that the residual term contributes significantly to the cooling (warming) tendency observed in the western (eastern) tropical Pacific. This condition may suggest that residual processes due to entrainment and diffusivity played an important role in the evolution of the cooling (warming) process in the western (eastern) tropical Pacific.

Introduction

Air-sea interactions play an inseparable and important role in regulating the ocean heat budget, as reflected by variations in sea surface temperature (SST). Cronin and Sprintall demonstrated that the warming of the ocean surface down to the base of the mixed layer is due to shortwave radiation (SWR) from sunlight, whereas the cooling process is due to longwave radiation (LWR) emitted by the sea surface, sensible heat flux (SHF) from the air temperature differences across the ocean surface, and latent heat flux (LHF) from the evaporation process. The heat balance generated by air-sea interactions has become increasingly important and requires further observation to better evaluate this process, particularly during El Niño-Southern Oscillation events, which can significantly influence global climate change. An El Niño event is usually associated with the weakening or reversal of the easterly trade winds in the equatorial Pacific Ocean. This reversal of trade winds enhances downwelling Kelvin waves, shifting the warm pool (warm SST and high convection) to the eastern tropical Pacific. This condition results in relatively low and high rainfall in the western and eastern Pacific, respectively. The abovementioned studies and other previous research on the Pacific Ocean heat budget have improved our understanding of oceanographic processes. A recent study by Song and Yu determined that heat flux diffusion, sun penetration, and zonal advection are the main factors that influence SST changes in the western Pacific. Meanwhile, in the eastern region, Pinker et al. identified significant temporal (seasonal) variations in the LHF and SHF over the Pacific cold tongue. Pinker et al. also suggested a relationship between SWR, which was affected by cloud cover, and LWR, which was affected by moisture. Furthermore, Abellan et al. compared the mechanism of the 2015/2016 El Niño event with the 1997/1998 El Niño event and observed that the zonal winds along the equatorial Pacific during the 2015/2016 event had a lower intensity than those during the 1997/1998 event.
However, they found significant meridional activity during the 2015/2016 event compared with that of the 1997/1998 event. The importance of the zonal and meridional winds has been reported by Guan et al. This study aims to evaluate the mixed-layer heat balance in the warm pool and cold tongue regions during the evolution of the 2015/2016 El Niño event. The paper is organized as follows. The datasets and analytical methods are described in section 2. In section 3, the heat balance of the mixed layer in the western and eastern equatorial Pacific during the 2015/2016 El Niño event is compared and discussed. The final section summarizes and concludes the main findings of this study.

Data and Methods

The SST and wind stress data were obtained from the European Centre for Medium-Range Weather Forecasts (ECMWF). Both datasets have a daily temporal resolution and a spatial resolution of 0.25° × 0.25°, and cover the period from January 1, 1995 to December 31, 2016. The zonal and meridional current data were obtained from the Ocean Surface Current Analyses Real-time (OSCAR). The OSCAR data comprise ocean current flow observations at 15 m depth with a spatial resolution of 0.33° × 0.33°, available as a daily time series from January 1, 1995 to December 31, 2016. The daily time series of the atmospheric flux data containing SWR, LWR, SHF, and LHF, which were from the TropFlux project by ESSO-Indian National Centre for Ocean Information Services, are also used in this study. These atmospheric flux data have a spatial resolution of 1° × 1°. Furthermore, subsurface temperature and salinity data from in-situ observations obtained by the Tropical Atmosphere-Ocean (TAO) buoy array located at 137°E, 2°N and 110°W, 0° were used (Figure 1). The TAO provided temperature and salinity data from the ocean surface to 500 m depth. All spatial data used (i.e., SST, heat flux, winds, and ocean currents) covered the tropical Pacific Ocean.

First, the daily climatology for all parameters was calculated for the period from January 1, 1995 to December 31, 2016. The climatological values represent normal climate conditions in the Pacific Ocean. Then, the anomalies were calculated by subtracting these climatological values from the daily time series, followed by smoothing of the anomaly data using a 15 d running average. Note that the resulting SST anomaly values are used for calculating the Niño 3.4 index to describe the evolution of the 2015/2016 El Niño.

Following Iskandar et al., we calculate the heat budget within the mixed layer during the 2015/2016 El Niño event as

$$\frac{\partial T}{\partial t} = \frac{Q_{\mathrm{net}} - Q_{\mathrm{pen}}}{\rho c_p h} - \left( u\,\frac{\partial T}{\partial x} + v\,\frac{\partial T}{\partial y} \right) + R \qquad (1)$$

where $h$ is the thickness of the mixed layer, $\partial T/\partial t$ is the heat storage estimated by using the ECMWF SST, $Q_{\mathrm{net}}$ is the net surface heat flux across the air-sea interface, $Q_{\mathrm{pen}}$ is the heat loss due to SWR penetration below the mixed layer, $\rho$ is the density of sea water (1022.4 kg/m³), $c_p$ is the heat capacity (3940 J/°C/kg), $R$ is a residual term, and $T$ and $(u, v)$ are the mixed-layer temperature and horizontal velocity, respectively. The three terms on the right-hand side represent the atmospheric heating, horizontal advection, and residual components. The atmospheric heating term captures how the atmospheric flux influences air-sea interactions, and the horizontal advection term captures how large-scale ocean currents influence the SST conditions. We assume that the residual term incorporates the parameters that could not be estimated from the data, which are inferred through nonlinear processes, such as vertical mixing from the bottom and vertical diffusivity.
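As a rough illustration of how the terms of Eq. (1) can be evaluated from the daily fields described above, here is a minimal sketch in Python. The function name and array layout are our own choices, and the exponential attenuation form assumed for Q_pen (using the flux definitions given in the next paragraph) is a common formulation, not a detail confirmed by the paper.

import numpy as np

# Constants taken from the paper's Methods section
RHO = 1022.4    # seawater density [kg m^-3]
CP = 3940.0     # specific heat of seawater [J degC^-1 kg^-1]
ALPHA = 0.055   # surface albedo
GAMMA = 0.004   # shortwave attenuation coefficient [m^-1]

def budget_terms(sst, q_swr, q_lwr, q_lhf, q_shf, h, dt=86400.0):
    """Storage and atmospheric-heating terms of the mixed-layer budget.

    All inputs are 1-D daily time series; fluxes in W m^-2, with q_lwr,
    q_lhf and q_shf taken positive upward (heat loss) and q_swr the
    downward shortwave flux; h is mixed-layer thickness in m.
    Returns both terms in degC s^-1.
    """
    storage = np.gradient(sst, dt)                       # dT/dt
    q_net = (1.0 - ALPHA) * q_swr - q_lwr - q_lhf - q_shf
    q_pen = (1.0 - ALPHA) * q_swr * np.exp(-GAMMA * h)   # SWR escaping below the base
    heating = (q_net - q_pen) / (RHO * CP * h)
    return storage, heating

# The residual is then whatever the measured terms cannot account for:
#   residual = storage - heating - advection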
The mixed-layer thickness ($h$) was computed by a density criterion in which the mixed-layer base is defined by specifying a density difference of 0.125 kg m⁻³, using the density data from the TAO buoy, following Bosc et al. Local storage in Eq. (1) was estimated using the SST data. We calculated $Q_{\mathrm{net}}$ by summing the heat flux parameters across the air-sea interface as

$$Q_{\mathrm{net}} = (1 - \alpha)\,Q_{\mathrm{SWR}} - Q_{\mathrm{LWR}} - Q_{\mathrm{LHF}} - Q_{\mathrm{SHF}} \qquad (2)$$

where $\alpha$ is the albedo with a constant value of 0.055, $Q_{\mathrm{SWR}}$ is the SWR heat flux, $Q_{\mathrm{LWR}}$ is the LWR heat flux, $Q_{\mathrm{LHF}}$ is the LHF, and $Q_{\mathrm{SHF}}$ is the SHF. We defined $Q_{\mathrm{pen}}$ following Wang and McPhaden as

$$Q_{\mathrm{pen}} = (1 - \alpha)\,Q_{\mathrm{SWR}}\,e^{-\gamma h} \qquad (3)$$

with a gamma value of 0.004/m. We estimated the horizontal advection based on Lee et al. as

$$\mathrm{Adv} = \frac{1}{\Delta x\,\Delta y}\left( u_w\,\Delta T_w\,\Delta y_w - u_e\,\Delta T_e\,\Delta y_e + v_s\,\Delta T_s\,\Delta x_s - v_n\,\Delta T_n\,\Delta x_n \right) \qquad (4)$$

where $u$ and $v$ are the zonal and meridional averaged velocity currents, respectively, calculated using the OSCAR data, $\Delta T$ is the average SST difference between the boundary and the average SST anomaly in the region of interest, and $\Delta x$ and $\Delta y$ are the distances along the zonal and meridional boundaries of the region of interest. The subscripts $w$, $e$, $s$, and $n$ represent the western, eastern, southern, and northern boundaries of the region of interest, respectively. We selected our regions of interest based on the SST characteristics in the western (warm pool region) and eastern (cold tongue region) Pacific. The western region is bounded by 134°-140°E, 0°-4°N, while the eastern region is bounded by 113°-107°W, 2°S-2°N.

[Figure 4: Surface heat flux components, including SHF (purple), and mixed-layer heat budgets containing the heat storage components, for C) the western and D) the eastern Pacific. All values are calculated across the western box (134°-140°E, 0°-4°N) and the eastern box (113°-107°W, 2°S-2°N); the development and termination phases of the El Niño are marked by lines.]

The warming tendency induced by the residual term was balanced by the cooling tendency due to reduced surface heat flux (Figure 4B). A short warming occurred in August 2015 as the heat flux was significantly reduced and the residual term, which may be associated with downwelling Kelvin waves, tended to warm the eastern tropical Pacific. No significant change in the heat budget was observed during the mature phase of the event. During the termination of the event from March to May 2016, the residual term, associated with strong upwelling, cooled the eastern tropical Pacific although the surface heat flux tended to warm the ocean.

Horizontal advection
The temporal variations in horizontal advection are shown in Figure 5, where the values shown in Figure 4 have been separated into their zonal and meridional heat components for the period from January 1, 2015 to December 31, 2016 in the two regions of interest. No significant advection activity (zonal or meridional) was observed in the western tropical Pacific (Figure 5A), which was expected due to the low current activity in this region. However, large fluctuations are observed in the eastern tropical Pacific (Figure 5B), with a peak in the zonal advection observed during the El Niño peak in November 2015. Strong zonal and meridional advection fluctuations are also observed after the end of the El Niño event in July 2016 (Figure 5B), suggesting that advection may play an important role in the cooling SST trends.

In the western tropical Pacific, the surface heat flux greatly mimicked the SWR (Figure 4A). In the eastern tropical Pacific, only the LHF showed high temporal variability, and the variations in the surface heat flux followed the variations of the LHF.

[Figure 5: Horizontal advection in A) the western and B) the eastern Pacific. As in Figure 4, values are calculated across the western and eastern boxes (113°-107°W, 2°S-2°N for the eastern Pacific); the development and termination phases of the El Niño are marked.]

Conclusion

The analysis of the evolution of the 2015/2016 El Niño event shows that the trade wind variations along the equatorial Pacific were key to the mechanism behind the warming and cooling SST anomalies in the eastern and western tropical Pacific. In addition, a significant heat flux contribution was observed for the warming (cooling) SST in the eastern (western) tropical Pacific during the development phase of the El Niño event. The SWR was a major contributor to the surface heat flux variability in the western and eastern tropical Pacific. On the eastern side, the LHF also had a significant influence on the surface heat flux variations. Furthermore, the horizontal advection contributed to the mixed-layer heat budget only in the eastern tropical Pacific. A residual term (vertical entrainment) played an important role in the mixed-layer heat budgets of these two regions, especially in the eastern tropical Pacific during the termination phase of the El Niño event.
Decompressive laparotomy for abdominal compartment syndrome: a critical analysis

Introduction: Abdominal compartment syndrome (ACS) is increasingly recognized in critically ill patients, and the deleterious effects of increased intraabdominal pressure (IAP) are well documented. Surgical decompression through a midline laparotomy, or decompressive laparotomy, remains the sole definitive therapy for ACS, but the effect of decompressive laparotomy has not been studied in large patient series.

Methods: We reviewed the English-language literature from 1972 to 2004 for studies reporting the effects of decompressive laparotomy in patients with ACS. The effect of decompressive laparotomy on IAP, patient outcome and physiology were analysed.

Results: Eighteen studies including 250 patients who underwent decompressive laparotomy could be included in the analysis. IAP was significantly lower after decompression (15.5 mmHg versus 34.6 mmHg before, p < 0.001), but intraabdominal hypertension persisted in the majority of the patients. Mortality in the whole group was 49.2% (123/250). The effect of decompressive laparotomy on organ function was not uniform, and in some studies no effect on organ function was found. An increased PaO2/FIO2 ratio (PaO2 = partial pressure of oxygen in arterial blood, FIO2 = fraction of inspired oxygen) and increased urinary output were the most pronounced effects of decompressive laparotomy.

Conclusion: The effects of decompressive laparotomy have been poorly investigated, and only a small number of studies report its effect on parameters of organ function. Although IAP is consistently lower after decompression, mortality remains considerable. Recovery of organ function after decompressive laparotomy for ACS is variable.

Introduction

Intraabdominal hypertension (IAH) is a clearly identified cause of organ dysfunction in patients after emergency abdominal surgery and trauma. It is also increasingly recognized in other patients in the intensive care unit (ICU), for example after elective surgical procedures, liver transplantation, massive fluid resuscitation for extraabdominal trauma, and severe burns. The presence of IAH at admission to the ICU has been associated with severe organ dysfunction during the ICU stay, and the development of IAH during the ICU stay was an independent predictor of mortality. The clinical picture resulting from sustained IAH has been described as abdominal compartment syndrome (ACS). Although understanding of the pathophysiology of IAH has greatly improved, few advances have been made in the treatment of ACS. Few non-surgical options are available for the treatment of ACS. In some patients, IAH is caused by intraperitoneal fluid, and in these patients percutaneous drainage may be an option, as has been described in patients with ACS after burns. The use of gastric and rectal tubes to drain air and gastrointestinal contents has been proposed by experts, but a scientific foundation is lacking. Other proposed therapies include ultrafiltration and the use of neuromuscular blocking agents.
Surgical decompression is the only available definitive treatment for IAH, and numerous case series have been reported, but the effects of surgical decompression have not been reviewed in large series; patients who require decompression are frequently a selected subpopulation of the total study population. Also, most papers focus on factors associated with IAH and its effects, rather than specifically looking at endpoints such as hospital mortality and organ function after surgical decompression. The goal of this review is to describe the effect of surgical decompression through a midline laparotomy (termed 'decompressive laparotomy' (DL) in this review) on intraabdominal pressure (IAP) and the outcome and physiology of patients undergoing this procedure.

Abbreviations used: ACS = abdominal compartment syndrome; APACHE = Acute Physiology and Chronic Health Evaluation; CI = cardiac index; CVP = central venous pressure; DL = decompressive laparotomy; DO2I = oxygen delivery index; HR = heart rate; IAH = intraabdominal hypertension; IAP = intraabdominal pressure; ICP = intracranial pressure; ICU = intensive care unit; ISS = Injury Severity score; MAP = mean arterial pressure; NA = not available; SOFA = sepsis-related organ failure assessment; SVRi = systemic vascular resistance index.

Materials and methods

Articles describing adult patients with IAH requiring decompression were included in the analysis if details on IAP (at least before decompression) were available, and if the outcome was available for all patients who underwent abdominal decompression. In this setting, DL was defined as a surgical intervention on the abdominal wall aimed at reducing the IAP, after which a temporary abdominal closure device was used; percutaneous drainage of fluid collections and escharotomies were not considered in this review.

The bibliographies of the articles that were included in the final analysis were reviewed for relevant publications that would have been missed by the computerized search. For the articles retrieved, we classified the ACS according to the current guidelines of the World Society of Abdominal Compartment Syndrome (Table 1), and recorded the indication for decompression. The effect of abdominal decompression on organ function was recorded; hemodynamic (blood pressure, heart rate, cardiac output, central venous pressure, pulmonary occlusion pressure, systemic vascular resistance and oxygen delivery indices), ventilatory (PaO2/FIO2 ratio (PaO2 = partial pressure of oxygen in arterial blood, FIO2 = fraction of inspired oxygen), peak airway pressure, and lung compliance expressed by static or dynamic compliance) and renal function parameters (urinary output) were retrieved. Patient characteristics such as age, disease severity as expressed by the Acute Physiology and Chronic Health Evaluation (APACHE) II score or Injury Severity score (ISS), and the timing of DL after the precipitating event (hospital admission or prior abdominal surgical intervention) were recorded when available.

Statistical analysis was performed using SPSS for Windows 12.0® (SPSS, Chicago, IL, USA). IAP and physiological variables before and after DL were compared using a paired-samples t-test. Continuous data are expressed as mean (standard deviation). A double-sided p value of less than 0.05 was considered statistically significant.
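To make the statistical comparison concrete, here is a minimal sketch of a paired-samples t-test using scipy; the IAP values are invented for illustration and are not the pooled data from this review.

import numpy as np
from scipy import stats

# Hypothetical paired IAP measurements (mmHg) for the same patients
# before and after decompressive laparotomy; illustrative values only.
iap_before = np.array([30.0, 38.5, 32.0, 41.0, 28.5, 36.0, 44.0, 33.5])
iap_after = np.array([14.0, 18.5, 12.0, 20.0, 13.5, 16.0, 21.0, 15.5])

t_stat, p_value = stats.ttest_rel(iap_before, iap_after)
print(f"mean IAP before = {iap_before.mean():.1f} mmHg, "
      f"after = {iap_after.mean():.1f} mmHg")
print(f"paired t-test: t = {t_stat:.2f}, p = {p_value:.4g}")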
From the references in these articles, another 8 papers were considered to contain significant data, bringing the total number of studies reporting on patients who underwent surgical decompression to 27. After analysis of the data available in the papers, 9 papers were excluded for various reasons: no data on IAP available (n = 5), no DL performed as a means of decompression (n = 1), analysis based on patients already described in another paper that was included in the analysis (n = 1), laparotomy planned for reasons other than ACS (n = 1), and insufficient data on the groups of patients that were decompressed (n = 1). The 18 papers included in the final analysis are listed in Table 2. In total, 250 patients were treated with DL for ACS, of whom 174 had primary ACS and 76 secondary ACS. In four papers no indication for DL was named, but it could be presumed to be ACS. No clear definition of ACS was mentioned in another five papers, and only six used a more or less clear definition of ACS, including a cut-off IAP level (Table 2). The definitions of ACS differed in every paper and, most noticeably, the critical level of IAP that was considered an indication for DL varied from 18 to 30 mmHg. In one paper, uncontrollable intracranial pressure was the sole indication for DL. The mean interval from admission to the hospital, or from the previous surgical intervention, to DL was reported in only a limited number of papers, and varied from 12 to 38 hours, except for the study in which uncontrollable ICP was the indication for DL; in that paper, the mean interval between admission and DL was 139 hours.

Effect of surgical decompression on IAP

From 10 studies, IAP values before and after abdominal decompression were available for a total of 161 patients; the other studies only reported IAP values before decompression. In all but one report, IAP fell significantly after surgical decompression (Figure 1). Overall, the mean reported IAP before DL was 34.6 mmHg (8.06) and fell to 15.5 mmHg (4.81) after DL (p < 0.001).

Outcome after surgical decompression for ACS

Mortality rates for patients who underwent surgical decompression for ACS are summarized in Table 3. Overall, reported mortality for all patients with ACS who underwent surgical decompression was 49.2% (123/250). The mean age in the different studies was 44.5 years. The severity of disease, as assessed by APACHE II score and ISS, was generally high in these patients, but was not reported in most of the papers; an APACHE II-based predicted mortality, therefore, could not be calculated. The cause of death of patients who underwent DL could be retrieved from only nine of the studies, accounting for only 29 of the 123 patients who died. The main cause of death after DL was single or multiple organ dysfunction (n = 23, 79%); other causes included head injury (n = 2, 7%) and haemorrhage (n = 1, 3%). In three patients, therapy was withdrawn.

Effect of abdominal decompression on hemodynamic, respiratory and renal function parameters

Table 4 summarizes the effect of abdominal decompression on hemodynamic variables considered to be impaired because of ACS. Blood pressure remained unchanged after decompression in five out of nine reports, but increased significantly in the remainder. A significant drop in central venous pressure was present in three out of eight papers, and four out of eight reported a significantly lower pulmonary artery occlusion pressure. Heart rate was found to be unchanged in all but two reports.
In the majority of the papers that studied cardiac function before and after decompression, the cardiac output or cardiac index improved significantly after decompression. A small number of studies reported detailed information on hemodynamic parameters: one study found an increased oxygen delivery after decompression, whereas another found no difference. Systemic vascular resistance decreased in two studies, but increased in one. No differences in SvO2 (mixed venous oxygen saturation) were found in either of the two studies reporting details on this topic.

The effect of DL on respiratory function is presented in Table 5. In all studies, respiratory function improved significantly in most patients, both in terms of reduced peak inspiratory pressures and of an improved PaO2/FIO2 ratio. In all reports, however, the PaO2/FIO2 ratio after decompression remained below 300, ranging from 154 to 239.

In two of the larger patient series, there was no change in urinary output (Figure 2). In papers that reported a limited number of patients, absolute values increased, but the number of patients was probably too small to reach statistical significance. In 5 out of 10 studies, the mean urinary output was above 50 ml/hour before decompression (mean urinary output ranged from 50 ml/hour to 105 ml/hour) and, in most of these, it significantly increased after decompression.

[Figure 1: The effect of decompressive laparotomy (DL) on intraabdominal pressure (IAP) in patients with primary and secondary abdominal compartment syndrome. IAP levels are those reported in the individual papers: Kron and colleagues, Platell and colleagues, Meldrum and colleagues, Chang and colleagues, Sugrue and colleagues, Ertel and colleagues, Biffl and colleagues, Hobson and colleagues, Mayberry and colleagues, Balogh and colleagues.]

Discussion

DL resulted in a decrease in IAP in all patients who were studied. However, IAH persisted in a considerable number of patients, as the mean IAP after DL remained well above the 12 mmHg threshold for IAH. In one study, the IAP after decompression was as high as 26 mmHg. The fact that IAP decreased is of course not surprising, but the level of IAP after surgical intervention is more intriguing. Apparently, several patients must have suffered from (early) recurrent or persistent ACS in these studies, although only a few studies specifically mention this problem. Important limitations here are the facts that almost half of the studies (accounting for about a third of the patients in this review) did not report IAP values after DL and that the time to measurement of IAP after DL was not standardized. The problem of recurrent ACS in patients with open abdomen treatment has been reported by Gracias and colleagues. Mortality in their patients with recurrent ACS was high when compared with patients without recurrent ACS (60% versus 7%); recurrent ACS occurred between 1.5 and 12 hours after surgery. From the data available, it is not clear whether recurrent ACS is an independent risk factor for mortality, but considering the association of organ dysfunction and mortality in recent epidemiological studies, it seems plausible that this is a major factor determining outcome in these patients.
Mortality in patients undergoing DL remains high and deserves further investigation. Several factors may explain the fact that half of the patients in the included studies eventually died, in spite of aggressive measures like DL. First of all, patients who require DL are severely ill at the moment of decompression, and often DL is considered a last resort. This may not be reflected by APACHE II scores early after ICU admission or by the ISS, although in the few studies that reported these parameters, they were high to very high. Obviously, as no control group is available, it is difficult to estimate the outcome of these patients without decompression. Secondly, the fact that IAP remained moderately to severely elevated in a number of patients (who can be considered incomplete responders or non-responders) should also be taken into account. This is also reflected by the fact that although a number of physiological values improved, they did not return to normal. The effect of DL on oxygenation is one such example: the mean PaO2/FIO2 ratio after decompression remained far below 300 in all the reports, and below 200 in most of them, notably in the two largest studies. Unfortunately, no data are available on the effect on organ dysfunction as assessed by serial scoring systems designed to study the evolution of organ dysfunction, such as the SOFA score. Moreover, of the variables included in the SOFA score, only one out of six organ systems (the respiratory system) could be graded by the parameters reported in the studies in this review. The data reported for the cardiovascular, haematological, renal, neurological and gastrointestinal systems are incomplete or lacking in most studies. Although the parameters most notably impaired by the development of ACS, such as peak inspiratory pressure, mean arterial pressure and urinary output, often improved significantly, these might not be the best parameters for studying organ function. To evaluate the cardiovascular system, information on the amount of vasoactive medication should be mentioned; serum creatinine probably should be included to evaluate renal function. Thirdly, it should be considered that DL may be harmful for some patients. Morris and colleagues described a lethal reperfusion syndrome early after DL. There may be a risk of rebleeding when coagulation is not completely restored before abdominal decompression, especially in trauma patients, who are often severely coagulopathic early after arrival in the ICU. Hemorrhagic shock was the cause of death in a third of the deaths after DL in the paper by Ertel and colleagues; Balogh and colleagues reported that exsanguination was the cause of death in two out of six patients with secondary ACS who were decompressed and later died. Also, in patients with severe acute pancreatitis and ACS, we found that three out of four patients who were decompressed died, two of them from uncontrollable haemorrhage. Although DL has a positive effect on cardiovascular, respiratory and renal function, some issues require further investigation. Filling pressures (central venous pressure and pulmonary artery occlusion pressure) decreased in all patients, but this probably only reflects the decrease in IAP in those patients. It has been shown that IAP is transmitted to a large extent (25% to 80%) to the thoracic cage, resulting in the high central venous and pulmonary occlusion pressures often observed in ACS. The decrease after decompression does not necessarily reflect an improvement in organ function.
Cardiac function improved in the majority of the patients, but it is remarkable that in the largest study no improvement in cardiac index was found. The change in peak airway pressure is not surprising. Some of the studies date from the era when conventional tidal volumes (8 to 12 ml/kg) were used to ventilate patients with acute respiratory distress syndrome, so the decrease in peak airway pressure and the improvement in compliance may be more pronounced than when lower tidal volumes are used. The effect on oxygenation was positive overall, but respiratory function remained severely impaired in the majority of the patients. There was no change in urinary output in the two largest series and, remarkably, the urinary output before DL in patients with ACS was about 50 ml/hour or more in the majority of the papers. Nevertheless, significant improvement was found in all but two papers, often despite the small number of patients. Sugrue and colleagues reported an increase in serum creatinine after DL with little improvement over 14 days. No data on short- or long-term effects on renal function were reported in the other papers. Some questions cannot be answered by the analysis of the outcome parameters in this review. The effects of the timing of DL and of the speed of diagnosis of ACS on patient outcome remain to be elucidated. The timing of surgical intervention was only rarely reported, and it is not clear from the papers presented which clinical condition exactly triggered surgical decompression in the patients reported. Also, coexisting causes of organ dysfunction in these severely ill patients, such as sepsis or acute respiratory distress syndrome, and their role in the development of IAH and ACS should be further explored. Patients suffering from severe sepsis often have increased fluid requirements, which in themselves may contribute to recurrent ACS. Although there is a consensus on the definition of ACS, there is no clear consensus on which parameter should be the threshold for surgical decompression in patients with ACS; no clear conclusion can be drawn from this review either. Several authors have suggested that an IAP of more than 25 mmHg should trigger DL. Others suggest that the IAP recordings are only supportive data, and base the decision to open the abdomen on clinical parameters. The clinical condition of the patient with secondary ACS often makes the whole picture very complicated. Often, these patients have other causes of hypotension, renal dysfunction or respiratory failure, and the development of IAH may be a factor contributing to the clinical picture of ACS. This concern was also raised by Balogh and colleagues, who considered ACS to be an indicator of disease severity, not the cause of early death.

Conclusion

Patients with primary and secondary ACS generally are good responders to DL in terms of reduction of IAP and improvement of several physiological variables, but the exact effect on organ dysfunction is not clear. An important next step in the management of patients with primary and secondary ACS is to identify those patients who would benefit most from DL, as this review indicates that recuperation of organ dysfunction is variable and unpredictable, and mortality remains considerable in patients treated with it.
In both primary and secondary ACS, the IAP value probably is not the only parameter that should be considered, and clinical parameters should be included when evaluating a patient with IAH for surgical decompression. To study the effect of abdominal decompression in a larger series of patients, we propose to open a registry of patients with ACS undergoing abdominal decompression, coordinated by the World Society of the Abdominal Compartment Syndrome (WSACS); more information can be found at the society's website.

Key messages

- Detailed effects of DL on organ function are only rarely reported.
- IAP threshold levels for DL reported in the literature vary considerably.
- DL decreases IAP to levels below 20 mmHg in most studies.
- A positive effect on organ function is reported in most studies, but the effect is inconsistent, and the duration of this effect is not clear.
- Reported mortality after DL for ACS is high.
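As a practical aside, the two headline statistics in this review — the paired t test on IAP before and after DL, and the pooled mortality proportion — are simple to reproduce. The sketch below illustrates both; the per-patient IAP values are hypothetical placeholders (individual patient data are not reported in the review), and the Wilson confidence interval on mortality is our own illustrative addition, not something reported above.

# Illustrative reproduction of the review's summary statistics.
# The IAP values are hypothetical placeholders, not patient data from the review.
import numpy as np
from scipy import stats
from statsmodels.stats.proportion import proportion_confint

iap_before = np.array([32.0, 41.5, 28.0, 36.0, 30.5, 44.0])  # mmHg, pre-DL (hypothetical)
iap_after = np.array([14.0, 18.5, 12.0, 16.0, 15.5, 21.0])   # mmHg, post-DL (hypothetical)
t_stat, p_value = stats.ttest_rel(iap_before, iap_after)     # paired-samples t test
print(f"IAP {iap_before.mean():.1f} -> {iap_after.mean():.1f} mmHg, "
      f"paired t = {t_stat:.2f}, two-sided p = {p_value:.4f}")

deaths, n = 123, 250                                          # pooled mortality as reported
low, high = proportion_confint(deaths, n, method="wilson")    # 95% CI (our addition)
print(f"mortality {deaths / n:.1%} (95% CI {low:.1%} to {high:.1%})")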
//-----------------------------------------------------------------------------
// Purpose: Decode a single block of stereo ADPCM audio
// Input  : *pOut - 16-bit output buffer
//          *pIn - ADPCM encoded block data
//          count - number of sample pairs to decode
//-----------------------------------------------------------------------------
void CAudioMixerWaveADPCM::DecompressBlockStereo( short *pOut, const char *pIn, int count )
{
    int pred[2], co1[2], co2[2];
    int i;

    // Block header: one predictor index per channel selects the coefficient pair.
    // Read it as unsigned so an index above 127 cannot go negative on a signed char.
    for ( i = 0; i < 2; i++ )
    {
        pred[i] = (unsigned char)(*pIn++);
        co1[i] = m_pCoefficients[pred[i]].iCoef1;
        co2[i] = m_pCoefficients[pred[i]].iCoef2;
    }

    // Per-channel initial delta (step size) and the two seed samples.
    int delta[2], samp1[2], samp2[2];
    for ( i = 0; i < 2; i++, pIn += 2 )
    {
        delta[i] = *((short *)pIn);
    }
    for ( i = 0; i < 2; i++, pIn += 2 )
    {
        samp1[i] = *((short *)pIn);
    }
    for ( i = 0; i < 2; i++, pIn += 2 )
    {
        samp2[i] = *((short *)pIn);
    }

    // The seed samples are emitted directly, oldest pair first.
    *pOut++ = (short)samp2[0];
    *pOut++ = (short)samp2[1];
    *pOut++ = (short)samp1[0];
    *pOut++ = (short)samp1[1];
    count -= 2;

    // Each input byte carries two 4-bit error codes; the high nibble is consumed first.
    int high = 1;
    int error, sample = 0;

    while ( count )
    {
        for ( i = 0; i < 2; i++ )
        {
            if ( high )
            {
                sample = (unsigned char)(*pIn++);
                error = sample >> 4;
                sample = sample & 0xf;
                high = 0;
            }
            else
            {
                error = sample;
                high = 1;
            }

            // Predict from the previous two samples, then correct by the signed
            // error scaled by the current step size (lookup tables defined elsewhere).
            int errorSign = error_sign_lut[error];
            int predSample = (samp1[i] * co1[i]) + (samp2[i] * co2[i]);
            predSample >>= 8;
            predSample += (errorSign * delta[i]);

            // Adapt the step size for the next sample; clamp to the minimum.
            delta[i] = (delta[i] * error_coefficients_lut[error]) >> 8;
            if ( delta[i] < 16 )
                delta[i] = 16;

            // Clamp the decoded sample to the 16-bit range.
            if ( predSample > 32767L )
                predSample = 32767L;
            else if ( predSample < -32768L )
                predSample = -32768L;

            *pOut++ = (short)predSample;

            samp2[i] = samp1[i];
            samp1[i] = predSample;
        }
        count--;
    }
}
By concentrating too much on classroom-based academics with four-year college as a goal, the nation’s education system has failed vast numbers of students, who instead need solid preparation for careers requiring less than a bachelor’s degree, Harvard scholars say in a report issued today. Their report arrives as experts are trying to define what skills are necessary for work and for higher learning. The proposal from an esteemed school of education sparked immediate concern—including what one activist called “a major case of heartburn”—for raising the specter of tracking, in which disadvantaged students would be channeled unquestioningly into watered-down programs that curtail their prospects. The Harvard study also drew notice because it was driven in part by the concerns of one of its co-authors, Robert B. Schwartz, a prominent champion of higher academic expectations for all students, who said he began to doubt the wisdom of a “college for all” approach to education. Another co-author, Ronald Ferguson, the director of Harvard’s Achievement Gap Initiative, is a national expert on improving learning opportunities for disadvantaged children. The authors contend that their vision would expand opportunity for all students, especially those who face the dimmest prospects now because their education stops at high school. Rather than derailing some students from higher learning, their system would actually open more of those pathways, they say, by offering sound college preparation and rigorous career-focused, real-world learning, and by defining clear routes from secondary school into certificate or college programs. Appearing at an event to discuss the report on Wednesday, U.S. Secretary of Education Arne Duncan urged educators and policymakers to embrace a vision of career and technical education that prepares students simultaneously for college and good-paying jobs by imparting the blend of academic and workplace skills needed in both. He acknowledged that too many CTE programs have been “dumping grounds for students tracked with weaker academic skills,” but asserted that re-envisioned programs will be “viable and rigorous pathways” to college and career success. In 1973, seven in 10 jobs in the United States were held by those with only a high school education, but by 2007, that figure dropped to four in 10, the report says. Half the jobs created in the next decade will be well matched to those with associate’s degrees or vocational or technical training, including “middle skills” jobs such as construction manager or dental hygienist, it says. Many of those jobs pay more than jobs typically held by workers with only high school diplomas, and some even pay more than the average job held by a four-year college graduate, according to the study. Six in 10 Americans don’t complete associate’s or bachelor’s degrees by their mid-20s, the report notes, and only one in 10 earns an occupational certificate. Those figures, combined with the job forecasts, suggest that education must be fundamentally reworked to ensure sound options for non-college-bound students, the authors say. Drawing on European systems of vocational education, they argue for an American version of a “more holistic” education that would involve employers in defining the skills necessary for work and providing internships, apprenticeships, and other opportunities linked tightly to students’ courses of study. Pivotal to such a system would be career counseling embedded in schools from early in students’ education.
A focus on better preparing students for middle-skills jobs is long overdue, said Anthony P. Carnevale, one of the job-market experts whose research is cited in the study. But creating varied pathways is fraught with political peril because of the risk that some students will be held to lower expectations, Mr. Carnevale said. Some education advocates reacted with alarm to the recommendations, especially given the virtual absence of career counseling in the K-12 or community college system to help level the playing field between disadvantaged students and more-fortunate ones. Mr. Schwartz of Harvard acknowledged that the report wades into “tricky terrain.” But he said that tracking is “when schools make decisions about what kids are capable of and what their futures are. It’s pervasive in our schools, and it’s a huge problem.” Michael Cohen, who succeeded Mr. Schwartz as the president of Achieve, a Washington-based organization that works with states to raise their academic expectations, took issue with the report’s depiction of the college-readiness agenda as having failed. Only recently, he said, have states adopted course requirements that reflect the skills and knowledge needed for college and good jobs. “To say we’ve tried this and it failed seems a bit premature, like snatching defeat from the jaws of victory,” he said. Some states and districts are moving toward highly rigorous versions of career and technical education. The report cites examples such as California’s Linked Learning initiative, which combines work-based learning with counseling supports, and Massachusetts’ network of regional vocational-technical schools. Construction Technology Academy at Kearny High School in San Diego, one of the 50-plus campuses in California’s Linked Learning network, could illustrate some of what the report’s authors have in mind, said Gary Hoachlander, the president of ConnectEd, a Berkeley, Calif.-based nonprofit group that supports Linked Learning schools. Students who choose the academy study architecture, engineering, and construction as well as the typical core curriculum, he said. Some go on to apprenticeship programs in the construction trades, some go to community colleges, and some enroll in universities, but all students take courses in the principles of engineering, computer-assisted design, carpentry, and electricity, Mr. Hoachlander said.
package com.example.commons.db.mybatis.mapper.base.sqlhelper.module.service.impl;

import com.example.commons.db.mybatis.mapper.base.sqlhelper.module.mapper.SysUserLogMapper;
import com.example.commons.db.mybatis.mapper.base.sqlhelper.module.pojo.SysUserLog;
import com.example.commons.db.mybatis.mapper.base.sqlhelper.module.service.SysUserLogService;
import org.springframework.stereotype.Service;

import javax.annotation.Resource;
import java.util.List;

@Service
public class SysUserLogServiceImpl implements SysUserLogService {
    @Resource
    private SysUserLogMapper sysUserLogMapper;

    @Override
    public int updateBatch(List<SysUserLog> list) {
        return sysUserLogMapper.updateBatch(list);
    }

    @Override
    public int updateBatchSelective(List<SysUserLog> list) {
        return sysUserLogMapper.updateBatchSelective(list);
    }

    @Override
    public int batchInsert(List<SysUserLog> list) {
        return sysUserLogMapper.batchInsert(list);
    }
}
import {TestBed} from '@angular/core/testing';

import {LoaderService} from './loader.service';

describe('LoaderService', () => {
  let loaderServiceSpy: LoaderService;

  beforeEach(() => {
    loaderServiceSpy = new LoaderService();
  });

  beforeEach(() => TestBed.configureTestingModule({
    providers: [
      {provide: LoaderService, useValue: loaderServiceSpy}
    ]
  }));

  it('should be created', () => {
    const service: LoaderService = TestBed.get(LoaderService);
    expect(service).toBeTruthy();
  });
});
Modelling and Analysis of Elastic and Thermal Deformations of a Hydrodynamic Radial Journal Bearing

Journal bearings are widely used in heavy industry and in internal combustion engine applications. There is a need to increase the power density of various machine parts, which leads to increased bearing loads and reduced lubrication film thicknesses. This type of development may increase deformations of the bearing surfaces, which need to be considered in the bearing design process. The main purpose of this work was to develop a parameterized calculation model for hydrodynamic radial journal bearings which takes into account elastic and thermal deformations of the bearing surfaces. Hydrodynamic calculations were based on numerically solving the Reynolds equation under the assumption of rigid surfaces. Elastic and thermal deformations of the bearing and shaft surfaces were calculated using the finite element method. It is concluded that elastic and thermal deformations partly cancel each other out at the loaded side of the bearing and that, depending on the sliding speed and the external normal force, either one of them can be more significant.
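The abstract above rests on a numerical solution of the Reynolds equation with rigid surfaces. As a rough illustration of what that involves, the sketch below solves the one-dimensional, incompressible, isoviscous Reynolds equation d/dx(h^3 dp/dx) = 6*mu*U*dh/dx for a fixed film profile by central finite differences. The geometry, viscosity and speed values are arbitrary placeholders, and the actual model in the paper is more elaborate (a 2D film coupled to elastic and thermal deformations).

# Minimal 1D Reynolds equation solver (rigid surfaces), illustrative only.
# d/dx( h^3 dp/dx ) = 6*mu*U*dh/dx, with ambient pressure p = 0 at both ends.
import numpy as np

L, h0, h1 = 0.1, 50e-6, 25e-6      # slider length [m], inlet/outlet film [m] (placeholders)
mu, U, N = 0.05, 5.0, 201          # viscosity [Pa s], sliding speed [m/s], grid nodes
x = np.linspace(0.0, L, N)
dx = x[1] - x[0]
h = h0 + (h1 - h0) * x / L         # linearly converging film thickness

A = np.zeros((N, N))
b = np.zeros(N)
A[0, 0] = A[-1, -1] = 1.0          # boundary conditions: p(0) = p(L) = 0
for i in range(1, N - 1):
    he = ((h[i] + h[i + 1]) / 2) ** 3   # h^3 evaluated at the east/west midpoints
    hw = ((h[i] + h[i - 1]) / 2) ** 3
    A[i, i - 1], A[i, i], A[i, i + 1] = hw, -(he + hw), he
    b[i] = 6 * mu * U * (h[i + 1] - h[i - 1]) / 2 * dx  # RHS multiplied by dx^2

p = np.linalg.solve(A, b)
print(f"peak film pressure: {p.max() / 1e6:.2f} MPa")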
America's infrastructure is in disrepair. Why don't we care? A number of years ago in Cambridge, Mass., a young friend from Germany commented on the potholed and patched streets that surrounded us, as well as the uneven sidewalks and assorted other rough edges. "It looks like a Third World country here," he said. "Apparently no one cares." To him, it was amazing that the wealthy and well-educated residents of Cambridge would tolerate such a poor public environment. Yet in the United States, this is more the rule than the exception. Occasional disasters focus attention on the problem -- for example, the near liquidation of New Orleans because of inadequate and poorly maintained levees -- but, in general, the state of disrepair is so common that we simply accept it. The American Society of Civil Engineers, in its well-known Report Card for America's Infrastructure, gives the U.S. an overall grade of D and says there is a $2.2 trillion deficit -- the amount of money it would take in five years to bring the country's public works up to acceptable levels. Much of this estimate is for simple maintenance. Now, asking a bunch of civil engineers about public-works spending is like asking the barber if you need a haircut. Still, the organization's work is impressive. It attempts a comprehensive assessment of needs in 10 categories, from aviation to wastewater. You don't need an engineering degree to see that many U.S. roads, train lines, bridges, sewers and water systems are less spiffy than in other advanced countries. Some national systems, like the interstates, look pretty good. Local streets, bridges, sidewalks, train stations, water tunnels and the like seem to be in the worst shape. To some extent, these cracks in our infrastructure -- or public works, to use the meatier and older term -- reflect the cracks in our government. Under the American system, which is based on the English model, authority is separated among not only federal, state and local, but among independent public authorities, as well as private utility companies. City Hall may be nominally in charge of Main Street, but private companies for phone, gas, electric, cable and Internet service are the ones tearing up the street. Another challenge is that states and localities, unlike the federal government, make a firm distinction between operating and capital expenditures. You can borrow money to build a road, but not to maintain it. Then there is the American antigovernment predilection. We look at government as something outside ourselves, rather than a reflection of us. Politicians have become loath to spend any of their constituents' money on anything that makes a structure look good if it might be seen as a symbol of waste. There are assorted remedies, although none are magic bullets. Tom Downs, a former president of Amtrak and New Jersey transportation commissioner, said that one start would be to have a "depreciation account," as many businesses do. That way, government would see what needed to be funded. Another way is to have annual reports on the status of highways, streets, utilities, schools, fire stations and so forth. Al Appleton, a former commissioner of environmental protection and director of water and sewer systems in New York, said politicians and the press focus inordinately on the "head count" in government, without seeing the savings and efficiency that are gained through adequate staffing levels. "You can't fight a war without an army," he said. "To do maintenance right, you need people." 
Much of this is contrary to the public discussion of infrastructure. We see highway crews loafing, and jump to assumptions that all government is overstaffed. And our obsession with eliminating "pork" can get in the way of making sure that what we build is built right, and kept that way. Rising standards would help. We should expect our streets to shine, and if they don't, we should hold the politicians responsible. Alex Marshall is a senior fellow at the Regional Plan Association in New York and author of the forthcoming, "The Surprising Design of Market Economies." This article was distributed by Bloomberg News.
Comparative analysis of full genome sequences of African swine fever virus isolates taken from domestic pigs and wild boar in Zabaykalsky Krai of the Russian Federation in 2020

African swine fever virus (ASFV) is the causative agent of African swine fever (ASF), a disease characterized by hemorrhagic fever with a lethality rate of up to 100%. The spillover into the Russian Federation, the EU and Asia has caused considerable damage to the economies of these countries, estimated at billions of dollars. Despite the ongoing pandemic, new outbreaks continue to be reported. In 2020, ASF was reported in Russia, in Zabaykalsky Krai, in wild boars and domestic pigs. In this paper we report the analysis of the isolate ASFV/Zabaykali 2020/DP-4905 from domestic pigs in comparison with the isolate from wild boars sequenced previously. We found that the genome of ASFV/Zabaykali 2020/DP-4905 contains 189,451 base pairs and encodes 189 open reading frames (ORFs), sharing 99.8% identity with Georgia 2007/1 and 99.8% identity with ASFV/Zabaykali 2020/WB-5314 from wild boars of Zabaykalsky Krai. Both isolates from Zabaykalsky Krai had no additional tandem repeat sequence (TRS) in the intergenic region between I73R and I329L and belonged to group I, like Georgia 2007/1, and no SNPs were identified in the CVR of their B602L gene. We also identified a unique SNP G>C at position 204 of ORF E199L that had never been observed before in isolates from Russia and other Eurasian countries. Therefore, this ORF can be considered a new genome marker for grouping isolates, since several novel SNPs were identified in the studied isolates, assigning them to seven groups.
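The comparisons above (percentage identity, SNP detection between isolates) boil down to scanning aligned sequences position by position. The sketch below shows the idea on two hypothetical aligned fragments; it assumes the sequences are already aligned and of equal length, which for real ASFV genomes would require a prior alignment step.

# Toy SNP scan between two aligned sequences (hypothetical fragments).
# Real genome comparisons (e.g. 189,451 bp) require a prior alignment step.
def compare(seq_a: str, seq_b: str):
    assert len(seq_a) == len(seq_b), "sequences must be aligned"
    snps = [(i + 1, a, b)  # 1-based position, as in 'G>C at position 204'
            for i, (a, b) in enumerate(zip(seq_a, seq_b)) if a != b]
    identity = 1 - len(snps) / len(seq_a)
    return identity, snps

ident, snps = compare("ATGGCGTACGTT", "ATGGCCTACGTT")
print(f"identity: {ident:.1%}")
for pos, a, b in snps:
    print(f"SNP {a}>{b} at position {pos}")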
Cefepime. Pharmacokinetics and clinical response in patients with cystic fibrosis. OBJECTIVE To measure first-dose and steady-state plasma, urine, and sputum concentrations of cefepime and make preliminary assessments of the clinical efficacy of cefepime in patients with cystic fibrosis. DESIGN Open noncomparative clinical trial. SETTING Memorial Miller Children's Hospital of Long Beach, Calif. PARTICIPANTS Twelve patients, aged 4 to 41 years, with a confirmed diagnosis of cystic fibrosis and chronic bronchopulmonary infections. INTERVENTIONS Patients received cefepime at 50 mg/kg per dose (maximum, 2 g per dose) given intravenously every 8 hours. Clinical evaluations, pulmonary function tests, quantitative sputum cultures, and sensitivity testing were performed before, at the end of, and 2 weeks after therapy. MEASUREMENTS AND MAIN RESULTS Mean (+/- SD) peak plasma concentrations after the first dose were 148.2 (36.6) mg/L; other values were as follows: half-life, 1.59 (0.46) hours; area under the curve, 292 micrograms x hour per milliliter; total-body clearance, 3.01 (1.46) mL/min per kilogram; volume of distribution at steady state, 0.32 (0.10) L/kg; and percentage of the dose recovered in urine, 52% (27%). Steady-state and first-dose values were similar. Trough levels varied from 6.4 to 7.2 mg/L. Mean (+/- SD) sputum concentrations at steady state varied from 6.3 (5.4) to 4.8 (2.3) mg/L. At completion of therapy, nine of 10 patients' conditions were improved as indicated by clinical scores (greater than 10 points), forced vital capacity (greater than 10%), and a greater than or equal to 1 log decrease in sputum bacterial concentration. Cefepime was well tolerated in 10 patients, but rash and light-headedness developed in two patients. The Pseudomonas aeruginosa minimum inhibitory concentration for 90% of isolates (MIC90) increased from the start (64 mg/L) to the end of therapy (256 mg/L) and was unchanged 2 weeks later. CONCLUSION Based on these data and the potential advantage of a single agent for the treatment of mixed infections (Staphylococcus aureus and P aeruginosa), comparative clinical trials of cefepime and standard therapy for bronchopulmonary exacerbations in patients with cystic fibrosis appear to be warranted.
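Several of the parameters reported above (half-life, area under the curve, total-body clearance) follow from standard non-compartmental calculations on concentration-time data. The sketch below illustrates those formulas on hypothetical post-infusion plasma concentrations; it is not the study's analysis, and the sample values are made up.

# Non-compartmental PK estimates on hypothetical post-dose concentrations.
import numpy as np

t = np.array([0.5, 1, 2, 4, 6, 8])                    # h after dose (hypothetical)
c = np.array([140.0, 110.0, 70.0, 28.0, 11.0, 4.5])   # mg/L (hypothetical)
dose_mg = 2000.0                                      # 2 g dose, as in the study protocol

# Terminal slope from log-linear regression -> elimination half-life.
slope, intercept = np.polyfit(t, np.log(c), 1)
kel = -slope
t_half = np.log(2) / kel

# AUC(0-inf): trapezoids over the sampled interval plus extrapolated tail C_last/kel.
auc = np.trapz(c, t) + c[-1] / kel                    # mg*h/L
cl = dose_mg / auc                                    # total-body clearance, L/h

print(f"half-life: {t_half:.2f} h, AUC: {auc:.0f} mg*h/L, CL: {cl:.2f} L/h")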
/* Source file: include/quat/create.h (repo: tmpvar/quat.c) */
#ifndef __quat_create__
#define __quat_create__

#include "type.h"
#include <vec4/create.h>

/**
 * Creates a new identity quat
 *
 * @returns {quat} a new quaternion
 */
quat create () {
  quat out = vec4_create();
  out[3] = 1;
  return out;
}

#endif
// Copyright (c) 2021-2022 The MetabaseNet developers // Distributed under the MIT/X11 software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef METABASENET_SERVICE_H #define METABASENET_SERVICE_H #include "base.h" #include "hcode.h" #include "network.h" namespace metabasenet { class CService : public IService { public: CService(); ~CService(); /* System */ void Stop() override; /* Network */ int GetPeerCount() override; void GetPeers(std::vector<network::CBbPeerInfo>& vPeerInfo) override; bool AddNode(const hcode::CNetHost& node) override; bool RemoveNode(const hcode::CNetHost& node) override; /* Blockchain & Tx Pool*/ int GetForkCount() override; bool HaveFork(const uint256& hashFork) override; int GetForkHeight(const uint256& hashFork) override; bool GetForkLastBlock(const uint256& hashFork, int& nLastHeight, uint256& hashLastBlock) override; void ListFork(std::vector<std::pair<uint256, CProfile>>& vFork, bool fAll = false) override; bool GetForkContext(const uint256& hashFork, CForkContext& ctxtFork) override; bool VerifyForkName(const uint256& hashFork, const std::string& strForkName, const uint256& hashBlock = uint256()) override; bool GetForkGenealogy(const uint256& hashFork, std::vector<std::pair<uint256, int>>& vAncestry, std::vector<std::pair<int, uint256>>& vSubline) override; bool GetBlockLocation(const uint256& hashBlock, uint256& hashFork, int& nHeight) override; int GetBlockCount(const uint256& hashFork) override; bool GetBlockHash(const uint256& hashFork, int nHeight, uint256& hashBlock) override; bool GetBlockHash(const uint256& hashFork, int nHeight, std::vector<uint256>& vBlockHash) override; bool GetBlockNumberHash(const uint256& hashFork, const uint64 nNumber, uint256& hashBlock) override; bool GetBlock(const uint256& hashBlock, CBlock& block, uint256& hashFork, int& nHeight) override; bool GetBlockEx(const uint256& hashBlock, CBlockEx& block, uint256& hashFork, int& nHeight) override; bool GetLastBlockOfHeight(const uint256& hashFork, const int nHeight, uint256& hashBlock, int64& nTime) override; bool GetBlockStatus(const uint256& hashBlock, CBlockStatus& status) override; bool GetLastBlockStatus(const uint256& hashFork, CBlockStatus& status) override; void GetTxPool(const uint256& hashFork, std::vector<std::pair<uint256, std::size_t>>& vTxPool) override; void ListTxPool(const uint256& hashFork, const CDestination& dest, std::vector<CTxInfo>& vTxPool, const int64 nGetOffset = 0, const int64 nGetCount = 0) override; bool GetTransaction(const uint256& txid, CTransaction& tx, uint256& hashBlock) override; Errno SendTransaction(CTransaction& tx) override; bool GetVotes(const CDestination& destDelegate, uint256& nVotes, string& strFailCause) override; bool ListDelegate(uint32 nCount, std::multimap<uint256, CDestination>& mapVotes) override; bool GetTransactionReceipt(const uint256& hashFork, const uint256& txid, CTransactionReceipt& receipt, uint256& hashBlock, uint256& nBlockGasUsed) override; bool CallContract(const uint256& hashFork, const CDestination& from, const CDestination& to, const uint256& nAmount, const uint256& nGasPrice, const uint256& nGas, const bytes& btContractParam, int& nStatus, bytes& btResult) override; bool GetDefiRelationSign(const uint256& hashFork, const CDestination& destSub, const CDestination& destParent, bytes& btSignData) override; /* Wallet */ bool HaveKey(const crypto::CPubKey& pubkey, const int32 nVersion = -1) override; std::size_t GetPubKeys(std::set<crypto::CPubKey>& setPubKey, 
const uint64 nPos, const uint64 nCount) override; bool GetKeyStatus(const crypto::CPubKey& pubkey, int& nVersion, bool& fLocked, int64& nAutoLockTime, bool& fPublic) override; boost::optional<std::string> MakeNewKey(const crypto::CCryptoString& strPassphrase, crypto::CPubKey& pubkey) override; boost::optional<std::string> AddKey(const crypto::CKey& key) override; boost::optional<std::string> RemoveKey(const crypto::CPubKey& pubkey) override; bool ImportKey(const std::vector<unsigned char>& vchKey, crypto::CPubKey& pubkey) override; bool ExportKey(const crypto::CPubKey& pubkey, std::vector<unsigned char>& vchKey) override; bool EncryptKey(const crypto::CPubKey& pubkey, const crypto::CCryptoString& strPassphrase, const crypto::CCryptoString& strCurrentPassphrase) override; bool Lock(const crypto::CPubKey& pubkey) override; bool Unlock(const crypto::CPubKey& pubkey, const crypto::CCryptoString& strPassphrase, int64 nTimeout) override; bool GetBalance(const uint256& hashFork, const uint256& hashLastBlock, const CDestination& dest, CWalletBalance& balance) override; bool SignSignature(const crypto::CPubKey& pubkey, const uint256& hash, std::vector<unsigned char>& vchSig) override; bool SignTransaction(CTransaction& tx) override; bool HaveTemplate(const CTemplateId& tid) override; void GetTemplateIds(std::set<CTemplateId>& setTid, const uint64 nPos, const uint64 nCount) override; bool AddTemplate(CTemplatePtr& ptr) override; CTemplatePtr GetTemplate(const CTemplateId& tid) override; bool RemoveTemplate(const CTemplateId& tid) override; bool ListTransaction(const uint256& hashFork, const CDestination& dest, const uint64 nOffset, const uint64 nCount, const bool fReverse, std::vector<CDestTxInfo>& vTx) override; boost::optional<std::string> CreateTransaction(const uint256& hashFork, const CDestination& destFrom, const CDestination& destTo, const bytes& btToData, const uint64 nTxType, const uint256& nAmount, const uint64 nNonce, const uint256& nGasPriceIn, const uint256& mGasIn, const bytes& vchData, const bytes& btFormatData, const bytes& btContractCode, const bytes& btContractParam, CTransaction& txNew) override; bool AesEncrypt(const crypto::CPubKey& pubkeyLocal, const crypto::CPubKey& pubkeyRemote, const std::vector<uint8>& vMessage, std::vector<uint8>& vCiphertext) override; bool AesDecrypt(const crypto::CPubKey& pubkeyLocal, const crypto::CPubKey& pubkeyRemote, const std::vector<uint8>& vCiphertext, std::vector<uint8>& vMessage) override; void GetWalletDestinations(std::set<CDestination>& setDest) override; bool GetContractCodeContext(const uint256& hashFork, const uint256& hashContractCode, CContractCodeContext& ctxtContractCode) override; bool ListContractCodeContext(const uint256& hashFork, const uint256& txid, std::map<uint256, CContractCodeContext>& mapContractCode) override; bool ListContractAddress(const uint256& hashFork, std::map<uint256, CContractAddressContext>& mapContractAddress) override; bool GeDestContractContext(const uint256& hashFork, const CDestination& dest, CContractAddressContext& ctxtContract) override; bool GetContractSource(const uint256& hashFork, const uint256& hashSource, bytes& btSource) override; bool GetContractCode(const uint256& hashFork, const uint256& hashCode, bytes& btCode) override; bool GetDestTemplateData(const uint256& hashFork, const CDestination& dest, bytes& btTemplateData) override; bool RetrieveInviteParent(const uint256& hashFork, const CDestination& destSub, CDestination& destParent) override; bool ListDefiInviteRelation(const uint256& hashFork, 
const CDestination& destParent, std::map<CDestination, std::set<CDestination>>& mapDefiInvite) override; /* Mint */ bool GetWork(std::vector<unsigned char>& vchWorkData, int& nPrevBlockHeight, uint256& hashPrev, uint32& nPrevTime, int& nAlgo, int& nBits, const CTemplateMintPtr& templMint) override; Errno SubmitWork(const std::vector<unsigned char>& vchWorkData, const CTemplateMintPtr& templMint, crypto::CKey& keyMint, uint256& hashBlock) override; protected: bool HandleInitialize() override; void HandleDeinitialize() override; bool HandleInvoke() override; void HandleHalt() override; bool SetContractTransaction(const bytes& btFormatData, const bytes& btContractCode, const bytes& btContractParam, CTransaction& txNew, std::string& strErr); bool SetTemplateTransaction(const bytes& btFormatData, const bytes& vchData, const bytes& btToData, CTransaction& txNew, std::string& strErr); protected: ICoreProtocol* pCoreProtocol; IBlockChain* pBlockChain; ITxPool* pTxPool; IDispatcher* pDispatcher; IWallet* pWallet; CNetwork* pNetwork; IForkManager* pForkManager; network::INetChannel* pNetChannel; }; } // namespace metabasenet #endif //METABASENET_SERVICE_H
from confluent_kafka import Producer
from bus import rest
import json


class BusProducer:
    def __init__(self):
        # Pre-shared credentials
        self.credentials = {
            "api_key": "<KEY>",
            "kafka_admin_url": "https://kafka-admin-prod02.messagehub.services.eu-gb.bluemix.net:443",
            "kafka_brokers_sasl": [
                "kafka03-prod02.messagehub.services.eu-gb.bluemix.net:9093",
                "kafka02-prod02.messagehub.services.eu-gb.bluemix.net:9093",
                "kafka04-prod02.messagehub.services.eu-gb.bluemix.net:9093",
                "kafka05-prod02.messagehub.services.eu-gb.bluemix.net:9093",
                "kafka01-prod02.messagehub.services.eu-gb.bluemix.net:9093"
            ]
        }

        # Construct required configuration
        self.configuration = {
            'client.id': 'CRCL_producer',
            'group.id': 'CRCL_producer_group',
            'bootstrap.servers': ','.join(self.credentials['kafka_brokers_sasl']),
            'security.protocol': 'SASL_SSL',
            'ssl.ca.location': '/etc/ssl/certs',
            'sasl.mechanisms': 'PLAIN',
            'sasl.username': self.credentials['api_key'][0:16],
            'sasl.password': self.credentials['api_key'][16:48],
            'api.version.request': True
        }

        self.producer = Producer(self.configuration)

    def send(self, topic, message):
        message = json.dumps(message, indent=3)
        # print("\n INSIDE SEND: ", type( message ) )

        # Check if topic exists and create it if not
        if not self.handle_topic(topic):
            return False

        # Produce and flush message to bus
        try:
            self.producer.produce(topic, message.encode('utf-8'), 'key', -1, self.on_delivery)
            # self.producer.poll(0)
            self.producer.flush()
        except Exception as err:
            print('Sending data failed')
            print(err)
            return False

        return True

    def handle_topic(self, topic_name):
        # Create rest client to handle topics
        try:
            rest_client = rest.MessageHubRest(self.credentials['kafka_admin_url'], self.credentials['api_key'])
        except Exception as e:
            print(e)
            return False

        # List all topics
        try:
            response = rest_client.list_topics()
            topics = json.loads(response.text)
        except Exception as e:
            print(e)
            return False

        # Check if desired topic exists in topic list
        topic_exists = False
        for topic in topics:
            if topic['name'] == topic_name:
                topic_exists = True

        # If topic does not exist, create it
        if not topic_exists:
            try:
                response = rest_client.create_topic(topic_name, 1, 1)
                print(response.text)
            except Exception as e:
                print(e)
                return False

        return True

    def on_delivery(self, err, msg):
        if err:
            # print('Delivery report: Failed sending message {0}'.format(msg.value()))
            print(err)
            # We could retry sending the message
        else:
            print('Message produced, offset: {0}'.format(msg.offset()))
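A minimal usage sketch for the producer above: instantiate it and publish a JSON-serializable payload. The topic name and payload are hypothetical; send() itself handles topic creation and flushing, as shown in the class.

# Hypothetical usage of BusProducer; the topic and payload are illustrative only.
producer = BusProducer()

message = {"type": "status", "payload": {"ok": True}}
if producer.send("CRCL_test_topic", message):
    print("message delivered")
else:
    print("delivery failed")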
The present invention relates to a method of recognizing a radar target object type and apparatus therefor. There is a non-cooperative target recognition (NCTR) problem when a radar system, which is either ground-based or airborne, attempts to recognize a target object type. Most existing or proposed radar NCTR systems require high range resolution, that is, large transmit bandwidths, polarization diversity and imaging, that is, SAR or ISAR, capability. The principal object of the invention is to provide a method of recognizing a radar target object type, which method may be used by low frequency radars. An object of the invention is to provide a method of recognizing a radar target object type, which method may be used by law enforcement radars. Another object of the invention is to provide a method of recognizing a radar target object type, which method may be used by search radars in efforts to combat drug smuggling. Still another object of the invention is to provide a method of recognizing a radar target object type, which method eliminates the need for, and does not require, high range resolution, polarization diversity and imaging capability. Yet another object of the invention is to provide a method of recognizing a radar target object type, which method has few and readily completed steps. Another object of the invention is to provide apparatus of simple structure, which is inexpensive in manufacture, for recognizing a radar target object type. Still another object of the invention is to provide apparatus for recognizing a radar target object type, which apparatus is efficient, effective and reliable in operation. Yet another object of the invention is to provide apparatus for recognizing a radar target object type, which apparatus has fewer components than known similar apparatus.
# Generated by Django 3.0.1 on 2020-01-21 18:37

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('journeys', '0002_activejourney_journey_end'),
        ('journeys', '0003_activejourney_log'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='activejourney',
            name='journey_end',
        ),
    ]
/**
 * @param args the command line arguments
 */
public static void main(String args[])
{
    if (args.length != 1)
    {
        System.err.println("Usage: SimpleBrowser <url>");
        System.exit(0);
    }

    try {
        DocumentSource docSource = new DefaultDocumentSource(args[0]);
        DOMSource parser = new DefaultDOMSource(docSource);
        Document doc = parser.parse();

        DOMAnalyzer da = new DOMAnalyzer(doc, docSource.getURL());
        da.attributesToStyles();
        da.addStyleSheet(null, CSSNorm.stdStyleSheet(), DOMAnalyzer.Origin.AGENT);
        da.addStyleSheet(null, CSSNorm.userStyleSheet(), DOMAnalyzer.Origin.AGENT);
        da.addStyleSheet(null, CSSNorm.formsStyleSheet(), DOMAnalyzer.Origin.AGENT);
        da.getStyleSheets();

        SimpleBrowser test = new SimpleBrowser(da.getRoot(), docSource.getURL(), da);
        test.setSize(1275, 750);
        test.setVisible(true);

        docSource.close();
    } catch (Exception e) {
        System.out.println("Error: " + e.getMessage());
        e.printStackTrace();
    }
}
package com.lachesis.support.common.util.coder;

import java.io.UnsupportedEncodingException;
import java.security.InvalidKeyException;
import java.security.NoSuchAlgorithmException;
import java.security.SecureRandom;
import java.security.spec.InvalidKeySpecException;

import javax.crypto.BadPaddingException;
import javax.crypto.Cipher;
import javax.crypto.IllegalBlockSizeException;
import javax.crypto.NoSuchPaddingException;
import javax.crypto.SecretKey;
import javax.crypto.SecretKeyFactory;
import javax.crypto.spec.DESKeySpec;

import org.apache.commons.codec.binary.Base64;

import com.lachesis.support.common.util.crypt.CryptUtils;
import com.lachesis.support.common.util.exception.CryptException;

public class DesCryptStringCoder implements StringCoder {
    private static final String ENCRYPTION_ALGORITHM_DES = "DES";
    private static final String ENCODING = CryptUtils.INTERNAL_ENCODING;

    private String encryptionKey = "<KEY>";

    @Override
    public String encode(String plainText) throws CryptException {
        try {
            return doEncrypt(plainText);
        } catch (Exception e) {
            throw new CryptException(e);
        }
    }

    @Override
    public String decode(String cryptograph) throws CryptException {
        try {
            return doDecrypt(cryptograph);
        } catch (Exception e) {
            throw new CryptException(e);
        }
    }

    private String doDecrypt(String cryptograph) throws InvalidKeyException, UnsupportedEncodingException,
            NoSuchAlgorithmException, InvalidKeySpecException, NoSuchPaddingException, IllegalBlockSizeException,
            BadPaddingException {
        byte[] decodedCipherBytes = Base64.decodeBase64(cryptograph);
        byte[] decryptedBytes = deDecryptWithDes(decodedCipherBytes, getEncryptionKey());
        return new String(decryptedBytes, ENCODING);
    }

    private byte[] deDecryptWithDes(byte[] cipherBytes, String encryptionKey) throws InvalidKeyException,
            UnsupportedEncodingException, NoSuchAlgorithmException, InvalidKeySpecException, NoSuchPaddingException,
            IllegalBlockSizeException, BadPaddingException {
        SecureRandom random = new SecureRandom();
        DESKeySpec desKey = new DESKeySpec(encryptionKey.getBytes(ENCODING));
        SecretKeyFactory keyFactory = SecretKeyFactory.getInstance(ENCRYPTION_ALGORITHM_DES);
        SecretKey securekey = keyFactory.generateSecret(desKey);

        Cipher cipher = Cipher.getInstance(ENCRYPTION_ALGORITHM_DES);
        cipher.init(Cipher.DECRYPT_MODE, securekey, random);

        byte[] decryptBytes = cipher.doFinal(cipherBytes);
        return decryptBytes;
    }

    private String doEncrypt(String plainText) throws InvalidKeyException, NoSuchAlgorithmException,
            UnsupportedEncodingException, InvalidKeySpecException, NoSuchPaddingException, IllegalBlockSizeException,
            BadPaddingException {
        byte[] cipherBytes = doEncryptStringWithDes(plainText, getEncryptionKey());
        String encodedCipherString = Base64.encodeBase64URLSafeString(cipherBytes);
        return encodedCipherString;
    }

    private byte[] doEncryptStringWithDes(String plainText, String encrytionKey) throws NoSuchAlgorithmException,
            InvalidKeyException, UnsupportedEncodingException, InvalidKeySpecException, NoSuchPaddingException,
            IllegalBlockSizeException, BadPaddingException {
        SecureRandom random = new SecureRandom();
        DESKeySpec desKey = new DESKeySpec(encrytionKey.getBytes(ENCODING));
        SecretKeyFactory keyFactory = SecretKeyFactory.getInstance(ENCRYPTION_ALGORITHM_DES);
        SecretKey securekey = keyFactory.generateSecret(desKey);

        Cipher cipher = Cipher.getInstance(ENCRYPTION_ALGORITHM_DES);
        cipher.init(Cipher.ENCRYPT_MODE, securekey, random);

        byte[] cipherBytes = cipher.doFinal(plainText.getBytes(ENCODING));
        return cipherBytes;
    }

    private String getEncryptionKey() {
        return encryptionKey;
    }

    public void setEncryptionKey(String encryptionKey) {
        this.encryptionKey = encryptionKey;
    }
}
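For readers more comfortable in Python, the sketch below mirrors what this coder does — DES encryption of a string followed by URL-safe Base64 — using the pycryptodome package. It is an assumption-laden illustration, not part of this library: the key is a placeholder, the internal encoding is assumed to be UTF-8, and Java's Cipher.getInstance("DES") typically defaults to ECB with PKCS5 padding, which the sketch reproduces explicitly. Note that single DES is obsolete and should not be used in new designs.

# Illustrative Python analogue of DesCryptStringCoder (assumes pycryptodome).
# Single DES is cryptographically weak; this only mirrors the Java code above.
import base64
from Crypto.Cipher import DES
from Crypto.Util.Padding import pad, unpad

KEY = b"8bytekey"  # placeholder; DES keys are 8 bytes

def encode(plain_text: str) -> str:
    cipher = DES.new(KEY, DES.MODE_ECB)  # Java "DES" typically means DES/ECB/PKCS5Padding
    cipher_bytes = cipher.encrypt(pad(plain_text.encode("utf-8"), DES.block_size))
    # encodeBase64URLSafeString in commons-codec strips the '=' padding
    return base64.urlsafe_b64encode(cipher_bytes).rstrip(b"=").decode("ascii")

def decode(cryptograph: str) -> str:
    padded = cryptograph + "=" * (-len(cryptograph) % 4)  # restore stripped padding
    cipher = DES.new(KEY, DES.MODE_ECB)
    plain = unpad(cipher.decrypt(base64.urlsafe_b64decode(padded)), DES.block_size)
    return plain.decode("utf-8")

assert decode(encode("hello")) == "hello"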
package br.com.zup.proposta.api.controller.cartao.dto.response;

import br.com.zup.proposta.dominio.modelo.cartao.bloqueio.StatusBloqueio;

public enum StatusBloqueioResponse {
    BLOQUEADO {
        @Override
        public StatusBloqueio getStatus() {
            return StatusBloqueio.EFETUADO;
        }
    },
    FALHA {
        @Override
        public StatusBloqueio getStatus() {
            return StatusBloqueio.FALHA;
        }
    };

    public abstract StatusBloqueio getStatus();
}
Diagram of the brain of a person with Alzheimer's Disease. Credit: Wikipedia/public domain. A recently-recognized pathologic protein in the brain may play a larger role in the development of clinical Alzheimer's disease dementia than previously recognized, according to a study by researchers at Rush University Medical Center. The findings of the study of nearly 1,000 older adults were published in the Sept. 30 issue of the journal, Brain. "This finding could help researchers to understand the cause of memory loss and lead to new ways to approach studying Alzheimer's disease," said Bryan James, PhD, study author and epidemiologist with the Rush Alzheimer's Disease Center. "Our study found that when the main characteristic pathologies of Alzheimer's disease, plaques and tangles, were mixed with a pathologic protein called TDP-43 in the brain, the combination was more likely to result in diagnosed Alzheimer's dementia than plaques and tangles alone." The abnormal protein, TDP-43 (short for 'hyperphosphorylated transactive response DNA-binding protein 43'), previously has been associated with frontal temporal dementia and amyotrophic lateral sclerosis (ALS, sometimes called Lou Gehrig's disease). In recent years, TDP-43 also has been found in the brains of persons with other diseases, but most recently in Alzheimer's disease. Mixed pathologies increase Alzheimer's risk The hallmark pathologies of Alzheimer's disease are the accumulation of the protein beta-amyloid (called plaques) and an abnormal form of the protein tau (called tangles). However, research from the Rush Alzheimer's Disease Center and other groups has shown that the majority of persons with clinical Alzheimer's dementia also develop other disease pathologies in their brains as well, such as small strokes or protein deposits called Lewy bodies. This combination, called 'mixed pathologies,' increases the risk for developing diagnosed Alzheimer's dementia above and beyond just having plaques and tangles in the brain. "The clinical disease that we call 'Alzheimer's disease' is looking more and more like the result of the accumulation of a number of disease processes in the brain of older persons," James said. The majority of persons with diagnosed Alzheimer's dementia actually have mixed pathologies in their brains—not just the plaques and tangles that are the known hallmarks of Alzheimer's disease. "In particular, mixed Alzheimer's and TDP-43 pathologies appear to be an under-recognized yet common form of mixed pathologies that contributes to the development of clinical Alzheimer's dementia," James said. "This is one of the first studies to examine TDP-43 and Alzheimer's disease in the context of mixed pathologies." TDP-43 found in two-thirds of those with Alzheimer's dementia The Brain paper built on previous research by examining whether TDP-43 was associated with an increased likelihood of a diagnosis of Alzheimer's dementia in persons both with and without pathologic Alzheimer's disease. The new study examined brain pathology, drawing on tissue samples from 946 deceased older men and women who had been enrolled in one of two cohort studies by the Rush Alzheimer's Disease Center, the Rush Memory and Aging Project or the Religious Orders Study. Participants in both studies agree to donate their brains to research after their death. TDP-43 was present in the brains of about half of the participants and in two-thirds of the brains of persons who had been diagnosed with Alzheimer's dementia while alive. 
More than a third of the participants had mixed Alzheimer's (plaques and tangles) and TDP-43 pathologies in their brain. Mixed Alzheimer's and TDP-43 pathologies were associated with a higher likelihood of diagnosed Alzheimer's dementia at death than plaques and tangles alone. "These data are exciting, because an improved understanding of the TDP-43 protein has potential to guide alternative treatment strategies for Alzheimer's disease," James said. Provided by Rush University Medical Center
#include <iostream>
#include <set>
#include <cassert>
using namespace std;

int main ()
{
  // set some initial values:
  int b[3] = {3,6,9};
  set<int> myset(b, b+3);

  // 3 is an element of the set, so count(3) returns 1.
  // (The original asserted count(3) != 1, which fails for this input;
  // the assertion below matches the constructed set.)
  assert(myset.count(3) == 1);

  return 0;
}
One person is dead after firefighters pulled three family members from a smoke-filled apartment in a Lawrence Heights low-rise Monday night. Toronto Fire Services reported a two-alarm fire erupted shortly before 9:30 p.m. in the ground-floor unit at 5 Flemington Rd. — a three-storey building near Lawrence Ave. W. and Allen Rd. “Thoughts and prayers are with those affected by this tragic loss,” he added. The victims are believed to be a woman in her 40s and her two teenage sons, according to media reports. The victims’ names and ages have not been officially released, but William Hay, an investigator with the Ontario Fire Marshal’s office who was at the scene Tuesday, confirmed all three are family members. He said the preliminary investigation suggests the fire started in the living room of the two-bedroom apartment. “We will be examining that area to see what ignition sources exist within it and hopefully by doing that we will be able to determine exactly what caused the fire to ignite,” Hay said. The three windows of the ground-floor corner apartment were each covered with a tarp to protect the scene from rain. [Photo caption: The Ontario Fire Marshal’s office at the scene of a deadly ground-floor apartment fire at 5 Flemington Rd. in Toronto on Tuesday, Dec. 5, 2017.] The fire was contained to the one apartment, so residents of the second and third floors were allowed to return to their homes Tuesday morning. One area resident, who did not give her name, wondered why the family was unable to escape the apartment when the fire happened at a time when most people would have been awake. “It’s so sad,” she said.
import { Component, OnInit } from '@angular/core';
import { ActivatedRoute, Params, Router } from '@angular/router';
import { FormGroup, FormControl, FormArray, Validators } from '@angular/forms';

// The backing service does not exist yet; everything that depends on it
// is kept commented out below until it does.
// import { ProductListingContractService } from '../productListingContract.service';

@Component({
  selector: 'app-productListingContract-edit',
  templateUrl: './ProductListingContract-edit.component.html',
  styleUrls: ['./ProductListingContract-edit.component.css']
})
export class ProductListingContractEditComponent implements OnInit {
  id: number; // numeric route parameter; `+params['id']` below coerces the string
  editMode = false;
  productListingContractForm: FormGroup;

  constructor(
    private route: ActivatedRoute,
    // private productListingContractService: ProductListingContractService,
    private router: Router
  ) {}

  ngOnInit() {
    // this.route.params.subscribe((params: Params) => {
    //   this.id = +params['id'];
    //   this.editMode = params['id'] != null;
    //   this.initForm();
    // });
  }

  onSubmit() {
    // const newProductListingContract = new ProductListingContract(
    //   this.productListingContractForm.value['name'],
    //   this.productListingContractForm.value['description'],
    //   this.productListingContractForm.value['imagePath'],
    //   this.productListingContractForm.value['ingredients']);
    // if (this.editMode) {
    //   this.productListingContractService.updateRecipe(this.id, this.productListingContractForm.value);
    // } else {
    //   this.productListingContractService.addRecipe(this.productListingContractForm.value);
    // }
    // this.onCancel();
  }

  onAddIngredient() {
    // (<FormArray>this.productListingContractForm.get('ingredients')).push(
    //   new FormGroup({
    //     name: new FormControl(null, Validators.required),
    //     amount: new FormControl(null, [
    //       Validators.required,
    //       Validators.pattern(/^[1-9]+[0-9]*$/)
    //     ])
    //   })
    // );
  }

  // onDeleteIngredient(index: number) {
  //   (<FormArray>this.productListingContractForm.get('ingredients')).removeAt(index);
  // }

  // onCancel() {
  //   this.router.navigate(['../'], { relativeTo: this.route });
  // }

  // private initForm() {
  //   let productListingContractName = '';
  //   let productListingContractImagePath = '';
  //   let productListingContractDescription = '';
  //   let productListingContractIngredients = new FormArray([]);
  //   if (this.editMode) {
  //     const productListingContract = this.productListingContractService.getRecipe(this.id);
  //     productListingContractName = productListingContract.name;
  //     productListingContractImagePath = productListingContract.imagePath;
  //     productListingContractDescription = productListingContract.description;
  //     if (productListingContract['ingredients']) {
  //       for (let ingredient of productListingContract.ingredients) {
  //         productListingContractIngredients.push(
  //           new FormGroup({
  //             name: new FormControl(ingredient.name, Validators.required),
  //             amount: new FormControl(ingredient.amount, [
  //               Validators.required,
  //               Validators.pattern(/^[1-9]+[0-9]*$/)
  //             ])
  //           })
  //         );
  //       }
  //     }
  //   }
  //   this.productListingContractForm = new FormGroup({
  //     name: new FormControl(productListingContractName, Validators.required),
  //     imagePath: new FormControl(productListingContractImagePath, Validators.required),
  //     description: new FormControl(productListingContractDescription, Validators.required),
  //     ingredients: productListingContractIngredients
  //   });
  // }
}
"""Defines models related to rooms.""" import string from sqlite3 import IntegrityError import random from blinker import Namespace from flask import request from flask_login import current_user from sqlalchemy import Column, INTEGER, VARCHAR, ForeignKey, Table from sqlalchemy.orm import relationship from base.models import SerializableMixin from database import Base __author__ = "<NAME> <<EMAIL>>" participants_in_room = Table( "participants_in_room", Base.metadata, Column("user_id", INTEGER, ForeignKey("users.id")), Column("room_id", INTEGER, ForeignKey("rooms.id")) ) namespace = Namespace() deleted = namespace.signal("deleted") class Room(SerializableMixin, Base): """Defines a model for rooms.""" __tablename__ = "rooms" __excluded__ = set("owner_id") id = Column(INTEGER, primary_key=True) name = Column(VARCHAR(255)) token = Column(VARCHAR(6), unique=True) owner_id = Column(INTEGER, ForeignKey("users.id")) owner = relationship("User") participants = relationship("User", secondary=participants_in_room, backref="rooms") def as_dict(self): """Get the object as a dictionary.""" base = super().as_dict() base["owning"] = current_user.is_authenticated and self.owner_id == current_user.id return base def set_token(self, session, size=6): """ Create a random token to identify the room. :param session: session to use to commit the changes :param size: size of the token to generate """ e = None for i in range(1000): try: self.token = "".join(random.choice(string.ascii_uppercase + string.digits) for _ in range(size)) session.commit() except IntegrityError as exc: e = exc else: return raise e
// UpdateAdminState updates the provision watcher admin state in cache by id. func (p *provisionWatcherCache) UpdateAdminState(id string, state models.AdminState) error { p.mutex.Lock() defer p.mutex.Unlock() name, ok := p.nameMap[id] if !ok { errMsg := fmt.Sprintf("failed to find provisionwatcher with given id %s in cache", id) return errors.NewCommonEdgeX(errors.KindInvalidId, errMsg, nil) } p.pwMap[name].AdminState = state return nil }
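The Go method above relies on a common two-map cache layout (id to name, name to record) guarded by a lock. A loose Python sketch of the same pattern, with invented names and assuming records expose a mutable `admin_state` attribute, not the EdgeX implementation itself:

import threading


class ProvisionWatcherCache:
    """Toy analogue of the Go cache: id -> name, name -> record."""

    def __init__(self):
        self._lock = threading.Lock()
        self._name_by_id = {}       # watcher id -> watcher name
        self._record_by_name = {}   # watcher name -> record object

    def update_admin_state(self, watcher_id, state):
        # Lock the whole lookup-and-mutate so readers never see a
        # half-updated cache, mirroring the mutex in the Go version.
        with self._lock:
            name = self._name_by_id.get(watcher_id)
            if name is None:
                raise KeyError(
                    f"failed to find provisionwatcher with given id "
                    f"{watcher_id} in cache"
                )
            self._record_by_name[name].admin_state = state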
/*******************************************************************************
 * Copyright (c) 2006 IBM Corporation and others.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * which accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *
 * Contributors:
 *     <NAME> (<EMAIL>) - initial API and implementation
 *         (bug 102632: [JUnit] Support for JUnit 4.)
 *******************************************************************************/
package org.eclipse.jdt.internal.junit4.runner;

import org.eclipse.jdt.internal.junit.runner.ITestIdentifier;
import org.eclipse.jdt.internal.junit.runner.IVisitsTestTrees;
import org.junit.runner.Description;
import org.junit.runner.Request;

public class JUnit4TestClassReference extends JUnit4TestReference {
	protected final Class<?> fClass;

	public JUnit4TestClassReference(Class<?> clazz) {
		super(Request.aClass(clazz));
		fClass= clazz;
	}

	public int countTestCases() {
		return fRunner.testCount();
	}

	public String getName() {
		return fClass.getName();
	}

	public void sendTree(final IVisitsTestTrees notified) {
		sendDescriptionTree(notified, fRunner.getDescription());
	}

	private void sendDescriptionTree(final IVisitsTestTrees notified, org.junit.runner.Description description) {
		if (description.isTest()) {
			notified.visitTreeEntry(new JUnit4Identifier(description), false, 1);
		} else {
			notified.visitTreeEntry(new JUnit4Identifier(description), true, description.getChildren().size());
			for (Description child : description.getChildren()) {
				sendDescriptionTree(notified, child);
			}
		}
	}

	@Override
	public boolean equals(Object obj) {
		if (! (obj instanceof JUnit4TestReference))
			return false;

		JUnit4TestReference ref= (JUnit4TestReference) obj;
		return (ref.getIdentifier().equals(getIdentifier()));
	}

	@Override
	public int hashCode() {
		return fClass.hashCode();
	}

	public ITestIdentifier getIdentifier() {
		return new JUnit4Identifier(fRunner.getDescription());
	}
}
This Thanksgiving, six American Ebola survivors are thankful just to be alive for the holiday ― and for the chance to do it all over again.

Dr. Kent Brantly, Dr. Rick Sacra, aid worker Nancy Writebol, nurse Nina Pham, nurse Amber Vinson and journalist Ashoka Mukpo met for the first time Wednesday on the "Today" show, and they all agreed: Ebola is a ghastly disease, but their work is worth the risk.

"I don't think we can really compare war stories. This is a horrible experience for anybody who goes through it," said Brantly, who contracted the virus while working in Liberia.

The six ― who all contracted the virus while either treating Ebola patients or reporting on the epidemic ― said they have no regrets about their choices. "I'm thankful for God's grace and the second chance and the opportunity just to continue to serve, possibly back in Liberia," said Writebol, who developed Ebola in July while caring for patients in Liberia. "I'm thankful for the opportunity to go back," said Sacra, who also contracted the virus while on a health mission to Liberia.

Pham and Vinson both developed Ebola in Texas while treating the first patient to be diagnosed on U.S. soil. Both said they're happy to care for all patients ― even those with the deadly virus. "Nursing is a calling," Pham said on the "Today" show. "It's just in our personalities and in our nature to help whoever needs help."

The six had not met as a group before, but many were already connected: Brantly ― the first of the six to recover ― donated possibly life-saving plasma to many of the other survivors. "He's a selfless man and he's like our angel and gave us a second chance at life," Pham said.

Aside from their health, the survivors said they're thankful for the "little things" ― "like when I'm cooking, smelling the food as I cook, doing laundry, feeling the warmth and the smell of laundry right out of the dryer," Vinson said.

Missing from the Wednesday reunion was Dr. Craig Spencer, who was cured of Ebola earlier this month after he developed it while treating patients in Guinea.

Two more Ebola patients have died on U.S. soil: Thomas Eric Duncan, the first person to be diagnosed with the disease in the U.S., and Dr. Martin Salia. The 2014 Ebola epidemic has killed 5,459 people in West Africa this year. At least 9,596 people have been diagnosed with the virus since the March onset of the outbreak.
/* * Adito * * Copyright (C) 2003-2006 3SP LTD. All Rights Reserved * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ package com.maverick.crypto.asn1; import java.io.IOException; public class DERGeneralString extends DERObject implements DERString { private String string; public static DERGeneralString getInstance( Object obj) { if (obj == null || obj instanceof DERGeneralString) { return (DERGeneralString) obj; } if (obj instanceof ASN1OctetString) { return new DERGeneralString(((ASN1OctetString) obj).getOctets()); } if (obj instanceof ASN1TaggedObject) { return getInstance(((ASN1TaggedObject) obj).getObject()); } throw new IllegalArgumentException("illegal object in getInstance: " + obj.getClass().getName()); } public static DERGeneralString getInstance( ASN1TaggedObject obj, boolean explicit) { return getInstance(obj.getObject()); } public DERGeneralString(byte[] string) { char[] cs = new char[string.length]; for (int i = 0; i != cs.length; i++) { cs[i] = (char) (string[i] & 0xff); } this.string = new String(cs); } public DERGeneralString(String string) { this.string = string; } public String getString() { return string; } public byte[] getOctets() { char[] cs = string.toCharArray(); byte[] bs = new byte[cs.length]; for (int i = 0; i != cs.length; i++) { bs[i] = (byte) cs[i]; } return bs; } void encode(DEROutputStream out) throws IOException { out.writeEncoded(GENERAL_STRING, this.getOctets()); } public int hashCode() { return this.getString().hashCode(); } public boolean equals(Object o) { if (!(o instanceof DERGeneralString)) { return false; } DERGeneralString s = (DERGeneralString) o; return this.getString().equals(s.getString()); } }
/*
 *  linux/fs/proc/proc_misc.c
 *
 *  linux/fs/proc/array.c
 *  Copyright (C) 1992  by <NAME>
 *  based on ideas by <NAME>
 *
 *  This used to be the part of array.c. See the rest of history and credits
 *  there. I took this into a separate file and switched the thing to generic
 *  proc_file_inode_operations, leaving in array.c only per-process stuff.
 *  Inumbers allocation made dynamic (via create_proc_entry()).  AV, May 1999.
 *
 * Changes:
 * <NAME> : Encapsulated position metric calculations.
 *     <<EMAIL>>
 */

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/string.h>
#include <linux/mman.h>
#include <linux/proc_fs.h>
#include <linux/ioport.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/signal.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
#include <linux/seq_file.h>
#include <linux/times.h>
#include <linux/profile.h>
#include <linux/utsname.h>
#include <linux/blkdev.h>
#include <linux/hugetlb.h>
#include <linux/jiffies.h>
#include <linux/sysrq.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <linux/pid_namespace.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/tlb.h>
#include <asm/div64.h>
#include "internal.h"

#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)

/*
 * Warning: stuff below (imported functions) assumes that its output will fit
 * into one page. For some of those functions it may be wrong. Moreover, we
 * have a way to deal with that gracefully. Right now I used straightforward
 * wrappers, but this needs further analysis wrt potential overflows.
*/ extern int get_hardware_list(char *); extern int get_stram_list(char *); extern int get_filesystem_list(char *); extern int get_exec_domain_list(char *); extern int get_dma_list(char *); extern int get_locks_status (char *, char **, off_t, int); static int proc_calc_metrics(char *page, char **start, off_t off, int count, int *eof, int len) { if (len <= off+count) *eof = 1; *start = page + off; len -= off; if (len>count) len = count; if (len<0) len = 0; return len; } static int loadavg_read_proc(char *page, char **start, off_t off, int count, int *eof, void *data) { int a, b, c; int len; a = avenrun[0] + (FIXED_1/200); b = avenrun[1] + (FIXED_1/200); c = avenrun[2] + (FIXED_1/200); len = sprintf(page,"%d.%02d %d.%02d %d.%02d %ld/%d %d\n", LOAD_INT(a), LOAD_FRAC(a), LOAD_INT(b), LOAD_FRAC(b), LOAD_INT(c), LOAD_FRAC(c), nr_running(), nr_threads, current->nsproxy->pid_ns->last_pid); return proc_calc_metrics(page, start, off, count, eof, len); } static int uptime_read_proc(char *page, char **start, off_t off, int count, int *eof, void *data) { struct timespec uptime; struct timespec idle; int len; cputime_t idletime = cputime_add(init_task.utime, init_task.stime); do_posix_clock_monotonic_gettime(&uptime); cputime_to_timespec(idletime, &idle); len = sprintf(page,"%lu.%02lu %lu.%02lu\n", (unsigned long) uptime.tv_sec, (uptime.tv_nsec / (NSEC_PER_SEC / 100)), (unsigned long) idle.tv_sec, (idle.tv_nsec / (NSEC_PER_SEC / 100))); return proc_calc_metrics(page, start, off, count, eof, len); } static int meminfo_read_proc(char *page, char **start, off_t off, int count, int *eof, void *data) { struct sysinfo i; int len; unsigned long committed; unsigned long allowed; struct vmalloc_info vmi; long cached; /* * display in kilobytes. */ #define K(x) ((x) << (PAGE_SHIFT - 10)) si_meminfo(&i); si_swapinfo(&i); committed = atomic_read(&vm_committed_space); allowed = ((totalram_pages - hugetlb_total_pages()) * sysctl_overcommit_ratio / 100) + total_swap_pages; cached = global_page_state(NR_FILE_PAGES) - total_swapcache_pages - i.bufferram; if (cached < 0) cached = 0; get_vmalloc_info(&vmi); /* * Tagged format, for easy grepping and expansion. 
*/ len = sprintf(page, "MemTotal: %8lu kB\n" "MemFree: %8lu kB\n" "Buffers: %8lu kB\n" "Cached: %8lu kB\n" "SwapCached: %8lu kB\n" "Active: %8lu kB\n" "Inactive: %8lu kB\n" #ifdef CONFIG_HIGHMEM "HighTotal: %8lu kB\n" "HighFree: %8lu kB\n" "LowTotal: %8lu kB\n" "LowFree: %8lu kB\n" #endif "SwapTotal: %8lu kB\n" "SwapFree: %8lu kB\n" "Dirty: %8lu kB\n" "Writeback: %8lu kB\n" "AnonPages: %8lu kB\n" "Mapped: %8lu kB\n" "Slab: %8lu kB\n" "SReclaimable: %8lu kB\n" "SUnreclaim: %8lu kB\n" "PageTables: %8lu kB\n" "NFS_Unstable: %8lu kB\n" "Bounce: %8lu kB\n" "CommitLimit: %8lu kB\n" "Committed_AS: %8lu kB\n" "VmallocTotal: %8lu kB\n" "VmallocUsed: %8lu kB\n" "VmallocChunk: %8lu kB\n", K(i.totalram), K(i.freeram), K(i.bufferram), K(cached), K(total_swapcache_pages), K(global_page_state(NR_ACTIVE)), K(global_page_state(NR_INACTIVE)), #ifdef CONFIG_HIGHMEM K(i.totalhigh), K(i.freehigh), K(i.totalram-i.totalhigh), K(i.freeram-i.freehigh), #endif K(i.totalswap), K(i.freeswap), K(global_page_state(NR_FILE_DIRTY)), K(global_page_state(NR_WRITEBACK)), K(global_page_state(NR_ANON_PAGES)), K(global_page_state(NR_FILE_MAPPED)), K(global_page_state(NR_SLAB_RECLAIMABLE) + global_page_state(NR_SLAB_UNRECLAIMABLE)), K(global_page_state(NR_SLAB_RECLAIMABLE)), K(global_page_state(NR_SLAB_UNRECLAIMABLE)), K(global_page_state(NR_PAGETABLE)), K(global_page_state(NR_UNSTABLE_NFS)), K(global_page_state(NR_BOUNCE)), K(allowed), K(committed), (unsigned long)VMALLOC_TOTAL >> 10, vmi.used >> 10, vmi.largest_chunk >> 10 ); len += hugetlb_report_meminfo(page + len); return proc_calc_metrics(page, start, off, count, eof, len); #undef K } extern struct seq_operations fragmentation_op; static int fragmentation_open(struct inode *inode, struct file *file) { (void)inode; return seq_open(file, &fragmentation_op); } static const struct file_operations fragmentation_file_operations = { .open = fragmentation_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; extern struct seq_operations zoneinfo_op; static int zoneinfo_open(struct inode *inode, struct file *file) { return seq_open(file, &zoneinfo_op); } static const struct file_operations proc_zoneinfo_file_operations = { .open = zoneinfo_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static int version_read_proc(char *page, char **start, off_t off, int count, int *eof, void *data) { int len; len = snprintf(page, PAGE_SIZE, linux_proc_banner, utsname()->sysname, utsname()->release, utsname()->version); return proc_calc_metrics(page, start, off, count, eof, len); } extern struct seq_operations cpuinfo_op; static int cpuinfo_open(struct inode *inode, struct file *file) { return seq_open(file, &cpuinfo_op); } static const struct file_operations proc_cpuinfo_operations = { .open = cpuinfo_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static int devinfo_show(struct seq_file *f, void *v) { int i = *(loff_t *) v; if (i < CHRDEV_MAJOR_HASH_SIZE) { if (i == 0) seq_printf(f, "Character devices:\n"); chrdev_show(f, i); } #ifdef CONFIG_BLOCK else { i -= CHRDEV_MAJOR_HASH_SIZE; if (i == 0) seq_printf(f, "\nBlock devices:\n"); blkdev_show(f, i); } #endif return 0; } static void *devinfo_start(struct seq_file *f, loff_t *pos) { if (*pos < (BLKDEV_MAJOR_HASH_SIZE + CHRDEV_MAJOR_HASH_SIZE)) return pos; return NULL; } static void *devinfo_next(struct seq_file *f, void *v, loff_t *pos) { (*pos)++; if (*pos >= (BLKDEV_MAJOR_HASH_SIZE + CHRDEV_MAJOR_HASH_SIZE)) return NULL; return pos; } static void devinfo_stop(struct seq_file *f, 
void *v) { /* Nothing to do */ } static struct seq_operations devinfo_ops = { .start = devinfo_start, .next = devinfo_next, .stop = devinfo_stop, .show = devinfo_show }; static int devinfo_open(struct inode *inode, struct file *filp) { return seq_open(filp, &devinfo_ops); } static const struct file_operations proc_devinfo_operations = { .open = devinfo_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; extern struct seq_operations vmstat_op; static int vmstat_open(struct inode *inode, struct file *file) { return seq_open(file, &vmstat_op); } static const struct file_operations proc_vmstat_file_operations = { .open = vmstat_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; #ifdef CONFIG_PROC_HARDWARE static int hardware_read_proc(char *page, char **start, off_t off, int count, int *eof, void *data) { int len = get_hardware_list(page); return proc_calc_metrics(page, start, off, count, eof, len); } #endif #ifdef CONFIG_STRAM_PROC static int stram_read_proc(char *page, char **start, off_t off, int count, int *eof, void *data) { int len = get_stram_list(page); return proc_calc_metrics(page, start, off, count, eof, len); } #endif #ifdef CONFIG_BLOCK extern struct seq_operations partitions_op; static int partitions_open(struct inode *inode, struct file *file) { return seq_open(file, &partitions_op); } static const struct file_operations proc_partitions_operations = { .open = partitions_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; extern struct seq_operations diskstats_op; static int diskstats_open(struct inode *inode, struct file *file) { return seq_open(file, &diskstats_op); } static const struct file_operations proc_diskstats_operations = { .open = diskstats_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; #endif #ifdef CONFIG_MODULES extern struct seq_operations modules_op; static int modules_open(struct inode *inode, struct file *file) { return seq_open(file, &modules_op); } static const struct file_operations proc_modules_operations = { .open = modules_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; #endif #ifdef CONFIG_SLAB extern struct seq_operations slabinfo_op; extern ssize_t slabinfo_write(struct file *, const char __user *, size_t, loff_t *); static int slabinfo_open(struct inode *inode, struct file *file) { return seq_open(file, &slabinfo_op); } static const struct file_operations proc_slabinfo_operations = { .open = slabinfo_open, .read = seq_read, .write = slabinfo_write, .llseek = seq_lseek, .release = seq_release, }; #ifdef CONFIG_DEBUG_SLAB_LEAK extern struct seq_operations slabstats_op; static int slabstats_open(struct inode *inode, struct file *file) { unsigned long *n = kzalloc(PAGE_SIZE, GFP_KERNEL); int ret = -ENOMEM; if (n) { ret = seq_open(file, &slabstats_op); if (!ret) { struct seq_file *m = file->private_data; *n = PAGE_SIZE / (2 * sizeof(unsigned long)); m->private = n; n = NULL; } kfree(n); } return ret; } static int slabstats_release(struct inode *inode, struct file *file) { struct seq_file *m = file->private_data; kfree(m->private); return seq_release(inode, file); } static const struct file_operations proc_slabstats_operations = { .open = slabstats_open, .read = seq_read, .llseek = seq_lseek, .release = slabstats_release, }; #endif #endif static int show_stat(struct seq_file *p, void *v) { int i; unsigned long jif; cputime64_t user, nice, system, idle, iowait, irq, softirq, steal; u64 sum = 0; user = nice = system = idle = iowait = irq = softirq = 
steal = cputime64_zero; jif = - wall_to_monotonic.tv_sec; if (wall_to_monotonic.tv_nsec) --jif; for_each_possible_cpu(i) { int j; user = cputime64_add(user, kstat_cpu(i).cpustat.user); nice = cputime64_add(nice, kstat_cpu(i).cpustat.nice); system = cputime64_add(system, kstat_cpu(i).cpustat.system); idle = cputime64_add(idle, kstat_cpu(i).cpustat.idle); iowait = cputime64_add(iowait, kstat_cpu(i).cpustat.iowait); irq = cputime64_add(irq, kstat_cpu(i).cpustat.irq); softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq); steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal); for (j = 0 ; j < NR_IRQS ; j++) sum += kstat_cpu(i).irqs[j]; } seq_printf(p, "cpu %llu %llu %llu %llu %llu %llu %llu %llu\n", (unsigned long long)cputime64_to_clock_t(user), (unsigned long long)cputime64_to_clock_t(nice), (unsigned long long)cputime64_to_clock_t(system), (unsigned long long)cputime64_to_clock_t(idle), (unsigned long long)cputime64_to_clock_t(iowait), (unsigned long long)cputime64_to_clock_t(irq), (unsigned long long)cputime64_to_clock_t(softirq), (unsigned long long)cputime64_to_clock_t(steal)); for_each_online_cpu(i) { /* Copy values here to work around gcc-2.95.3, gcc-2.96 */ user = kstat_cpu(i).cpustat.user; nice = kstat_cpu(i).cpustat.nice; system = kstat_cpu(i).cpustat.system; idle = kstat_cpu(i).cpustat.idle; iowait = kstat_cpu(i).cpustat.iowait; irq = kstat_cpu(i).cpustat.irq; softirq = kstat_cpu(i).cpustat.softirq; steal = kstat_cpu(i).cpustat.steal; seq_printf(p, "cpu%d %llu %llu %llu %llu %llu %llu %llu %llu\n", i, (unsigned long long)cputime64_to_clock_t(user), (unsigned long long)cputime64_to_clock_t(nice), (unsigned long long)cputime64_to_clock_t(system), (unsigned long long)cputime64_to_clock_t(idle), (unsigned long long)cputime64_to_clock_t(iowait), (unsigned long long)cputime64_to_clock_t(irq), (unsigned long long)cputime64_to_clock_t(softirq), (unsigned long long)cputime64_to_clock_t(steal)); } seq_printf(p, "intr %llu", (unsigned long long)sum); #if !defined(CONFIG_PPC64) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64) for (i = 0; i < NR_IRQS; i++) seq_printf(p, " %u", kstat_irqs(i)); #endif seq_printf(p, "\nctxt %llu\n" "btime %lu\n" "processes %lu\n" "procs_running %lu\n" "procs_blocked %lu\n", nr_context_switches(), (unsigned long)jif, total_forks, nr_running(), nr_iowait()); return 0; } static int stat_open(struct inode *inode, struct file *file) { unsigned size = 4096 * (1 + num_possible_cpus() / 32); char *buf; struct seq_file *m; int res; /* don't ask for more than the kmalloc() max size, currently 128 KB */ if (size > 128 * 1024) size = 128 * 1024; buf = kmalloc(size, GFP_KERNEL); if (!buf) return -ENOMEM; res = single_open(file, show_stat, NULL); if (!res) { m = file->private_data; m->buf = buf; m->size = size; } else kfree(buf); return res; } static const struct file_operations proc_stat_operations = { .open = stat_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; /* * /proc/interrupts */ static void *int_seq_start(struct seq_file *f, loff_t *pos) { return (*pos <= NR_IRQS) ? 
pos : NULL; } static void *int_seq_next(struct seq_file *f, void *v, loff_t *pos) { (*pos)++; if (*pos > NR_IRQS) return NULL; return pos; } static void int_seq_stop(struct seq_file *f, void *v) { /* Nothing to do */ } extern int show_interrupts(struct seq_file *f, void *v); /* In arch code */ static struct seq_operations int_seq_ops = { .start = int_seq_start, .next = int_seq_next, .stop = int_seq_stop, .show = show_interrupts }; static int interrupts_open(struct inode *inode, struct file *filp) { return seq_open(filp, &int_seq_ops); } static const struct file_operations proc_interrupts_operations = { .open = interrupts_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static int filesystems_read_proc(char *page, char **start, off_t off, int count, int *eof, void *data) { int len = get_filesystem_list(page); return proc_calc_metrics(page, start, off, count, eof, len); } static int cmdline_read_proc(char *page, char **start, off_t off, int count, int *eof, void *data) { int len; len = sprintf(page, "%s\n", saved_command_line); return proc_calc_metrics(page, start, off, count, eof, len); } static int locks_read_proc(char *page, char **start, off_t off, int count, int *eof, void *data) { int len = get_locks_status(page, start, off, count); if (len < count) *eof = 1; return len; } static int execdomains_read_proc(char *page, char **start, off_t off, int count, int *eof, void *data) { int len = get_exec_domain_list(page); return proc_calc_metrics(page, start, off, count, eof, len); } #ifdef CONFIG_MAGIC_SYSRQ /* * writing 'C' to /proc/sysrq-trigger is like sysrq-C */ static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { if (count) { char c; if (get_user(c, buf)) return -EFAULT; __handle_sysrq(c, NULL, 0); } return count; } static const struct file_operations proc_sysrq_trigger_operations = { .write = write_sysrq_trigger, }; #endif struct proc_dir_entry *proc_root_kcore; void create_seq_entry(char *name, mode_t mode, const struct file_operations *f) { struct proc_dir_entry *entry; entry = create_proc_entry(name, mode, NULL); if (entry) entry->proc_fops = f; } void __init proc_misc_init(void) { static struct { char *name; int (*read_proc)(char*,char**,off_t,int,int*,void*); } *p, simple_ones[] = { {"loadavg", loadavg_read_proc}, {"uptime", uptime_read_proc}, {"meminfo", meminfo_read_proc}, {"version", version_read_proc}, #ifdef CONFIG_PROC_HARDWARE {"hardware", hardware_read_proc}, #endif #ifdef CONFIG_STRAM_PROC {"stram", stram_read_proc}, #endif {"filesystems", filesystems_read_proc}, {"cmdline", cmdline_read_proc}, {"locks", locks_read_proc}, {"execdomains", execdomains_read_proc}, {NULL,} }; for (p = simple_ones; p->name; p++) create_proc_read_entry(p->name, 0, NULL, p->read_proc, NULL); proc_symlink("mounts", NULL, "self/mounts"); /* And now for trickier ones */ #ifdef CONFIG_PRINTK { struct proc_dir_entry *entry; entry = create_proc_entry("kmsg", S_IRUSR, &proc_root); if (entry) entry->proc_fops = &proc_kmsg_operations; } #endif create_seq_entry("devices", 0, &proc_devinfo_operations); create_seq_entry("cpuinfo", 0, &proc_cpuinfo_operations); #ifdef CONFIG_BLOCK create_seq_entry("partitions", 0, &proc_partitions_operations); #endif create_seq_entry("stat", 0, &proc_stat_operations); create_seq_entry("interrupts", 0, &proc_interrupts_operations); #ifdef CONFIG_SLAB create_seq_entry("slabinfo",S_IWUSR|S_IRUGO,&proc_slabinfo_operations); #ifdef CONFIG_DEBUG_SLAB_LEAK create_seq_entry("slab_allocators", 0 
,&proc_slabstats_operations); #endif #endif create_seq_entry("buddyinfo",S_IRUGO, &fragmentation_file_operations); create_seq_entry("vmstat",S_IRUGO, &proc_vmstat_file_operations); create_seq_entry("zoneinfo",S_IRUGO, &proc_zoneinfo_file_operations); #ifdef CONFIG_BLOCK create_seq_entry("diskstats", 0, &proc_diskstats_operations); #endif #ifdef CONFIG_MODULES create_seq_entry("modules", 0, &proc_modules_operations); #endif #ifdef CONFIG_SCHEDSTATS create_seq_entry("schedstat", 0, &proc_schedstat_operations); #endif #ifdef CONFIG_PROC_KCORE proc_root_kcore = create_proc_entry("kcore", S_IRUSR, NULL); if (proc_root_kcore) { proc_root_kcore->proc_fops = &proc_kcore_operations; proc_root_kcore->size = (size_t)high_memory - PAGE_OFFSET + PAGE_SIZE; } #endif #ifdef CONFIG_PROC_VMCORE proc_vmcore = create_proc_entry("vmcore", S_IRUSR, NULL); if (proc_vmcore) proc_vmcore->proc_fops = &proc_vmcore_operations; #endif #ifdef CONFIG_MAGIC_SYSRQ { struct proc_dir_entry *entry; entry = create_proc_entry("sysrq-trigger", S_IWUSR, NULL); if (entry) entry->proc_fops = &proc_sysrq_trigger_operations; } #endif }
Tourism research: building from other disciplines

Tourism as a field of study is challenged to identify a theoretical core and disciplinary boundaries. While the phenomena of tourism may be considered a system of interlinked parts, the scholarly body of knowledge of tourism may be described as a mosaic of knowledge. This paper identifies a number of methodological problems that tourism research must address. We further propose that future study should focus its attention on core issues of tourism related to consumers' pursuit of difference in their travel. Further, these core issues should be addressed using the most appropriate disciplinary theory and methodology, providing the data needed to produce a holistic picture for deeper analysis.
/**
 * Load the settings state from the DOM {@link Element}.
 *
 * @param element the {@link Element} to load values from.
 * @see #getState()
 */
@Override
public void loadState(Element element) {
    String value = element.getAttributeValue(KEY.VERSION.toString());
    if (value != null) {
        version = value;
    }
}
Illustration by Thomas Warming

When I think of my mother, Danielle Staub, I picture someone beautiful and strong. A single parent, sexual abuse survivor, gay rights advocate, and a fearless defender of those she loves. My mother has a big heart, and I am grateful for the sacrifices she made to provide for my sister and me. I admire her for overcoming great odds.

If you believe what you've read about my mother in the press, you probably just rolled your eyes. That's OK; I'm used to that. It has been us against the world since the first day my mother appeared on The Real Housewives of New Jersey.

In her two seasons on the show, and in the half dozen years following, my mother has been described in many ways. She's been called trash, garbage, a pig, an extortionist, a felon, a husband stealer. She was even branded a "prostitution whore." Imagine an irate woman flipping over a table as she screamed that name at my mother. Actually, you don't have to imagine at all, because the scene lives on as one of the most iconic moments in reality television history.

As for me, I don't need to be reminded. My little sister and I were there. We saw the table fly at our mother, we heard the expletives, we ran as fast as we could to try to catch up as she was chased around the restaurant, and we heard her cries for help. We felt helpless and unable to defend her.

My name is Christine Staub, and for more than half of my life, my identity can be summed up in one sentence: daughter of one of the most notorious figures in unscripted television history.

Illustration by Thomas Warming

In May I will graduate from Seton Hall with a 4.0 and a double major in psychology and sociology. I've already been accepted to an Ivy League graduate school on a full scholarship. While higher education might not be a prized commodity in the reality show universe (at least, it wasn't on my mother's show, with characters being blatantly dismissive of the need for college), it is something my family values.

One day, I hope to be a medical professional. I already know what the job entails, because I was born with a rare heart condition. I had my first heart attack at age nine and have spent the years since in and out of doctors' offices. You didn't see this on television, but everyone knew about my condition behind the scenes, which of course didn't stop producers and housewives from terrorizing my mother in front of my face.

There were lots of things you never saw, both onscreen and off. Freshman year of high school, for example, I was cornered by a group of football players. "C'mon, Christine," one of them said, as he motioned towards his crotch. "I'm ready for my daily blow job." I was mortified. Not to mention confused. Behind him, the other guys snickered. "Don't be shy, Christine," he said, moving closer. "I mean, isn't this what you and your mom do for fun?"

I had no idea what they were talking about. My sister and I didn't watch the Real Housewives. We had seen enough crazy when we filmed, and I was more focused on grades and athletics than TV or boys. I called my mother and told her what the boys said. Moments later, she arrived at the school to discover me in full panic attack mode.
After she read the riot act to a nonchalant school administrator and threatened to file a police report, I asked her, "What was the boy talking about, mom?"

My mother told me the truth. During an on-camera interview, one of the housewives proclaimed that my mother had a predilection for giving daily blow-jobs to random men. She had a good source, or so she claimed, and found the information highly disturbing. If the mother was that much of a ho-bag slut, she insinuated, just imagine what she was teaching her daughters. Enter football players in search of their daily blowjobs and my mom racing to the high school to rescue me.

Even if this housewife had known the truth, it wouldn't have made a difference. Even if she'd known how the soundbite affected me, she still would have said it. This wasn't about a 14-year-old freshman. This was about fame, and insinuating someone's daughter was a "slut-in-the-making" made for great TV. A teenage girl hyperventilating in the Principal's Office? Nobody wanted to see that.

With each episode, the bullying and harassment grew worse. After another housewife said my sister and I appeared "dead in the eyes," strangers felt the need to repeat the line to us. When the show labeled my mother a criminal, people called us the children of a felon.

My mother's storyline was edited and produced, placing her in scenarios to achieve a desired outcome. She wouldn't normally be involved in these scenarios; they were dictated by the producers, and they just told my mom where to show up for filming.

In season one, my mother didn't expect this to happen. These were the early days of reality shows. In 2006, The Hills was in its first season, and the Real Housewives franchise had yet to become a household name. Today, reality stars are savvy, but a decade ago, talent lacked a frame of reference. Other than Queer Eye for the Straight Guy, my mother had never seen a reality show. Just be myself, she thought. Little did she know that "myself" is how the producers choose to present you. Every story arc needs an antagonist and a protagonist; that is what creates the conflict and resolution, i.e. the drama.

Imagine if all of the arguments you've had in your life were filmed by a stranger, and they took out the worst things you said, edited them together, and then presented the footage on TV to define your character. Would you say that characterization was accurate? Anything that is produced and edited in this manner is fictitious by default, but unlike an actor, my mother didn't have the advantage of hiding behind a character. And the term "reality" forms a perception that is impossible to overcome when viewers aren't aware of what goes down behind the scenes. The producers had the power to manipulate my mother in any way they pleased to increase ratings.

When the second season ended, my mother decided to leave the show. The network portrayed her decision as her getting fired, but she left to save her life and her kids. Leaving the Real Housewives, though, isn't easy. The show continues to air all over the world, and the character that was produced for my mother has been impossible to overcome. My mother can't find work because of the preconceived notion America has about her. Since she left the show, my family has fallen on hard times. In the end, we had no choice but to run. We packed a few suitcases and left my hometown in New Jersey.
Overnight, I left the only home I'd ever known for a nondescript apartment in an anonymous town. We didn't have furniture, so we slept on blow-up mattresses. I worried about my grades. I missed three weeks of school as my mom desperately searched for an educational institution where I would receive more respect and protection—a place where fellow students would not demand blowjobs. I missed my bedroom. I missed furniture. But in the end, I found something much better. I felt safe. Unfortunately, there are some things you cannot ever escape. Even now, all these years later, people see me as a character created by a producer. They see me as the daughter of an infamous reality show prostitution whore. They may not have called me garbage like they called my mother, but that's how they made me feel. They took away my name and my voice. My name is Christine Staub, and I will no longer be the excess debris created by a produced, manipulated, pseudo-reality universe. I will never again be a character defined by unethical producers, and exploited by networks all over the world. I am no longer a child, powerless without a voice. As for my mother, she has been silenced for far too long, and now it is time for her to open up about every dirty detail once and for all. I don't care what any TV viewer believes her to be, or how any media outlet defines her. I know who she is, and I admire her. Now I have a voice, and the time has come for my mother to reclaim her own—to reclaim the respect she deserves.
#ifndef BURST__RANGE__TAKE_EXACTLY_HPP
#define BURST__RANGE__TAKE_EXACTLY_HPP

#include <burst/range/detail/take_exactly.hpp>
#include <burst/type_traits/range_iterator.hpp>

#include <boost/iterator/iterator_categories.hpp>

#include <type_traits>
#include <utility>

namespace burst
{
    struct take_exactly_t
    {
        /*!
            \brief
                Takes the first several elements from the beginning of a range

            \param range
                The range to take a piece of.
            \param n
                The size of the piece.

            \returns
                A range consisting of the first `n` elements of the input range.

            \warning
                The end of the input range is not tracked. Handling the case
                `n > range.size()` is the caller's responsibility.
        */
        template <typename Range, typename Integer>
        auto operator () (Range && range, Integer n) const
        {
            using range_iterator = range_iterator_t<Range>;
            using category =
                typename std::common_type
                <
                    typename boost::iterators::pure_iterator_traversal<range_iterator>::type,
                    boost::random_access_traversal_tag
                >
                ::type;

            return detail::take_exactly_impl(std::forward<Range>(range), n, category{});
        }
    };

    constexpr auto take_exactly = take_exactly_t{};
} // namespace burst

#endif // BURST__RANGE__TAKE_EXACTLY_HPP
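For readers who don't use Boost-style ranges, the same "take the first n elements" idea can be sketched in Python with itertools.islice. This is only a loose analogue: islice stops early rather than invoking undefined behaviour when the source is too short, so the C++ version's warning does not carry over one-to-one.

from itertools import islice

def take_exactly(iterable, n):
    """Loose Python analogue of burst::take_exactly.

    Yields the first n items. Unlike the C++ version, islice simply
    stops early if the source has fewer than n items, so callers who
    want strict "exactly n" behaviour must verify the count themselves.
    """
    return islice(iterable, n)

print(list(take_exactly(range(10), 3)))  # [0, 1, 2]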
Dr. Strangelove is a parodic fantasy, of course, based upon the real-world Cold War between the United States and the Soviet Union. But according to a fantastic article in this month's Wired, Stanley Kubrick and Dr. Strangelove were right: the Soviet Union not only had a Doomsday Device during the closing days of the Cold War… it still has it. According to Wired, the device was completed as a response to Ronald Reagan's proposed Star Wars system of satellite defense. Since orbiting satellites would be powerless to mop up thousands of incoming missiles, the Star Wars platform indicated, instead, an intention of pre-emptive attack. The Soviets' Doomsday Device, then, would automatically launch nukes at America if it determined, through a complicated algorithm of if/then checks, that a nuke had struck Soviet soil. Curiously, the Soviet Union never told America it had such a device, and the big reason is surprising: they mostly wanted it to deter themselves from escalating things to the nuclear arena. Luckily, there is one failsafe: eventually, it all comes down to one officer being told by the Doomsday Device to turn the key. If a nuclear crisis in Russia ever does happen, let's hope he's more level-headed.
The industry is expected to pour $100 million into an effort to squash the November ballot initiative.

Donald Trump and Hillary Clinton give drug makers the jitters when they talk about Medicare negotiating the prices of prescription drugs. But the biggest near-term threat to the industry comes from a California ballot initiative that would test a version of that idea in the most populous state.

That ballot initiative "is a grenade being rolled into the conversation, and it is being taken very seriously," says a Republican drug lobbyist in Washington, D.C.

Drug companies are expected to pour $100 million into an effort to squash the referendum in what will be a test of the industry's strength at a time of growing consumer backlash against drug prices.

The initiative would require the state to pay no more for prescription drugs than the U.S. Department of Veterans Affairs, one of the few federal agencies allowed to negotiate drug prices.

From the industry's perspective, California could set a dangerous precedent. Besides having an economy the size of many small countries, the liberal bastion is often a laboratory for new ideas that take root and then spread east. That's even more likely given that the presidential front-runners are pushing the federal government to negotiate drug prices for Medicare.

Which is precisely the intention of the initiative's sponsor, Michael Weinstein, CEO of the Los Angeles-based AIDS Healthcare Foundation. "If we win, we hope it will start a national prairie fire," he said. Weinstein pursued the ballot measure after years of in-your-face activism on AIDS and after watching the California state legislature fail to do anything about drug prices, a big concern to people with HIV/AIDS who may be taking costly drugs for the rest of their lives.

Drug companies have easily trounced such opponents in the past, but the California battle comes at a particularly perilous moment. Public anger at drug prices is at an all-time high, driven by headlines about executives who unapologetically jacked up prices 5,000 percent. That is happening against the backdrop of a campaign cycle in which Americans are bucking the establishment in favor of insurgent candidates like Trump, Sen. Ted Cruz and Sen. Bernie Sanders. It is not a coincidence that the industry is fighting a related referendum in Ohio, suing to try to keep it off the ballot.

"PhRMA is an easy target right now," said Larry Levitt, a senior vice president at the nonprofit Kaiser Family Foundation. "In the past, drug companies have had pretty good public reputations. They make money but also produce something that saves people's lives. Lately, drug companies have fallen down a few notches in the public's opinion, which makes them more vulnerable."

The industry is not taking any chances. Well-known brands like Johnson & Johnson, Bristol-Myers Squibb, Pfizer and Bayer have already raised roughly $67 million to assemble a crack California team to make sure the proposal loses. By comparison, the AIDS Healthcare Foundation has raised $4.3 million, roughly six percent of the drug makers' war chest.

The industry is also recruiting unconventional allies, such as patient advocacy and civil rights groups, raising alarms about potential economic harm to consumers and sowing doubt about the sponsors. It has already signed up roughly 30 groups to oppose the measure, including veterans' organizations, the California NAACP, the Bonnie J.
Addario Lung Cancer Foundation, the Lupus Foundation of Southern California and the California Chamber of Commerce. Several of those groups receive donations from the industry; for instance, the Lupus group gets roughly $1,000 a year from drug companies out of an annual budget of $80,000, says Hollaine Hopkins, the group's executive director.

Kathy Fairbanks, the political consultant leading the pharmaceutical companies' campaign, also has been trying to recruit California-based HIV groups, rivals to Weinstein's AIDS Healthcare Foundation. So far, all have declined.

"The drug companies only win if voters don't know it's them, and they are looking for beards," said Court of Consumer Watchdog, a group supporting the ballot measure.

In bold, red letters, the flier runs through the litany of potential problems with the ballot measure: increasing bureaucracy, more paperwork for physicians, a "chilling effect" on research and development for new drugs. It even argues that it could hurt veterans by raising the cost of the drugs they receive. The flier does not mention the ballot measure's potential effect on the pharmaceutical industry's bottom line.

Backers of the initiative dispute that the measure would harm consumers; indeed, they say anywhere from three to seven million residents enrolled in several state-supported health programs would get access to far lower-cost drugs. Specifically, the measure says that state-funded programs such as the California Public Employees' Retirement System, the Medicaid fee-for-service outpatient drug program and California's AIDS Drug Assistance Program must pay no more for prescription drugs than the Department of Veterans Affairs.

The VA is held out as a model because, unlike most federal health programs, it can negotiate drug prices and gets at least the lowest price the drug companies are paid by any commercial client. In exchange for those discounts, the agency puts the medication on the list of drugs VA doctors can prescribe. The outcome of the VA's negotiations with drug companies isn't made public, but a 2005 Congressional Budget Office report found that the VA generally pays less than other federal, state and private health programs.

But the pharmaceutical industry says the VA is not a good model for the rest of the country because the prices it pays are not public, making it impossible for California to replicate them. Drug makers also say the ballot measure may leave people with less access to certain types of drugs, since the VA gets preferential prices by covering only some prescriptions. They also argue that regular folks could pay more because drug companies would shift more of their costs onto the commercial market, and even onto veterans. Drug makers argue that the discounts the VA receives would not be sustainable if they were extended to a wider swathe of people.

If the measure does somehow pass, Fairbanks said, drug makers and others are likely to sue, which would cost taxpayers more money.

It's too early to say how much support either side is really drawing. But ballot initiatives are largely won or lost based on the marketing of a big idea, not the minutiae of policy details. Only 30 to 40 percent of these measures typically succeed in California, and opponents' best tactic is to just cast doubt on the cost or feasibility of an idea.

"The status quo is really the default option for most people," said Mark Baldassare, president and CEO of the Public Policy Institute of California, which studies ballot measures.
If history is any guide, the measure’s backers have a tough climb ahead. The pharmaceutical industry has handily squashed past state-based campaigns, including a 2005 California ballot initiative which would have created a state drug discount program for the poor.
Distributed data mining based on actors for Internet of Things

The paper discusses an approach for the distributed execution of data mining algorithms based on the actor model and the concept of the Internet of Things. The suggested approach allows us to decompose data mining algorithms into actors and execute them in a distributed environment. It provides data analysis both in centralized systems (cloud computing) and in distributed systems (fog computing) for the IoT.
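As a loose illustration of the idea, not the authors' implementation, here is a minimal Python sketch of decomposing a computation into actors that exchange messages over queues, the way the paper proposes distributing mining steps across IoT nodes. All names and the toy "mining" steps are invented for this example:

import threading
import queue

def actor(inbox, outbox, work):
    """A tiny actor: consume messages, apply `work`, emit results."""
    while True:
        msg = inbox.get()
        if msg is None:  # poison pill shuts the actor down
            break
        outbox.put(work(msg))

# Pipeline: raw sensor readings -> cleaning actor -> aggregating actor.
raw, cleaned, results = queue.Queue(), queue.Queue(), queue.Queue()

cleaner = threading.Thread(
    target=actor, args=(raw, cleaned, lambda r: max(r, 0.0)))  # clamp noise
aggregator = threading.Thread(
    target=actor, args=(cleaned, results, lambda r: ("reading", r)))
cleaner.start()
aggregator.start()

for reading in [1.5, -0.2, 3.1]:
    raw.put(reading)
raw.put(None)
cleaner.join()
cleaned.put(None)
aggregator.join()

while not results.empty():
    print(results.get())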
/**
 * Subscribe method.
 *
 * @param l
 *            A log listener to be notified when new log entries arrive.
 */
public synchronized void addLogListener(LogListener l) {
    if (l == null)
        throw new IllegalArgumentException("LogListener can not be null");
    if (!listeners.contains(l)) {
        listeners.addElement(l);
    }
}
/**
 * Very simple donut chart without legend.
 *
 * Initial date: 17 May 2022<br>
 * @author srosse, stephane.rosse@frentix.com, http://www.frentix.com
 *
 */
public class PieChartComponent extends DefaultD3Component {

	private static final ComponentRenderer renderer = new PieChartComponentRenderer();

	private int layer = 0;
	private final List<PiePoint> series = new ArrayList<>();
	private String title;
	private String subTitle;

	public PieChartComponent(String name) {
		super(name);
		setDomReplacementWrapperRequired(false);
	}

	public int getLayer() {
		return layer;
	}

	public void setLayer(int layer) {
		this.layer = layer;
	}

	public String getTitle() {
		return title;
	}

	public void setTitle(String title) {
		this.title = title;
	}

	public String getSubTitle() {
		return subTitle;
	}

	public void setSubTitle(String subTitle) {
		this.subTitle = subTitle;
	}

	public List<PiePoint> getSerie() {
		return series;
	}

	public void addPoints(PiePoint... points) {
		// Ignore null, empty or null-leading varargs arrays.
		if(points != null && points.length > 0 && points[0] != null) {
			for(PiePoint point:points) {
				series.add(point);
			}
		}
	}

	@Override
	public void validate(UserRequest ureq, ValidationResult vr) {
		super.validate(ureq, vr);
		vr.getJsAndCSSAdder().addRequiredStaticJsFile("js/jquery/openolat/jquery.piechart.js");
	}

	@Override
	public ComponentRenderer getHTMLRendererSingleton() {
		return renderer;
	}
}
import time
import RPi.GPIO as GPIO

PINS = [7, 8, 9, 10, 11]

GPIO.setmode(GPIO.BCM)
for pin in PINS:
    GPIO.setup(pin, GPIO.OUT)

# Blink each pin once (on, off, on again), one pin at a time.
for pin in PINS:
    GPIO.output(pin, 1)
    time.sleep(1)
    GPIO.output(pin, 0)
    time.sleep(1)
    GPIO.output(pin, 1)

time.sleep(5)
GPIO.cleanup()
Efficient Geometric, Photometric, and Temporal Calibration of an Array of Unsynchronized Video Cameras

Camera arrays have become popular in many computer vision and computer graphics applications. Among all preprocessing steps, an efficient method to calibrate a large number of cameras is much desired. The required calibration includes both geometric and photometric calibration, which are the most common and are well studied for a single camera. However, few existing efforts are devoted to camera arrays or to integrating both methods in a fully automatic way. Additionally, most existing camera array systems require, or assume implicitly, that all the cameras in the array are hardware-synchronized to simplify subsequent application-specific processing such as the calibration of all the cameras. While this constraint is useful, it greatly restricts the use of heterogeneous types of cameras and the configurations of cameras that could be used. In this paper, we propose a novel, integrated, and fully automatic solution for performing geometric, photometric, and temporal calibration (synchronization) of an array of unsynchronized video cameras. In particular, our new method is based on the classic plane-based calibration approach. Using a redesigned calibration pattern, the geometric, photometric, and temporal calibrations are performed automatically in an integrated and extensible framework. Extensive experimental results show that the new method is easy to use and achieves high accuracy in the calibrated parameters.
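The "classic plane-based calibration" the authors build on is the familiar checkerboard method. A minimal single-camera sketch of that baseline step with OpenCV is below; the image folder and board size are made-up assumptions, and the paper's contribution lies in extending this kind of per-camera step to joint geometric, photometric and temporal calibration across a whole unsynchronized array, which this sketch does not attempt:

import glob
import cv2
import numpy as np

BOARD = (9, 6)  # inner corners per row/column -- assumed board geometry

# 3-D points of the planar target in its own coordinate frame (z = 0).
objp = np.zeros((BOARD[0] * BOARD[1], 3), np.float32)
objp[:, :2] = np.mgrid[0:BOARD[0], 0:BOARD[1]].T.reshape(-1, 2)

obj_points, img_points = [], []
for path in glob.glob("calib_images/*.png"):  # hypothetical image folder
    gray = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    found, corners = cv2.findChessboardCorners(gray, BOARD)
    if found:
        obj_points.append(objp)
        img_points.append(corners)

# Per-camera intrinsics and distortion estimated from the plane views
# (the sketch assumes at least one board image was actually detected).
rms, K, dist, rvecs, tvecs = cv2.calibrateCamera(
    obj_points, img_points, gray.shape[::-1], None, None
)
print("reprojection RMS:", rms)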
package com.alexfu.sqlitequerybuilder; import com.alexfu.sqlitequerybuilder.utils.AssertUtil; import org.junit.Assert; import org.junit.Test; import com.alexfu.sqlitequerybuilder.api.SQLiteQueryBuilder; public class DropTest { @Test public final void testDropTable() { // Arrange and Act String statement = SQLiteQueryBuilder .drop() .table("myTable") .ifExists() .toString(); // Assert AssertUtil.assertEqual(statement, "DROP TABLE IF EXISTS myTable"); } @Test public final void testDropTableNoIfExists() { // Arrange and Act String statement = SQLiteQueryBuilder .drop() .table("myTable") .toString(); // Assert AssertUtil.assertEqual(statement,"DROP TABLE myTable"); } @Test public final void testDropView() { // Arrange and Act String statement = SQLiteQueryBuilder .drop() .view("myView") .ifExists() .toString(); // Assert AssertUtil.assertEqual(statement,"DROP VIEW IF EXISTS myView"); } @Test public final void testDropViewNoIfExists() { // Arrange and Act String statement = SQLiteQueryBuilder .drop() .view("myView") .toString(); // Assert AssertUtil.assertEqual(statement,"DROP VIEW myView"); } @Test public final void testDropIndex() { // Arrange and Act String statement = SQLiteQueryBuilder .drop() .index("myIndex") .ifExists() .toString(); // Assert AssertUtil.assertEqual(statement,"DROP INDEX IF EXISTS myIndex"); } @Test public final void testDropIndexNoIfExists() { // Arrange and Act String statement = SQLiteQueryBuilder .drop() .index("myIndex") .toString(); // Assert AssertUtil.assertEqual(statement,"DROP INDEX myIndex"); } @Test public final void testDropTrigger() { // Arrange and Act String statement = SQLiteQueryBuilder .drop() .trigger("myTrigger") .ifExists() .toString(); // Assert AssertUtil.assertEqual(statement,"DROP TRIGGER IF EXISTS myTrigger"); } @Test public final void testDropTriggerNoIfExists() { // Arrange and Act String statement = SQLiteQueryBuilder .drop() .trigger("myTrigger") .toString(); // Assert AssertUtil.assertEqual(statement,"DROP TRIGGER myTrigger"); } }
A modular product multi-platform configuration model

The product platform strategy has emerged as one of the important enablers of mass customisation, in which the use of common components across different product variants is maximised to decrease design, manufacturing and assembly costs. The Modular Product Multi-Platform (MPMP) model is introduced to co-design optimal product platforms along with the best number and combinations of corresponding product family members. The model obtains optimal solutions without using meta-heuristic techniques for large sets of products and components. It captures the different relations between the modular elements of a dynamic platform without a priori knowledge of the number of changeable platforms and families. A large family of touch-screen tablets is used to illustrate the application and advantages of the newly developed dynamic product platform design model. The model uses both assembly and disassembly to customise platforms into product variants, which increases the commonality between modular products. Comparison between the MPMP model and the closest model in the literature shows that the MPMP model is more efficient (36.6% to 74.3% faster) and more accurate, as it can deal with zero-demand products. The model's computation time is affected more by the number of product components than by the number of products.
Effective Communication in a Diverse Workplace Due to globalization, today's work environment has become diverse as people from different cultures, religions, and backgrounds have come together. They bring with them diverse skills, knowledge, and expertise. They have their own views, ideas, perceptions, and opinions, which differ from one another's because they come from different walks of life. To succeed in a multicultural society, an organization must value the differences of its workforce, respect the individuality of all employees, and maintain a climate in which everyone is treated with dignity. Everyone in business today needs to better understand other cultures, as well as other age groups, gender groups, and lifestyle groups. All this can happen only if there is effective communication among all employees, from top to bottom. This paper explains how important it is to have a diverse workforce, what difficulties can stand in the way of effective communication in such diverse workplaces, and how to overcome these difficulties in an effective manner.
In the digital age, organizations increasingly rely on digitally-stored data. To protect against data loss, an organization may use one or more backup systems to back up important data. Due to increasingly complex information technology infrastructures, an organization may create backups from a variety of sources, using a variety of methods, and according to a variety of different schedules. Accordingly, an administrator may face a proliferation of backup jobs to manage. In an attempt to facilitate the administration of backup jobs, traditional backup administration systems may allow an administrator to view a list of backup jobs configured to protect data within an enterprise. Unfortunately, traditional backup administration systems may be cumbersome to navigate, especially when an administrator needs information about the backup status of one or more resources within the enterprise. For example, in order to view backup jobs performed for a specific group of servers (e.g., pertaining to a department within the enterprise), an administrator may need to sort through a large list of backup jobs, either manually or using a list filtering system that may be over-complex or underpowered. For these reasons, managing backup environments using traditional backup administration systems may be difficult, time-consuming, and may introduce opportunities for oversight or other human error. Accordingly, the instant disclosure identifies and addresses a need for additional and improved systems and methods for navigating backup configurations.
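A minimal sketch of the kind of resource-oriented filtering the disclosure motivates: indexing backup jobs by the group of servers they protect, so that "backup status for one department" becomes a direct lookup rather than a scan of the full job list. All record fields here are hypothetical and stand in for whatever metadata a real backup administration system tracks.

```python
from collections import defaultdict

# Hypothetical backup-job records; the field names are illustrative only.
jobs = [
    {"job_id": 1, "server": "db01",  "group": "finance",     "status": "ok"},
    {"job_id": 2, "server": "web03", "group": "engineering", "status": "failed"},
    {"job_id": 3, "server": "db02",  "group": "finance",     "status": "ok"},
]

# Index jobs by resource group once, instead of re-filtering the whole list
# for every administrator query.
by_group = defaultdict(list)
for job in jobs:
    by_group[job["group"]].append(job)

# "Backup status for the finance servers" is now a single lookup.
for job in by_group["finance"]:
    print(job["server"], job["status"])
```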
MoMA as Educator: The Legacy of Alfred H. Barr, Jr. Sybil Kantor's history of the intellectual origins of the Museum of Modern Art (MoMA) is an engaging account of Alfred Barr, Jr.'s pivotal role in acquainting an American audience with the modernist movement in art that had developed in Europe and the Soviet Union in the first part of the twentieth century. Scrupulously documented, Kantor's narrative relies heavily on interviews with Barr's contemporaries, his publications, and his extensive correspondence. She also limns portraits of major figures who either influenced his ideas or were instrumental in establishing MoMA. Although the book is very informative about modern art, according to its author it is not intended to be strictly a history of that art. Rather, it is devoted to Barr's early development, his personal characteristics, his attitude toward the cultural and political climate of the times, his definitions of modernism and formalism, and finally, his legacy. Kantor traces Barr's career through the decade of the 1920s, which culminated in his being named the first director of MoMA in 1929; the expansive years of the 1930s and 1940s when, as Kantor puts it, Barr and the museum were "at full throttle"; and the later years when Barr was relieved of his director's responsibilities to allow him to spend more time on writing and the mounting of exhibitions. He retired from the museum in 1967, began to show signs of ill health in the mid-1970s, and died at the age of 79 in 1981.
# SPDX-License-Identifier: BSD-3-Clause # Depthcharge: <https://github.com/nccgroup/depthcharge> """ Built-in SecurityRisk definitions associated with networking functionality. """ from textwrap import dedent from .. import SecurityImpact _BUILTIN_DEFS = ( ('CONFIG_NETCONSOLE', True, { 'identifier': 'CONFIG_NETCONSOLE', 'summary': 'NetConsole functionality provides unauthenticated access to U-Boot over network', 'impact': SecurityImpact.ATTACK_SURFACE | SecurityImpact.INFO_LEAK | SecurityImpact.EXEC, 'description': dedent(""" U-Boot's NetConsole functionality allows the interactive U-Boot command line to be presented via a network interface, rather than a serial port (i.e. UART) interface. This UDP-based functionality is designed to operate in conjunction with netcat on the corresponding host. The corresponding traffic is unauthenticated and plaintext. Thus, while a helpful development tool, this functionality does not appear to be designed or intended for use in production settings. Using it in production greatly expands the attack surface of the relevant platform and could allow a network-resident attacker to execute console commands supported in the U-Boot environment. More information about NetConsole functionality can be found in the upstream documentation: <https://source.denx.de/u-boot/u-boot/-/blob/master/doc/README.NetConsole> """), 'recommendation': dedent(""" Disable NetConsole functionality in production/release firmware builds via `CONFIG_NETCONSOLE`. In general, disable any networking functionality that is not required to fulfil functional requirements. For any networking functionality that is necessary and relied upon, consider reviewing it further to determine if it satisfies the platform's security requirements for confidentiality, integrity, authenticity, and availability. """) }), ('CONFIG_CMD_TFTPBOOT', True, { 'identifier': 'CVE-2018-18439', 'summary': 'An excessively large binary loaded via tftpboot can corrupt program memory', 'impact': SecurityImpact.WR_MEM, 'description': dedent("""\ Prior to U-Boot 2019.04-rc1, no size validation was performed on images retrieved over TFTP. Use of this functionality in the presence of a network-resident attacker could allow the attacker to supply a maliciously crafted image that is large enough to overwrite memory containing the running (post-relocation) U-Boot executable. In this threat scenario, the concern is that the memory corruption would lead to malicious code execution. Given that TFTP is an unauthenticated file transfer mechanism that could otherwise be attacked, this vulnerability is envisioned to be abused in situations where U-Boot's Verified Boot functionality is in use. More information can be found in this advisory: <https://www.openwall.com/lists/oss-security/2018/11/02/2> """), # Not recommending backporting individual changes at this time, given that # there's a bit of a gotcha with respect to ensuring that 9cc2323f is also included # when more than one DRAM bank is involved. 'recommendation': 'Update to U-Boot 2019.04.\n', 'affected_versions': ('0.0', '2019.01') }), ('CONFIG_NETCONSOLE', True, { 'identifier': 'CVE-2019-14192', 'impact': SecurityImpact.WR_MEM, 'summary': 'Memory corruption in NetConsole functionality can be triggered via maliciously crafted UDP packets', 'description': dedent("""\ A length argument passed to `nc_input_packet()` in `net/net.c` is obtained directly from a UDP packet, without first performing any validation. 
This can lead to an out-of-bounds memory write when the tainted length parameter is subsequently used in a `memcpy()` operation. This and related advisories are discussed in the following blog post: <https://securitylab.github.com/research/uboot-rce-nfs-vulnerability> """), 'recommendation': dedent("""\ Update to U-Boot 2019.10 or backport the fix from commit fe72880. Otherwise, disable NetConsole and any other unnecessary networking functionality. """), # None listed in advisory. # TODO: Go spelunking in git caverns to find actual minimum. 'affected_versions': ('0.0', '2019.07') }), # FIXME: It is PAINFUL to me that there's so much duplication here. # The vast majority of all of these NFS/UDP handler CVEs are the # same root cause/pattern, with fixes all landing in the next # release. I see no real value in having this split into multiple # identifiers, and am only doing it to stay consistent with the # excessive number of CVEs... # # Maybe bundle them up into a single meta-identifier for less noise? # Need to get some independent opinions on whether an identifier # like CVE-2019-14193..14204 makes sense to folks. ('CONFIG_CMD_NFS', True, { 'identifier': 'CVE-2019-14193', 'impact': SecurityImpact.WR_MEM, 'summary': 'Memory corruption can be triggered via maliciously crafted NFS messages', 'description': dedent(""" A length argument passed to `nfs_readlink_reply()` in `net/nfs.c` is obtained from a UDP packet field when `udp_packet_handler()` is invoked in `net/net.c`, which does not perform length validation. This leads to an out-of-bounds memory write, triggerable by a network-resident attacker, when the tainted length parameter is subsequently used in a `memcpy()` operation. This and related advisories are discussed in the following blog post: <https://securitylab.github.com/research/uboot-rce-nfs-vulnerability> """), 'recommendation': dedent("""\ Update to U-Boot 2019.10 or backport the fix from commit fe72880. Otherwise, disable NFS support and any other unnecessary networking functionality. """), 'affected_versions': ('0.0', '2019.07') }), ('CONFIG_CMD_NFS', True, { 'identifier': 'CVE-2019-14194', 'impact': SecurityImpact.WR_MEM, 'summary': 'Memory corruption can be triggered via maliciously crafted NFS messages', 'description': dedent("""\ In the `nfs_read_reply()` function found in `net/nfs.c`, a path length is obtained from a network packet and later used as the length parameter in a `memcpy()` invocation performed by the `store_block()` function, without sufficient validation. This results in an out-of-bounds memory write that is triggerable by a network-resident attacker. CVE-2019-14194 applies to the NFSv2 case, while CVE-2019-14198 applies to the NFSv3 case. This and related advisories are discussed in the following blog post: <https://securitylab.github.com/research/uboot-rce-nfs-vulnerability> """), 'recommendation': dedent("""\ Update to U-Boot 2019.10 or backport the fix from commit aa207cf3a. Otherwise, disable NFS support and any networking functionality that is not required. """), 'affected_versions': ('0.0', '2019.07') }), ('CONFIG_CMD_NFS', True, { 'identifier': 'CVE-2019-14195', 'impact': SecurityImpact.WR_MEM, 'summary': 'Memory corruption can be triggered via maliciously crafted NFS messages', 'description': dedent("""\ In the `nfs_readlink_reply()` function found in `net/nfs.c`, a path length is obtained directly from a network packet and used as the length parameter in a `memcpy()` invocation, without validation. 
This leads to an out-of-bounds memory write that is triggerable by a network-resident attacker. This and related advisories are discussed in the following blog post: <https://securitylab.github.com/research/uboot-rce-nfs-vulnerability> """), 'recommendation': dedent("""\ Update to U-Boot 2019.10 or backport the fix from commit cf3a4f1e. Otherwise, disable NFS support and any networking functionality that is not required. """), 'affected_versions': ('0.0', '2019.07') }), ('CONFIG_CMD_NFS', True, { 'identifier': 'CVE-2019-14196', 'impact': SecurityImpact.WR_MEM, 'summary': 'Memory corruption can be triggered via maliciously crafted NFS messages', 'description': dedent("""\ Insufficient path length validation is performed in `net/nfs.c` (`nfs_lookup_reply()`) prior to using `memcpy()` to copy data into a global `filefh` buffer, using a length obtained from a received packet. This allows a network-resident attacker to corrupt memory. This and related advisories are discussed in the following blog post: <https://securitylab.github.com/research/uboot-rce-nfs-vulnerability> """), 'recommendation': dedent("""\ Update to U-Boot 2019.10 or backport the fix from commit 5d14ee4e. Otherwise, disable NFS support and any networking functionality that is not required. """), 'affected_versions': ('0.0', '2019.07') }), ('CONFIG_CMD_NFS', True, { 'identifier': 'CVE-2019-14197', 'impact': SecurityImpact.INFO_LEAK, 'summary': 'An out-of-bounds memory read can be induced via maliciously crafted NFS messages', 'description': dedent("""\ In `nfs_read_reply()` a `memcpy()` of data into a response buffer is performed without first validating that a source buffer contains enough data. This could allow a network-resident attacker to read outside the bounds of the source packet buffer. The location and potential contents of the "leaked" buffer depend upon the network driver in use. This and related advisories are discussed in the following blog post: <https://securitylab.github.com/research/uboot-rce-nfs-vulnerability> """), 'recommendation': dedent("""\ Update to U-Boot 2019.10 or backport the fix from commit 741a8a08. Otherwise, disable NFS support and any networking functionality that is not required. """), 'affected_versions': ('0.0', '2019.07') }), ('CONFIG_CMD_NFS', True, { 'identifier': 'CVE-2019-14198', 'impact': SecurityImpact.WR_MEM, 'summary': 'Memory corruption can be triggered via maliciously crafted NFS messages', 'description': dedent("""\ In the `nfs_read_reply()` function found in `net/nfs.c`, a path length is obtained from a network packet and later used as the length parameter in a `memcpy()` invocation performed by the `store_block()` function, without sufficient validation. This results in an out-of-bounds memory write that is triggerable by a network-resident attacker. CVE-2019-14194 applies to the NFSv2 case, while CVE-2019-14198 applies to the NFSv3 case. This and related advisories are discussed in the following blog post: <https://securitylab.github.com/research/uboot-rce-nfs-vulnerability> """), 'recommendation': dedent("""\ Update to U-Boot 2019.10 or backport the fix from commit aa207cf3a. Otherwise, disable NFS support and any networking functionality that is not required. 
"""), 'affected_versions': ('0.0', '2019.07') }), ('CONFIG_NET', True, { 'identifier': 'CVE-2019-14199', 'impact': SecurityImpact.WR_MEM, 'summary': 'Memory corruption can be triggered via maliciously crafted UDP message', 'description': dedent(""" In `net/net.c`, the length argument passed to `udp_packet_handler` implementations may underflow, resulting in a large unsigned integer value. This can lead to memory corruption when the handler later uses this length to copy data. A network-resident attacker can induce this behavior with a maliciously crafted UDP packet. This and related advisories are discussed in the following blog post: <https://securitylab.github.com/research/uboot-rce-nfs-vulnerability> """), 'recommendation': dedent("""\ Update to U-Boot 2019.10 or backport the fix from commit fe728806. Otherwise, disable NFS support and any other unnecessary networking functionality. """), 'affected_versions': ('0.0', '2019.07') }), ('CONFIG_CMD_NFS', True, { 'identifier': 'CVE-2019-14200', 'impact': SecurityImpact.WR_MEM, 'summary': 'Stack memory corruption can be triggered via maliciously crafted NFS messages', 'description': dedent("""\ An unvalidated packet length (see CVE-2019-14199) is used in the NFS handler `rpc_lookup_reply()` when copying data. This can allow a network-resident attacker to corrupt stack memory via maliciously crafted UDP messages. This and related advisories are discussed in the following blog post: <https://securitylab.github.com/research/uboot-rce-nfs-vulnerability> """), 'recommendation': dedent("""\ Update to U-Boot 2019.10 or backport the fix from commit 741a8a08. Otherwise, disable NFS support and any other unnecessary networking functionality. """), 'affected_versions': ('0.0', '2019.07') }), ('CONFIG_CMD_NFS', True, { 'identifier': 'CVE-2019-14201', 'impact': SecurityImpact.WR_MEM, 'summary': 'Stack memory corruption can be triggered via maliciously crafted NFS messages', 'description': dedent("""\ An unvalidated packet length (see CVE-2019-14199) is used in the NFS handler `nfs_lookup_reply()` when copying data. This can allow a network-resident attacker to corrupt stack memory via maliciously crafted UDP messages. This and related advisories are discussed in the following blog post: <https://securitylab.github.com/research/uboot-rce-nfs-vulnerability> """), 'recommendation': dedent("""\ Update to U-Boot 2019.10 or backport the fix from commit 741a8a08. Otherwise, disable NFS support and any other unnecessary networking functionality. """), 'affected_versions': ('0.0', '2019.07') }), ('CONFIG_CMD_NFS', True, { 'identifier': 'CVE-2019-14202', 'impact': SecurityImpact.WR_MEM, 'summary': 'Stack memory corruption can be triggered via maliciously crafted NFS messages', 'description': dedent("""\ An unvalidated packet length (see CVE-2019-14199) is used in the NFS handler `nfs_readlink_reply()` when copying data. This can allow a network-resident attacker to corrupt stack memory via maliciously crafted UDP messages. This and related advisories are discussed in the following blog post: <https://securitylab.github.com/research/uboot-rce-nfs-vulnerability> """), 'recommendation': dedent("""\ Update to U-Boot 2019.10 or backport the fix from commit 741a8a08. Otherwise, disable NFS support and any other unnecessary networking functionality. 
"""), 'affected_versions': ('0.0', '2019.07') }), ('CONFIG_CMD_NFS', True, { 'identifier': 'CVE-2019-14203', 'impact': SecurityImpact.WR_MEM, 'summary': 'Stack memory corruption can be triggered via maliciously crafted NFS messages', 'description': dedent("""\ An unvalidated packet length (see CVE-2019-14199) is used in the NFS handler `nfs_mount_reply()` when copying data. This can allow a network-resident attacker to corrupt stack memory via maliciously crafted UDP messages. This and related advisories are discussed in the following blog post: <https://securitylab.github.com/research/uboot-rce-nfs-vulnerability> """), 'recommendation': dedent("""\ Update to U-Boot 2019.10 or backport the fix from commit 741a8a08. Otherwise, disable NFS support and any other unnecessary networking functionality. """), 'affected_versions': ('0.0', '2019.07') }), ('CONFIG_CMD_NFS', True, { 'identifier': 'CVE-2019-14204', 'impact': SecurityImpact.WR_MEM, 'summary': 'Stack memory corruption can be triggered via maliciously crafted NFS messages', 'description': dedent("""\ An unvalidated packet length (see CVE-2019-14199) is used in the NFS handler `nfs_unmountall_reply()` when copying data. This can allow a network-resident attacker to corrupt stack memory via maliciously crafted UDP messages. This and related advisories are discussed in the following blog post: <https://securitylab.github.com/research/uboot-rce-nfs-vulnerability> """), 'recommendation': dedent("""\ Update to U-Boot 2019.10 or backport the fix from commit 741a8a08. Otherwise, disable NFS support and any other unnecessary networking functionality. """), 'affected_versions': ('0.0', '2019.07') }), )
/**
 * Create an ACH card reserve.
 *
 * @throws ApiException if the API call fails
 */
@Test
public void createAchCardReserveUsingPostTest() throws ApiException {
    // Auto-generated stub: populate the request object and assert on the
    // response before relying on this test for coverage.
    AchCardReserveRequestCO cardReserveRequestCO = null;
    AchCardReserveResponseVO response = api.createAchCardReserveUsingPost(cardReserveRequestCO);
}
/**
 * Executes an HTTP request and returns a domain object.
 *
 * @param request   the HTTP request to execute
 * @param classType the class of the domain object to return
 * @return the response body deserialized into an instance of {@code classType}
 */
public <T> T executeRequest(HttpRequestBase request, Class<T> classType) {
    HttpResponse response = null;
    try {
        response = executeRequest(request);
        return getResponse(response, classType, getGson());
    } finally {
        // Always release the underlying connection, even if deserialization fails.
        close(response);
    }
}
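The same execute-then-deserialize pattern, sketched in Python for illustration: the requests library and a dataclass stand in for the HTTP client and Gson, and the `Ticket` type and URL are hypothetical.

```python
from dataclasses import dataclass
import requests

@dataclass
class Ticket:          # hypothetical domain object
    id: str
    code: str

def execute_request(url: str, cls):
    """Execute an HTTP GET and map the JSON body onto a domain object."""
    response = requests.get(url, timeout=10)
    try:
        response.raise_for_status()
        return cls(**response.json())   # analogous to getResponse(..., classType, gson)
    finally:
        response.close()                # analogous to close(response) above

# ticket = execute_request("https://api.example.com/tickets/1", Ticket)
```

As in the Java version, the try/finally shape guarantees the connection is released whether deserialization succeeds or throws.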
from flask import Blueprint, request, render_template,flash, g, session, redirect, url_for # Define the blueprint: 'auth', set its url prefix: app.url/auth mod_auth = Blueprint('auth', __name__, url_prefix='/auth') # Set the route and accepted methods @mod_auth.route('/signin/') def signin(): #return "From controller" return render_template("auth/signin.html")
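For the blueprint above to actually serve `/auth/signin/`, it must be registered on the application; a minimal sketch is shown below, where the import path `app.mod_auth.controllers` is an assumption about this project's layout.

```python
from flask import Flask

# Hypothetical module path; adjust to wherever mod_auth is defined.
from app.mod_auth.controllers import mod_auth

app = Flask(__name__)
app.register_blueprint(mod_auth)   # url_prefix='/auth' comes from the blueprint itself

# GET /auth/signin/ now renders templates/auth/signin.html
```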
<gh_stars>0 package gregtech.api.util; import net.minecraft.entity.Entity; import net.minecraft.entity.player.EntityPlayer; import net.minecraft.util.Vec3; import net.minecraft.world.World; import java.util.Objects; import java.util.function.BiConsumer; import java.util.function.Consumer; @SuppressWarnings("unused") public abstract class WorldSpawnedEventBuilder implements Runnable { private static final String ILLEGAL_STATE_STR1 = "Position, identifier and world must be set"; /* Variables */ private World world; /* Getters, Setters */ public World getWorld() { return world; } public WorldSpawnedEventBuilder setWorld(World world) { this.world = world; return this; } /* Methodes */ @SuppressWarnings("unchecked") public <U extends WorldSpawnedEventBuilder> void times(int times, Consumer<U> action) { Objects.requireNonNull(action); for (int i = 0; i < times; i++) { action.accept((U) this); } } @SuppressWarnings("unchecked") public <U extends WorldSpawnedEventBuilder> void times(int times, BiConsumer<U, Integer> action) { Objects.requireNonNull(action); for (int i = 0; i < times; i++) { action.accept((U) this, i); } } /* Interfaces */ private interface IPositionedWorldSpawnedEvent { Vec3 getPosition(); IPositionedWorldSpawnedEvent setPosition(Vec3 position); IPositionedWorldSpawnedEvent setPosition(double x, double y, double z); } private interface IEntityWorldSpawnedEvent { Entity getEntity(); IEntityWorldSpawnedEvent setEntity(Entity entity); } private interface IEntityPlayerWorldSpawnedEvent { EntityPlayer getEntityPlayer(); IEntityPlayerWorldSpawnedEvent setEntityPlayer(EntityPlayer entity); } private interface IStringIdentifierWorldSpawnedEvent { String getIdentifier(); IStringIdentifierWorldSpawnedEvent setIdentifier(String identifier); } private interface ISoundWorldSpawnedEvent { float getPitch(); float getVolume(); ISoundWorldSpawnedEvent setPitch(float pitch); ISoundWorldSpawnedEvent setVolume(float volume); } /* Abstract Classes */ private abstract static class EntityWorldSpawnedEventBuilder extends WorldSpawnedEventBuilder implements IEntityWorldSpawnedEvent { private Entity entity; @Override public Entity getEntity() { return entity; } @Override public EntityWorldSpawnedEventBuilder setEntity(Entity entity) { this.entity = entity; return this; } } private abstract static class PositionedEntityWorldSpawnedEventBuilder extends EntityWorldSpawnedEventBuilder implements IPositionedWorldSpawnedEvent { private Vec3 position; @Override public Vec3 getPosition() { return position; } @Override public PositionedEntityWorldSpawnedEventBuilder setPosition(Vec3 position) { this.position = position; return this; } @Override public PositionedEntityWorldSpawnedEventBuilder setPosition(double x, double y, double z) { this.position = Vec3.createVectorHelper(x, y, z); return this; } } private abstract static class PositionedWorldSpawnedEventBuilder extends WorldSpawnedEventBuilder implements IPositionedWorldSpawnedEvent { private Vec3 position; @Override public Vec3 getPosition() { return position; } @Override public PositionedWorldSpawnedEventBuilder setPosition(Vec3 position) { this.position = position; return this; } @Override public PositionedWorldSpawnedEventBuilder setPosition(double x, double y, double z) { this.position = Vec3.createVectorHelper(x, y, z); return this; } } private abstract static class StringIdentifierPositionedWorldSpawnedEventBuilder extends PositionedWorldSpawnedEventBuilder implements IStringIdentifierWorldSpawnedEvent { private String identifier; @Override public String 
getIdentifier() { return identifier; } @Override public StringIdentifierPositionedWorldSpawnedEventBuilder setIdentifier(String identifier) { this.identifier = identifier; return this; } } private abstract static class SoundStringIdentifierPositionedWorldSpawnedEventBuilder extends StringIdentifierPositionedWorldSpawnedEventBuilder implements ISoundWorldSpawnedEvent { private float pitch; private float volume; @Override public float getPitch() { return pitch; } @Override public float getVolume() { return volume; } @Override public SoundStringIdentifierPositionedWorldSpawnedEventBuilder setPitch(float pitch) { this.pitch = pitch; return this; } @Override public SoundStringIdentifierPositionedWorldSpawnedEventBuilder setVolume(float volume) { this.volume = volume; return this; } } /* Implementations */ public static final class ParticleEventBuilder extends StringIdentifierPositionedWorldSpawnedEventBuilder { private Vec3 motion; public Vec3 getMotion() { return motion; } public ParticleEventBuilder setMotion(double x, double y, double z) { this.motion = Vec3.createVectorHelper(x, y, z); return this; } public ParticleEventBuilder setMotion(Vec3 motion) { this.motion = motion; return this; } @Override public ParticleEventBuilder setWorld(World world) { return (ParticleEventBuilder) super.setWorld(world); } @Override public ParticleEventBuilder setPosition(Vec3 position) { return (ParticleEventBuilder) super.setPosition(position); } @Override public ParticleEventBuilder setPosition(double x, double y, double z) { return (ParticleEventBuilder) super.setPosition(x, y, z); } @Override public ParticleEventBuilder setIdentifier(String identifier) { return (ParticleEventBuilder) super.setIdentifier(identifier); } @Override public void run() { if (getPosition() == null || getIdentifier() == null || getMotion() == null || getWorld() == null) throw new IllegalStateException("Position, identifier, motion and world must be set"); getWorld().spawnParticle( getIdentifier(), getPosition().xCoord, getPosition().yCoord, getPosition().zCoord, getMotion().xCoord, getMotion().yCoord, getMotion().zCoord ); } } public static final class SoundEffectEventBuilder extends SoundStringIdentifierPositionedWorldSpawnedEventBuilder { @Override public SoundEffectEventBuilder setWorld(World world) { return (SoundEffectEventBuilder) super.setWorld(world); } @Override public SoundEffectEventBuilder setPosition(Vec3 position) { return (SoundEffectEventBuilder) super.setPosition(position); } @Override public SoundEffectEventBuilder setPosition(double x, double y, double z) { return (SoundEffectEventBuilder) super.setPosition(x, y, z); } @Override public SoundEffectEventBuilder setIdentifier(String identifier) { return (SoundEffectEventBuilder) super.setIdentifier(identifier); } @Override public SoundEffectEventBuilder setPitch(float pitch) { return (SoundEffectEventBuilder) super.setPitch(pitch); } @Override public SoundEffectEventBuilder setVolume(float volume) { return (SoundEffectEventBuilder) super.setVolume(volume); } @Override public void run() { if (getPosition() == null || getIdentifier() == null || getWorld() == null) throw new IllegalStateException(ILLEGAL_STATE_STR1); getWorld().playSoundEffect( getPosition().xCoord, getPosition().yCoord, getPosition().zCoord, getIdentifier(), getPitch(), getVolume() ); } } public static final class SoundEventBuilder extends SoundStringIdentifierPositionedWorldSpawnedEventBuilder { private boolean proximity; public boolean isProximity() { return proximity; } @Override public 
SoundEventBuilder setWorld(World world) { return (SoundEventBuilder) super.setWorld(world); } @Override public SoundEventBuilder setPosition(Vec3 position) { return (SoundEventBuilder) super.setPosition(position); } @Override public SoundEventBuilder setPosition(double x, double y, double z) { return (SoundEventBuilder) super.setPosition(x, y, z); } @Override public SoundEventBuilder setIdentifier(String identifier) { return (SoundEventBuilder) super.setIdentifier(identifier); } @Override public SoundEventBuilder setPitch(float pitch) { return (SoundEventBuilder) super.setPitch(pitch); } @Override public SoundEventBuilder setVolume(float volume) { return (SoundEventBuilder) super.setVolume(volume); } public SoundEventBuilder setProximity(boolean proximity) { this.proximity = proximity; return this; } @Override public void run() { if (getPosition() == null || getIdentifier() == null || getWorld() == null) throw new IllegalStateException(ILLEGAL_STATE_STR1); getWorld().playSound( getPosition().xCoord, getPosition().yCoord, getPosition().zCoord, getIdentifier(), getPitch(), getVolume(), isProximity() ); } } /** * Positional Data is rounded down due to this targeting a block. */ public static final class RecordEffectEventBuilder extends StringIdentifierPositionedWorldSpawnedEventBuilder { @Override public RecordEffectEventBuilder setWorld(World world) { return (RecordEffectEventBuilder) super.setWorld(world); } @Override public RecordEffectEventBuilder setPosition(Vec3 position) { return (RecordEffectEventBuilder) super.setPosition(position); } @Override public RecordEffectEventBuilder setPosition(double x, double y, double z) { return (RecordEffectEventBuilder) super.setPosition(x, y, z); } @Override public RecordEffectEventBuilder setIdentifier(String identifier) { return (RecordEffectEventBuilder) super.setIdentifier(identifier); } @Override public void run() { if (getPosition() == null || getIdentifier() == null || getWorld() == null) throw new IllegalStateException(ILLEGAL_STATE_STR1); getWorld().playRecord( getIdentifier(), (int) getPosition().xCoord,(int) getPosition().yCoord,(int) getPosition().zCoord ); } } public static final class ExplosionEffectEventBuilder extends PositionedEntityWorldSpawnedEventBuilder { private boolean isFlaming, isSmoking; private float strength; public float getStrength() { return strength; } public ExplosionEffectEventBuilder setStrength(float strength) { this.strength = strength; return this; } public boolean isFlaming() { return isFlaming; } public ExplosionEffectEventBuilder setFlaming(boolean flaming) { isFlaming = flaming; return this; } public boolean isSmoking() { return isSmoking; } public ExplosionEffectEventBuilder setSmoking(boolean smoking) { isSmoking = smoking; return this; } @Override public ExplosionEffectEventBuilder setWorld(World world) { return (ExplosionEffectEventBuilder) super.setWorld(world); } @Override public ExplosionEffectEventBuilder setEntity(Entity entity) { return (ExplosionEffectEventBuilder) super.setEntity(entity); } @Override public ExplosionEffectEventBuilder setPosition(double x, double y, double z) { return (ExplosionEffectEventBuilder) super.setPosition(x, y, z); } @Override public void run() { if (getPosition() == null || getWorld() == null) throw new IllegalStateException("Position and world must be set"); getWorld().newExplosion(getEntity(), getPosition().xCoord, getPosition().yCoord, getPosition().zCoord, strength, isFlaming, isSmoking); } } /** * Positional Data is rounded down due to this targeting a block. 
*/ public static final class ExtinguishFireEffectEventBuilder extends PositionedWorldSpawnedEventBuilder implements IEntityPlayerWorldSpawnedEvent { private int side; private EntityPlayer entityPlayer; public int getSide() { return side; } public ExtinguishFireEffectEventBuilder setSide(int side) { this.side = side; return this; } @Override public EntityPlayer getEntityPlayer() { return entityPlayer; } @Override public ExtinguishFireEffectEventBuilder setEntityPlayer(EntityPlayer entity) { this.entityPlayer = entity; return this; } @Override public ExtinguishFireEffectEventBuilder setWorld(World world) { return (ExtinguishFireEffectEventBuilder) super.setWorld(world); } @Override public ExtinguishFireEffectEventBuilder setPosition(Vec3 position) { return (ExtinguishFireEffectEventBuilder) super.setPosition(position); } @Override public ExtinguishFireEffectEventBuilder setPosition(double x, double y, double z) { return (ExtinguishFireEffectEventBuilder) super.setPosition(x, y, z); } @Override public void run() { if (getEntityPlayer() == null || getPosition() == null || getWorld() == null) throw new IllegalStateException("EntityPlayer, position and world must be set"); getWorld().extinguishFire(getEntityPlayer(), (int) getPosition().xCoord, (int) getPosition().yCoord, (int) getPosition().zCoord, side); } } public static final class SoundAtEntityEventBuilder extends EntityWorldSpawnedEventBuilder implements ISoundWorldSpawnedEvent, IStringIdentifierWorldSpawnedEvent { private float pitch; private float volume; private String identifier; @Override public String getIdentifier() { return identifier; } @Override public SoundAtEntityEventBuilder setIdentifier(String identifier) { this.identifier = identifier; return this; } @Override public float getPitch() { return pitch; } @Override public float getVolume() { return volume; } @Override public SoundAtEntityEventBuilder setPitch(float pitch) { this.pitch = pitch; return this; } @Override public SoundAtEntityEventBuilder setVolume(float volume) { this.volume = volume; return this; } @Override public SoundAtEntityEventBuilder setWorld(World world) { return (SoundAtEntityEventBuilder) super.setWorld(world); } @Override public SoundAtEntityEventBuilder setEntity(Entity entity) { return (SoundAtEntityEventBuilder) super.setEntity(entity); } @Override public void run() { if (getWorld() == null || getIdentifier() == null || getEntity() == null) throw new IllegalStateException("World, Identifier and entity must be set!"); getWorld().playSoundAtEntity(getEntity(), getIdentifier(), volume, pitch); } } public static final class SoundToNearExceptEventBuilder extends WorldSpawnedEventBuilder implements ISoundWorldSpawnedEvent, IStringIdentifierWorldSpawnedEvent, IEntityPlayerWorldSpawnedEvent { private float pitch; private float volume; private String identifier; private EntityPlayer entityPlayer; @Override public String getIdentifier() { return identifier; } @Override public SoundToNearExceptEventBuilder setIdentifier(String identifier) { this.identifier = identifier; return this; } @Override public float getPitch() { return pitch; } @Override public float getVolume() { return volume; } @Override public SoundToNearExceptEventBuilder setPitch(float pitch) { this.pitch = pitch; return this; } @Override public SoundToNearExceptEventBuilder setVolume(float volume) { this.volume = volume; return this; } @Override public SoundToNearExceptEventBuilder setWorld(World world) { return (SoundToNearExceptEventBuilder) super.setWorld(world); } @Override public void run() 
{ if (getWorld() == null || getIdentifier() == null || getEntityPlayer() == null) throw new IllegalStateException("World, Identifier and EntityPlayer must be set!"); getWorld().playSoundAtEntity(getEntityPlayer(), getIdentifier(), volume, pitch); } @Override public EntityPlayer getEntityPlayer() { return entityPlayer; } @Override public SoundToNearExceptEventBuilder setEntityPlayer(EntityPlayer entity) { entityPlayer = entity; return this; } } }
The value and practical utility of intraoperative touch imprint cytology of sentinel lymph node(s) in patients with breast cancer: A retrospective cytology-histology correlation study Objective: Intraoperative evaluation of sentinel lymph nodes (SLNs) for patients with breast cancer is widely performed with frozen section (FS), cytology, or a combination of both. Touch imprint cytology (TIC) reportedly has an equivalent sensitivity to FS. We studied its diagnostic utility to detect SLN metastases. Materials and Methods: Cases of 367 patients with breast cancer who underwent intraoperative evaluation of SLNs (507 LNs) were evaluated. All FS and corresponding TIC slides of SLNs of each case were reviewed microscopically for the presence of metastases of any size. If present, the metastatic focus was measured on the FS. Results: Of these 507 SLNs, 82 LNs (16.2%) from 69 women were found to have metastases in the FS and consisted of 5 LNs of isolated tumor cells, 15 of micrometastasis, and 62 of macrometastasis. TIC identified metastases in 69 of these 82 SLNs (sensitivity: 84.1%, specificity: 100%, and accuracy: 97.4%). All macrometastases could be detected by TIC, whereas TIC identified approximately 50% of micrometastases and none of the isolated tumor cells. The size detection limit of metastatic foci, defined as the smallest dimension of metastasis detected without false negatives, was 2 mm. The smallest metastatic focus identified was 0.8 mm. Conclusions: TIC of SLNs is of great use given its negative predictive value of 100% for identification of macrometastasis in our study. For intraoperative evaluation of SLNs, based on our data, a practical two-step approach is proposed: SLN evaluation should be initially performed by TIC and then proceed to FS histological analysis only when cytologically positive, to determine the size of the metastatic focus. INTRODUCTION The prognosis of breast cancer is directly correlated with quantitative nodal tumor burden, which is a continuous variable and is far more important than the simple presence or absence of nodal disease. More specifically, the number of metastatic foci, size of metastasis, and number of involved lymph nodes all contribute unfavorably to the patient's outcome. The stratification of nodal tumor burden is codified in the current AJCC/UICC N-staging system. Sentinel lymph node (SLN) biopsy is the standard axillary staging procedure for patients with clinically negative axilla (cN0). Intraoperative determination of the SLN status provides the surgeon with crucial information on whether immediate axillary lymph node dissection (ALND) should be completed, avoiding a second anesthesia. It is reported that women with microscopic metastatic SLNs (pN0 and pN1mi, or metastatic foci ≤2 mm) do not benefit from completion ALND in overall and disease-free survival and axillary recurrence rate. Thus, in the intraoperative SLN examination, it is important to determine the size of the metastatic focus, more specifically, whether the metastasis is macrometastatic (>2 mm) or not. Intraoperative SLN evaluation is widely performed in daily oncosurgical practice, and its methodology includes cytology (scrape, smear, or touch), histologic frozen section (FS), rapid immunohistochemistry, and rapid molecular biology (nucleic acid amplification such as CK-19 messenger RNA amplification by reverse transcription-polymerase chain reaction). Of these, the most important and widely utilized methods are FS, touch imprint cytology (TIC), and a combination of both. 
TIC is less expensive, quicker, and easier to perform without losing valuable tissue compared to FS, and reportedly has an equivalent sensitivity to FS tissue diagnosis for the intraoperative SLN evaluation in breast cancer. In this study, we investigated the diagnostic sensitivity and specificity of TIC and its utility to detect SLN metastases in patients with early breast cancer, primarily clinically significant ones. The size detection limit of metastatic foci was also assessed. Based on our data, we proposed a practical and efficient intraoperative approach to SLNs, best utilizing TIC. MATERIALS AND METHODS A retrospective search of the pathology database files at Asahikawa Medical University Hospital, Asahikawa, Japan, revealed a total of 367 patients with breast cancer (cN0) who underwent intraoperative SLN examinations (507 lymph nodes), assessed by both FS and TIC, in the period between June 2015 and June 2018. Both FS and TIC slides of the SLNs from all these cases were retrieved and microscopically reviewed independently by two observers (YU and HT). All FS slides were examined for the presence or absence of metastasis of any size, and if present, the largest focus was measured and its histologic type was recorded (if metastatic deposits were multiple, the largest contiguous tumor deposit was measured). Then, all TIC slides were also examined for the presence or absence of metastatic cancer cells. Any discrepancy between the two observers was resolved by joint review. Based on these data, the diagnostic sensitivity, specificity, and accuracy of TIC to detect SLN metastases of any size as well as clinically significant ones were calculated, and the minimal identifiable size of metastatic focus was also assessed. The size detection limit of metastatic foci, defined as the smallest dimension of metastasis detected by TIC without false negatives, was determined as well. In addition, the patient demographics and breast tumor characteristics of each case were recorded. At the time of intraoperative consultation, each SLN received was serially cross-sectioned in its entirety, perpendicularly to its long axis, at 2-mm intervals. Touch imprints were taken from each cut surface and stained with rapid Papanicolaou stain. Then, all of the sectioned tissue pieces were entirely processed for FS using the Cryofilm transfer method (Leica Biosystems). In this particular FS method, a 5-μm-thick FS was prepared with a special attached adhesive film, stained with H and E, transferred to a glass slide together with the film, and mounted under a coverslip. This method allows for better sectioning and evaluation of fatty lymph nodes, especially their periphery, compared to the conventional FS method, leading to a possible increase in sensitivity to detect small metastases. This study has been approved by the Institutional Review Board at Asahikawa Medical University (#18269). RESULTS A total of 507 SLNs from 367 patients (all women; average age: 59.6 years, range: 28-89 years) were examined intraoperatively during this 37-month study period. No male cases were present. The SLNs measured 5-28 mm in the greatest dimension. The number of SLNs examined per case ranged from 1 to 4 (mostly one). Of these 507 SLNs, 82 lymph nodes (16.2%) from 69 women were histologically found to have breast cancer metastases of any size in the FS examined, consisting of 5 nodes with isolated tumor cells (ITCs: metastatic focus ≤0.2 mm or <200 cells), 15 nodes with micrometastasis (>0.2 to ≤2 mm), and 62 nodes with macrometastasis (>2 mm). 
In terms of histological types, 70 cases were invasive carcinoma of no special type (IC-NST), 9 invasive lobular carcinoma (ILC), and 3 other types of carcinoma (e.g., invasive micropapillary carcinoma). All of the micrometastasis and ITC cases were IC-NST, except for one case of micrometastasis, which was ILC. As shown in Table 1 and Figure 1, TIC detected metastasis in 69 of the 82 SLNs (84.1%) with histologically identifiable metastases, whereas no false-positive cases (i.e., positive cytology but negative histology) were present (sensitivity: 84.1%, specificity: 100%, and accuracy: 97.4%). All of the macrometastases were successfully detected by TIC (sensitivity and specificity: 100%). On the other hand, TIC could identify approximately 50% of micrometastases, which included one case of ILC micrometastasis (1 mm focus). None of the ITCs were detected by TIC. The smallest metastatic focus detected by TIC was 0.8 mm (one case). The size detection limit of metastatic foci was determined to be 2 mm. DISCUSSION TIC is rapid, simple, and inexpensive, and requires neither sophisticated instruments nor the sacrifice of tissue. In surgical pathology practice, it is routinely used for intraoperative consultation as an adjunct to histopathology (FS). For an intraoperative SLN examination of breast cancer, TIC reportedly showed high diagnostic concordance with FS. In our investigation, we could successfully detect all macrometastatic foci by TIC (i.e., negative predictive value of 100% for cytological identification of macrometastasis). The lower size limit of detection, the smallest metastatic dimension that could be identified by TIC without false negatives (i.e., positive histology but negative cytology), was 2 mm. Moreover, TIC was sensitive enough to identify about half of micrometastatic foci, mostly of ≥1 mm, with the minimal size of detection of 0.8 mm. Recently, Bruzzone et al. conducted a similar study using intraoperative scrape cytology (ISC) of bisected SLNs and reported a sensitivity of 100% for macrometastasis, which is in keeping with our imprint-based results. On the other hand, their reported sensitivity for micrometastasis was much higher than ours (84% vs. 46.7%). For detection of macrometastasis in SLNs, there is probably no methodological difference between TIC and ISC, whereas for detection of micrometastasis, touch imprinting may not transfer the few cancer cells present to the slides as effectively as scraping, even if multiple serially sectioned surfaces are imprinted. Cytological identification of metastatic LC in SLNs is known to be challenging given its low-grade cytomorphology, with some resemblance to surrounding lymphocytes, and its tendency to lose cellular cohesiveness. In our study, nine cases of ILC (eight macrometastases and one micrometastasis) were found among the 82 metastatic SLNs, and our TIC examination, informed by the above-mentioned cytological characteristics, successfully detected all of these LC cases. This is in keeping with the earlier study by Creager et al., in which there were no statistically significant differences in sensitivity, specificity, or accuracy for the intraoperative detection of LC versus ductal carcinoma in SLNs using TIC. Table 1: Number of sentinel lymph nodes and patients with and without metastasis, stratified by size of metastasis and detection rate by touch imprint cytology. 
[Table 1 body not reproduced; columns include SLNs without metastasis (patients), histology, and detection by TIC.] For breast cancer patients with microscopic metastatic spread (i.e., pN0 and pN1mi) in SLN(s), completion ALND is usually not performed. The presence of macrometastasis, even a single focus, in the SLN(s) traditionally mandated completion ALND. This strategy is still followed as the standard of management in some countries such as Japan. However, results of the multicenter American College of Surgeons Oncology Group Z0011 trial for patients with early-stage invasive breast cancer (cT1-T2 cN0) having one or two SLNs with macrometastasis revealed noninferior rates of overall, disease-free, and locoregional recurrence-free survival between the ALND group and the SLN-biopsy-alone group, as long as they were treated with breast-conserving surgery followed by tangential adjuvant radiation. These findings were adopted by the current NCCN guidelines. This study caused a drastic paradigm shift, and ALND is no longer necessary for the majority of early breast cancer cases. Likewise, it is reported that cases of intraoperative SLN examination markedly declined after the Z0011 criteria were applied as a standard in axillary management. The intraoperative SLN examination most likely tends to be limited to cases that do not meet the Z0011 criteria. Regardless of the number of SLNs examined intraoperatively, accurate identification of macrometastasis, which is "clinically significant," is important. Moreover, it is far more important in countries where the traditional axillary management strategy is still applied (such as Japan). In this setting, TIC is of great use given the NPV of 100% for cytological identification of macrometastasis in our study. Based on our results, a practical and efficient intraoperative two-step approach to the SLN(s) is proposed as follows: TIC is initially performed on each serially sectioned piece of LN(s) and examined cytologically, and when it is negative, an intraoperative diagnosis of "no macrometastasis identified" can be rendered. When it is positive, FS should follow, using only the cytologically positive tissue piece(s), and the metastatic focus should be histologically measured, since TIC is sensitive enough to detect about half of micrometastases. In this approach, TIC might be more effective than ISC, since the former detects fewer cases of micrometastasis (i.e., triggers fewer FS examinations for clinically insignificant foci), albeit with 100% sensitivity for macrometastasis in both methodologies. Given the low rate of SLN macrometastasis in early breast cancers, this strategy is easier to perform, quicker, and more cost-effective than the conventional FS examination. CONCLUSIONS We demonstrated 100% sensitivity for macrometastasis in intraoperative SLN cytological examination with TIC, and, utilizing this cytological method, proposed a practically useful intraoperative two-step strategy to examine the SLNs in breast cancer patients.
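To make the reported figures concrete, the sensitivity, specificity, accuracy, and macrometastasis NPV follow directly from the counts given in the Results (82 metastatic nodes of which TIC detected 69, 425 non-metastatic nodes, and no false positives); a quick check:

```python
# Counts taken from the Results section above.
TP = 69            # metastatic SLNs detected by TIC
FN = 82 - TP       # metastatic SLNs missed by TIC (13)
TN = 507 - 82      # non-metastatic SLNs, all cytologically negative (425)
FP = 0             # no false-positive TIC results were observed

sensitivity = TP / (TP + FN)            # 69/82   = 0.841
specificity = TN / (TN + FP)            # 425/425 = 1.000
accuracy    = (TP + TN) / 507           # 494/507 = 0.974

# For macrometastasis alone, all 62 foci were detected, so FN_macro = 0 and a
# negative TIC rules out macrometastasis: NPV = TN / (TN + FN_macro) = 100%.
print(f"sensitivity={sensitivity:.1%} specificity={specificity:.1%} accuracy={accuracy:.1%}")
```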
HIGH TEMPERATURE ELECTRONICS, COMMUNICATIONS, AND SUPPORTING TECHNOLOGIES FOR VENUS MISSIONS NASA Glenn Research Center is presently leading the development of electronics and sensors capable of prolonged stable operation in harsh 500°C environments. These technologies are being developed for engine environments but also have Venus planetary exploration applications. This paper discusses these high temperature electronic and sensor technologies as well as their relevance to Venus missions. A specific application, a Venus Integrated Weather Sensor (VIWS) system, is described.
from django.shortcuts import render, redirect
from pages.forms import FormularioPost
from django.contrib import messages
from django.db import connection
from pages.models import Post
from django.core.paginator import Paginator
from bs4 import BeautifulSoup
from urllib.request import urlopen
from careerjet_api import CareerjetAPIClient
import pymysql
import psycopg2
import psycopg2.extras


# Main view
def index(request):
    # listado_ofertas = Post.objects.all()
    # paginator = Paginator(listado_ofertas, 3)
    # pagina = request.GET.get("page") or 1
    # ofertas = paginator.get_page(pagina)
    # pagina_actual = int(pagina)
    # paginas = range(1, ofertas.paginator.num_pages + 1)
    keyword = request.POST.get("keyword", default=None)
    location = request.POST.get("location", default=None)
    offer_list_view = None
    if keyword is not None and location is not None:
        api_offer_list = get_offers_api(keyword, location)
        # db_offer_list = get_offers_db(keyword, location)
        save_to_db(request, api_offer_list)
        offer_list_view = api_offer_list
        # offer_list_view = db_offer_list + api_offer_list
    print(offer_list_view)
    return render(request, "oferta_empleo.html", {"offer_list_view": offer_list_view})


# Fetch offers from the Careerjet API
def get_offers_api(keyword, location):
    client = CareerjetAPIClient("es_ES")
    search_results = client.search({
        'location': location,
        'keywords': keyword,
        'sort': 'date',
        'pagesize': '100',
        # 'page': '5',
        'affid': '213e213hd12344552',
        'user_ip': '172.16.17.32',
        'url': 'http://www.example.com/jobsearch?q=dba&l=barcelona',
        'user_agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/86.0.4240.198 Safari/537.36 Edg/86.0.622.69 '
    })
    # Build and return the list of results
    api_offer_list = []
    for elm in search_results['jobs']:
        offer = {
            "title": elm.get('title', None),
            "company": elm.get('company', None),
            "salary": elm.get('salary', None),
            "sal_min": elm.get('salary_min', None),
            "sal_max": elm.get('salary_max', None),
            "publi_date": elm.get('date', None),
            "description": elm.get('description', None),
            "url": elm.get('url', None),
            "site": elm.get('site', None)
        }
        api_offer_list.append(offer)
    return api_offer_list


# Fetch offers from the database
def get_offers_db(keyword, location):
    db = psycopg2.connect(dbname="remotejob", user="postgres", password="<PASSWORD>",
                          host="localhost", port=5432)
    # Prepare a cursor object using the cursor() method
    cursor = db.cursor(cursor_factory=psycopg2.extras.DictCursor)
    try:
        # Return the list of results; the implicit join between oferta and
        # provincia still lacks an explicit join condition.
        cursor.execute(
            "select * from oferta, provincia "
            "where oferta.nombre_ofrt = %s and provincia.nombre_prov = %s",
            (keyword, location),
        )
        return cursor.fetchall()
    except Exception:
        # Roll back in case of error
        db.rollback()


# Save to the database
def save_to_db(request, api_offer_list):
    db = psycopg2.connect(dbname="remotejob", user="postgres", password="<PASSWORD>",
                          host="localhost", port=5432)
    # Prepare a cursor object using the cursor() method
    cursor = db.cursor()
    try:
        for offer in api_offer_list:
            # Parameterized insert; each offer is a dict built in get_offers_api
            cursor.execute(
                "insert into oferta (nombre_ofrt, empr_id, cat_id) values (%s, '1', '1');",
                (offer["title"],),
            )
        # Commit to the database
        db.commit()
        messages.success(request, "La oferta se ha guardado correctamente.")
    except Exception:
        # Roll back in case of error
        db.rollback()


# # Use this function to add offers to our own DB
# with connection.cursor() as cursor:
#     try:
#         for offer in offer_list:
#             # Execute stored MySQL procedure
#             cursor.execute(
#                 "insert into empresa(nombre_empr, cif, email, site) values ('xibalba', 'W47601469', "
#                 "'<EMAIL>', 'https://www.xibalba.com');")
#             # cursor.execute(f"CALL sp_insert_data({offer[0]}, )")
#             row = cursor.fetchone()
#             # Commit to the database
#             connection.commit()
#             return row
#     except:
#         # Roll back in case of error
#         connection.rollback()

# def index(request):
#     # return HttpResponse('Hola bienvenid@ a TecnoJob')
#     return render(request, "searcher.html")

# def offer_insert(request):
#     conn = psycopg2.connect(dbname="tecnojob00", user="postgres", password="<PASSWORD>", host="localhost", port=5432)
#     cursor = conn.cursor()
#     cursor.execute("insert into offer (title, salary, remote, publi_date, company_id, cat_id) "
#                    "values (%s, %s, %s, %s, %s, %s);", ('DBA', 10000, True, '2020-11-28', 4, 1))
#     conn.commit()
#     cursor.close()
#     conn.close()
#     return HttpResponse('Registro Insertado')
#     # return render(request, "offer_create.html")

# def offer_select(request):
#     conn = psycopg2.connect(dbname="tecnojob00", user="postgres", password="<PASSWORD>", host="localhost", port=5432)
#     cursor = conn.cursor()
#     cursor.execute("select * from offer;")
#     return HttpResponse(cursor.fetchall())
#     # return render(request, "offer_read.html")

# def offer_update(request):
#     conn = psycopg2.connect(dbname="tecnojob00", user="postgres", password="<PASSWORD>", host="localhost", port=5432)
#     return render(request, "offer_update.html")

# def offer_delete(request):
#     conn = psycopg2.connect(dbname="tecnojob00", user="postgres", password="<PASSWORD>", host="localhost", port=5432)
#     return render(request, "offer_delete.html")


def crear_oferta(request):
    try:
        with connection.cursor() as cursor:
            # Execute stored MySQL procedure; note the values list has fewer
            # entries than the column list and still needs to be completed.
            cursor.execute(
                "insert into oferta(nombre_ofrt, descrip, salario, sal_min, sal_max, tcontr, periodo, "
                "remote, url, site, fecha_publi, empr_id, cat_id) values('dba', '21000', '20000', '25000', "
                "'media jornada', 'fin servicio', 'False', 'https://www.google.es', 'www.google.es', "
                "'2020/07/12', '1', '1');")
            row = cursor.fetchone()
            # Commit to the database
            connection.commit()
            return row
    except Exception:
        # Roll back in case of error
        connection.rollback()


def crear_empresa(request, company_list):
    try:
        with connection.cursor() as cursor:
            # Execute stored MySQL procedure
            cursor.execute(
                "insert into empresa(nombre_empr, cif, email, site) values ('xibalba', 'W47601469', "
                "'<EMAIL>', 'https://www.xibalba.com');")
            row = cursor.fetchone()
            # Commit to the database
            connection.commit()
            return row
    except Exception:
        # Roll back in case of error
        connection.rollback()


def crear_categoria(request, cat_list):
    try:
        with connection.cursor() as cursor:
            # Execute stored MySQL procedure
            cursor.execute("insert into categoria (nombre_cat) values ('Base de datos');")
            row = cursor.fetchone()
            # Commit to the database
            connection.commit()
            return row
    except Exception:
        # Roll back in case of error
        connection.rollback()


def crear_post(request):
    if request.method == "POST":
        form = FormularioPost(request.POST, request.FILES)
        if form.is_valid():
            post = form.save(commit=False)
            post.autor_id = request.user.id
            post.save()
            titulo = form.cleaned_data.get("titulo")
            messages.success(request, f"La oferta {titulo} se ha creado correctamente")
            return redirect("oferta")
        else:
            for msg in form.error_messages:
                messages.error(request, form.error_messages[msg])
    form = FormularioPost()
    return render(request, "crear_oferta.html", {"form": form})


def crear_cv(request):
    pass


def eliminar_post(request, post_id):
    try:
        post = Post.objects.get(pk=post_id)
    except Post.DoesNotExist:
        messages.error(request, 'La publicación que quieres eliminar no existe.')
        return redirect("blog")
    if post.autor != request.user:
        messages.error(request, 'No eres el autor de esta publicación.')
        return redirect("blog")
    post.delete()
    messages.success(request, f"El post {post.titulo} ha sido eliminado!")
    return redirect("blog")


# if __name__ == '__main__':
#     res = get_offers('es_ES')
#     for offer in res:
#         print(offer[0])

# Variables for our URL sources
# BASE_URL = "https://www.opcionempleo.com/"
# regions_URL = "https://www.opcionempleo.com/ofertas-empleo-provincia-de-barcelona-34873.html"

# Lists that will hold our regions and cities
# region_URL_list = []
# cities_name_list = []

# Function to find and build the list of regions
# def getRegionLinks(regions_URL):
#     # Fetch the page source from the regions URL
#     html = urlopen(regions_URL).read()
#     # Create the BeautifulSoup object
#     soup = BeautifulSoup(html, "lxml")
#     # Use a BS method to find the links section
#     region_page = soup.find_all()
#     # Loop over all regions to collect their links, building each
#     # region link from the relative ones
#     for cities in region_page:
#         links = cities.findAll('a')
#         for a in links:
#             if a.text != '':
#                 region_URL_list.append(BASE_URL + a['href'])
#     # Finally, return the list of region URLs
#     return region_URL_list

# url = 'https://www.opcionempleo.com/ofertas-empleo-provincia-de-barcelona-34873.html'
# page = requests.get(url)
# soup = BeautifulSoup(page.content, 'html.parser')
# # ciud = soup.find('a', a_attr='Granollers, Barcelona')
# # print(ciud)
Delays to Surgery and Coronal Malalignment Are Associated with Reoperation after Open Tibia Fractures in Tanzania. BACKGROUND Treatment of diaphyseal open tibia fractures often results in reoperation and impaired quality of life. Few studies, particularly in resource-limited settings, have described factors associated with outcomes after these fractures. QUESTIONS/PURPOSES Which patient demographic, perioperative, and treatment characteristics are associated with an increased risk of reoperation after treatment of open tibia fractures with intramedullary nailing or an external fixation device in Tanzania? Which patient demographic, perioperative, and treatment characteristics are associated with worse 1-year quality of life after treatment of open tibia fractures with intramedullary nailing or an external fixation device in Tanzania? METHODS A prospective study was completed in parallel to a similarly conducted RCT at a tertiary referral center in Tanzania that enrolled adult patients with diaphyseal open tibia fractures from December 2015 to March 2017. Patients were treated with either a statically locked intramedullary nail or an external fixator and examined at 2 weeks, 6 weeks, 3 months, 6 months, and 1 year postoperatively. The primary outcome, reoperation, was any deep infection or nonunion treated with a secondary intervention. The secondary outcome was the 1-year EuroQol-5D (EQ-5D) index score. There were 394 patients screened and, ultimately, 267 patients enrolled in the study (240 from the primary RCT and 27 followed for the purposes of this study). Of these, 90% (240 of 267) completed 1-year follow-up and were included in the final analysis. This group comprised 110 patients who underwent intramedullary nailing (IMN) and 130 who had external fixation; follow-up was similar between study groups. Patients were an average of 33 years old and were primarily males who sustained road traffic injuries resulting in AO/Orthopaedic Trauma Association (OTA) classification Type A or B fractures. There were 51 reoperations. For the purposes of analysis, all patients were pooled to identify all other factors, in addition to treatment type, associated with increased risk of reoperation and with 1-year quality of life. An exploratory bivariable analysis identifying factors associated with reoperation risk and EQ-5D was subsequently included in a multivariable modeling procedure to control for confounding of effect on our primary outcome. Multivariable modeling was performed using standard hierarchical modeling simplification procedures with log-likelihood ratios. Alpha levels were set to 0.05. RESULTS After controlling for potentially confounding variables such as gender, smoking status, mechanism of injury, and treatment type, the following factors were independently associated with reoperation: time from hospital presentation to surgery more than 24 hours (odds ratio 7.7 [95% confidence interval 2.1 to 27.8]; p = 0.002), AO/OTA fracture classification Type 42C fracture (OR 4.2; p = 0.02), OTA-Open Fracture Classification muscle loss (OR 7.5; p = 0.02), and varus coronal angle on an immediate postoperative AP radiograph (OR 4.8; p = 0.002). After again controlling for confounding variables such as gender, smoking status, mechanism of injury, and treatment type, factors independently associated with worse 1-year EQ-5D scores included: wound length ≥ 10 cm (β = -0.081; p = 0.006), OTA-Open Fracture Classification muscle loss (β = -0.133; p = 0.002), and OTA-Open Fracture Classification bone loss (β = -0.111; p = 0.03).
We observed a modest but independent association between reoperation and worse 1-year EQ-5D scores (β = -0.113; p < 0.001). CONCLUSIONS We identified two potentially modifiable factors associated with the risk of reoperation: time to surgical treatment and varus coronal angulation during definitive stabilization. Hospitals may be able to minimize time to surgery, and thus reoperation, by increasing the number of available operative personnel and operative space and by emphasizing that an open tibia fracture is an injury requiring emergent orthopaedic management. Given the lack of fluoroscopy in the study setting and similar settings, surgeons should emphasize appropriate fracture alignment, even into slight valgus, to avoid varus angulation and the subsequent reoperation risk. LEVEL OF EVIDENCE Level II, therapeutic study.
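As a rough illustration of the modeling procedure the abstract describes (nested multivariable models compared with log-likelihood ratios), the sketch below fits two nested logistic regressions and runs a likelihood-ratio test. It is not the study's code or data: the cohort is synthetic, and the two binary predictors merely stand in for "time to surgery > 24 hours" and "varus coronal angulation".

import numpy as np
import statsmodels.api as sm
from scipy import stats

rng = np.random.default_rng(0)
n = 240  # same order of magnitude as the study's analysis cohort

# Synthetic binary predictors standing in for "delay to surgery > 24 h"
# and "varus coronal angulation"; reoperation risk depends on both.
delay = rng.integers(0, 2, n)
varus = rng.integers(0, 2, n)
logit_p = -2.0 + 1.5 * delay + 1.2 * varus
reop = rng.binomial(1, 1 / (1 + np.exp(-logit_p)))

X_full = sm.add_constant(np.column_stack([delay, varus]))
X_reduced = sm.add_constant(delay)

full = sm.Logit(reop, X_full).fit(disp=False)
reduced = sm.Logit(reop, X_reduced).fit(disp=False)

# Odds ratios for the predictors in the full model.
print("odds ratios:", np.exp(full.params[1:]))

# Likelihood-ratio test: does adding the second predictor improve the model?
lr_stat = 2 * (full.llf - reduced.llf)
p_value = stats.chi2.sf(lr_stat, df=1)
print(f"LR stat = {lr_stat:.2f}, p = {p_value:.4f}")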
Modeling resource management in cellular systems using Petri nets Modeling and analysis tools are essential for the design and evaluation of complex systems. This is particularly true for cellular systems, where, for instance, a variety of handoff, channel-allocation, and data-transmission algorithms have been proposed. Petri nets (PNs) are applied as a novel approach to the analysis of handoff, dynamic channel allocation (DCA), and cellular digital packet data resource-management problems. Generalized stochastic PN (GSPN) models are obtained and analyzed as continuous-time Markov chains (MCs) derived from their reachability graphs. Solving the MC yields performance indicators that show the impact of different algorithms on system behavior.
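To make the GSPN-to-Markov-chain pipeline concrete, here is a small sketch that solves the continuous-time Markov chain of a toy channel-allocation model (a single cell with C channels, Poisson call arrivals, exponential holding times) for its stationary distribution and blocking probability. The Petri net and reachability-graph machinery is abstracted away: the generator matrix is written down directly for this birth-death chain, and every parameter value is made up.

import numpy as np

# Toy cell: C channels, Poisson call arrivals (rate lam), exponential
# holding times (rate mu). State k = number of busy channels, 0..C.
C, lam, mu = 8, 5.0, 1.0

# Generator matrix Q of the birth-death CTMC (the reachability graph of
# the corresponding GSPN would yield the same chain).
Q = np.zeros((C + 1, C + 1))
for k in range(C + 1):
    if k < C:
        Q[k, k + 1] = lam          # a new call occupies a channel
    if k > 0:
        Q[k, k - 1] = k * mu       # one of k active calls completes
    Q[k, k] = -Q[k].sum()

# Stationary distribution: solve pi @ Q = 0 with sum(pi) = 1 by replacing
# one balance equation with the normalization condition.
A = np.vstack([Q.T[:-1], np.ones(C + 1)])
b = np.zeros(C + 1)
b[-1] = 1.0
pi = np.linalg.lstsq(A, b, rcond=None)[0]

# Performance indicator: probability that all channels are busy (blocking).
print("blocking probability:", pi[-1])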
// Connect tries to connect and, if successful, starts the goroutine writing
// the buffer. It will return nil on connection refused even though no
// connection is established.
func (wr *NetWriter) Connect() error {
	var err error
	wr.Conn, err = net.Dial(wr.network, wr.address)
	if err != nil {
		// Unwrap nested OS errors so the comparison below works.
		if operr, ok := err.(*net.OpError); ok {
			err = operr.Err
		}
		if scerr, ok := err.(*os.SyscallError); ok {
			err = scerr.Err
		}
		if err == syscall.ECONNREFUSED {
			// Treat "connection refused" as transient: back off briefly
			// and report success so the caller can retry later.
			time.Sleep(500 * time.Millisecond)
			err = nil
		}
	}
	if err != nil {
		log.Printf("net_writer: connect(%s, %s): %v", wr.network, wr.address, err)
		return err
	}
	if wr.Conn != nil {
		// Note: this type assertion assumes a TCP connection; a non-TCP
		// network would make it panic.
		wr.LocalIP = wr.Conn.LocalAddr().(*net.TCPAddr).IP.String()
		go wr.eofReader()
	}
	return nil
}
Software testing is becoming increasingly important. Owners, operators, and users of computer software expect and demand high standards of reliability. If a software system, such as a business software application, experiences a failure, data integrity can be affected (e.g., data can be lost or corrupted). Typically, software testing involves testing of individual software components. For example, software testing can include testing a login screen for various combinations of good and bad login conditions. Software testing can also include testing a data entry form for proper operation using various combinations of input data. While software testing of individual software components can be a relatively straightforward task, software testing of multiple software components, or multiple applications, such as multiple components or applications of a complex software system, can be difficult. For example, software testing across multiple components or applications may require passing parameters between various testing steps. A software testing framework for testing a complex software system, such as a system comprising multiple components or applications, can be difficult to maintain. For example, in order to adequately test a complex software system, a large number of test scenarios may be required. Creating and modifying a large number of test scenarios can be time consuming and prone to errors. Therefore, there exists ample opportunity for improvement in technologies related to software testing of complex software systems.
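To make the parameter-passing problem concrete, the following sketch (hypothetical, tied to no particular testing framework) threads a shared context dictionary through ordered test steps, so that one step's output, such as a created record's id, becomes the next step's input.

from typing import Callable, Dict, List


def run_scenario(steps: List[Callable[[Dict], None]]) -> Dict:
    """Run steps in order, passing one shared context between them.

    Each step reads its inputs from, and writes its outputs to, `context`,
    which is how parameters flow across component boundaries.
    """
    context: Dict = {}
    for step in steps:
        step(context)  # a failing step raises and aborts the scenario
    return context


# Hypothetical steps for a login-then-create-record scenario.
def login(ctx):
    ctx["session_token"] = "token-123"   # stand-in for a real login call


def create_record(ctx):
    assert ctx["session_token"]          # consumes the previous step's output
    ctx["record_id"] = 42                # stand-in for a created record


def verify_record(ctx):
    assert ctx["record_id"] == 42


if __name__ == "__main__":
    result = run_scenario([login, create_record, verify_record])
    print("scenario passed, context:", result)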
Impact of the COVID-19 Crisis on the Work of Saudi Women and Their Role in Confronting It Background: The COVID-19 epidemic has undoubtedly affected the working conditions of large segments of society. More specifically, a growing body of studies has raised the possibility that the precautionary measures and closures prompted by the COVID-19 crisis could affect working women and men in different ways, mostly due to the traditional division of domestic work between the genders in Saudi society. Objective: In this study, we explore how the epidemic-related lockdown affected domestic responsibilities and working from home for men and women. Methods: The researchers developed a questionnaire to identify the impact of the closure on childcare, domestic chores, and the work environment within the home, and applied it to 370 faculty members and teachers with an average age of 38.5±9.6 years. Results: The results indicated statistically significant differences between men and women in childcare and domestic chores, with women more strongly affected. Additionally, the results indicated that participants with children were significantly more affected during the lockdown than peers without children. However, there were no differences between the faculty staff and the teachers on the dimensions of the questionnaire, and no age-related differences across the questionnaire dimensions. Conclusion: Based on these results, the study recommended urging university officials and the Department of Education to provide a range of rescue and stimulus packages, including support for faculty members and female teachers through flexible working hours after the epidemic, part-time work arrangements, telecommuting, and support during pregnancy and parenting. In addition, they should take the disparity between women and men in domestic responsibilities into account when evaluating candidates for scientific promotion or managerial positions. INTRODUCTION By late December 2019, the highly lethal COVID-19 epidemic emerged in the city of Wuhan and rapidly worsened around the world in the first three months of 2020. 1,2 Therefore, the government of the Kingdom of Saudi Arabia imposed a policy of strict quarantine and physical separation among its people across towns and cities to control the source of infection and reduce the spread of the epidemic in society. Likewise, it implemented various emergency measures, including having government employees work from home via the Internet. On March 8, 2020, the Ministry of Education announced that, under preventive and precautionary measures to control COVID-19, it had decided to suspend study in all regions and governorates of the Kingdom and to activate the distance learning system during the suspension period. 3 MacIntyre and Gregersen state that online teaching is now the dominant method of teaching, having replaced the traditional method. 4 As well, the lack of external activities through which children can vent pent-up energy has increased competition between siblings. The house quarantine escalated the situation; as a result, children release their tension through tantrums and violent outbursts, marking an escalation of the crisis among parents.
5 In line with this, Boretti notes that unprecedented national measures to combat the spread of the disease have contributed to reducing the increase in infection and fatality rates, as well as to reducing the prevalence of the epidemic within the Kingdom. 6 These precautionary decisions have had a significant impact on the daily lifestyle of Saudi society, both within the family and at work. Furthermore, there was a significant negative impact on job performance from home during the closure period. 7 Many recent studies have shown that the boundaries between work and family are becoming blurred, while the gender distribution of responsibilities within the family is becoming clearer. In the same vein, Zamarro and Perez-Arce point out that gender inequality worsened during the closure. 11 More specifically, striking a balance between personal and professional roles is a challenge for many women, especially working women with children who need their attention. In particular, the global COVID-19 epidemic has caused many difficulties for women: health concerns for themselves and their loved ones, social and material distancing, travel restrictions, closed borders, lack of daily necessities, and work pressures and demands. 4,12 Accordingly, women's responsibilities have increased, both as primary caregivers and as employees who need to work from home. This has previously been described as the double burden or second shift: increasing demands from both family and work. 13 Once women have children and take on the associated responsibilities, gender inequality is further reinforced. 10 This double burden is one of the obstacles to work-life balance, where the conflict between work and domestic duties has a significant effect on women. 14 Moreover, some studies have indicated that the boundaries between work and family are becoming blurred, and the gender distribution of responsibilities within the family is becoming clearer. 9,10 Besides, some recent studies point out that gender inequality worsened during the quarantine period. 15,16 Adisa et al. suggest that if state governments do not undertake proactive interventions to reduce these consequences, the COVID-19 crisis and its aftermath will have many negative consequences for women and families for years to come. 17 Likewise, Zhou indicates that many families need to raise and educate their children without the support of educational institutions, which puts more pressure on mothers than on fathers inside the house. 18 However, if fathers do not increase their household contributions, the epidemic may exacerbate gender gaps in childcare and the burden of domestic work at the expense of women's work obligations. Furthermore, Thébaud et al. imply that women and men in some countries may agree that domestic tasks should be shared equally, but men are likely to ignore these responsibilities, leaving them to their wives. 19 From this perspective, greater clarity in the distribution of childcare and domestic work responsibilities may not motivate men to fulfill their share of housework. Instead, the loss of childcare support through educational institutions may increase women's unpaid domestic work on top of their paid work, causing further disruption to their jobs and working lives. 20 This study assumes that the measures resulting from the COVID-19 epidemic increased couples' time at home with family and children, while reducing time in paid work for many people.
However, the main question is whether moving to work at home, home education, and self-isolation hurt women more than men. For instance, Jessen and Waights report that working mothers combined their paid work with the care and education of their children during the COVID-19 crisis by working long hours in the evening. 21 In the same context, Andersen et al. report that the spread of COVID-19 led women to devote more time to caring for and educating their children, while men remained relatively less affected. 22 In addition, Collins et al. point out that, when examining a sample of couples from February to April 2020 in the United States of America, mothers with young children reduced their working hours four to five times more than fathers. As a result, the gender gap in working hours widened by 20-50 percent, with a negative impact on women. 20 Likewise, while women were already doing most of the world's unpaid care work before the emergence of the COVID-19 epidemic, emerging research suggests that the crisis and the post-closure response have significantly increased the burden on women. 8 In particular, women suffered a greater reduction in well-being than men during the crisis. In line with other results, Andrew et al. found that women bore the majority of the overtime (childcare and domestic work) in Italy and the United Kingdom. 26 As well, Adams-Prassl et al. state that women were more likely than men to lose their jobs. 27 The authors argue that the epidemic had a clear impact on parents' work and that women's careers were more affected than men's during the COVID-19 crisis. Therefore, we join this growing body of research in trying to illustrate gender differences in employment during the COVID-19 crisis. As far as we know, there are no published studies showing gender differences in job performance during the COVID-19 epidemic in Saudi Arabia. Thereby, this study aims to measure the degree of gender differences in the level of job performance during the COVID-19 epidemic in Saudi Arabia. Accordingly, the problem of the study can be formulated in the following main question: Are there differences between males and females in the level of job performance during the COVID-19 pandemic in Saudi Arabia? Study Design and Sample This study uses data from a cross-sectional survey conducted in Saudi Arabia following the end of the curfew and closure. More specifically, the authors used an online questionnaire distributed through social media apps, and participants were encouraged to pass the questionnaire on. Participants received the survey request through WhatsApp groups of colleagues, family, friends, faculty, and teachers in Riyadh and Najran, Saudi Arabia. Informed consent was obtained online before the questions were presented. The consent form offered two options: "yes" for those who volunteered to participate in the study, and "no" for those who did not want to participate. Only those who chose the positive answer were taken to the questionnaire page to complete it. Respondents were clearly informed of the purpose and objectives of the study, were free to withdraw at any time without giving reasons, and were assured that all information and opinions provided would remain anonymous and confidential. The study protocol was approved by the institutional review board of Princess Nourah bint Abdulrahman University in Riyadh. Surveys were completed by 380 responding parents.
A total of 10 cases were excluded because the responses conflicted with the instructions attached to the questionnaire. Of the remaining 370 respondents, 244 (66%) were women and 126 (34%) were men, with an average age of 38.5±9.6 years. As well, 67% were faculty at Princess Nourah and Najran Universities and 32% were teachers in the Riyadh and Najran education departments; 86% had children, and 89% worked in the government sector. Questionnaire A questionnaire was built by the researchers to collect data after reviewing the relevant literature. 7,8,17,22,25,26 The questionnaire consists of two main sections. Section I collected information on the socio-demographic characteristics of respondents, including age, gender, marital status, level of education, and employment status. Section II collected information on significant changes in domestic work and working conditions after the closure, and consists of three dimensions. The first dimension measures the impact of working from home on performance during the COVID-19 crisis and has 7 items (for example, "I have to complete my job work at night when the boys go to sleep"). The second dimension measures the usual role in doing domestic work and caring for children and contains 5 items (for example, "The COVID-19 crisis has greatly affected my habits in caring for my children"). The third dimension measures the contribution to domestic work and childcare after the COVID-19 crisis and contains 6 items (for example, "My contribution to domestic work takes more time after the COVID-19 crisis"). A 3-point Likert scale was used (agree - neutral - disagree), scored from 3 ("agree") to 1 ("disagree"). The questionnaire's face, content, and construct validity were assessed by an arbitration panel of 3 specialists in sociology and psychology. Instrument reliability was assessed using Cronbach's alpha, indicating high reliability for the three dimensions (0.88, 0.92, 0.89, 0.91) and for the total questionnaire (r = 0.90). Data analysis We applied descriptive and inferential statistics to analyze the data. The descriptive statistics included frequency, percentage, mean, and standard deviation; these were computed using SPSS 21 (IBM, 2012). To address the research question, we conducted a univariate analysis to compare differences between participants' characteristics based on the impact of the COVID-19 pandemic on their professional work.
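The reliability figures above come from Cronbach's alpha. As a quick illustration, and not the authors' code, the coefficient can be computed from a respondents-by-items score matrix as follows; the demo matrix uses made-up answers on the study's 3-point scale.

import numpy as np


def cronbach_alpha(scores: np.ndarray) -> float:
    """Cronbach's alpha for a (respondents x items) score matrix.

    alpha = k / (k - 1) * (1 - sum(item variances) / variance(total score))
    """
    k = scores.shape[1]
    item_vars = scores.var(axis=0, ddof=1)
    total_var = scores.sum(axis=1).var(ddof=1)
    return k / (k - 1) * (1 - item_vars.sum() / total_var)


# Made-up answers from 6 respondents to a 5-item dimension
# (1 = disagree, 2 = neutral, 3 = agree), just to exercise the function.
demo = np.array([
    [3, 3, 2, 3, 3],
    [2, 2, 2, 1, 2],
    [3, 2, 3, 3, 3],
    [1, 1, 2, 1, 1],
    [2, 3, 2, 2, 3],
    [3, 3, 3, 2, 3],
])
print(f"alpha = {cronbach_alpha(demo):.2f}")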
Table 1 shows the descriptive statistics of the main variables. A total of 370 parents from Riyadh and Najran participated in this study. There were fewer men (126, or 33.06%) than women (244, or 66.94%). All of the sample members were employed in the field of education: 250 were faculty members at universities, and the rest were teachers in general education schools. The ages of the sample fell into four levels, with the largest group aged 40-49, comprising 34.5% of the total sample. As well, parents with children outnumbered parents without children, accounting for 86.4% of the total sample. Table 2 shows how the closure period affected the work performance of faculty staff and teachers differently for men and women, as demonstrated by the responses to the three questionnaire dimensions: the first, how performance was affected by working from home; the second, the usual routine in doing domestic work and caring for children; and the third, the contribution to domestic work. The gender variable was statistically significant in all three dimensions of the questionnaire, indicating that the COVID-19 crisis disproportionately affected the working conditions of female faculty members and teachers compared with their male counterparts. In addition, faculty staff and teachers with children reported being significantly more affected during the closure period than peers without children. However, there were no differences between the faculty staff and the teachers on the dimensions of the questionnaire, and no differences by age. DISCUSSION The COVID-19 crisis caused radical changes in the working life of most parents, within and outside the family. Many measures were therefore taken, and their impact on Saudi society has been significant: closure, social distancing, and self-isolation. According to the evidence, the impact of the epidemic on families with children in education was more severe, especially when educational institutions and childcare facilities were closed. The impact on parents working as teachers or faculty members may reasonably be expected to be uneven. Based on a survey of faculty staff and teachers in schools in Riyadh and Najran, Saudi Arabia, the gender gap in the impact of the COVID-19 crisis on the working conditions of academics was notable and statistically significant. As well, the gap between teachers and faculty members with children and those without children was worrying. More specifically, the daily routine of female teachers and faculty with children was disproportionately affected by the closure associated with the epidemic, as the burden on women increased. Hence, these results largely correspond to the results of several studies indicating that mothers with young children reduced their working hours four to five times more than fathers. As a result, the gender gap in working hours widened by 20-50%. 8,10,21,26 These findings point to another negative effect of the COVID-19 epidemic, highlighting the challenges it poses to women's working hours and employment. 20 Furthermore, these results are consistent with the results of other studies indicating that work in universities, where career advancement depends on the number and quality of a person's scientific publications, is not inherently compatible with childcare. 10 In the same vein, Lutter and Schröder indicate that having children leads to a decrease in women's academic productivity compared with men's. 27 In this case, closing schools and caring for children means that children are at home and need care for at least six more hours a day. Mothers do two hours less paid work per day than fathers, but two hours more each of childcare and domestic work. Accordingly, mothers combine paid work with other activities (mostly childcare) in 47% of their working hours, compared with 30% for fathers. 22 Likewise, women reduced their working time significantly more than fathers, especially those with primary-school-age or younger children at home, whose care and home education requirements are demanding. 20 Our results provide strong support for recent research that has found similar gender gaps.
20 Our findings indicate that the traditional gender distribution of work within the family disproportionately affects men and women working as teachers and faculty members. Although the results of our study may not allow us to explain the causal mechanism at work, one of the most plausible possibilities, consistent with the culture of our Arab societies, is that the closure may have forced women teachers and faculty members to give priority to home care and childcare responsibilities, reinforcing traditional gender roles in the home. 20 We believe that, under the current circumstances, with no specific date for the normalization of life through the normal return of students to their schools and universities, the gender gap in the perceived challenges of family care and work requirements is unlikely to fade soon, particularly if educational institutions are disrupted again by a worsening of the epidemic in the coming months. Moreover, there were no differences between the faculty staff and the teachers, owing to their similar working conditions: both teach students online, each with a schedule and required teaching hours, and they have the same household tasks and responsibilities. Certainly, the increasing emphasis on distance learning by the Saudi Ministry of Education will require many faculty staff and teachers in educational institutions to reorganize their teaching strategies around Internet connectivity, which may come at the expense of faculty research activities or teachers' promotion requirements. Thus, while it is too early to know the long-term consequences of this trend for faculty research activities or promotion requirements for teachers, the gender gap in perceived disruptions to daily routines may translate into gender disparities in fully meeting occupational requirements. Future research that goes deeper into these possibilities may help us better understand how the COVID-19 epidemic affects families around the world. CONCLUSION Our paper adds to the previous literature on gender equality, an important topic in the social sciences, and the COVID-19 crisis has highlighted a long-standing problem. More specifically, in our Arab societies, as a result of a patriarchal culture, women face inequality and often do most of the childcare and domestic work. We contribute to the literature by providing new evidence of the impact of the epidemic crisis on gender inequality in academia and education. According to the study results, female faculty members and teachers are at a disadvantage in preparing promotion research or advancing to higher positions compared with their male peers, a justice issue that may expose women to higher unemployment or occupational risk in the future. We hope that our findings will increase awareness of this problem. Some measures can be taken to ensure that domestic responsibilities are balanced between spouses. Accordingly, universities and education departments could provide additional support, such as childcare support, to female faculty members and teachers whose research productivity or promotion prospects may be disproportionately affected. Universities and education departments should also take this disparity into account when evaluating candidates for scientific promotion or managerial positions.
Despite the opportunity remote work offers parents to take care of their children while completing their professional tasks, it may also have unintended consequences for gender inequality. Thus, educational institutions should take gender equality into account when designing and implementing telecommuting policies. We hope that the results of this research will encourage officials to examine this vital issue in greater depth and to provide full support to female faculty and teachers through more flexible working hours after the epidemic ends, part-time work arrangements, telecommuting, and support during pregnancy and parenting. Supporting work-life balance, and the quality of its practices, is thereby a crucial factor in facilitating women's quality work. However, the study has a few limitations. Firstly, since the study is cross-sectional, the results may not be generalizable to other professions. Secondly, the small sample size means that the results cannot be generalized to all female faculty and teachers in Saudi Arabia and Arab communities; any such generalization within Saudi Arabia and other Arab communities should be made cautiously. Funding: We are thankful for funding from the Center for Promising Research in Social Research and Women's Studies at Princess Nourah bint Abdulrahman University in the Kingdom of Saudi Arabia in 2020.
/*
    <NAME> / @greenindia
    https://www.janasabuj.github.io
*/
#include <bits/stdc++.h>
using namespace std;
#define crap ios::sync_with_stdio(false);cin.tie(NULL);cout.tie(NULL)
#define int long long int
#define double long double
typedef vector<int> vi;
typedef vector<vector<int>> vvi;
#define endl "\n"

void print1d(const vector<int>& vec) {for (auto val : vec) {cout << val << " ";} cout << endl;}
void print2d(const vector<vector<int>>& vec) {for (auto row : vec) {for (auto val : row) {cout << val << " ";} cout << endl;}}

const int N = 105;
int n, x;
vi arr(N);
int t[1000005];

// Minimum number of coins summing to x (classic coin-change DP, memoized).
int dp(int x) {
    if (x < 0) return INT_MAX;   // unreachable amount; also guards t[] indexing
    if (x == 0) return 0;
    if (t[x] != -1) return t[x];
    int coins = INT_MAX;
    for (int i = 0; i < n; ++i) {
        int sub = dp(x - arr[i]);
        if (sub != INT_MAX) coins = min(coins, 1 + sub);  // skip unreachable subproblems
    }
    return t[x] = coins;
}

signed main() {
    crap;
    cin >> n >> x;
    for (int i = 0; i < n; ++i) {
        cin >> arr[i];
    }
    memset(t, -1, sizeof(t));
    int val = dp(x);
    cout << (val == INT_MAX ? -1 : val) << endl;
    return 0;
}
Analysis of Trend of Studies on Microneedle Treatment System (MTS) Objectives The purpose of this study was to analyze the microneedle therapy system (MTS) and its research methods over the past 10 years in Korea. Methods Data on the microneedle therapy system were collected using the NDSL, KISS, RISS, and OASIS electronic databases from January 2010 to August 2021. "Microneedle," "derma stamp," and "microneedle therapy system" were used as the keywords. The present study, however, excluded data that were 1) unrelated to the microneedle therapy system, 2) from review/meta/protocol studies, and 3) from overseas studies. Animal studies, case reports, and clinical data selected through the primary screening process were included in the analysis, while informational data not related to the microneedle therapy system were excluded from the study. Results Among the MTS-related papers published from January 2010 to August 2021, 7 animal studies, 2 clinical trials, and 10 case studies were published. Based on the research topics, there were 8 papers on skin improvement and skin diseases, 7 papers on hair growth and hair loss, 3 papers on stability, and 1 paper on peripheral facial paralysis. Conclusion Most of the studies related to MTS focused on skin, hair, and stability. The effect of MTS on hair growth and skin improvement has been confirmed, and it has been proven to have significant effects on the treatment of acne, acne scars, and hair loss in clinical practice. No serious side effects were observed during MTS treatment, and the safety assessment confirmed that it was safe for use. INTRODUCTION MTS is a device designed to create multiple microscopic channels in the skin through mechanical holes, wherein a 0.25-2.0 mm roller equipped with about 192-200 needles is rolled over the skin surface. It is a treatment method that uses the skin's own regenerative ability to form new collagen. Initially, MTS was used as a method to increase drug delivery. However, studies have reported that it is effective in improving various scars and wrinkles simply by mechanically making holes in the skin, without the application of active drug ingredients. MTS is a treatment that accelerates the cell replacement cycle and induces collagen production through skin stimulation. It can be said to originate from the method called skin acupuncture therapy (皮膚鍼療法, PIBUCHIMYOBEOB) in Oriental Medicine. As the average life expectancy increases, there is a growing desire to maintain a young and healthy life rather than simply extending the lifespan. Therefore, active research is also being conducted using MTS for the treatment of skin aging and hair loss. We conducted this review to provide a basis for future MTS treatment and research by examining research trends using MTS published in Korea from January 2010 to August 2021. Data sources and searches Based on the electronic databases, for documents published from January 2010 to August 2021, this study was conducted using the search terms "microneedle," "derma stamp," and "microneedle therapy system" in NDSL, KISS, RISS, and OASIS. 2. Data selection (Fig. 1) By retrieving the data titles in the databases from January 2010 to August 2021, the study was conducted on original texts with topics containing 1) microneedle, 2) microneedle therapy system, and 3) literature in domestic academic journals and domestic dissertations.
Once an original text was selected, 1) original texts unrelated to the microneedle therapy system, 2) original texts related to clinical review/meta/protocol studies, 3) overseas studies, and 4) original texts not published in 2010-2021 were excluded from the study. The following data were included through the primary screening process: 1) animal studies, 2) case reports, and 3) clinical data. From these, 1) non-informational data and 2) data unrelated to the microneedle therapy system were excluded from the study. Data analysis The frequency and distribution of each item are summarized in a table. Where necessary, the overall distribution was plotted in a graph, and tables and graphs were prepared using Excel 2019 (Microsoft Corp., Redmond, WA, USA). RESULTS The number of papers analyzed in this study is as follows (Fig. 2). By type of paper, there were 7 animal studies, 2 clinical trials, and 10 case studies (Fig. 3). Animal research The list of animal studies on MTS is as follows (Table 1). 1) Research animals Three studies used hairless mice, two used C57BL/6 mice, and two used C57BL/6N mice. 2) Research purpose Of the seven papers, one researched the dermal proliferative effect and stability of MTS, two researched the stability of MTS, three researched the hair growth effect of MTS, and one researched the optimal length and cycle of MTS for hair growth. 3) Research method Kim et al. evaluated the dermal proliferative effect and stability of AMTS and AMTS-H by comparing MTS, AMTS, and AMTS-H. Park et al. evaluated the stability of the Disk type-Microneedle Therapy System (DTS) by comparing the conventional MTS with DTS. Park et al. evaluated the stability of Digital Hand® and Digital Pro® by comparing DTS with the automatized stamp-type microneedles Digital Hand® and Digital Pro®. Lee et al. studied the hair growth effect by comparing the control group, the Minoxidil group, and the MTS group. Ju et al. studied the hair growth effect by comparing the control group, the Minoxidil group, the MTS group, and the MTS + Ganoderma lucidum extract (GLE) group. Kang et al. studied the hair growth effect in four groups as follows: the control group, the Minoxidil group, the MTS group, and the MTS + Hwangryeonhaedoktang Pharmacopuncture Solution (HRHDT) group. Kim et al. studied the hair growth effect according to the length and cycle of DTS. 4) Assessment methods Kim et al. evaluated efficacy after biopsy using H&E staining, Masson's trichrome staining, and Western blot analysis. Herein, erythema was observed by visual inspection, and transepidermal water loss (TEWL) and stratum corneum (SC) hydration were measured using a Tewameter, whereas stability was evaluated by observing epidermal micropore recovery and checking the number and type of inflammatory cells by biopsy. Park et al. evaluated stability by observing neutrophils, lymphocytes, and basophils using hematoxylin and eosin staining after biopsy and by observing erythema by visual inspection. Park et al. evaluated stability by visually observing erythema and needle marks; by measuring TEWL using a Tewameter; and by checking for inflammatory cell infiltration, desquamation of the stratum corneum, or disruption of the basal layer with biopsy. Lee et al.
analyzed the hair growth effect by visual observation using dermoscopy; by observing hair follicles with hematoxylin and eosin staining after biopsy; and by testing antibodies against bromodeoxyuridine (BrdU), fibroblast growth factor (FGF), and vascular endothelial growth factor (VEGF) using immunohistochemistry and RT-PCR. Ju et al. and Kang et al. evaluated the hair growth effect by visual observation, pulling out hairs to measure their length. The density and diameter of the hair were observed using a Folliscope. After biopsy, Western blot analysis was performed to confirm IGF-1 and VEGF, hematoxylin and eosin staining was used to observe the size and number of hair follicles and the length of the hair root, and RT-PCR was also performed. In addition, the body weight and organ weight of the mice were measured to check for side effects and stress. Kim et al. observed hair growth and hair density using photographs and a Folliscope and evaluated the hair growth effect by performing RT-PCR and immunohistochemistry on biopsy samples. Clinical trials The list of clinical trials for MTS is as follows (Table 2). 1) Research subjects Lee et al. studied 25 women, whereas Koo studied 50 ordinary people, of whom only 38 were analyzed for various reasons, such as dropping out of the study. 2) Research and evaluation methods Lee et al. conducted a randomized, controlled, blinded split-face study. DTS treatment and DTS + Human Embryonic Stem Cell-derived Endothelial Precursor Cell Conditioned Medium (hESC-EPC CM) 0.5 mL treatment were performed on the left and right sides of the face, respectively. After five treatment sessions repeated at 2-week intervals, photographs were taken and self-assessment questionnaires were administered. In addition, color changes were evaluated using a Mexameter, and collagen regeneration was evaluated using a Visiometer SV600. Koo divided the study into two groups: the skin toner + MTS group and the ginseng-extract-dissolved toner + MTS group. After performing MTS treatment once a week for a total of three sessions, changes in T-zone oil, pores, skin tone, and pigmentation were evaluated using an A-ONE Smart One-click automatic facial diagnostic device. Case studies The list of case studies for MTS is shown in Table 3. 1) Research diseases and patients Out of a total of 10 case studies, one was for patients with acne, one for patients with aging skin, four for patients with acne scars, one for patients with peripheral facial paralysis, one for a pediatric patient with different types of alopecia, and two for patients with androgenetic alopecia (AGA). 2) Treatment and evaluation methods Seong et al. studied a total of 7 cases wherein they treated acne only with the Auto-Microneedle Therapy System (AMTS), without using any herbal medicine. A total of 7 treatments were performed, once a week for 7 weeks. Changes in pores, wrinkles, pigmentation, sebum, porphyrin, and skin tone were evaluated with Janus facial diagnostics. The Korean Acne Grading System (KAGS) was used for classification and evaluation, and digital infrared thermographic imaging (DITI) was conducted. In addition, quality of life across various skin conditions was evaluated with Skindex-29, and satisfaction was evaluated using a questionnaire. Seong et al. conducted a retrospective study analyzing the charts of patients treated from July 2009 to March 2010. Derma Stamp treatment was performed once a week for a total of five sessions. A total of 10 patients were analyzed.
Among them, 2 cases received herbal medicines for symptoms such as general pain, coldness, and obesity, whereas the remaining 8 cases did not. For evaluation, a Janus facial tester was used to assess changes in pores, wrinkles, pigmentation, sebum, porphyrin, and skin tone. In addition, satisfaction was evaluated using a questionnaire. Lee et al. treated acne scars with a Derma Microneedle Roller, a type of MTS, and the scar regeneration acupuncture method. The evaluation was conducted using photographs, KAGS, the Qualitative Global Acne Scarring Grading system (QGASC), and the Cardiff Acne Disability Index in Korean (KCADI), and satisfaction was evaluated using a questionnaire. Lee et al. studied patients with peripheral facial paralysis who were treated with AMTS two or more times on the affected side of the face. In addition, acupuncture, pharmacopuncture, herbal medicine, electroacupuncture, intradermal acupuncture, and taping therapy were performed. Evaluation was conducted using the Yanagihara unweighted grading system, the Sunnybrook Facial Grading System (SBGS), and the Facial Disability Index (FDI). After treatment, stability evaluations covering needle pain and fatigue were also performed. Ju et al. performed MTS treatment on pediatric patients with complex pediatric alopecia in the frontal and temporal regions. In addition to MTS treatment, herbal medicine, acupuncture at the lesion site, pharmacopuncture, scalp care, high-frequency treatment, low-frequency treatment, and cupping were performed. The treatment effect was observed using photographs. Heo et al. treated six patients with acne scars using AMTS and scar reconstruction therapy, without other drugs or treatments alongside. Efficacy was evaluated based on photographs and QGASC, and a patient satisfaction survey was also conducted. Kim et al. studied 27 patients with acne scars who had previously visited the hospital and completed 12 procedures for the treatment of acne scars. Treatment with AMTS, Subcision + Beevenom Pharmacopuncture, AMTS, and Beevenom Pharmacopuncture was followed in that order as one set, and a total of three sets were repeated over 12 weeks. No other medications or treatments that would affect acne were administered during the treatment period. Evaluation was conducted using KAGS, the Échelle d'Évaluation Clinique des Cicatrices d'Acné (ECCA), and a patient satisfaction survey, and side effects were monitored. Lee et al. studied patients with acne scars from March 2015 to September 2015 and treated them for a period of 5-10 months using AMTS, Subcision, and Hwangryeonhaedoktang Pharmacopuncture. The total number of treatments ranged from fewer than 5 to more than 10. According to the treatment method, 25 people were assigned to the MTS group, 27 to the Subcision group, 20 to the MTS + Subcision group, and 23 to the Subcision + pharmacopuncture group. The evaluation was conducted using KAGS and QGASC, and side effects and adverse reactions were monitored. Ro et al. analyzed the therapeutic effect based on the depth of the microneedle in patients with AGA. The MTS depth was 0.5 mm on the right side and 0.3 mm on the left side of the scalp. Each patient received six treatments at 2-week intervals over a period of 3 months. For the evaluation, hair density and hair diameter were observed with a phototrichogram, and photographs were used for the analysis. Ro et al.
studied the effects of GFC, including fibroblast growth factor 9 (FGF9), on patients with AGA. GFC with FGF9 + MTS treatment was performed on the right side of the scalp, whereas MTS + normal saline treatment was performed on the left side. Each patient received six treatments at 2-week intervals over a period of 3 months. For the evaluation, hair density and hair diameter were observed with a phototrichogram, and photographs were used for the analysis. MTS is a treatment method that regenerates the skin by making numerous microholes in it, using the natural wound-healing mechanism together with the medicinal effects of the transdermal drug delivery system (TDDS). Studies have reported that MTS is effective in improving various scars and wrinkles just by making microholes mechanically, and that it helps drugs penetrate the skin more effectively by creating an effective TDDS. Among the various drug delivery systems (DDS), the main advantages of TDDS through the skin are that it has less toxicity to the human body, does not cause gastrointestinal disturbances, and involves no primary metabolic process in the liver (first-pass effect), compared with oral or vascular DDS. However, despite these advantages, TDDS has several limitations to overcome. First, there is a limit to the concentration of drugs that can be delivered through the skin: most of the drugs used in TDDS should be fat-soluble and of low molecular weight. Recently, various methods have been researched to overcome the limitations of TDDS. Among them, MTS is used in TDDS for the treatment of hair loss, hyperhidrosis (using botulinum toxin), and melasma (using vitamins). Accordingly, we conducted the present review to inform future treatment and research using MTS by examining research trends involving MTS published in Korea from January 2010 to August 2021. By type of study, we analyzed 7 animal studies, 2 clinical trials, and 10 case studies. By research topic, there were 8 papers on skin improvement and skin diseases, 7 papers on hair growth and hair loss, 3 papers on stability, and 1 paper on peripheral facial paralysis. In the animal studies, hairless mice were used in experiments determining the dermal proliferative effect or stability, and C57BL/6 and C57BL/6N mice were used to determine the hair growth effect. Kim et al. carried out the study in four groups, the control group, the MTS group, the AMTS group, and the AMTS-H group, and evaluated the dermal proliferative effect and stability of AMTS and AMTS-H with biopsy, visual inspection, and a Tewameter. In the AMTS and AMTS-H groups, a significant increase in dermal thickness and dermal density and an increase in procollagen-1 were confirmed. In addition, it was confirmed that all micropores of the epidermis had closed 1 h after treatment and that no treatment-related stability problems, such as erythema, were found. Park et al. evaluated the stability of DTS by carrying out a biopsy and visual inspection in three groups: the control group, the MTS group, and the DTS group. In all groups, no significant skin inflammatory reactions or other abnormal signs other than mild erythema were observed; no significant infiltration of neutrophils, lymphocytes, or basophils was observed; and no exfoliation of the stratum corneum or destruction of the epidermis was observed. Park et al.
performed the study in four groups as follows: the control group, the Digital Hand® group, the Digital Pro® group, and the DTS group, observing stability by visual inspection, Tewameter, and biopsy. Stability was confirmed by the absence of inflammatory cell infiltration, desquamation of the stratum corneum, or disruption of the basal layer, and by the absence of erythema and needle marks on the face on the day after the procedure. Because Digital Hand® and Digital Pro® are stamp-type devices, they have the advantages of applying accurate force and being more comfortable owing to their ease of insertion into the skin, and they are judged to avoid the minor abrasions often caused by microneedle rollers. Lee et al. divided the study into three groups: the control group, the Minoxidil group, and the MTS group. They used dermoscopy, biopsy, immunohistochemistry, and RT-PCR to examine the hair growth effect, confirming an increase in hair density, hair diameter, and hair growth. Ju et al. performed the study in four groups, the control group, the Minoxidil group, the MTS group, and the MTS + GLE group, and analyzed the hair growth effect by visual inspection, Folliscope, biopsy, and RT-PCR. Stress and side effects were also monitored by measuring the body weight and organ weight of the experimental mice. Promotion of hair growth, such as improvement in hair density, thickness, and length, was observed in the Minoxidil group, the MTS group, and the GLE + MTS group, with no stress or side effects; the GLE + MTS group was found to be more effective for hair growth than the MTS group. Kang et al. divided the study into four groups: the control group, the Minoxidil group, the MTS group, and the MTS + HRHDT group. They analyzed the hair growth effect using visual inspection, Folliscope, biopsy, and RT-PCR and measured the weight and organ weight of the mice to determine stress and side effects. The hair growth effect was significantly increased in the Minoxidil group, the MTS group, and the MTS + HRHDT group, with no stress or side effects observed; the MTS + HRHDT group was found to be more effective for hair growth than the MTS group. Kim et al. determined the optimal length and cycle of DTS for hair growth by varying the length and cycle of DTS and observing the hair growth effect with photographs, a Folliscope, RT-PCR, and immunohistochemistry. It was confirmed that the optimal length and cycle of DTS for hair growth were 0.25 mm/10 cycles and 0.5 mm/10 cycles. The two clinical trials, by Lee et al. (a randomized, controlled, blinded split-face study) and Koo, mainly studied skin improvement. Lee et al. performed DTS treatment and DTS + hESC-EPC CM 0.5 mL treatment on the left and right sides of the face, respectively. Using photographs, a self-assessment questionnaire, a Mexameter, and a Visiometer, significantly greater improvement in pigmentation and wrinkles was confirmed in the DTS + hESC-EPC CM group than in the DTS group. Koo carried out the study in the skin toner + MTS group and the ginseng-extract-dissolved toner + MTS group and, using the A-ONE Smart One-click automatic facial diagnostic device, confirmed a significant improvement in T-zone oil reduction, pore reduction, and pigmentation in the ginseng-extract group. There were a total of 10 case studies, of which 4, the largest number, concerned patients with acne scars, followed by 2 for patients with AGA.
There was one case report each for patients with acne, aging skin, peripheral facial paralysis, and pediatric complex-type hair loss. Seong et al. treated acne using AMTS in 7 patients who visited the hospital for acne and confirmed improvement in pore size, wrinkles, spots, sebum, porphyrin, and skin tone using the Janus facial diagnostic machine. The improvement of acne was confirmed based on KAGS and Skindex-29 outcomes. Seong et al. conducted a retrospective chart study of 10 patients who visited the hospital for aging skin. After treatment for aging skin using the Derma Stamp, improvement in pore size, wrinkles, spots, sebum, porphyrin, and skin tone was confirmed using Janus facial diagnostics. Although the treatment effect on oily skin was superior to that on dry skin in all aspects, treatment satisfaction was higher among patients with dry skin. However, in this study, the average re-evaluation period was 129.8 days, and the interval between the last treatment date and the last facial examination ranged irregularly from 7 to 362 days. Lee et al. treated five patients with acne scars using the Derma Microneedle Roller and the scar regeneration acupuncture method and confirmed improvement in acne scars using photographs, KAGS, QGASC, and KCADI. Lee et al. studied 27 patients with peripheral facial paralysis and treated the affected side of the face using MTS. In addition, acupuncture, pharmacopuncture, herbal medicine, electroacupuncture, intradermal acupuncture, and taping therapy related to peripheral facial paralysis were performed. Stability and improvement were confirmed using the Yanagihara unweighted grading system, SBGS, and FDI. It was noted that AMTS combination treatment for peripheral facial paralysis could lead to clinical improvement, with the absence of a control group considered a limitation of the study. Ju et al. performed MTS treatment along with herbal medicine, acupuncture at the lesion site, herbal acupuncture, scalp care, etc. for pediatric patients with complex pediatric hair loss in the frontal and temporal regions. The improvement of the lesion site was confirmed by photographs. Heo et al. confirmed improvement in acne scars based on photographs and QGASC after treating six patients with acne scars using AMTS and scar reconstruction. Kim et al. targeted 27 patients who visited the hospital with acne scars and had completed 12 treatments. After 12 weeks of treatment, repeating a total of 3 sets in order, significant improvement in acne scars was confirmed based on KAGS and ECCA. Lee et al. divided patients with acne scars into the MTS group (25 people), the Subcision group (27 people), the MTS + Subcision group (20 people), and the Subcision + pharmacopuncture group (23 people) and evaluated the treatment effect by treatment method using KAGS and QGASC. Improvement in acne scars was confirmed in all groups, with a significantly shorter treatment period in the MTS + Subcision group. Ro et al. divided 11 patients with AGA into sides treated with MTS to a depth of 0.5 mm on the right and 0.3 mm on the left of the scalp and checked hair density and hair diameter using a phototrichogram. Significant improvement in hair density and hair diameter was confirmed in both groups.
It was also confirmed that the side treated with MTS to a depth of 0.5 mm showed a more significant effect on hair density than the side treated to a depth of 0.3 mm. Ro et al. performed MTS + GFC with FGF9 treatment on the right side and MTS + normal saline treatment on the left side of the scalp of 22 patients with AGA and checked hair density and hair diameter with a phototrichogram. Significant improvement in hair density and hair diameter was confirmed in both the MTS + GFC with FGF9 group and the MTS + normal saline group. As such, most of the studies using MTS, including the safety evaluations, focused on skin diseases such as acne scars, acne, and aging skin, and on hair loss, so the scope of studies using MTS has been limited. In addition, in most case studies, MTS was combined with other treatments, and a further limitation was the small number of clinical trials. Therefore, we think that various types of research are needed, along with active use of MTS in various fields such as skin diseases and hair loss. CONCLUSION 1. Analyzing the studies using MTS by study type, there were 7 animal studies, 2 clinical trials, and 10 case studies. 2. Analyzing the studies using MTS by study topic, there were 8 papers related to skin improvement and skin diseases, 7 papers related to hair growth and hair diseases, 3 papers about stability, and 1 paper about peripheral facial paralysis. 3. Among the animal studies, 1 studied the dermal proliferative effect and stability of MTS, 2 studied the stability of MTS, 3 studied the hair growth effect of MTS, and 1 studied the optimal length and cycle of MTS for the hair growth effect. 4. The clinical trials mainly studied the skin improvement effect. 5. Of the case reports, there were 4 for patients with acne scars; 2 for patients with AGA; and 1 each for patients with acne, aging skin, peripheral facial paralysis, and pediatric complex-type hair loss. In the case studies, various treatments such as subcision, herbal medicine, acupuncture, pharmacopuncture, etc. were used alongside MTS. 6. MTS treatment showed hair growth and skin improvement effects in most studies. Moreover, significant effects were confirmed in the treatment of acne, acne scars, and hair loss. No serious side effects were observed, and most erythema and needle marks disappeared by the day after treatment. The safety assessment confirmed that MTS was safe for use.
Roughly three and a half hours into the hearing, after Gravert had completed her testimony, Assemblywoman Eloise Gomez Reyes, D-San Bernardino, noted that the Assembly probably hadn't entered into any nondisclosure agreements.

If it wasn't clear before, it suddenly became obvious how the Legislature, ever so progressive and always ready to tell the rest of us how to live, has managed to tap dance around policing itself against the most basic of workplace hazards: creeps who would lord positions of power over underlings in service of their own needs. Leaders don't ask, and they make it hard for women to tell.

Assembly Speaker Anthony Rendon deserves acknowledgment for allowing the hearing to take place, albeit six weeks after 140-plus women signed a #MeToo letter detailing indignities they have endured as they worked as staffers, lobbyists and consultants. Assemblywoman Laura Friedman, D-Glendale, did an admirable job of chairing the hearing.

But whatever credit may be due California's majority party for starting to come clean, ever so gingerly, on Tuesday about this ongoing and expensive problem, it pales next to the contribution of the women who have been leading the #WeSaidEnough campaign in California's Capitol and who testified about the harassment they and others have endured.

They're the young women who work in the Capitol one day and then don't. Men who harass them understand that the best way to cover their tracks is "to get rid of the evidence," the evidence being the victim of unwanted advances and worse. Women quit because they become disgusted and fearful, or because their bosses find problems with their work and force them out.

"We are so aware of how powerless we are," Lopez said.

Though it was not part of her public testimony, Lopez told The New York Times that a legislator in 2016 masturbated in front of her in a restroom of a Capitol-area bar. She hasn't named the legislator, and no legislator at the hearing questioned her about it. They didn't want details to be publicly disclosed, evidently.

Nor did any legislator question Christine Pelosi, chair of the Democratic Party's women's caucus, when she testified that there are "rapists" and "molesters" who work in the Capitol. If there are, police ought to be informed. Public safety is at stake.

Alicia Lewis, a former Senate staffer and chief of staff to an Assembly member, testified that the system "shields the perpetrator first." Staffers, she said, "don't believe" the process works. For good reason.

Gravert told the committee that there has been no investigation of an Assembly member for harassment during her three years on the job. There was no word on whether the Assembly is investigating Lopez's accusation about the masturbating legislator. The Assembly, she said, has records of eight investigations that occurred before she arrived. But the Assembly's policy, astonishingly, is to purge records after six years, even though members can serve a dozen years and generally run for other offices.

Assemblyman Vince Fong, R-Bakersfield, asked a basic question: "How many complaints in totality" have there been against members of the Assembly or senior staffers?

"We don't track complaints. We only track investigations," Gravert answered.

"Isn't that problematic?" Fong asked.

Yes, it is. But the reason it has remained the policy is obvious: The Assembly leadership doesn't count complaints because it doesn't want to know. It doesn't provide the public access to details of investigations because it doesn't want voters to know. And it's time for that way of doing business to stop.
package org.swrlapi.drools.owl.classes;

import org.checkerframework.checker.nullness.qual.NonNull;
import org.checkerframework.dataflow.qual.SideEffectFree;
import org.swrlapi.builtins.arguments.SWRLClassExpressionBuiltInArgument;
import org.swrlapi.drools.extractors.DroolsSWRLBuiltInArgumentExtractor;
import org.swrlapi.drools.owl.core.DroolsTernaryObject;
import org.swrlapi.drools.owl.properties.OP;
import org.swrlapi.exceptions.TargetSWRLRuleEngineException;

/**
 * This class represents an OWL object minimum cardinality class expression in Drools.
 * It is a ternary object holding the class expression ID, the object property, and the cardinality.
 *
 * @see org.semanticweb.owlapi.model.OWLObjectMinCardinality
 */
public class OMinCCE extends DroolsTernaryObject<String, OP, Integer> implements CE
{
  private static final long serialVersionUID = 1L;

  public OMinCCE(@NonNull String ceid, @NonNull OP p, @NonNull Integer card)
  {
    super(ceid, p, card);
  }

  public OMinCCE(@NonNull String ceid, @NonNull String propertyID, @NonNull Integer card)
  {
    super(ceid, new OP(propertyID), card);
  }

  @NonNull @Override public String getceid()
  {
    return getT1();
  }

  @NonNull public OP getP()
  {
    return getT2();
  }

  @NonNull public Integer getCard()
  {
    return getT3();
  }

  @NonNull @SideEffectFree @Override public String toString()
  {
    return "OMinCCE" + super.toString();
  }

  @NonNull @Override public SWRLClassExpressionBuiltInArgument extract(
    @NonNull DroolsSWRLBuiltInArgumentExtractor extractor) throws TargetSWRLRuleEngineException
  {
    return extractor.extract(this);
  }
}
def field_options(cls):
    # Presumably a classmethod-style helper: returns the class-level
    # cache of field options.
    return cls._get_cache()
// Copyright © 2017 The Things Network
// Use of this source code is governed by the MIT license that can be found in the LICENSE file.

package component

import (
	"time"

	"github.com/TheThingsNetwork/ttn/utils/errors"
	"github.com/apex/log"
	"github.com/mwitkow/go-grpc-middleware"
	"golang.org/x/net/context" // See https://github.com/grpc/grpc-go/issues/711
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/peer"
)

// ServerOptions returns the gRPC server options for this component:
// unary and stream interceptors that log every call with the caller's
// ID and IP address, plus TLS credentials when configured.
func (c *Component) ServerOptions() []grpc.ServerOption {
	unary := func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
		// Identify the caller by network address ...
		var peerAddr string
		peer, ok := peer.FromContext(ctx)
		if ok {
			peerAddr = peer.Addr.String()
		}
		// ... and by the "id" entry in the request metadata, if present.
		var peerID string
		meta, ok := metadata.FromContext(ctx)
		if ok {
			id, ok := meta["id"]
			if ok && len(id) > 0 {
				peerID = id[0]
			}
		}
		logCtx := c.Ctx.WithFields(log.Fields{
			"CallerID": peerID,
			"CallerIP": peerAddr,
			"Method":   info.FullMethod,
		})
		t := time.Now()
		iface, err := handler(ctx, req)
		err = errors.BuildGRPCError(err)
		logCtx = logCtx.WithField("Duration", time.Since(t))
		if grpc.Code(err) == codes.OK || grpc.Code(err) == codes.Canceled {
			logCtx.Debug("Handled request")
		} else {
			logCtx.WithField("ErrCode", grpc.Code(err)).WithError(err).Debug("Handled request with error")
		}
		return iface, err
	}
	stream := func(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
		var peerAddr string
		peer, ok := peer.FromContext(stream.Context())
		if ok {
			peerAddr = peer.Addr.String()
		}
		var peerID string
		meta, ok := metadata.FromContext(stream.Context())
		if ok {
			id, ok := meta["id"]
			if ok && len(id) > 0 {
				peerID = id[0]
			}
		}
		logCtx := c.Ctx.WithFields(log.Fields{
			"CallerID": peerID,
			"CallerIP": peerAddr,
			"Method":   info.FullMethod,
		})
		t := time.Now()
		logCtx.Debug("Start stream")
		err := handler(srv, stream)
		err = errors.BuildGRPCError(err)
		logCtx = logCtx.WithField("Duration", time.Since(t))
		if grpc.Code(err) == codes.OK || grpc.Code(err) == codes.Canceled {
			logCtx.Debug("End stream")
		} else {
			logCtx.WithField("ErrCode", grpc.Code(err)).WithError(err).Debug("End stream with error")
		}
		return err
	}
	opts := []grpc.ServerOption{
		grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(unary)),
		grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(stream)),
	}
	if c.tlsConfig != nil {
		opts = append(opts, grpc.Creds(credentials.NewTLS(c.tlsConfig)))
	}
	return opts
}
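For orientation, a minimal sketch of how these options might be consumed when bringing up the component's gRPC server; the serve helper and the listen address are illustrative assumptions placed in the same component package, not part of the original file:

package component

import (
	"net"

	"google.golang.org/grpc"
)

// serve is a sketch only; the listen address is a placeholder.
func serve(c *Component) error {
	lis, err := net.Listen("tcp", ":1900")
	if err != nil {
		return err
	}
	// ServerOptions carries the logging interceptors and, when TLS is
	// configured, the transport credentials.
	srv := grpc.NewServer(c.ServerOptions()...)
	// Register service implementations on srv here, then serve.
	return srv.Serve(lis)
}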
// AllArguments returns all arguments from the given cli.Context.
func AllArguments(c *cli.Context) []string {
	sl := []string{c.Args().First()}
	sl = append(sl, c.Args().Tail()...)
	// With no arguments at all, First() returns "" and Tail() is empty,
	// which would leave a slice containing a single empty string;
	// normalize that case to an empty slice.
	if c.Args().First() == "" && len(sl) == 1 {
		return []string{}
	}
	return sl
}
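A hedged usage sketch follows. It assumes the v1-style urfave/cli API that this helper's signature implies and that AllArguments is in scope in the same package; the app wiring is illustrative only:

package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli"
)

func main() {
	app := cli.NewApp()
	app.Action = func(c *cli.Context) error {
		// AllArguments flattens First() and Tail() into one slice and
		// yields an empty slice when no arguments were given.
		fmt.Println(AllArguments(c))
		return nil
	}
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}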