content
stringlengths
7
2.61M
Image-assisted GNSS/INS navigation for UAV-based mobile mapping systems during GNSS outages Nowadays, unmanned aerial vehicles (UAVs) have evolved into an alternative to traditional mapping platforms for some applications due to their flexibility and cost savings. Most UAVs, which are used as mobile mapping systems (MMS), depend on utilizing position and orientation system (POS) onboard the platform. Usually, POS consists of Global Navigation Satellite System (GNSS) as a positioning sensor integrated with an Inertial Navigation System (INS), which encompasses Inertial Measurement Unit (IMU) as an orientation sensor. This GNSS/INS integration is usually performed in either a loosely coupled (LC) or a tightly coupled (TC) scheme. Although LC is a simple scheme that uses the GNSS solutions to aid the INS navigation, TC has the advantage of being able to integrate raw GNSS measurements and INS when less than four GNSS satellites are tracked, which makes it a better candidate for more MMS applications. However, for some environments with large GNSS outages, especially where no satellites are tracked, TC architecture is still not a convenient solution, even with using smoothers afterward. In this research, a low-cost UAV MMS is developed combining an inexpensive POS, a camera, and a spinning multi-beam LiDAR for different mapping applications. In addition, a processing strategy is proposed to refine the trajectory during GNSS outages. This strategy comprises applying a two-stage Kalman filter along with smoothers. The first stage is a TC scheme KF followed by a smoother using GNSS and INS measurements as inputs. The second stage is a LC scheme KF utilizing the output from the first stage and a vision-based trajectory to aid trajectory refinement during GNSS outages. 
The vision-based trajectory is derived from integrated sensor orientation (ISO) using a bundle adjustment (BA) procedure for a collected block of images for the area of interest, without using any ground control points. This technique enhances the accuracy of the estimated position by 50% in the planimetric direction and 25% in the vertical direction during GNSS outages. In addition, the estimated heading accuracy of the platform is enhanced by 25%. The derived vision-aided GNSS/INS trajectory is used to derive more accurate georeferenced information from other sensors, such as LiDAR, onboard the mapping platform. Finally, quantitative and qualitative quality control are conducted to evaluate the vision-aided GNSS/INS trajectory derived from the proposed processing strategy.
/*
 * This file is generated by jOOQ.
 */
package com.kushtrimh.tomorr.dal.tables.records;

import com.kushtrimh.tomorr.dal.tables.AppUser;

import org.jooq.Field;
import org.jooq.Record1;
import org.jooq.Record4;
import org.jooq.Row4;
import org.jooq.impl.UpdatableRecordImpl;

import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.Id;
import javax.persistence.Table;
import javax.persistence.UniqueConstraint;

import java.time.LocalDateTime;

/**
 * jOOQ-generated updatable record for the {@code public.app_user} table.
 *
 * <p>Column order (0-based index used by {@code get}/{@code set}):
 * {@code id} (0), {@code address} (1), {@code type} (2), {@code created_at} (3).
 */
@SuppressWarnings({"all", "unchecked", "rawtypes"})
@Entity
@Table(
    name = "app_user",
    schema = "public",
    uniqueConstraints = {
        @UniqueConstraint(name = "app_user_pkey", columnNames = {"id"})
    }
)
public class AppUserRecord extends UpdatableRecordImpl<AppUserRecord>
    implements Record4<String, String, String, LocalDateTime> {

    private static final long serialVersionUID = 1L;

    // ---------------------------------------------------------------------
    // Column accessors (index-based, per jOOQ record conventions)
    // ---------------------------------------------------------------------

    /** Sets {@code public.app_user.id}. */
    public void setId(String value) {
        set(0, value);
    }

    /** Returns {@code public.app_user.id}. */
    @Id
    @Column(name = "id", nullable = false, length = 32)
    public String getId() {
        return (String) get(0);
    }

    /** Sets {@code public.app_user.address}. */
    public void setAddress(String value) {
        set(1, value);
    }

    /** Returns {@code public.app_user.address}. */
    @Column(name = "address", nullable = false, length = 1024)
    public String getAddress() {
        return (String) get(1);
    }

    /** Sets {@code public.app_user.type}. */
    public void setType(String value) {
        set(2, value);
    }

    /** Returns {@code public.app_user.type}. */
    @Column(name = "type", nullable = false, length = 64)
    public String getType() {
        return (String) get(2);
    }

    /** Sets {@code public.app_user.created_at}. */
    public void setCreatedAt(LocalDateTime value) {
        set(3, value);
    }

    /** Returns {@code public.app_user.created_at}. */
    @Column(name = "created_at", nullable = false, precision = 6)
    public LocalDateTime getCreatedAt() {
        return (LocalDateTime) get(3);
    }

    // ---------------------------------------------------------------------
    // Primary key information
    // ---------------------------------------------------------------------

    @Override
    public Record1<String> key() {
        return (Record1) super.key();
    }

    // ---------------------------------------------------------------------
    // Record4 type implementation
    // ---------------------------------------------------------------------

    @Override
    public Row4<String, String, String, LocalDateTime> fieldsRow() {
        return (Row4) super.fieldsRow();
    }

    @Override
    public Row4<String, String, String, LocalDateTime> valuesRow() {
        return (Row4) super.valuesRow();
    }

    @Override
    public Field<String> field1() {
        return AppUser.APP_USER.ID;
    }

    @Override
    public Field<String> field2() {
        return AppUser.APP_USER.ADDRESS;
    }

    @Override
    public Field<String> field3() {
        return AppUser.APP_USER.TYPE;
    }

    @Override
    public Field<LocalDateTime> field4() {
        return AppUser.APP_USER.CREATED_AT;
    }

    @Override
    public String component1() {
        return getId();
    }

    @Override
    public String component2() {
        return getAddress();
    }

    @Override
    public String component3() {
        return getType();
    }

    @Override
    public LocalDateTime component4() {
        return getCreatedAt();
    }

    @Override
    public String value1() {
        return getId();
    }

    @Override
    public String value2() {
        return getAddress();
    }

    @Override
    public String value3() {
        return getType();
    }

    @Override
    public LocalDateTime value4() {
        return getCreatedAt();
    }

    @Override
    public AppUserRecord value1(String value) {
        setId(value);
        return this;
    }

    @Override
    public AppUserRecord value2(String value) {
        setAddress(value);
        return this;
    }

    @Override
    public AppUserRecord value3(String value) {
        setType(value);
        return this;
    }

    @Override
    public AppUserRecord value4(LocalDateTime value) {
        setCreatedAt(value);
        return this;
    }

    @Override
    public AppUserRecord values(String value1, String value2, String value3, LocalDateTime value4) {
        value1(value1);
        value2(value2);
        value3(value3);
        value4(value4);
        return this;
    }

    // ---------------------------------------------------------------------
    // Constructors
    // ---------------------------------------------------------------------

    /** Creates a detached AppUserRecord. */
    public AppUserRecord() {
        super(AppUser.APP_USER);
    }

    /** Creates a detached, initialised AppUserRecord. */
    public AppUserRecord(String id, String address, String type, LocalDateTime createdAt) {
        super(AppUser.APP_USER);
        setId(id);
        setAddress(address);
        setType(type);
        setCreatedAt(createdAt);
    }
}
/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */ /* * Copyright (c) 2014-2017, Regents of the University of California, * Arizona Board of Regents, * Colorado State University, * University Pierre & <NAME>ie, Sorbonne University, * Washington University in St. Louis, * Beijing Institute of Technology, * The University of Memphis. * * This file is part of NFD (Named Data Networking Forwarding Daemon). * See AUTHORS.md for complete list of NFD authors and contributors. * * NFD is free software: you can redistribute it and/or modify it under the terms * of the GNU General Public License as published by the Free Software Foundation, * either version 3 of the License, or (at your option) any later version. * * NFD is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR * PURPOSE. See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along with * NFD, e.g., in COPYING.md file. If not, see <http://www.gnu.org/licenses/>. 
*/

// Unit tests for the StrategyChoice table: versioned strategy resolution,
// longest-prefix effective-strategy lookup, and StrategyInfo cleanup.

#include "table/strategy-choice.hpp"

#include "tests/test-common.hpp"
#include "../fw/dummy-strategy.hpp"

namespace nfd {
namespace tests {

using fw::Strategy;

// Fixture that registers two dummy strategies, P and Q, and exposes helpers
// for inserting into and querying the forwarder's StrategyChoice table.
class StrategyChoiceFixture : public BaseFixture
{
protected:
  StrategyChoiceFixture()
    : sc(forwarder.getStrategyChoice())
    , strategyNameP("/strategy-choice-P/%FD%00")
    , strategyNameQ("/strategy-choice-Q/%FD%00")
  {
    DummyStrategy::registerAs(strategyNameP);
    DummyStrategy::registerAs(strategyNameQ);
  }

  /** \brief insert StrategyChoice entry at \p prefix for \p instanceName
   *  \return constructed instance name
   */
  Name
  insertAndGet(const Name& prefix, const Name& instanceName)
  {
    BOOST_REQUIRE(sc.insert(prefix, instanceName));
    bool isFound;
    Name foundName;
    std::tie(isFound, foundName) = sc.get(prefix);
    BOOST_REQUIRE(isFound);
    return foundName;
  }

  /** \brief determine whether the effective strategy type at \p prefix is \p S
   *  \tparam S expected strategy type
   */
  template<typename S>
  bool
  isStrategyType(const Name& prefix)
  {
    Strategy& effectiveStrategy = sc.findEffectiveStrategy(prefix);
    return dynamic_cast<S*>(&effectiveStrategy) != nullptr;
  }

  // Returns the instance name of the effective strategy for \p query;
  // \p query may be any overload accepted by findEffectiveStrategy
  // (a Name, a pit::Entry, or a measurements::Entry).
  template<typename Q>
  Name
  findInstanceName(const Q& query)
  {
    return sc.findEffectiveStrategy(query).getInstanceName();
  }

protected:
  Forwarder forwarder;
  StrategyChoice& sc;
  const Name strategyNameP;
  const Name strategyNameQ;
};

BOOST_AUTO_TEST_SUITE(Table)
BOOST_FIXTURE_TEST_SUITE(TestStrategyChoice, StrategyChoiceFixture)

// Verifies how an inserted strategy name's version component is matched
// against the registered strategy versions (1, 3, and 4 are registered).
BOOST_AUTO_TEST_CASE(Versioning)
{
  const Name strategyNameV("/strategy-choice-V");
  const Name strategyNameV0("/strategy-choice-V/%FD%00");
  const Name strategyNameV1("/strategy-choice-V/%FD%01");
  const Name strategyNameV2("/strategy-choice-V/%FD%02");
  const Name strategyNameV3("/strategy-choice-V/%FD%03");
  const Name strategyNameV4("/strategy-choice-V/%FD%04");
  const Name strategyNameV5("/strategy-choice-V/%FD%05");

  VersionedDummyStrategy<1>::registerAs(strategyNameV1);
  VersionedDummyStrategy<3>::registerAs(strategyNameV3);
  VersionedDummyStrategy<4>::registerAs(strategyNameV4);

  // unversioned: choose latest version
  BOOST_CHECK_EQUAL(this->insertAndGet("/A", strategyNameV), strategyNameV4);
  BOOST_CHECK(this->isStrategyType<VersionedDummyStrategy<4>>("/A"));

  // exact version: choose same version
  BOOST_CHECK_EQUAL(this->insertAndGet("/B", strategyNameV1), strategyNameV1);
  BOOST_CHECK(this->isStrategyType<VersionedDummyStrategy<1>>("/B"));
  BOOST_CHECK_EQUAL(this->insertAndGet("/C", strategyNameV3), strategyNameV3);
  BOOST_CHECK(this->isStrategyType<VersionedDummyStrategy<3>>("/C"));
  BOOST_CHECK_EQUAL(this->insertAndGet("/D", strategyNameV4), strategyNameV4);
  BOOST_CHECK(this->isStrategyType<VersionedDummyStrategy<4>>("/D"));

  // lower version: choose next higher version
  BOOST_CHECK_EQUAL(this->insertAndGet("/E", strategyNameV0), strategyNameV0);
  BOOST_CHECK(this->isStrategyType<VersionedDummyStrategy<1>>("/E"));
  BOOST_CHECK_EQUAL(this->insertAndGet("/F", strategyNameV2), strategyNameV2);
  BOOST_CHECK(this->isStrategyType<VersionedDummyStrategy<3>>("/F"));

  // higher version: failure
  StrategyChoice::InsertResult res5 = sc.insert("/G", strategyNameV5);
  BOOST_CHECK(!res5);
  BOOST_CHECK(!res5.isRegistered());
}

// Parameters appended after the version component are accepted; a parameter
// appended to an unversioned strategy name is rejected.
BOOST_AUTO_TEST_CASE(Parameters)
{
  // no parameters
  BOOST_CHECK_EQUAL(this->insertAndGet("/A", strategyNameP), strategyNameP);

  // one parameter
  Name oneParamName = Name(strategyNameP).append("param");
  BOOST_CHECK_EQUAL(this->insertAndGet("/B", oneParamName), oneParamName);

  // two parameters
  Name twoParamName = Name(strategyNameP).append("x").append("y");
  BOOST_CHECK_EQUAL(this->insertAndGet("/C", twoParamName), twoParamName);

  // parameter without version is disallowed
  Name oneParamUnversioned = strategyNameP.getPrefix(-1).append("param");
  BOOST_CHECK(!sc.insert("/D", oneParamUnversioned));
}

// A prefix at the NameTree depth limit is accepted; one beyond it is rejected.
BOOST_AUTO_TEST_CASE(InsertLongName)
{
  Name n1;
  while (n1.size() < NameTree::getMaxDepth()) {
    n1.append("A");
  }
  Name n2 = n1;
  while (n2.size() < NameTree::getMaxDepth() * 2) {
    n2.append("B");
  }

  BOOST_CHECK(sc.insert(n1, strategyNameP));
  BOOST_CHECK(!sc.insert(n2, strategyNameP));
}

// get() is an exact-match lookup: it does not fall back to a shorter prefix.
BOOST_AUTO_TEST_CASE(Get)
{
  BOOST_CHECK(sc.insert("/", strategyNameP));
  // { '/'=>P }

  auto getRoot = sc.get("/");
  BOOST_CHECK_EQUAL(getRoot.first, true);
  BOOST_CHECK_EQUAL(getRoot.second, strategyNameP);

  auto getA = sc.get("/A");
  BOOST_CHECK_EQUAL(getA.first, false);
}

// findEffectiveStrategy() resolves via longest-prefix match; the comments
// in braces track the table contents after each mutation.
BOOST_AUTO_TEST_CASE(FindEffectiveStrategy)
{
  const Name strategyNameZ("/strategy-choice-Z/%FD%00"); // unregistered strategyName

  BOOST_CHECK(sc.insert("/", strategyNameP));
  // { '/'=>P }
  BOOST_CHECK_EQUAL(this->findInstanceName("/"), strategyNameP);
  BOOST_CHECK_EQUAL(this->findInstanceName("/A"), strategyNameP);
  BOOST_CHECK_EQUAL(this->findInstanceName("/A/B"), strategyNameP);

  BOOST_CHECK(sc.insert("/A/B", strategyNameP));
  // { '/'=>P, '/A/B'=>P }
  BOOST_CHECK_EQUAL(this->findInstanceName("/"), strategyNameP);
  BOOST_CHECK_EQUAL(this->findInstanceName("/A"), strategyNameP);
  BOOST_CHECK_EQUAL(this->findInstanceName("/A/B"), strategyNameP);
  // same entry, same instance
  BOOST_CHECK_EQUAL(&sc.findEffectiveStrategy("/"), &sc.findEffectiveStrategy("/A"));
  // different entries, distinct instances
  BOOST_CHECK_NE(&sc.findEffectiveStrategy("/"), &sc.findEffectiveStrategy("/A/B"));

  sc.erase("/A"); // no effect
  // { '/'=>P, '/A/B'=>P }
  BOOST_CHECK_EQUAL(this->findInstanceName("/"), strategyNameP);
  BOOST_CHECK_EQUAL(this->findInstanceName("/A"), strategyNameP);
  BOOST_CHECK_EQUAL(this->findInstanceName("/A/B"), strategyNameP);

  BOOST_CHECK(sc.insert("/A", strategyNameQ));
  // { '/'=>P, '/A/B'=>P, '/A'=>Q }
  BOOST_CHECK_EQUAL(this->findInstanceName("/"), strategyNameP);
  BOOST_CHECK_EQUAL(this->findInstanceName("/A"), strategyNameQ);
  BOOST_CHECK_EQUAL(this->findInstanceName("/A/B"), strategyNameP);

  sc.erase("/A/B");
  // { '/'=>P, '/A'=>Q }
  BOOST_CHECK_EQUAL(this->findInstanceName("/"), strategyNameP);
  BOOST_CHECK_EQUAL(this->findInstanceName("/A"), strategyNameQ);
  BOOST_CHECK_EQUAL(this->findInstanceName("/A/B"), strategyNameQ);

  BOOST_CHECK(!sc.insert("/", strategyNameZ)); // non existent strategy
  BOOST_CHECK(sc.insert("/", strategyNameQ));
  BOOST_CHECK(sc.insert("/A", strategyNameP));
  // { '/'=>Q, '/A'=>P }
  BOOST_CHECK_EQUAL(this->findInstanceName("/"), strategyNameQ);
  BOOST_CHECK_EQUAL(this->findInstanceName("/A"), strategyNameP);
  BOOST_CHECK_EQUAL(this->findInstanceName("/A/B"), strategyNameP);
  BOOST_CHECK_EQUAL(this->findInstanceName("/D"), strategyNameQ);
}

// Resolution via a PIT entry: the full name (with implicit digest) of a Data
// packet can select a more specific strategy than its plain prefix.
BOOST_AUTO_TEST_CASE(FindEffectiveStrategyWithPitEntry)
{
  shared_ptr<Data> dataABC = makeData("/A/B/C");
  Name fullName = dataABC->getFullName();

  BOOST_CHECK(sc.insert("/A", strategyNameP));
  BOOST_CHECK(sc.insert(fullName, strategyNameQ));

  Pit& pit = forwarder.getPit();
  shared_ptr<Interest> interestAB = makeInterest("/A/B");
  shared_ptr<pit::Entry> pitAB = pit.insert(*interestAB).first;
  shared_ptr<Interest> interestFull = makeInterest(fullName);
  shared_ptr<pit::Entry> pitFull = pit.insert(*interestFull).first;

  BOOST_CHECK_EQUAL(this->findInstanceName(*pitAB), strategyNameP);
  BOOST_CHECK_EQUAL(this->findInstanceName(*pitFull), strategyNameQ);
}

// Resolution via a measurements entry also uses longest-prefix match.
BOOST_AUTO_TEST_CASE(FindEffectiveStrategyWithMeasurementsEntry)
{
  BOOST_CHECK(sc.insert("/A", strategyNameP));
  BOOST_CHECK(sc.insert("/A/B/C", strategyNameQ));

  Measurements& measurements = forwarder.getMeasurements();
  measurements::Entry& mAB = measurements.get("/A/B");
  measurements::Entry& mABCD = measurements.get("/A/B/C/D");

  BOOST_CHECK_EQUAL(this->findInstanceName(mAB), strategyNameP);
  BOOST_CHECK_EQUAL(this->findInstanceName(mABCD), strategyNameQ);
}

// Erasing an entry releases its NameTree entry (no leak).
BOOST_AUTO_TEST_CASE(Erase)
{
  NameTree& nameTree = forwarder.getNameTree();

  sc.insert("/", strategyNameP);
  size_t nNameTreeEntriesBefore = nameTree.size();

  sc.insert("/A/B", strategyNameQ);
  sc.erase("/A/B");
  BOOST_CHECK_EQUAL(nameTree.size(), nNameTreeEntriesBefore);
}

// Iteration visits every entry exactly once.
BOOST_AUTO_TEST_CASE(Enumerate)
{
  sc.insert("/", strategyNameP);
  sc.insert("/A/B", strategyNameQ);
  sc.insert("/A/B/C", strategyNameP);
  sc.insert("/D", strategyNameP);
  sc.insert("/E", strategyNameQ);
  BOOST_CHECK_EQUAL(sc.size(), 5);

  std::map<Name, Name> map; // namespace=>strategyName
  for (StrategyChoice::const_iterator it = sc.begin(); it != sc.end(); ++it) {
    map[it->getPrefix()] = it->getStrategyInstanceName();
  }

  BOOST_CHECK_EQUAL(map.at("/"), strategyNameP);
  BOOST_CHECK_EQUAL(map.at("/A/B"), strategyNameQ);
  BOOST_CHECK_EQUAL(map.at("/A/B/C"), strategyNameP);
  BOOST_CHECK_EQUAL(map.at("/D"), strategyNameP);
  BOOST_CHECK_EQUAL(map.at("/E"), strategyNameQ);
  BOOST_CHECK_EQUAL(map.size(), 5);
}

// Marker StrategyInfo type used to observe which measurements entries are
// cleared when the effective strategy of a namespace changes.
class PStrategyInfo : public fw::StrategyInfo
{
public:
  static constexpr int
  getTypeId()
  {
    return 10;
  }
};

// Changing the effective strategy of a namespace clears StrategyInfo on the
// measurements entries that switch to a different strategy, and only those.
BOOST_AUTO_TEST_CASE(ClearStrategyInfo)
{
  Measurements& measurements = forwarder.getMeasurements();

  BOOST_CHECK(sc.insert("/", strategyNameP));
  // { '/'=>P }
  measurements.get("/").insertStrategyInfo<PStrategyInfo>();
  measurements.get("/A").insertStrategyInfo<PStrategyInfo>();
  measurements.get("/A/B").insertStrategyInfo<PStrategyInfo>();
  measurements.get("/A/C").insertStrategyInfo<PStrategyInfo>();

  BOOST_CHECK(sc.insert("/A/B", strategyNameP));
  // { '/'=>P, '/A/B'=>P }
  BOOST_CHECK(measurements.get("/").getStrategyInfo<PStrategyInfo>() != nullptr);
  BOOST_CHECK(measurements.get("/A").getStrategyInfo<PStrategyInfo>() != nullptr);
  BOOST_CHECK(measurements.get("/A/B").getStrategyInfo<PStrategyInfo>() != nullptr);
  BOOST_CHECK(measurements.get("/A/C").getStrategyInfo<PStrategyInfo>() != nullptr);

  BOOST_CHECK(sc.insert("/A", strategyNameQ));
  // { '/'=>P, '/A/B'=>P, '/A'=>Q }
  BOOST_CHECK(measurements.get("/").getStrategyInfo<PStrategyInfo>() != nullptr);
  BOOST_CHECK(measurements.get("/A").getStrategyInfo<PStrategyInfo>() == nullptr);
  BOOST_CHECK(measurements.get("/A/B").getStrategyInfo<PStrategyInfo>() != nullptr);
  BOOST_CHECK(measurements.get("/A/C").getStrategyInfo<PStrategyInfo>() == nullptr);

  sc.erase("/A/B");
  // { '/'=>P, '/A'=>Q }
  BOOST_CHECK(measurements.get("/").getStrategyInfo<PStrategyInfo>() != nullptr);
  BOOST_CHECK(measurements.get("/A").getStrategyInfo<PStrategyInfo>() == nullptr);
  BOOST_CHECK(measurements.get("/A/B").getStrategyInfo<PStrategyInfo>() == nullptr);
  BOOST_CHECK(measurements.get("/A/C").getStrategyInfo<PStrategyInfo>() == nullptr);
}

BOOST_AUTO_TEST_SUITE_END() // TestStrategyChoice
BOOST_AUTO_TEST_SUITE_END() // Table

} // namespace tests
} // namespace nfd
Police in Denmark fined a 28-year-old woman for wearing a full face veil, the first time a punishment was meted out since it became illegal on Wednesday. According to local media, police issued the fine in the city of Horsholm, in the northeastern region of Nordsjaelland, after being called to a shopping centre on Friday. The woman in the veil was confronted by another woman who tried to tear it off, resulting in a minor scuffle. "During the fight her niqab [veil] came off, but by the time we arrived she had put it back on again," police officer David Borchersen said, according to Danish news agency Ritzau. The woman was fined $156 for wearing a full face veil in public and asked to either take the garment off or leave the shopping centre. She chose to leave. The fine was the result of a new law banning all face-covering clothing that went into effect on August 1. Danish legislators passed the law presented by Denmark's centre-right governing coalition last May. The law was also backed by the Social Democrats and the far-right Danish People's Party, leading to a 75 to 30 vote in favour. Violating the regulation results in a fine of 1,000 kroner ($156). Repeated violators will be fined up to 10,000 kroner ($1,560). When the ban went into effect, dozens of women came out in the Danish capital, Copenhagen, to protest it. The Danish government said the regulation is not aimed at any religion. But the law - popularly known as the "burqa ban" - is seen by some as directed at Muslim women who choose to wear the face veil in public. Following the Danish vote in May, Amnesty International's Gauri van Gulik said in a statement: "All women should be free to dress as they please and to wear clothing that expresses their identity or beliefs. "This ban will have a particularly negative impact on Muslim women who choose to wear the niqab or burqa. If the intention of this law was to protect women's rights, it fails abjectly." 
The Danish ban follows similar ones in Belgium, France, the Netherlands, Bulgaria and parts of Switzerland. The European Court of Human Rights last year upheld a Belgian ban on wearing the face veil in public. France was the first European country to ban the veil in public places with a law that took effect in 2011. Lena Larsen, project director of the Oslo Coalition on Freedom of Religion or Belief at the University of Oslo, said illegalising clothing is "polarising" and "not productive". "I don't think that this [law] will obtain any productive aim of integration or peaceful coexistence," she told Al Jazeera in an interview from Oslo, Norway's capital. "What we are witnessing is an expression of Danish identity politics, legalising an ethical concern and we are seeing polarisation," Larsen added. "Women who are considered to be oppressed and in need to be saved from unwanted social control - they are actually strong voices for wearing the face veil with arguments of personal freedom to choose whatever they want to wear." Denmark immigration law: A sign of things to come?
<reponame>cragkhit/elasticsearch public void writeConfiguration(Writer out) throws IOException { if (myResource == null) { out.append("# Unable to print configuration resource\n"); } else { URL url = myResource.getUrl(); InputStream in = url.openStream(); if (in != null) { try { IOUtils.copy(in, out); } finally { IOUtils.closeQuietly(in); } } else { out.append("# Unable to print configuration resource\n"); } } }
<filename>pkg/controllers/inventory/inventory_controller.go package inventory import ( "context" "encoding/json" "fmt" "reflect" "strings" "time" "k8s.io/apimachinery/pkg/api/equality" "k8s.io/client-go/util/retry" metal3v1alpha1 "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1" objectreferencesv1 "github.com/openshift/custom-resource-status/objectreferences/v1" hivev1 "github.com/openshift/hive/apis/hive/v1" hiveinternalv1alpha1 "github.com/openshift/hive/apis/hiveinternal/v1alpha1" inventoryv1alpha1 "github.com/stolostron/multicloud-operators-foundation/pkg/apis/inventory/v1alpha1" bmaerrors "github.com/stolostron/multicloud-operators-foundation/pkg/controllers/inventory/errors" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/reference" "k8s.io/klog" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" k8slabels "github.com/stolostron/multicloud-operators-foundation/pkg/utils" ) const ( // RoleLabel is the key name for the role label associated with the asset RoleLabel = "metal3.io/role" // ClusterDeploymentNameLabel is the key name for the name label associated with the asset's clusterDeployment ClusterDeploymentNameLabel = "metal3.io/cluster-deployment-name" // ClusterDeploymentNamespaceLabel is the key name for the namespace label associated with the asset's clusterDeployment ClusterDeploymentNamespaceLabel = "metal3.io/cluster-deployment-namespace" // BareMetalHostKind contains the value of kind BareMetalHost BareMetalHostKind = "BareMetalHost" ) const ( // assetSecretRequeueAfter 
specifies the amount of time, in seconds, before requeue assetSecretRequeueAfter int = 60 ) func SetupWithManager(mgr manager.Manager) error { if err := addBMAReconciler(mgr, newBMAReconciler(mgr)); err != nil { klog.Errorf("Failed to create baremetalasset controller, %v", err) return err } if err := addCDReconciler(mgr, newCDReconciler(mgr)); err != nil { klog.Errorf("Failed to create baremetalasset controller, %v", err) return err } return nil } // newReconciler returns a new reconcile.Reconciler func newBMAReconciler(mgr manager.Manager) reconcile.Reconciler { return &ReconcileBareMetalAsset{client: mgr.GetClient(), scheme: mgr.GetScheme()} } // add adds a new Controller to mgr with r as the reconcile.Reconciler func addBMAReconciler(mgr manager.Manager, r reconcile.Reconciler) error { // Create a new controller c, err := controller.New("baremetalasset-controller", mgr, controller.Options{Reconciler: r}) if err != nil { return err } // Watch for changes to primary resource BareMetalAsset err = c.Watch(&source.Kind{Type: &inventoryv1alpha1.BareMetalAsset{}}, &handler.EnqueueRequestForObject{}) if err != nil { return err } // Watch for changes to SyncSets and requeue BareMetalAssets with the name and matching cluster-deployment-namespace label // (which is also the syncset namespace) err = c.Watch( &source.Kind{Type: &hivev1.SyncSet{}}, handler.EnqueueRequestsFromMapFunc( handler.MapFunc(func(a client.Object) []reconcile.Request { syncSet, ok := a.(*hivev1.SyncSet) if !ok { // not a SyncSet, returning empty klog.Error("SyncSet handler received non-SyncSet object") return []reconcile.Request{} } bmas := &inventoryv1alpha1.BareMetalAssetList{} err := mgr.GetClient().List(context.TODO(), bmas, client.MatchingLabels{ ClusterDeploymentNamespaceLabel: syncSet.Namespace, }) if err != nil { klog.Errorf("Could not list BareMetalAsset %v with label %v=%v, %v", syncSet.Name, ClusterDeploymentNamespaceLabel, syncSet.Namespace, err) } var requests []reconcile.Request for _, 
bma := range bmas.Items { if syncSet.Name == bma.Name { requests = append(requests, reconcile.Request{ NamespacedName: types.NamespacedName{ Name: bma.Name, Namespace: bma.Namespace, }, }) } } return requests }, ), ), ) if err != nil { return err } // Watch for changes to ClusterSync err = c.Watch( &source.Kind{Type: &hiveinternalv1alpha1.ClusterSync{}}, handler.EnqueueRequestsFromMapFunc( handler.MapFunc(func(a client.Object) []reconcile.Request { clusterSync, ok := a.(*hiveinternalv1alpha1.ClusterSync) if !ok { // not a ClusterSync, returning empty klog.Error("ClusterSync handler received non-ClusterSync object") return []reconcile.Request{} } bmas := &inventoryv1alpha1.BareMetalAssetList{} err := mgr.GetClient().List(context.TODO(), bmas, client.InNamespace(clusterSync.Namespace)) if err != nil { klog.Error("Could not list BareMetalAssets", err) } var requests []reconcile.Request for _, bma := range bmas.Items { if bma.Spec.ClusterDeployment.Name == clusterSync.Name { requests = append(requests, reconcile.Request{ NamespacedName: types.NamespacedName{ Name: bma.Name, Namespace: bma.Namespace, }, }) } } return requests }), )) if err != nil { return err } // Watch for changes to ClusterDeployments and requeue BareMetalAssets with labels set to // ClusterDeployment's name (which is expected to be the clusterName) err = c.Watch( &source.Kind{Type: &hivev1.ClusterDeployment{}}, handler.EnqueueRequestsFromMapFunc( handler.MapFunc(func(a client.Object) []reconcile.Request { clusterDeployment, ok := a.(*hivev1.ClusterDeployment) if !ok { // not a Deployment, returning empty klog.Error("ClusterDeployment handler received non-ClusterDeployment object") return []reconcile.Request{} } bmas := &inventoryv1alpha1.BareMetalAssetList{} err := mgr.GetClient().List(context.TODO(), bmas, client.MatchingLabels{ ClusterDeploymentNameLabel: clusterDeployment.Name, ClusterDeploymentNamespaceLabel: clusterDeployment.Namespace, }) if err != nil { klog.Errorf("could not list 
BareMetalAssets with label %v=%v, %v", ClusterDeploymentNameLabel, clusterDeployment.Name, err) } var requests []reconcile.Request for _, bma := range bmas.Items { requests = append(requests, reconcile.Request{ NamespacedName: types.NamespacedName{ Name: bma.Name, Namespace: bma.Namespace, }, }) } return requests }), )) if err != nil { return err } return nil } // blank assignment to verify that ReconcileBareMetalAsset implements reconcile.Reconciler var _ reconcile.Reconciler = &ReconcileBareMetalAsset{} // ReconcileBareMetalAsset reconciles a BareMetalAsset object type ReconcileBareMetalAsset struct { // This client, initialized using mgr.Client() above, is a split client // that reads objects from the cache and writes to the apiserver client client.Client scheme *runtime.Scheme } // Reconcile reads that state of the cluster for a BareMetalAsset object and makes changes based on the state read // and what is in the BareMetalAsset.Spec // The Controller will requeue the Request to be processed again if the returned error is non-nil or // Result.Requeue is true, otherwise upon completion it will remove the work from the queue. func (r *ReconcileBareMetalAsset) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { klog.Info("Reconciling BareMetalAsset") // Fetch the BareMetalAsset instance instance := &inventoryv1alpha1.BareMetalAsset{} err := r.client.Get(ctx, request.NamespacedName, instance) if err != nil { if errors.IsNotFound(err) { // Request object not found, could have been deleted after reconcile request. // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers. // Return and don't requeue return reconcile.Result{}, nil } // Error reading the object - requeue the request. 
return reconcile.Result{}, err
	}

	// Check DeletionTimestamp to determine if object is under deletion
	if instance.GetDeletionTimestamp().IsZero() {
		// Not being deleted: make sure our finalizer is registered so that we
		// get a chance to clean up the SyncSet before the asset disappears.
		if !contains(instance.GetFinalizers(), BareMetalAssetFinalizer) {
			klog.Info("Finalizer not found for BareMetalAsset. Adding finalizer")
			instance.ObjectMeta.Finalizers = append(instance.ObjectMeta.Finalizers, BareMetalAssetFinalizer)
			if err := r.client.Update(ctx, instance); err != nil {
				klog.Errorf("Failed to add finalizer to baremetalasset, %v", err)
				return reconcile.Result{}, err
			}
		}
	} else {
		// The object is being deleted
		if contains(instance.GetFinalizers(), BareMetalAssetFinalizer) {
			return r.deleteSyncSet(ctx, instance)
		}
		return reconcile.Result{}, nil
	}

	// Run the reconcile steps in order; the first failing step aborts the pass.
	for _, f := range []func(context.Context, *inventoryv1alpha1.BareMetalAsset) error{
		r.ensureLabels,
		r.checkAssetSecret,
		r.cleanupOldHiveSyncSet,
		r.checkClusterDeployment,
		r.ensureHiveSyncSet,
	} {
		err = f(ctx, instance)
		if err != nil {
			switch {
			case bmaerrors.IsNoClusterError(err):
				// No cluster deployment referenced: nothing more to sync, and
				// this is not treated as a failure.
				klog.Info("No cluster specified")
				return reconcile.Result{}, r.updateStatus(ctx, instance)
			case bmaerrors.IsAssetSecretNotFoundError(err):
				// since we won't be notified when the secret is created, requeue after some time
				klog.Infof("Secret not found, RequeueAfter.Duration %v seconds", assetSecretRequeueAfter)
				return reconcile.Result{RequeueAfter: time.Duration(assetSecretRequeueAfter) * time.Second},
					r.updateStatus(ctx, instance)
			}
			// Any other error fails the reconcile; still attempt to persist
			// whatever status the steps accumulated so far.
			klog.Errorf("Failed reconcile, %v", err)
			if statusErr := r.updateStatus(ctx, instance); statusErr != nil {
				klog.Errorf("Failed to update status, %v", statusErr)
			}
			return reconcile.Result{}, err
		}
	}

	klog.Info("BareMetalAsset Reconciled")
	return reconcile.Result{}, r.updateStatus(ctx, instance)
}

// updateStatus copies instance.Status onto the latest stored copy of the
// BareMetalAsset, retrying on optimistic-concurrency conflicts. A missing
// object and an unchanged status are both treated as success (no-op).
func (r *ReconcileBareMetalAsset) updateStatus(ctx context.Context, instance *inventoryv1alpha1.BareMetalAsset) error {
	err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		newInstance := &inventoryv1alpha1.BareMetalAsset{}
		err := r.client.Get(ctx, types.NamespacedName{Name: instance.Name, Namespace: instance.Namespace}, newInstance)
		if err != nil {
			if errors.IsNotFound(err) {
				return nil
			}
			return err
		}
		if equality.Semantic.DeepEqual(newInstance.Status, instance.Status) {
			return nil
		}
		newInstance.Status = instance.Status
		return r.client.Status().Update(ctx, newInstance)
	})
	return err
}

// checkAssetSecret verifies that we can find the secret listed in the BareMetalAsset.
// On success it records the secret in status (related object + condition) and,
// if the secret has no owner yet, makes this asset its controller/owner.
func (r *ReconcileBareMetalAsset) checkAssetSecret(ctx context.Context, instance *inventoryv1alpha1.BareMetalAsset) error {
	secretName := instance.Spec.BMC.CredentialsName
	secret := &corev1.Secret{}
	err := r.client.Get(ctx, types.NamespacedName{Name: secretName, Namespace: instance.Namespace}, secret)
	if err != nil {
		if errors.IsNotFound(err) {
			klog.Errorf("Secret (%s/%s) not found, %v", instance.Namespace, secretName, err)
			meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
				Type:    inventoryv1alpha1.ConditionCredentialsFound,
				Status:  metav1.ConditionFalse,
				Reason:  inventoryv1alpha1.ConditionReasonSecretNotFound,
				Message: err.Error(),
			})
			// Typed error so Reconcile can requeue instead of failing hard.
			return bmaerrors.NewAssetSecretNotFoundError(secretName, instance.Namespace)
		}
		return err
	}

	// add secret reference to status
	secretRef, err := reference.GetReference(r.scheme, secret)
	if err != nil {
		klog.Errorf("Failed to get reference from secret, %v", err)
		return err
	}
	if err := objectreferencesv1.SetObjectReference(&instance.Status.RelatedObjects, *secretRef); err != nil {
		klog.Errorf("Failed to set reference, %v", err)
		return err
	}

	// add condition to status
	meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
		Type:    inventoryv1alpha1.ConditionCredentialsFound,
		Status:  metav1.ConditionTrue,
		Reason:  inventoryv1alpha1.ConditionReasonSecretFound,
		Message: fmt.Sprintf("A secret with the name %v in namespace %v was found", secretName, instance.Namespace),
	})

	// Set BaremetalAsset instance as the owner and controller
	// (only when the secret has no owner references at all).
	if secret.OwnerReferences == nil || len(secret.OwnerReferences) == 0 {
		if err := controllerutil.SetControllerReference(instance, secret, r.scheme); err != nil {
			klog.Errorf("Failed to set ControllerReference, %v", err)
			return err
		}
		if err := r.client.Update(ctx, secret); err != nil {
			klog.Errorf("Failed to update secret with OwnerReferences, %v", err)
			return err
		}
	}
	return nil
}

// ensureLabels keeps the cluster-deployment name/namespace and role labels on
// the asset in sync with its spec, writing to the API server only when a
// label actually changed.
func (r *ReconcileBareMetalAsset) ensureLabels(ctx context.Context, instance *inventoryv1alpha1.BareMetalAsset) error {
	labels := k8slabels.CloneAndAddLabel(instance.Labels, ClusterDeploymentNameLabel, instance.Spec.ClusterDeployment.Name)
	labels = k8slabels.AddLabel(labels, ClusterDeploymentNamespaceLabel, instance.Spec.ClusterDeployment.Namespace)
	labels = k8slabels.AddLabel(labels, RoleLabel, string(instance.Spec.Role))

	if !reflect.DeepEqual(labels, instance.Labels) {
		instance.Labels = labels
		return r.client.Update(ctx, instance)
	}
	return nil
}

// checkClusterDeployment verifies that we can find the ClusterDeployment specified in the BareMetalAsset
func (r *ReconcileBareMetalAsset) checkClusterDeployment(ctx context.Context, instance *inventoryv1alpha1.BareMetalAsset) error {
	clusterDeploymentName := instance.Spec.ClusterDeployment.Name
	clusterDeploymentNamespace := instance.Spec.ClusterDeployment.Namespace

	// if the clusterDeploymentName is not specified, we need to handle the possibility
	// that it has been removed from the spec
	if clusterDeploymentName == "" {
		meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
			Type:    inventoryv1alpha1.ConditionClusterDeploymentFound,
			Status:  metav1.ConditionFalse,
			Reason:  inventoryv1alpha1.ConditionReasonNoneSpecified,
			Message: "No cluster deployment specified",
		})
		// Sync conditions no longer apply when no cluster is referenced.
		meta.RemoveStatusCondition(&instance.Status.Conditions, inventoryv1alpha1.ConditionAssetSyncStarted)
		meta.RemoveStatusCondition(&instance.Status.Conditions, inventoryv1alpha1.ConditionAssetSyncCompleted)
		return bmaerrors.NewNoClusterError()
	}

	// If a clusterDeployment is specified, we need to find it
	cd := &hivev1.ClusterDeployment{}
	err := r.client.Get(
		ctx, types.NamespacedName{Name: clusterDeploymentName, Namespace: clusterDeploymentNamespace}, cd)
	if err != nil {
		if errors.IsNotFound(err) {
			klog.Errorf("ClusterDeployment (%s/%s) not found, %v",
				clusterDeploymentNamespace, clusterDeploymentName, err)
			meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
				Type:    inventoryv1alpha1.ConditionClusterDeploymentFound,
				Status:  metav1.ConditionFalse,
				Reason:  inventoryv1alpha1.ConditionReasonClusterDeploymentNotFound,
				Message: err.Error(),
			})
			return err
		}
		return err
	}

	// add condition
	meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
		Type:    inventoryv1alpha1.ConditionClusterDeploymentFound,
		Status:  metav1.ConditionTrue,
		Reason:  inventoryv1alpha1.ConditionReasonClusterDeploymentFound,
		Message: fmt.Sprintf("A ClusterDeployment with the name %v in namespace %v was found", cd.Name, cd.Namespace),
	})
	return nil
}

// ensureHiveSyncSet creates or updates the Hive SyncSet that projects the
// BareMetalHost (and its BMC credential secret) into the managed cluster,
// and mirrors the outcome into the AssetSyncStarted condition.
func (r *ReconcileBareMetalAsset) ensureHiveSyncSet(ctx context.Context, instance *inventoryv1alpha1.BareMetalAsset) error {
	assetSyncCompleted := r.checkHiveClusterSync(ctx, instance)
	hsc := r.newHiveSyncSet(instance, assetSyncCompleted)
	found := &hivev1.SyncSet{}
	err := r.client.Get(ctx, types.NamespacedName{Name: hsc.Name, Namespace: hsc.Namespace}, found)
	if err != nil {
		if errors.IsNotFound(err) {
			// No SyncSet yet: create the desired one.
			err := r.client.Create(ctx, hsc)
			if err != nil {
				klog.Errorf("Failed to create Hive SyncSet, %v", err)
				meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
					Type:    inventoryv1alpha1.ConditionAssetSyncStarted,
					Status:  metav1.ConditionFalse,
					Reason:  inventoryv1alpha1.ConditionReasonSyncSetCreationFailed,
					Message: "Failed to create SyncSet",
				})
				return err
			}

			meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
				Type:    inventoryv1alpha1.ConditionAssetSyncStarted,
				Status:  metav1.ConditionTrue,
				Reason:  inventoryv1alpha1.ConditionReasonSyncSetCreated,
				Message: "SyncSet created successfully",
			})
			return nil
		}

		// other error. fail reconcile
		meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
			Type:    inventoryv1alpha1.ConditionAssetSyncStarted,
			Status:  metav1.ConditionFalse,
			Reason:  inventoryv1alpha1.ConditionReasonSyncSetGetFailed,
			Message: "Failed to get SyncSet",
		})
		klog.Errorf("Failed to get Hive SyncSet (%s/%s), %v", hsc.Namespace, hsc.Name, err)
		return err
	}

	// rebuild the expected SyncSet if the one we found is missing Resources
	// because it means we have successfully applied
	if len(found.Spec.SyncSetCommonSpec.Resources) == 0 {
		hsc = r.newHiveSyncSet(instance, true)
	}

	// Add SyncSet to related objects
	hscRef, err := reference.GetReference(r.scheme, found)
	if err != nil {
		klog.Errorf("Failed to get reference from SyncSet, %v", err)
		return err
	}
	if err := objectreferencesv1.SetObjectReference(&instance.Status.RelatedObjects, *hscRef); err != nil {
		klog.Errorf("Failed to set reference, %v", err)
		return err
	}

	// Add labels to copy for comparison to minimize updates
	labels := k8slabels.CloneAndAddLabel(found.Labels, ClusterDeploymentNameLabel, instance.Spec.ClusterDeployment.Name)
	labels = k8slabels.AddLabel(labels, ClusterDeploymentNamespaceLabel, instance.Spec.ClusterDeployment.Namespace)
	labels = k8slabels.AddLabel(labels, RoleLabel, string(instance.Spec.Role))

	// Update Hive SyncSet CR if it is not in the desired state
	if !reflect.DeepEqual(hsc.Spec, found.Spec) || !reflect.DeepEqual(labels, found.Labels) {
		klog.Infof("Updating Hive SyncSet (%s/%s)", hsc.Namespace, hsc.Name)
		found.Labels = labels
		found.Spec = hsc.Spec
		err := r.client.Update(ctx, found)
		if err != nil {
			klog.Errorf("Failed to update Hive SyncSet (%s/%s), %v", hsc.Namespace, hsc.Name, err)
			meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
				Type:    inventoryv1alpha1.ConditionAssetSyncStarted,
				Status:  metav1.ConditionFalse,
				Reason:  inventoryv1alpha1.ConditionReasonSyncSetUpdateFailed,
				Message: "Failed to update SyncSet",
			})
			return err
		}
		meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
			Type:    inventoryv1alpha1.ConditionAssetSyncStarted,
			Status:  metav1.ConditionTrue,
			Reason:  inventoryv1alpha1.ConditionReasonSyncSetUpdated,
			Message: "SyncSet updated successfully",
		})
	}
	return nil
}

// newHiveSyncSet builds the desired SyncSet for the asset. While the initial
// sync is in progress the BareMetalHost is shipped as a full resource; once
// the sync has completed it is shipped as a merge patch with Upsert apply
// mode so Hive will not delete the host on SyncSet removal.
// NOTE(review): returns nil when the BareMetalHost cannot be marshaled, and
// callers do not nil-check the result — confirm this cannot panic in practice.
func (r *ReconcileBareMetalAsset) newHiveSyncSet(instance *inventoryv1alpha1.BareMetalAsset, assetSyncCompleted bool) *hivev1.SyncSet {
	bmhJSON, err := newBareMetalHost(instance, assetSyncCompleted)
	if err != nil {
		klog.Errorf("Error marshaling baremetalhost, %v", err)
		return nil
	}

	hsc := &hivev1.SyncSet{
		TypeMeta: metav1.TypeMeta{
			Kind:       "SyncSet",
			APIVersion: "hive.openshift.io/v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      instance.Name,
			Namespace: instance.Spec.ClusterDeployment.Namespace, // syncset should be created in the same namespace as the clusterdeployment
			Labels: map[string]string{
				ClusterDeploymentNameLabel:      instance.Spec.ClusterDeployment.Name,
				ClusterDeploymentNamespaceLabel: instance.Spec.ClusterDeployment.Namespace,
				RoleLabel:                       string(instance.Spec.Role),
			},
		},
		Spec: hivev1.SyncSetSpec{
			SyncSetCommonSpec: hivev1.SyncSetCommonSpec{
				Resources: []runtime.RawExtension{
					{
						Raw: bmhJSON,
					},
				},
				Patches:           []hivev1.SyncObjectPatch{},
				ResourceApplyMode: hivev1.SyncResourceApplyMode,
				Secrets: []hivev1.SecretMapping{
					{
						SourceRef: hivev1.SecretReference{
							Name:      instance.Spec.BMC.CredentialsName,
							Namespace: instance.Namespace,
						},
						TargetRef: hivev1.SecretReference{
							Name:      instance.Spec.BMC.CredentialsName,
							Namespace: inventoryv1alpha1.ManagedClusterResourceNamespace,
						},
					},
				},
			},
			ClusterDeploymentRefs: []corev1.LocalObjectReference{
				{
					Name: instance.Spec.ClusterDeployment.Name,
				},
			},
		},
	}

	if assetSyncCompleted {
		// Do not delete the BareMetalHost that we are about to remove
		hsc.Spec.SyncSetCommonSpec.ResourceApplyMode = hivev1.UpsertResourceApplyMode
		// Remove the BareMetalHost from the list of resources to sync
		hsc.Spec.SyncSetCommonSpec.Resources = []runtime.RawExtension{}
		// Specify the BareMetalHost as a patch
		hsc.Spec.SyncSetCommonSpec.Patches = []hivev1.SyncObjectPatch{
			{
				APIVersion: metal3v1alpha1.GroupVersion.String(),
				Kind:       BareMetalHostKind,
				Name:       instance.Name,
				Namespace:  inventoryv1alpha1.ManagedClusterResourceNamespace,
				Patch:      string(bmhJSON),
				PatchType:  "merge",
			},
		}
	}
	return hsc
}

// newBareMetalHost renders the BareMetalHost manifest for the asset as JSON.
// The host is marked online only while the initial sync has not yet completed.
func newBareMetalHost(instance *inventoryv1alpha1.BareMetalAsset, assetSyncCompleted bool) ([]byte, error) {
	bmhSpec := map[string]interface{}{
		"bmc": map[string]string{
			"address":         instance.Spec.BMC.Address,
			"credentialsName": instance.Spec.BMC.CredentialsName,
		},
		"hardwareProfile": instance.Spec.HardwareProfile,
		"bootMACAddress":  instance.Spec.BootMACAddress,
	}
	if !assetSyncCompleted {
		bmhSpec["online"] = true
	}

	bmhJSON, err := json.Marshal(map[string]interface{}{
		"kind":       BareMetalHostKind,
		"apiVersion": metal3v1alpha1.GroupVersion.String(),
		"metadata": map[string]interface{}{
			"name":      instance.Name,
			"namespace": inventoryv1alpha1.ManagedClusterResourceNamespace,
			"labels": map[string]string{
				ClusterDeploymentNameLabel:      instance.Spec.ClusterDeployment.Name,
				ClusterDeploymentNamespaceLabel: instance.Spec.ClusterDeployment.Namespace,
				RoleLabel:                       string(instance.Spec.Role),
			},
		},
		"spec": bmhSpec,
	})
	if err != nil {
		return []byte{}, err
	}
	return bmhJSON, nil
}

// checkHiveClusterSync reports whether Hive's ClusterSync shows the asset's
// SyncSet as applied at the SyncSet's current generation, updating the
// AssetSync* conditions along the way.
func (r *ReconcileBareMetalAsset) checkHiveClusterSync(ctx context.Context, instance *inventoryv1alpha1.BareMetalAsset) bool {
	//get related syncSet
	syncSetNsN := types.NamespacedName{
		Name:      instance.Name,
		Namespace: instance.Spec.ClusterDeployment.Namespace,
	}
	foundSyncSet := &hivev1.SyncSet{}
	err := r.client.Get(ctx, syncSetNsN, foundSyncSet)
	if err != nil {
		meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
			Type:   inventoryv1alpha1.ConditionAssetSyncCompleted,
			Status: metav1.ConditionFalse,
			Reason: inventoryv1alpha1.ConditionReasonSyncStatusNotFound,
			Message: fmt.Sprintf("Problem getting Hive SyncSet for Name %s in Namespace %s, %v",
				syncSetNsN.Name, syncSetNsN.Namespace, err),
		})
		return false
	}

	//get related clusterSync
	clusterSyncNsN := types.NamespacedName{
		Name:      instance.Spec.ClusterDeployment.Name,
		Namespace: instance.Spec.ClusterDeployment.Namespace,
	}
	foundClusterSync := &hiveinternalv1alpha1.ClusterSync{}
	// NOTE(review): the error from this Get is discarded, so the %v below
	// formats the stale (nil, at this point) err from the SyncSet lookup
	// above — the real failure reason is lost. Likely a bug.
	if r.client.Get(ctx, clusterSyncNsN, foundClusterSync) != nil {
		meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
			Type:   inventoryv1alpha1.ConditionAssetSyncCompleted,
			Status: metav1.ConditionFalse,
			Reason: inventoryv1alpha1.ConditionReasonSyncStatusNotFound,
			Message: fmt.Sprintf("Problem getting Hive ClusterSync for ClusterDeployment.Name %s in Namespace %s, %v",
				clusterSyncNsN.Name, clusterSyncNsN.Namespace, err),
		})
		return false
	}

	//find locate the correct syncstatus
	foundSyncStatuses := []hiveinternalv1alpha1.SyncStatus{}
	for _, syncStatus := range foundClusterSync.Status.SyncSets {
		if syncStatus.Name == instance.Name {
			foundSyncStatuses = append(foundSyncStatuses, syncStatus)
		}
	}
	if len(foundSyncStatuses) != 1 {
		err = fmt.Errorf("unable to find SyncStatus with Name %v in ClusterSyncs %v", instance.Name, clusterSyncNsN.Name)
		meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
			Type:    inventoryv1alpha1.ConditionAssetSyncCompleted,
			Status:  metav1.ConditionFalse,
			Reason:  inventoryv1alpha1.ConditionReasonSyncStatusNotFound,
			Message: err.Error(),
		})
		return false
	}

	foundSyncStatus := foundSyncStatuses[0]
	if foundSyncStatus.ObservedGeneration != foundSyncSet.Generation {
		// The reported status is for an older generation of the SyncSet.
		// NOTE(review): err is nil here, so the %v adds no information.
		klog.Errorf("SyncStatus.ObserveGeneration does not match SyncSet.Generation, %v", err)
		meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
			Type:    inventoryv1alpha1.ConditionAssetSyncStarted,
			Status:  metav1.ConditionFalse,
			Reason:  inventoryv1alpha1.ConditionReasonSyncSetNotApplied,
			Message: "SyncSet not yet been applied",
		})
		return false
	}

	return r.checkHiveSyncStatus(ctx, instance, foundSyncSet, foundSyncStatus)
}

// checkHiveSyncStatus interprets a single SyncStatus for the asset's SyncSet
// and updates the AssetSyncCompleted condition. The SyncSet carries either
// exactly one resource (initial sync) or exactly one patch (post-sync);
// anything else is reported as an unexpected resource count. Returns true
// only when the sync has been applied successfully.
func (r *ReconcileBareMetalAsset) checkHiveSyncStatus(ctx context.Context,
	instance *inventoryv1alpha1.BareMetalAsset,
	syncSet *hivev1.SyncSet,
	syncSetStatus hiveinternalv1alpha1.SyncStatus,
) bool {
	resourceCount := len(syncSet.Spec.Resources)
	patchCount := len(syncSet.Spec.Patches)

	if resourceCount == 1 {
		if syncSetStatus.Result == hiveinternalv1alpha1.SuccessSyncSetResult {
			meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
				Type:    inventoryv1alpha1.ConditionAssetSyncCompleted,
				Status:  metav1.ConditionTrue,
				Reason:  inventoryv1alpha1.ConditionReasonSyncSetAppliedSuccessful,
				Message: "Successfully applied SyncSet",
			})
			return true
		}
		meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
			Type:    inventoryv1alpha1.ConditionAssetSyncCompleted,
			Status:  metav1.ConditionFalse,
			Reason:  inventoryv1alpha1.ConditionReasonSyncSetAppliedFailed,
			Message: fmt.Sprintf("Failed to apply SyncSet with err %s", syncSetStatus.FailureMessage),
		})
		return false
	}

	if patchCount == 1 {
		if syncSetStatus.Result == hiveinternalv1alpha1.SuccessSyncSetResult {
			meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
				Type:    inventoryv1alpha1.ConditionAssetSyncCompleted,
				Status:  metav1.ConditionTrue,
				Reason:  inventoryv1alpha1.ConditionReasonSyncSetAppliedSuccessful,
				Message: "Successfully applied SyncSet",
			})
			return true
		}
		meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
			Type:    inventoryv1alpha1.ConditionAssetSyncCompleted,
			Status:  metav1.ConditionFalse,
			Reason:  inventoryv1alpha1.ConditionReasonSyncSetAppliedFailed,
			Message: fmt.Sprintf("Failed to apply SyncSet with err %s", syncSetStatus.FailureMessage),
		})
		// If the patch target no longer exists, delete the SyncSet so it can
		// be rebuilt on the next reconcile.
		if strings.Contains(syncSetStatus.FailureMessage, "not found") {
			if r.client.Delete(ctx, syncSet) != nil {
				klog.Errorf("Failed to delete syncSet %v", instance.Name)
			}
		}
		return false
	}

	err := fmt.Errorf(
		"unexpected number of resources found on SyncSet. Expected (1) Found (%v)",
		resourceCount,
	)
	meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
		Type:    inventoryv1alpha1.ConditionAssetSyncCompleted,
		Status:  metav1.ConditionFalse,
		Reason:  inventoryv1alpha1.ConditionReasonUnexpectedResourceCount,
		Message: err.Error(),
	})
	return false
}

// deleteSyncSet handles finalization: it flips the SyncSet into "patch" mode
// so Hive stops owning the BareMetalHost, waits for the ClusterSync to apply,
// then deletes the SyncSet. The finalizer is removed once there is nothing
// left to clean up.
func (r *ReconcileBareMetalAsset) deleteSyncSet(ctx context.Context, instance *inventoryv1alpha1.BareMetalAsset) (reconcile.Result, error) {
	// No cluster referenced: nothing was synced, just drop the finalizer.
	if instance.Spec.ClusterDeployment.Namespace == "" && instance.Spec.ClusterDeployment.Name == "" {
		instance.ObjectMeta.Finalizers = remove(instance.ObjectMeta.Finalizers, BareMetalAssetFinalizer)
		return reconcile.Result{}, r.client.Update(ctx, instance)
	}

	syncSet := r.newHiveSyncSet(instance, false)
	foundSyncSet := &hivev1.SyncSet{}
	err := r.client.Get(ctx, types.NamespacedName{Name: syncSet.Name, Namespace: syncSet.Namespace}, foundSyncSet)
	if err != nil {
		if errors.IsNotFound(err) {
			// SyncSet already gone: safe to remove the finalizer.
			instance.ObjectMeta.Finalizers = remove(instance.ObjectMeta.Finalizers, BareMetalAssetFinalizer)
			return reconcile.Result{}, r.client.Update(ctx, instance)
		}
		klog.Errorf("Failed to get Hive SyncSet (%s/%s) in cleanup, %v", syncSet.Namespace, syncSet.Name, err)
		return reconcile.Result{}, err
	}

	// Only update the SyncSet if the BareMetalHost is not defined in the
	// Resources section
	if len(foundSyncSet.Spec.SyncSetCommonSpec.Resources) == 0 {
		foundSyncSet.Spec = syncSet.Spec
		return reconcile.Result{}, r.client.Update(ctx, foundSyncSet)
	}

	// Don't delete the SyncSet until the ClusterSync is applied
	if r.checkHiveClusterSync(ctx, instance) {
		return reconcile.Result{}, r.client.Delete(ctx, syncSet)
	}
	return reconcile.Result{}, nil
}

// cleanupOldHiveSyncSet deletes a stale SyncSet left behind in a previous
// cluster-deployment namespace and drops it from status.RelatedObjects.
func (r *ReconcileBareMetalAsset) cleanupOldHiveSyncSet(ctx context.Context, instance *inventoryv1alpha1.BareMetalAsset) error {
	// If clusterDeployment.Namespace is updated to a new namespace or removed from the spec, we need to
	// ensure that existing syncset, if any, is deleted from the old namespace.
	// We can get the old syncset from relatedobjects if it exists.
	hscRef := corev1.ObjectReference{}
	for _, ro := range instance.Status.RelatedObjects {
		if ro.Name == instance.Name &&
			ro.Kind == "SyncSet" &&
			ro.APIVersion == hivev1.SchemeGroupVersion.String() &&
			ro.Namespace != instance.Spec.ClusterDeployment.Namespace {
			hscRef = ro
			break
		}
	}
	if hscRef == (corev1.ObjectReference{}) {
		// Nothing to do if no such syncset was found
		return nil
	}

	// Delete syncset in old namespace
	klog.Infof("Cleaning up Hive SyncSet in old namespace (%s/%s)", hscRef.Name, hscRef.Namespace)
	err := r.client.Delete(ctx, &hivev1.SyncSet{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: hscRef.Namespace,
			Name:      hscRef.Name,
		},
	})
	if err != nil {
		// Already-deleted is fine; anything else aborts the reconcile step.
		if !errors.IsNotFound(err) {
			klog.Errorf("Failed to delete Hive SyncSet (%s/%s), %v", hscRef.Name, hscRef.Namespace, err)
			return err
		}
	}

	// Remove SyncSet from related objects
	err = objectreferencesv1.RemoveObjectReference(&instance.Status.RelatedObjects, hscRef)
	if err != nil {
		klog.Errorf("Failed to remove reference from status.RelatedObjects, %v", err)
		return err
	}
	return nil
}

// Checks whether a string is contained within a slice
func contains(slice []string, s string) bool {
	for _, item := range slice {
		if item == s {
			return true
		}
	}
	return false
}

// Removes a given string from a slice and returns the new slice
func remove(slice []string, s string) (result []string) {
	for _, item := range slice {
		if item == s {
			continue
		}
		result = append(result, item)
	}
	return
}
<gh_stars>0 import java.io.File; import java.io.FileNotFoundException; import java.util.ArrayList; import java.util.Scanner; public class SMTester { public static void main (String[] args) { SMInstance ex1 = new SMInstance("example-1-instance.txt"); Matching matching1 = new Matching(ex1, "example-1-matching-1.txt"); Matching matching2 = new Matching(ex1, "example-1-matching-2.txt"); System.out.println("Current SMInstance:\n" + ex1); System.out.println("Testing the stability of the following matching:\n" + matching1); ex1.setMatching(matching1); if (ex1.isStable()) { System.out.println("The matching is stable!"); } else { System.out.println("The matching is not stable: " + ex1.getBlockingPair() + " is a blocking pair!\n"); } System.out.println("Testing the stability of the following matching:\n" + matching2); ex1.setMatching(matching2); if (ex1.isStable()) { System.out.println("The matching is stable!"); } else { System.out.println("The matching is not stable: " + ex1.getBlockingPair() + " is a blocking pair!\n"); } SMInstance ex2 = new SMInstance("example-2-instance.txt"); System.out.println("Current SMInstance:\n" + ex2); System.out.println("Computing stable matching..."); ex2.computeStableMatching(); System.out.println(ex2.getMatching()); SMInstance ex3 = new SMInstance("example-3-instance.txt"); System.out.println("Current SMInstance:\n" + ex3); System.out.println("Computing stable matching..."); ex3.computeStableMatching(); System.out.println(ex3.getMatching()); } }
Presumptive incoming House Intelligence Committee Chairman Adam Schiff (D-Calif.) said on CBS Tuesday that the outgoing GOP majority has "done nothing" to provide oversight of President Trump's alleged unethical and illegal behavior. "What we've seen in the last few weeks, it's actually quite shocking," Schiff said. Schiff said that ex-Trump attorney Michael Cohen's cooperation with prosecutors and the arraignment that followed show that Trump was "secretly negotiating" with the Kremlin about a real estate deal "all the way into the middle of [the campaign season]." The Framingham, Mass. native added that "the Kremlin was helping with the coverup." Host Stephen Colbert asked Schiff what he plans to do when he takes the committee gavel from Rep. Devin Nunes (R-Calif.) in 2019. "Republicans have done nothing to oversee any of the allegations of malfeasance and that stops now," Schiff said. He said that Trump's relationship with German financial giant DeutscheBank is suspect, claiming that major American banks wouldn't do business with the Trump organization, but the Frankfurt-based bank did. Schiff said the New York State government fined DeutscheBank hundreds of millions of dollars for "laundering Russian money." Schiff said Trump can no longer demand that his personal finances be kept secret, noting that Trump Organization CFO Allen Weisselberg is cooperating with special counsel Robert Mueller. "He is not in a position to draw red lines," Schiff warned.
// Segment tree over a[1..n] that answers, for a range [L, R], how many
// subarrays [i, j] (L <= i <= j <= R) have bitwise OR of their elements >= bz,
// and supports point assignment a[pos] = v.
//
// Each node stores:
//   sum - number of qualifying subarrays fully inside the node's range
//   pre - run-length list of (count, OR value) for prefixes of the range;
//         the OR of a prefix is monotonically non-decreasing as the prefix
//         grows, so there are only O(log(max a)) distinct OR values
//   bac - the same, for suffixes of the range
// Merging two children adds the cross-boundary pairs (suffix of left,
// prefix of right) whose combined OR reaches bz.
//
// NOTE(review): `struct data` shares its name with C++17 std::data under
// `using namespace std;` — fine on older standards, worth renaming otherwise.
#include<bits/stdc++.h>
#define fr(i,x,y) for(int i=x;i<=y;++i)
#define frl(i,x,y) for(int i=x;i<y;++i)
#define rf(i,x,y) for(int i=x;i>=y;--i)
#define ls (x<<1)
#define rs (x<<1|1)
#define fi first
#define se second
#define mp make_pair
#define ll long long
using namespace std;
const int N=1e5+10;
struct data{
	ll sum;                       // qualifying subarrays inside this node
	vector<pair<ll,int> > pre,bac; // (count, OR) runs for prefixes / suffixes
	data(){
		sum=0;
		pre.clear();
		bac.clear();
	}
}tr[N<<2];
int n,m,bz;
int a[N];

// Count pairs (suffix of A, prefix of B) whose ORs combine to >= bz,
// weighted by how many positions each run represents.
ll get(data A,data B){
	ll ans=0;
	int lena=A.bac.size(),lenb=B.pre.size();
	frl(i,0,lena) frl(j,0,lenb) {
		if((A.bac[i].se|B.pre[j].se)>=bz) {
			ans+=A.bac[i].fi*B.pre[j].fi;
		}
	}
	return ans;
}

// Merge two adjacent nodes A (left) and B (right).
data up(data A,data B){
	data res;
	res.sum=A.sum+B.sum;
	res.sum+=get(A,B); // subarrays crossing the boundary
	// Merge prefix runs: A's prefixes carry over unchanged; B's prefixes are
	// extended by OR-ing with the OR of all of A (A.pre.back()).
	int lena=A.pre.size(),lenb=B.pre.size();
	frl(i,0,lena) {
		res.pre.push_back(A.pre[i]);
	}
	frl(i,0,lenb) {
		pair<ll,int> ss=res.pre.back();
		int pos=A.pre[lena-1].se|B.pre[i].se;
		ll tmp=B.pre[i].fi;
		if(pos==ss.se) {
			res.pre.back().fi+=tmp; // same OR value: extend the last run
		}
		else {
			res.pre.push_back(mp(tmp,pos));
		}
	}
	// Merge suffix runs symmetrically (B's suffixes unchanged, A's extended
	// by the OR of all of B).
	lena=A.bac.size(),lenb=B.bac.size();
	frl(i,0,lenb) {
		res.bac.push_back(B.bac[i]);
	}
	frl(i,0,lena) {
		pair<ll,int> ss=res.bac.back();
		int pos=B.bac[lenb-1].se|A.bac[i].se;
		ll tmp=A.bac[i].fi;
		if(pos==ss.se) {
			res.bac.back().fi+=tmp;
		}
		else {
			res.bac.push_back(mp(tmp,pos));
		}
	}
	return res;
}

// Build the tree over [l, r], reading leaf values from stdin.
void build(int x,int l,int r){
	if(l==r) {
		scanf("%d",&a[l]);
		if(a[l]>=bz) tr[x].sum=1; // single element already qualifies
		tr[x].pre.push_back(mp(1,a[l]));
		tr[x].bac.push_back(mp(1,a[l]));
		return ;
	}
	int mid=(l+r)>>1;
	build(ls,l,mid),build(rs,mid+1,r);
	tr[x]=up(tr[ls],tr[rs]);
}

// Point assignment a[L] = v, rebuilding the path to the root.
void Change(int x,int l,int r,int L,int v){
	if(l==r) {
		if(v>=bz) tr[x].sum=1;
		else tr[x].sum=0;
		tr[x].pre.clear();
		tr[x].bac.clear();
		tr[x].pre.push_back(mp(1,v));
		tr[x].bac.push_back(mp(1,v));
		return ;
	}
	int mid=(l+r)>>1;
	if(L<=mid) Change(ls,l,mid,L,v);
	else Change(rs,mid+1,r,L,v);
	tr[x]=up(tr[ls],tr[rs]);
}

// Range query: returns the merged node for [L, R].
data Ask(int x,int l,int r,int L,int R){
	if(L<=l&&r<=R) {
		return tr[x];
	}
	int mid=(l+r)>>1;
	if(R<=mid) return Ask(ls,l,mid,L,R);
	else if(L>mid) return Ask(rs,mid+1,r,L,R);
	else {
		return up(Ask(ls,l,mid,L,R),Ask(rs,mid+1,r,L,R));
	}
}

// Input: n m bz, then n leaf values, then m operations:
//   "2 y z" -> print the count for range [y, z]
//   otherwise (op 1) -> assign a[y] = z
int main(){
	scanf("%d%d%d",&n,&m,&bz);
	build(1,1,n);
	fr(i,1,m) {
		int x,y,z;
		scanf("%d%d%d",&x,&y,&z);
		if(x==2) {
			printf("%lld\n",Ask(1,1,n,y,z).sum);
		}
		else {
			Change(1,1,n,y,z);
		}
	}
	return 0;
}
A brief survey of Machine Learning Methods in Identification of Mitochondria Proteins in Malaria Parasite. The number of human deaths caused by malaria is increasing day by day. In fact, the mitochondrial proteins of the malaria parasite play vital roles in the organism. For developing effective drugs and vaccines against infection, it is necessary to accurately identify mitochondrial proteins of the malaria parasite. Although precise details for the mitochondrial proteins can be provided by biochemical experiments, these experiments are expensive and time-consuming. In this review, we summarize the machine learning-based methods for mitochondrial protein identification in the malaria parasite and compare the construction strategies of these computational methods. Finally, we also discuss the future development of mitochondrial protein recognition with machine learning algorithms.
/*
 * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "FeatureUnification.h"
#include "IRUtils.h"

#include <memory>

#include <set>
#include <vector>

#include <cassert>

using std::make_unique;

namespace
{

// A layout is "static" when its ID is one of the fixed, known layouts
// (BHWC or BCHW); only such features are safe to unify.
bool is_static_layout(const coco::FeatureLayout::ID *id)
{
  if (id == coco::FeatureLayouts::BHWC::uid())
  {
    return true;
  }
  if (id == coco::FeatureLayouts::BCHW::uid())
  {
    return true;
  }
  return false;
}

bool is_static_layout(const coco::FeatureLayout *l) { return is_static_layout(l->id()); }
bool is_static_layout(const coco::FeatureObject *f) { return is_static_layout(f->layout()); }

/**
 * @brief Return true if a given 'feature' is the candidate of unification
 */
bool candidate(const coco::FeatureObject *f) { return is_static_layout(f); }

/**
 * @brief Return true if two features are compatible
 *
 * Two features are referred to as compatible if these feature are interchangeable.
 *
 * NOTE The current implementation of "compatible" is sound, but incomplete.
 *
 * Soundness:
 *   For all feature objects "lhs" and "rhs" that "compatible(lhs, rhs)" returns true,
 *   "lhs" and "rhs" are interchangeable.
 *
 * Completeness:
 *   For all interchangeable feature objects "lhs" and "rhs", "compatible(lhs, rhs)" returns true.
 */
bool compatible(const coco::FeatureObject *lhs, const coco::FeatureObject *rhs)
{
  assert(candidate(lhs) && candidate(rhs));

  // Interchangeable only when layout kind, batch, and shape all agree.
  if (lhs->layout()->id() != rhs->layout()->id())
  {
    return false;
  }
  if (lhs->layout()->batch() != rhs->layout()->batch())
  {
    return false;
  }
  if (!(lhs->layout()->shape() == rhs->layout()->shape()))
  {
    return false;
  }
  return true;
}

/**
 * @brief A FeatureGroup denotes a group of FeatureObject(s)
 *
 * Each FeatureGroup includes at most 1 DEF FeatureObject (a FeatureObject that has a producer),
 * and may include multiple USE FeatureObject(s) (a FeatureObject that has no producer).
 *
 * NOTE FeatureUnification pass internally uses this FeatureGroup to store a group of compatible
 *      FeatureObject(s)
 */
class FeatureGroup
{
public:
  explicit FeatureGroup(coco::FeatureObject *feature) { insert(feature); }

public:
  uint32_t size(void) const { return _uses.size() + (_def ? 1 : 0); }

public:
  void insert(coco::FeatureObject *feature)
  {
    if (feature->def() != nullptr)
    {
      // At most one DEF feature per group, by construction.
      assert(_def == nullptr);
      _def = feature;
    }
    else
    {
      _uses.insert(feature);
    }
  }

public:
  // The representative of the group: the DEF feature if one exists,
  // otherwise an arbitrary USE feature.
  coco::FeatureObject *parent(void) const
  {
    if (_def)
    {
      return _def;
    }
    assert(_uses.size() > 0);
    return *(_uses.begin());
  }

public:
  // All group members except the representative; these are the objects that
  // will be substituted away.
  std::set<coco::FeatureObject *> children(void) const
  {
    auto res = _uses;
    res.erase(parent());
    return res;
  }

private:
  coco::FeatureObject *_def = nullptr;
  std::set<coco::FeatureObject *> _uses;
};

} // namespace

namespace enco
{

// For each bag, cluster the compatible candidate FeatureObject(s) that depend
// on it, then replace every non-representative member with the group's
// representative and destroy the now-unused object.
void unify_feature(enco::Code *code)
{
  auto m = code->module();
  for (uint32_t n = 0; n < m->entity()->bag()->size(); ++n)
  {
    std::vector<std::unique_ptr<FeatureGroup>> groups;

    auto assign_group = [&](coco::FeatureObject *feature) {
      // Find a compatible FeatureGroup
      FeatureGroup *group = nullptr;
      for (const auto &g : groups)
      {
        FeatureGroup *candidate = g.get();
        if (!compatible(candidate->parent(), feature))
        {
          continue;
        }
        group = candidate;
        break;
      }

      if (group == nullptr)
      {
        // Insert FeatureObject into a new FeatureGroup
        groups.emplace_back(make_unique<FeatureGroup>(feature));
      }
      else
      {
        // Insert FeatureObject into the compatible FeatureGroup
        group->insert(feature);
      }
    };

    auto bag = m->entity()->bag()->at(n);
    for (auto o : coco::dependent_objects(bag))
    {
      if (auto feature = o->asFeature())
      {
        if (candidate(feature))
        {
          assign_group(feature);
        }
      }
    }

    for (const auto &g : groups)
    {
      auto group = g.get();
      for (const auto child : group->children())
      {
        // Redirect all references from 'child' to the representative, then
        // verify nothing still uses it before destroying it.
        subst(child, group->parent());
        assert(child->def() == nullptr);
        assert(child->uses()->size() == 0);
        m->entity()->object()->destroy(child);
      }
    }
  }
}

} // namespace enco
/**
 * \function igraph_sparsemat_count_nonzerotol
 * Count nonzero elements of a sparse matrix, ignoring elements close to zero
 *
 * Count the number of matrix entries whose absolute value is strictly
 * greater than \p tol. Duplicate entries are collapsed first, so \p A may
 * be modified by this call.
 * \param A The input matrix, column-compressed.
 * \param tol Real scalar, the tolerance.
 * \return The number of entries with magnitude greater than \p tol, or an
 *         igraph error code if collapsing duplicate entries fails.
 *
 * Time complexity: TODO.
 */
long int igraph_sparsemat_count_nonzerotol(igraph_sparsemat_t *A,
                                           igraph_real_t tol) {
    int i, n;
    long int res = 0;      /* long int so the count matches the return type */
    igraph_real_t *ptr;

    /* Collapse duplicate entries so each position is counted once.
       IGRAPH_CHECK returns the error code from this function on failure. */
    IGRAPH_CHECK(igraph_sparsemat_dupl(A));

    ptr = A->cs->x;
    /* nz == -1 marks a column-compressed matrix; its entry count is p[n]. */
    n = A->cs->nz == -1 ? A->cs->p[A->cs->n] : A->cs->nz;

    /* An empty matrix (n == 0) simply skips the loop and returns 0. */
    for (i = 0; i < n; i++, ptr++) {
        if (*ptr < -tol || *ptr > tol) {
            res++;
        }
    }
    return res;
}
Australian Medical Students' Association History In 1960, medical students from around Australia met in Brisbane for their first national conference. Never before had Australian medical students come together as one body to discuss ideas, share information and voice their opinions and concerns. Since then, the Australian Medical Students' Association has grown and evolved into one of Australia's largest student representative bodies. AMSA National Convention AMSA National Convention is the largest student-run conference in the world and brings together 1,000 medical students from across Australia and New Zealand. Since its inception in 1960, Convention has grown and evolved to be the highlight of the medical student calendar. The Convention program consists of 4 inspiring academic days, one sports day including the annual Emergency Medical Challenge and 5 themed social evenings culminating in the Gala Ball. In its 61st year, Convention will descend on Melbourne from the 29th of June 2020. AMSA Global Health Conference Originally known as the Developing World Conference, and established in 2005 by members of the Australian Medical Students' Association, the Global Health Conference is a meeting of medical students from around Australia that aims to fulfill the desire of medical students to discuss broader issues relevant to global health. The inaugural conference hosted 200 delegates in Sydney, whilst the 2006 conference in Perth saw more than 250 students attend. In 2007, Adelaide took the conference to new heights, hosting over 300 delegates at an event that sold out nationwide in less than 12 hours. The academic programme is filled with speakers from many disciplines, including workers from Médecins Sans Frontières, health advisors to not-for-profit organisations, politicians and many more; while the social programme allows students to meet like-minded individuals from around the country.
The Global Health Conference aims to educate and empower medical students with knowledge about global health. The 2019 AMSA Global Health Conference was held in Sydney, NSW and the 2020 Conference will be in Gold Coast, QLD. AMSA National Leadership Development Seminar AMSA's National Leadership Development Seminar (NLDS) is held each year in the nation's capital, Canberra. Running over three days, the seminar is an exclusive conference on the medical student calendar with only 80-100 students selected to attend. The seminar aims to bring together some of the brightest minds and challenge and develop their leadership and opinions. Vampire Cup The Vampire Cup is AMSA's national blood drive run in association with the Australian Red Cross. The 22 Australian medical societies compete for the prestigious cup with the society with the highest number of donations per capita being awarded victory. In 2015, there were 1797 donations nationally with Deakin University Medical Society (MeDUSA) winning the Vampire Cup with 249 donations, the highest number of donations in the history of Vampire Cup. This is the fifth year in a row that Deakin has claimed the Cup.
How Important Is Health Inequality for Lifetime Earnings Inequality? Health and earnings are positively correlated and this is for several reasons. First, individuals who are in poor health are significantly less likely to work than healthy individuals. Second, conditional on working, individuals in poor health work fewer hours on average. Third, individuals in poor health earn lower wages on average. We document these facts using an objective measure of health called a frailty index which we construct for PSID respondents. The frailty index measures the fraction of observable health deficits an individual has. In previous work, we documented that health, as measured by the frailty index, deteriorates more rapidly and has a larger increase in dispersion with age than self-reported health. It is also more persistent over the life-cycle. These facts put together suggest that health inequality over the life cycle may be an important driver of lifetime earnings inequality. To assess this claim we develop a model of the joint dynamics of health and earnings over the life cycle. Individuals in the model face health, earnings and unemployment risk, and optimally choose labor supply on both the intensive and extensive margin. Agents are partially insured against these risks through government-run unemployment and disability insurance programs. We give agents in the model a dynamic process for frailty (health) that is estimated using the PSID data. Because of selection concerns, agents' productivity processes, including the contribution of frailty to productivity, are estimated using the model and a method of moments estimation. Targeted moments are constructed off distributions of wages, hours, and participation by frailty and age. These distributions are obtained from an auxiliary simulation model that is estimated using PSID data. We find that health inequality can account for a significant share of the variation in lifetime earnings among 70 year-olds. 
Most of this effect is due to the fact that unhealthy individuals exit the labor force at much younger ages than healthy ones. We find that health inequality has a larger impact on earnings inequality than found in previous literature, for two reasons. First, our model is the first in this literature that allows health to impact earnings through all three margins: participation, hours, and wages (productivity). Second, previous literature measured health using self-reported health status and thus understated the extent to which health deteriorates with age for some individuals.
package profile import ( "bytes" "fmt" "sort" "strings" "text/tabwriter" "time" ) // Profile is a collection of profiling events that were collected by a // profiler. type Profile struct { Events []Event } func (p Profile) String() string { var buf bytes.Buffer w := tabwriter.NewWriter(&buf, 5, 4, 3, ' ', tabwriter.AlignRight) evts := p.Events sort.Slice(evts, func(i, j int) bool { return strings.Compare(evts[i].Name, evts[j].Name) < 0 }) firstEvt, lastEvt := evts[0], evts[0] for _, evt := range evts { if evt.Start.Before(firstEvt.Start) { firstEvt = evt } if evt.Start.After(lastEvt.Start) { lastEvt = evt } } buckets := make(map[string][]Event) for _, evt := range evts { buckets[evt.Name] = append(buckets[evt.Name], evt) } _, _ = fmt.Fprintf(w, "%v\t\t%v\t%v\t%v\t%v\t\n", "event", "calls", "min", "avg", "max") for _, bucketEvts := range buckets { totalDuration := 0 * time.Second minBucketDur := bucketEvts[0].Duration maxBucketDur := bucketEvts[0].Duration for _, bucketEvt := range bucketEvts { totalDuration += bucketEvt.Duration if bucketEvt.Duration < minBucketDur { minBucketDur = bucketEvt.Duration } if bucketEvt.Duration > maxBucketDur { maxBucketDur = bucketEvt.Duration } } avgBucketDur := totalDuration / time.Duration(len(bucketEvts)) _, _ = fmt.Fprintf(w, "%v\t\t%v\t%v\t%v\t%v\t\n", bucketEvts[0].Name, len(bucketEvts), minBucketDur, avgBucketDur, maxBucketDur) } _ = w.Flush() return buf.String() }
#include "gramma.h"
#include <iostream>

using namespace std;

void showDeveloper();

int main()
{
	showDeveloper();

	GrammaTable gt;
	string line;

	cout << "Please input grammas.\n";
	cout << "Input blank line to stop input.\n";
	getline(cin, line);

	// Read grammar rules until a blank line is entered or the first
	// invalid rule is rejected by the table.
	while (line.length())
	{
		if (gt.insert(line))
		{
			cout << "Invalid input at line: " << gt.currentLineCount() << ".\n";
			break;
		}
		getline(cin, line);
	}

	// Build the analysis tables and show them.
	gt.generate();
	cout << "Output:\n";
	gt.output();

	// Interactive parse loop: a blank line exits.
	for (;;)
	{
		cout << "\nInput a line to parse, input blank line to stop.\n";
		getline(cin, line);
		if (!line.length())
		{
			break;
		}
		gt.parse(line);
	}

	system("pause");
}

// Print the program banner with project attribution.
void showDeveloper()
{
	cout << "*************************************************************\n"
		 << " Compiler-Grammatical-Analyzer(CGA)\n"
		 << " Written By DiscreteTom\n"
		 << " See source code and report BUG at\n"
		 << "https://github.com/DiscreteTom/Compiler-Grammatical-Analyzer\n"
		 << "*************************************************************\n\n";
}
Review of Preoperative Magnetic Resonance Imaging (MRI) in Breast Cancer: Should MRI Be Performed on All Women with Newly Diagnosed, Early Stage Breast Cancer? Randomized controlled trials have shown equivalent survival for women with early stage breast cancer who are treated with breast-conservation therapy (local excision and radiotherapy) or mastectomy. Decades of experience have demonstrated that breast-conservation therapy provides excellent local control based on defined standards of care. Magnetic resonance imaging (MRI) has been introduced in preoperative staging of the affected breast in women with newly diagnosed breast cancer because it detects additional foci of cancer that are occult on conventional imaging. The median incremental (additional) detection for MRI has been estimated as 16% in meta-analysis. In the absence of consensus on the role of preoperative MRI, we review data on its detection capability and its impact on treatment. We outline that the assumptions behind the adoption of MRI, namely that it will improve surgical planning and will lead to a reduction in re-excision surgery and in local recurrences, have not been substantiated by trials. Evidence consistently shows that MRI changes surgical management, usually from breast conservation to more radical surgery; however, there is no evidence that it improves surgical care or prognosis. Emerging data indicate that MRI does not reduce re-excision rates and that it causes false positives in terms of detection and unnecessary surgery; overall there is little high-quality evidence at present to support the routine use of preoperative MRI. Randomized controlled trials are needed to establish the clinical, psychosocial, and long-term effects of MRI and to show a related change in treatment from standard care in women newly affected by breast cancer. CA Cancer J Clin 2009;59:290-302. © 2009 American Cancer Society, Inc. 
Introduction The remarkable evolution of breast cancer surgery from the radical mastectomy advocated by William Halsted to cosmetically appealing breast conservation has been championed by women and pioneering surgeons, 1 and the safety and benefits of this approach have ultimately been confirmed through high-level scientific evidence. Randomized trials and subsequent experience with breast-conservation therapy (local excision and radiotherapy) have clearly established its efficacy, with a low, long-term risk of local (in-breast) recurrence, typically 0.5% to 1% per year. The increasing adoption of performing preoperative magnetic resonance imaging (MRI) scans in women newly diagnosed with early stage (stage I and II) breast cancer, for the purpose of identifying additional (occult) foci of disease within the affected breast, has been based on assumptions that MRI's detection capability in this setting will improve surgical treatment (and, hence, outcomes) in the absence of evidence of incremental clinical benefit. These assumptions, which are further discussed in relation to available evidence, are that: preoperative MRI will improve surgical planning (or precision), thus leading to a reduction in re-excision surgery, and MRI will reduce in-breast recurrences by guiding surgical intervention for MRI-detected additional disease. Emerging data show that this approach to local staging of the breast leads to more women being treated with mastectomy without improvement in surgical outcomes or prognosis. Thus, it is timely and imperative to consider both current evidence and a way forward in determining the role of preoperative MRI. We review the data on breast MRI in women newly affected by breast cancer, highlighting the evidence on its potential benefit and associated harm, and discuss implications for management of women with early stage disease. 
We systematically discuss each of the key issues relevant to addressing the clinical question raised in the title of our article, based on summaries of published data, complemented by our perspectives on the evidence in the context of contemporary standards of care in breast cancer. Background Since the 1970s, studies have demonstrated that women with early stage breast cancer who are felt to have a single and resectable tumor as determined by clinical examination and conventional imaging have additional foci of cancer (foci other than the index cancer) on histology in about 20% to 60% of affected breasts. The multifocal (additional cancer in the same quadrant as the index cancer) and multicentric (additional cancer in a different quadrant to the index cancer) nature of breast cancer has long been identified by pathologists. However, the equivalence of mastectomy and breast-conserving therapy, which uses wide excision of the apparent tumor to achieve tumor-free margins, followed by breast radiation for long-term outcomes, such as metastasis-free and overall survival, has been established in several randomized controlled trials with decades of followup. These trials were based on conventional assessment (clinical, mammographic, and pathology correlation) with the assumption (and demonstration in randomized controlled trials) that unrecognized foci of disease would be eradicated by subsequent adjuvant radiotherapy. These randomized trials, and results from uncontrolled trials from several institutions, have demonstrated that the risk of local (inbreast) recurrence at 10 years after breast-conserving surgery and radiation is usually less than or equal to 10%. In centers with a decade or more of experience with breast-conserving therapy, this risk rarely exceeds 5%. 
10 -15 The ability to achieve long-term, local control in women with early stage breast cancer who opted to have breast conservation and radiation therapy was demonstrated well before the introduction of breast MRI. Magnetic Resonance Imaging Remarkable advances in MRI technology have allowed sensitive detection and anatomic definition of cancer, and the introduction of MRI in several aspects of breast cancer diagnosis and management. These situations include screening women at high risk of breast cancer, 16,17 selective "problem-solving", or adjunct diagnosis where standard clinical and imaging evaluation do not provide a clear diagnosis, 18 imaging of breast silicone implants, 19 and monitoring response to neoadjuvant (primary), systemic therapy in locally advanced disease. 18 In this overview, we focus on MRI in the specific setting of preoperative evaluation of women who are considering breastconserving treatment after having received an established, new diagnosis of early stage breast cancer, and the application of MRI to identify additional foci of disease other than the proven index cancer (MRIdetected multifocal and/or multicentric cancer). This approach to staging the affected breast has been increasingly adopted in countries that have developed health care systems, and there are very divergent views on the merits and ramifications of preoperative MRI. 19 -23 Furthermore, this approach is now being extended to screen the contralateral, clinically unaffected breast. 24 Although we applaud any intervention, be it preventive, diagnostic, or therapeutic, that improves care and/or prognosis in breast cancer, we have concerns that the adoption of preoperative breast MRI has been based on assumptions (as outlined in the Introduction) rather than on evidence of improved patient outcomes. 
These concerns, and accumulating evidence on the potential for MRI to lead to worse clinical outcomes by virtue of unnecessary, more radical, breast surgery, are discussed to help clinicians judge the evidence on preoperative MRI and to clarify why we believe that randomized controlled trials are needed in this clinical context. Breast-Conserving Therapy Confers Equivalent Survival to Mastectomy For the first 80 years of the last century, mastectomy was considered the treatment of choice for women with newly diagnosed, early stage breast cancer. The long-term safety of breast-conserving surgery in the treatment of breast cancer has since been proven for more than 3 decades in randomized controlled trials and in meta-analysis of trials. 27 These have all shown equivalent long-term survival in women treated with breast-conserving surgery or mastectomy. 27 Primary tumor excision alone has, for most patient groups, been associated with high risks of local (in-breast) recurrence. Randomized controlled trials that compared breast-conserving surgery alone to breast-conserving surgery with radiation therapy have shown that the risk of local recurrence in those patients who receive radiotherapy is significantly reduced; an overview of all existing trials shows that radiation therapy provides a 70% proportional reduction in local recurrences with a 10-year risk of local recurrence of approximately 10%. 28 Furthermore, these trials demonstrate a small, but significant, reduction in mortality for the patients who received radiation. 28 Ten-year, local, recurrence rates of 5% to 10% are now reported in nonrandomized studies in many settings. 10 -15 In this regard, adjuvant radiotherapy plays a key role in achieving local control in women treated with breast-conserving surgery. 
Thus, the goal of breast-conserving therapy (breastconserving surgery with adjuvant radiotherapy) is to achieve good local control, and to provide women who wish to conserve their breast a good cosmetic outcome. Standards of care for breast cancer have been defined in clinical guidelines and at consensus meetings, have been advocated by experts, 29 -34 and pro-vide women with early stage disease a choice between breast-conserving surgery and mastectomy. Selection for breast-conserving surgery is assisted by a large body of knowledge on clinical and histological factors that may increase risk of in-breast recurrence despite radiation and may help identify women who may not receive the best outcome from breast-conserving surgery. These include involvement of surgical margins, the presence of extensive cancer identified clinically and/or on mammography, or the presence of locally advanced cancer. 29, When histology demonstrates cancer cells at the margins of the initial excision (surgical biopsy), then treatment may include re-excision surgery followed by radiotherapy. When clear margins cannot be achieved, mastectomy may be indicated. Whereas breast conservation confers the same survival outcomes as mastectomy, it is associated with a higher (but generally low) risk of local recurrence than mastectomy. 5,6 Breast-conserving surgery has the advantage of improved psychosocial health in relation to body image and sexuality. 38 -40 Therefore, a strong recommendation for mastectomy over breast-conserving surgery, or the introduction of interventions leading to forgoing the opportunity for breast-conserving surgery, must be given very carefully and should be based on evidence that this will improve clinical outcomes. (Figs. 1-3). 
Metaanalysis of all observational studies of preoperative MRI has shown that the median prevalence of detection of additional foci of cancer within the affected breast is 16% (interquartile range, 11% to 24%) based on 2,610 women with recently diagnosed cancer. 25 In Table 1, we present evidence for MRI's incremental (additional) detection on the basis of studies that have quantified both detection of additional cancer foci (ordered from the highest to the lowest proportion of incremental detection) and their distribution in the affected breast (multifocal or multicentric). 41,42,45,46,53-56,58 -61 Experts 20,21 have pointed out that the detection of additional malignant disease by MRI parallels the distribution that has been recognized in landmark histological studies, namely that the vast majority of additional cancer foci are within the same quadrant as the index cancer. 3 If this were the case, then it would be an indicator that MRI is unlikely to contribute to improvement of clinical outcomes, because the additional MRI-detected disease would reflect that which has been treated successfully with radiation therapy for almost 4 decades. Our summary of the evidence (Table 1) shows diverse estimates of percentages of patients in whom additional, multifocal, or multicentric cancer was detected by preoperative breast MRI. In-cremental MRI-only detection varies between approximately 1% and 28% for multifocal cancer and between 2% and 15% for multicentric cancer. 41,42,45,46,53-56,58 -61 It may be argued that the variability in reported data for MRI's detection of additional cancer foci is a reflection of changes in MRI technology. In a recent systematic review, Warren et al 62 examined MRI technical parameters (such as slice thickness, or number of sequences after administration of contrast medium) and found that neither technical variables nor study time-frame were significantly associated with MRI's incremental detection in this clinical context. 
62 It is these findings on MRI's detection capability that have led to widespread adoption of preoperative MRI in early stage breast cancer, because, in theory, detection and removal of these previously unrecognized cancer deposits would lead to improved outcomes, either with regard to surgical planning or with regard to fewer locoregional recurrences, or even fewer distant metastases and deaths. However, in addition to detecting previously occult cancer foci, MRI is also associated with false-positive findings-the pooled estimate of true-positive to falsepositive MRI-detection was 1.9:1 in a recent overview. 25 Thus, regardless of the clinical significance of the cancer deposits detected by MRI, patients should be informed of the increased costs and the additional diagnostic procedures (which may include further imaging, needle and/or surgical biopsy, or secondopinion consultations) that this approach will entail. Women should also be informed of surgical implications including potential impact on cosmetic outcome, as will be further outlined. The Impact of Preoperative MRI on Surgical Treatment and Planning Because of the enhanced detection of previously occult tumor deposits compared with routine imaging evaluation, MRI has been applied in preoperative breast staging, particularly in North America and in some European countries. MRI has been adopted in this setting on the basis of assumptions outlined earlier in this article, despite the absence of evidence demonstrating clinical benefit. Specifically, the assumption that MRI will improve surgical care by helping to plan the extent of local resection of the tumor, thus avoiding the need for re-excision surgery, is not supported by data. In Table 2, we summarize the evidence from studies reporting surgical outcomes attributable to preoperative MRI in women with newly diagnosed breast cancer. 
Data on the effect of MRI on re-excision surgery is limited to evidence from 1 randomized controlled trial 63 and 2 observational (retrospective) studies. 26,64 None of these studies demonstrated that preoperative MRI improves surgical planning or precision as shown in the data ( Table 2). The only evidence from a randomized trial on the impact of MRI on surgical planning comes from 1 randomized controlled trial that was designed to measure the effect of MRI on re-excision rates as its primary endpoint, and which has been reported only in abstract form. 63 In this trial (COMICE, Comparative Effec- 3. The same patient, a woman aged 43 years who had a nodular density on screening mammography, had contrast-enhanced MRI, which showed the proven index cancer (larger on MRI than other imaging) as well as a second, smaller mass (circled) about 5 cm more anterior and lateral than the index cancer. This patient was treated with mastectomy. Histology of the mastectomy specimen confirmed the index cancer, an ILC with LCIS measuring 3 cm, and the second MRI-only-detected tumor was a 1-cm ILC. Sentinel-node biopsy and 4 nonsentinel nodes were negative for metastases. The MRI technique was performed to obtain, after intravenous injection of 0.1 mmol/kg Magnevist, sagittal maximum-intensity pixel projection (MIP), 3-dimensional (3-D) reconstructions of the breast from the second postcontrast subtraction images. Subtraction images were acquired 90 seconds after contrast administration. A 3-D, T1-weighted, fat-suppressed sequence (TR 6.3, TE1.5) was obtained with a slice thickness of 1 mm (512 512 matrix; 20-cm field of view ). Images were provided by Wendie Berg, MD, PhD. tiveness of MRI in Breast Cancer), 1,625 women scheduled for breast-conserving surgery were randomly assigned to preoperative evaluation with MRI or not. 
63 Re-excision rates were almost the same 63 in women randomized to receive conventional assessment (19.3%) or to receive MRI in addition to conventional assessment (18.8%); P.8. Two additional, nonrandomized studies have also recently reported that MRI was not associated with a significant reduction in positive margins after local excision 26,64 (Table 2). In 1 of these *All studies were nonrandomized: P, prospective; R, retrospective; NR, not reported and unclear. Subject total in denominator may differ from number of initial subjects in each study because of eligibility for inclusion in analysis of MRI-only incremental detection, as defined by Houssami et al. 25 Cases with additional detection in both the same and a different quadrant are included as multicentric. §Two cases not clearly specified in the same quadrant but are probably multifocal. Estimate may include 2 subjects with lesions in the contralateral breast; however, these have not been included in numbers for multifocal or multicentric detection. The above findings are underscored by data from a recent meta-analysis of published, nonrandomized studies of MRI in preoperative breast evaluation. 25 This meta-analysis emphasized that MRI evaluation of the affected breast in women newly diagnosed with breast cancer may increase potentially unnecessary surgery ( Table 2). 25 Pooled estimates of the impact of MRI on surgical treatment, based on change in surgery attributed to MRI-detection in 12 primary studies 41-44,46,48 -54 (providing data for 1,908 women) showed that 11.3% (95% CI, 6.8 -18.3) had more extensive surgery than initially planned, either mastectomy or wider resection of the preserved breast. 25 Although perhaps less drastic than unnecessary mastectomy, the latter may compromise cosmetic outcome, because this is significantly affected by the volume of tissue removed. 
29,38 Pooled estimates showed that 8.1% (95% CI, 5.9 -11.3) of all women eligible for breast-conserving surgery were treated with mastectomy because of MRI-only detection of additional disease. 25 A further 5.5% (95% CI, 3.1-9.5) of women had more extensive surgery (wider excision or mastectomy) because of false-positive findings on MRI including 1.1% (95% CI, 0.3-3.6) who were converted to mastectomy. 25 Table 2 illustrates the impact of MRI in individual studies and is partly based on this overview. 25 At the least, these pooled results suggest that if MRI is performed, the false-positive rate dictates that abnormal findings should be investigated with image-guided needle biopsy to establish a diagnosis before surgical treatment. REPORTED IMPACT ON SURGICAL PLANNING IN WOMEN WHO HAD ROUTINE ASSESSMENT VERSUS THOSE WHO ALSO HAD MRI FROM STUDIES OF WOMEN PLANNED FOR BREAST-CONSERVATION SURGERY Of note, the pooled estimates for incremental MRI detection and related change in surgical treatment are derived from random effects logistic-regression models. 25 This method takes into account both the withinstudy variability and the between-study variability; hence, smaller studies will have less weight in the overall estimate than larger studies. 25 Point estimates for MRIattributable conversion of surgery may, therefore, seem conservative relative to the study-specific data shown in Table 2. Study-specific data on conversion to mastectomy (counting both true-positive and false-positive MRI detection) range from 3.6% to 33.3% (Table 2), so the impact of MRI on surgical treatment will vary between breast services. In summary, with regard to surgical care, there is consistent evidence that MRI changes surgical management (generally from breast conservation to more radical surgery); however, there is no evidence that it improves surgical treatment or outcomes. 
The expectation that MRI would help decrease the need for re-excision surgery is, thus, not supported by existing evidence (based on a limited number of studies), whereas data in Table 2 demonstrate evidence of conversion to mastectomy attributed to preoperative MRI (based on a larger body of evidence). Overall, there is growing evidence that MRI does not improve surgical care, and it could be argued that it has a potentially harmful effect. Does Preoperative MRI Improve Long-Term Outcomes? The second assumption about preoperative MRI is that by identifying foci of cancer, which would have remained occult on the basis of standard assessment, and ensuring surgical removal of MRI-detected, additional disease, MRI will potentially reduce in-breast recurrence. This assumption has not been addressed in randomized trials, and data from 2 observational studies have reported conflicting findings. In a retrospective study, Fischer and colleagues 65 (University of Gottingen) reported that women evaluated by MRI had a significantly lower local recurrence rate (1.2%) at 40 months than women who had standard preoperative assessment (6.8%; P.01). This study is limited by imbalances in surgical treatment and adjuvant systemic therapy between the groups being compared, which would be expected to bias estimate-of-effect in favor of MRI. In a second, nonrandomized study, Solin and colleagues 53 (University of Pennsylvania) compared longer term outcomes in women who had or had not received preoperative MRI in a well-defined clinical cohort of women with early stage breast cancer. They observed that MRI staging was not associated with any differences in the 8-year rates for the following (had MRI vs did not have MRI): any local recurrence (3% vs 4%; P.51), local-only first site of recurrence (3% vs 4%; P.32), overall survival (86% vs 87%; P.51), cause-specific survival (94% vs 95%; P.63), or freedom from distant metastases (89% vs 92%; P.16). 
53 Perspectives on the Evidence Our view is that the evidence on preoperative MRI indicates that it is of little benefit for the average woman with newly diagnosed, early stage breast cancer. It does not appear to improve surgical planning (as had been previously assumed), and there is very limited and inconsistent evidence on its long-term impact on clinical outcomes. Taken as a whole, there are 2 persistent concerns based on current evidence. First, the technical false positives cause unnecessary diagnostic biopsies that may compromise cosmesis and may further raise anxiety in patients who are already, by virtue of their recent diagnosis, under stress. Second, and perhaps more important, is the concern that although MRI detects previously unrecognized, but pathologically confirmed, cancer deposits, these deposits may be biologically and clinically irrelevant in a patient who will undergo standard excision and breast irradiation. Because the overall, long-term, localrecurrence rates for breast-conserving surgery using standard breast imaging and pathology criteria are routinely less than 10%, the 15% to 20% 25,41,42,45,46, detection rate of additional cancer foci reported in MRI series clearly underestimates or ignores the beneficial effects of postoperative breast irradiation, and overestimates the risk of subsequent in-breast recurrence if the test was not performed. The assumption that detection and surgical treatment of previously occult tumor deposits is beneficial also ignores the effect of systemic therapy, which has been shown to reduce the risk of local recurrence in women treated with breast-conserving surgery. 29 Pertinent to this discussion is that the small risk of in-breast relapse is present long term, with trials reporting cumulative incidence rates between 8.8% and 14.3% at 20-year follow-up. 
5,6 Most of the inbreast recurrences occurring after the first 10 years post-breast-conserving therapy are believed to be new primary breast cancers 66 and not cancers recurring as a result of therapeutic failure. Because these late recurrences are not biologically present on initial diagnosis, an informed choice for women, some of whom may prefer to have mastectomy for their treatment to reduce the very small but relentless risk of in-breast recurrence, is unrelated to preoperative MRI evaluation. There may be a potential role for preoperative MRI in assisting patient selection for partial breast irradiation. However, at present, the results of randomized controlled trials evaluating the efficacy and long-term safety of partial breast irradiation as a therapeutic option in early stage breast cancer are unavailable. 67 What Are the Data for the Contralateral (Unaffected) Breast? We have focused on MRI in evaluation of the affected breast. In addition, the adoption of pretreatment MRI for screening the contralateral (unaffected) breast in newly diagnosed women warrants discussion. Studies have shown that MRI detects synchronous, contralateral breast cancer that is not detected clinically or with conventional imaging in approximately 1% to 18% of newly affected women. 24,41-44,49,50,52,53,68 -70 It is also associated with false-positive findings, 24,49,50,52,53, which may necessitate further imaging, needle biopsy, and/or surgical biopsy. In a systematic review of the evidence (22 observational studies) on MRI screening of the contralateral breast in 3,253 women with an established, invasive cancer of the affected breast, Brennan et al 71 recently identified the following outcomes: MRI-only-detected abnormalities (true-positives and false-positives) were identified in 9.3% (95% CI, 5.8 -14.7) of women. 
Less than half of these MRI-detected lesions were cancers, so the estimated, incremental cancer detection rate for contralateral breast cancer, based on pooled data, was 4.1% (95% CI, 2.7-6.0). cancers) showed that most of the MRI-detected, contralateral breast cancers were node-negative (pN0 17; pNmi 1; pNx 3). Mastectomy was performed in 10 women with a positive contralateral MRI who did not have a definitive diagnosis: 3 of these 10 women had malignancy in the mastectomy specimen; the remaining 7 had only benign breast changes. 71 Summary data reported by Brennan et al 71 confirm MRI's increased detection capability for the contralateral breast, although these authors cautioned that most primary studies in their systematic review did not include consecutive women, so their data were prone to selection bias. 71 Selection bias would be expected to overestimate MRI's detection yield. 71 The question of whether this "upfront" detection for the contralateral breast provides a clinical benefit cannot be answered from current evidence-there are several complex issues that factor into the interpretation of data on MRI in the context of contralateral breast cancer. The cumulative incidence rates for metachronous, contralateral breast cancer (in women with a past history of breast cancer) after 10 years of routine follow-up are less than or equal to 5% in contemporary series. 10,72 In one of the largest population studies of contralateral breast cancer with long-term follow-up, Gao et al 73 reported actuarial rates for contralateral breast cancer of 3% at 5 years, 6.1% at 10 years, 9.1% at 15 years, and 12% at 20 years. 74 This risk produces an annualized incidence rate for contralateral breast cancer in the range 0.4% to 0.6% in women with a history of breast cancer. 
The epidemiology of contralateral breast cancer is increasingly modified by the use of highly effective systemic therapies, including endocrine therapy (particularly in postmenopausal women) and chemotherapy, which may either prevent contralateral breast cancer or inhibit its progress. 72,74 The majority of MRI-detected contralateral breast cancer appears to be early stage disease as indicated in a recent overview of contralateral MRI in newly affected women, 71 and a considerable proportion is pure ductal carcinoma in situ. 71 Whether early detection of contralateral breast cancer in this specific scenario confers benefit in women whose prognosis may be largely determined by an established, invasive cancer is unknown. The only relevant evaluation, based on the observational study of Solin and colleagues, 53 found no significant difference in the 8-year rates of contralat-eral breast cancer between women who had or did not have preoperative breast MRI staging. If, as we discuss next, randomized trials of preoperative MRI are conducted, the incidence of contralateral breast cancer should be examined as a clinical endpoint allowing sufficient years of follow-up. Randomized controlled trials would also allow valid estimation of the effect of MRI of the contralateral breast on prognosis by removing the biases inherent in cancer screening, 75 specifically lead-time and length bias. Randomized Studies Are Needed Before We Modify Standards of Care in Breast Cancer The appearance of novel medical technologies, whether directed toward therapy or diagnosis, at times creates enthusiasm over potential clinical utility and adoption of the technology with the assumption, albeit without evidence, that clinical outcomes are improved. Demonstrating a test's detection yield or capability does not equate with evidence on clinical utility and does not constitute evidence that it improves clinical decisions or patient outcomes. 
The current article does not challenge the role of MRI in screening unaffected women with genetically high-risk profiles 16,17 or for specific clinical indications where it provides valuable information, 18,79 for example, in evaluation of women who present with axillary-node metastases without obvious, primary breast cancers on clinical or mammographic evaluation. 19 However, routine use of preoperative MRI in women with established, early stage breast cancer should be discouraged until (and if) high levels of evidence demonstrate that preoperative MRI either improves surgical care, reduces the number of required surgeries, or (more importantly) that it reduces at least local recurrence, if not distant metastases and death due to breast cancer. Appropriate evaluation of the impact of MRI in local staging of the breast should be determined through well-designed, randomized controlled trials to quantify potential benefit and harm, including careful evaluation of its impact on quality of life. One may argue that the incidence of in-breast recurrence is so low already that the size of such a trial would be prohibitive and impractical to conduct. We do not disagree with this perception, which could be stated as the primary reason that incorporation of MRI into routine management of patients contemplating breast-conserving therapy may be unwarranted. Surprisingly, preoperative MRI has already been incorporated into clinical practice in the absence of highlevel evidence of its clinical utility. We argue that, as for any new medical intervention in the evidencebased era, efforts should be directed to evaluations that generate high-level evidence to clearly define the role of MRI (if any) in this setting and to guide future practice. 
We, therefore, estimate that approximately 6,600 women would need to be accrued to a randomized controlled trial to determine whether use of preoperative MRI reduces 10-year local recurrence rates in early stage breast cancer by 20% (or 2,900 women for a 30% reduction), assuming a baseline 10% prevalence of local recurrence. If local recurrence rates are only 5%, the sample size would need to be 14,000 women (or 6,000 for a 30% reduction). If one wishes to use an even more important clinical endpoint, such as distant recurrence and/or breast-cancer mortality, the estimated numbers of required patients would be substantially higher. Alternately, it would be reasonable (and more feasible in terms of patient recruitment) to assume that randomized controlled trials demonstrating a relative reduction in localrecurrence rates of the magnitude hypothesized in the above estimates would provide a surrogate indicator of a reduction in breast-cancer deaths. We acknowledge that logistics and costs of conducting such large-scale, multicenter trials are enormous. If the technology is truly as beneficial as its proponents claim, then these costs are worth it. If it is not, then they are outweighed by the costs of adopting expensive technology and associated intervention without evidence of clinical benefit. The history of breast cancer treatment over the last century is riddled with examples in which expertsupported, presumably "better" treatment has been proven to be nonbeneficial or even harmful in appropriately designed trials. These include radical mastectomy versus modified radical mastectomy, mastectomy versus breast-conservation treatment, and high-dose chemotherapy with bone-marrow transplantation versus standard-dose chemotherapy, to name a few past dogmas in breast-cancer care. In each clinical scenario, the randomized controlled trial has provided answers that have guided the way toward evidence-based, effective patient care.
package com.mark.test.framework.api.dto; import org.apache.commons.lang3.builder.ReflectionToStringBuilder; import java.io.Serializable; /** * Created by mark . * Data : 2017/11/29 * Author : mark * Desc : */ public class DbCompareRequestDto implements Serializable { //原数据 private String sourceDbName; private String sourceDbIp; private String sourceDbPort; private String sourceDbUser; private String sourceDbPasswd; //目标数据 private String targetDbName; private String targetDbIp; private String targetDbPort; private String targetDbUser; private String targetDbPasswd; private String tableType; public String getSourceDbName() { return sourceDbName; } public void setSourceDbName(String sourceDbName) { this.sourceDbName = sourceDbName; } public String getTargetDbName() { return targetDbName; } public void setTargetDbName(String targetDbName) { this.targetDbName = targetDbName; } public String getTableType() { return tableType; } public void setTableType(String tableType) { this.tableType = tableType; } public String getSourceDbPort() { return sourceDbPort; } public void setSourceDbPort(String sourceDbPort) { this.sourceDbPort = sourceDbPort; } public String getSourceDbUser() { return sourceDbUser; } public void setSourceDbUser(String sourceDbUser) { this.sourceDbUser = sourceDbUser; } public String getSourceDbPasswd() { return sourceDbPasswd; } public void setSourceDbPasswd(String sourceDbPasswd) { this.sourceDbPasswd = sourceDbPasswd; } public String getTargetDbPort() { return targetDbPort; } public void setTargetDbPort(String targetDbPort) { this.targetDbPort = targetDbPort; } public String getTargetDbUser() { return targetDbUser; } public void setTargetDbUser(String targetDbUser) { this.targetDbUser = targetDbUser; } public String getTargetDbPasswd() { return targetDbPasswd; } public void setTargetDbPasswd(String targetDbPasswd) { this.targetDbPasswd = targetDbPasswd; } public String getSourceDbIp() { return sourceDbIp; } public void setSourceDbIp(String sourceDbIp) { 
this.sourceDbIp = sourceDbIp; } public String getTargetDbIp() { return targetDbIp; } public void setTargetDbIp(String targetDbIp) { this.targetDbIp = targetDbIp; } @Override public String toString() { return ReflectionToStringBuilder.reflectionToString(this); } }
// while conversation from JSON to object @Override public T read(JsonReader reader) throws IOException { T returnValue; try { returnValue = adapter.read(reader); } catch (UnknownHostException e) { throw new InvalidAddressException(); } if (returnValue instanceof IpAddress) { int port = ((IpAddress) returnValue).getPort(); if (port > 65535 || port < 1) { throw new InvalidAddressException(); } } return returnValue; }
<gh_stars>0 import pymel.core as pm import constant as con import pymel.core.datatypes as dt class Placer: def __init__(self, color = "cyan", side= "center", size= 1.0, pos= dt.Vector(0,0,0), n= "placer"): self.color = con.colour_dict[color] self.side = con.side_dict[side] #visit later (left side, right side, front, back, top, bottom) self.size = size #self.parent = #self.transform = self.shape = pm.sphere(po = 0, r = self.size ) pm.xform(self.shape, ws= True, t= pos)
/*
 *
 * Copyright (c) 2010 <NAME> <<EMAIL>>
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU Lesser General Public License as published by the Free
 * Software Foundation; either version 2.1 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
 * details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
 */
package actor.add;

import java.net.URISyntaxException;

import org.junit.Before;
import org.junit.Test;

import pcgen.cdom.base.UserSelection;
import pcgen.cdom.content.CNAbilityFactory;
import pcgen.cdom.enumeration.Nature;
import pcgen.cdom.helper.CNAbilitySelection;
import pcgen.cdom.reference.CDOMDirectSingleRef;
import pcgen.core.Ability;
import pcgen.core.AbilityCategory;
import pcgen.core.Globals;
import pcgen.core.Language;
import pcgen.core.PlayerCharacter;
import pcgen.core.SettingsHandler;
import pcgen.persistence.PersistenceLayerException;
import pcgen.rules.context.LoadContext;
import pcgen.testsupport.AbstractCharacterUsingTestCase;
import plugin.lsttokens.AddLst;
import plugin.lsttokens.add.FeatToken;
import plugin.lsttokens.testsupport.TokenRegistration;

/**
 * Tests for {@link FeatToken}: round-tripping of CNAbilitySelection choices
 * (encode/decode) and the allow() behaviour for ADD:FEAT choices as selections
 * are progressively applied to a PlayerCharacter.
 */
public class FeatTokenTest extends AbstractCharacterUsingTestCase
{

	// Token parsers registered in testWithChoose() so "ADD:FEAT|..." LST
	// syntax can be processed by the load context.
	private static final AddLst ADD_TOKEN = new plugin.lsttokens.AddLst();
	private static final FeatToken ADD_FEAT_TOKEN = new plugin.lsttokens.add.FeatToken();

	// Actor under test (choice encoding/decoding and selection gating).
	static FeatToken pca = new FeatToken();

	// Load context shared by the tests; reset to a clean state in setUp().
	protected LoadContext context;

	@Override
	@Before
	public void setUp() throws PersistenceLayerException, URISyntaxException
	{
		// Start from a fresh rules context and ensure the FEAT category exists.
		SettingsHandler.getGame().clearLoadContext();
		context = Globals.getContext();
		context.getReferenceContext().importObject(AbilityCategory.FEAT);
		// new RuntimeLoadContext(new RuntimeReferenceContext(),
		// new ConsolidatedListCommitStrategy());
	}

	@Test
	public void testEncodeChoice()
	{
		// A FEAT/NORMAL selection encodes to the pipe-delimited persistent form.
		Ability item = construct("ItemName");
		CNAbilitySelection as =
				new CNAbilitySelection(CNAbilityFactory.getCNAbility(AbilityCategory.FEAT,
					Nature.NORMAL, item));
		assertEquals("CATEGORY=FEAT|NATURE=NORMAL|ItemName", pca
			.encodeChoice(as));
	}

	@Test
	public void testDecodeChoice()
	{
		// Decoding must fail while the referenced Ability does not exist yet.
		try
		{
			pca.decodeChoice(context, "CATEGORY=FEAT|NATURE=NORMAL|ItemName");
			fail();
		}
		catch (IllegalArgumentException e)
		{
			// OK
		}
		// Once the Ability is constructed, decode returns an equal selection.
		Ability item = construct("ItemName");
		CNAbilitySelection as =
				new CNAbilitySelection(CNAbilityFactory.getCNAbility(AbilityCategory.FEAT,
					Nature.NORMAL, item));
		assertEquals(as, pca
			.decodeChoice(context, "CATEGORY=FEAT|NATURE=NORMAL|ItemName"));
	}

	@Test
	public void testWithChoose()
	{
		try
		{
			setUpPC();
			//Need to make sure we use the character related context
			context = Globals.getContext();
			TokenRegistration.register(ADD_TOKEN);
			TokenRegistration.register(ADD_FEAT_TOKEN);
		}
		catch (PersistenceLayerException e1)
		{
			fail("Cannot set up PC");
		}
		// "ChooseAbility" is the feat under test; "Parent" grants it via ADD:FEAT.
		Ability item = construct("ChooseAbility");
		Ability parent = construct("Parent");
		// Languages serve as the CHOOSE pool for the feat.
		context.getReferenceContext().constructCDOMObject(Language.class, "Foo");
		context.getReferenceContext().constructCDOMObject(Language.class, "Bar");
		context.getReferenceContext().constructCDOMObject(Language.class, "Goo");
		context.getReferenceContext().constructCDOMObject(Language.class, "Wow");
		context.getReferenceContext().constructCDOMObject(Language.class, "Rev");
		// "Fighter Feat" is a sub-category of FEAT; selections made there must
		// still count against the FEAT-based choice pool.
		AbilityCategory ff =
				context.getReferenceContext().constructCDOMObject(AbilityCategory.class,
					"Fighter Feat");
		ff.setAbilityCategory(CDOMDirectSingleRef.getRef(AbilityCategory.FEAT));
		// badCA shares the name "ChooseAbility" but lives in an unrelated
		// category, so applying it must not affect the FEAT-based allow() checks.
		AbilityCategory oc =
				context.getReferenceContext().constructCDOMObject(AbilityCategory.class,
					"Some Other Category");
		Ability badCA =
				context.getReferenceContext().constructCDOMObject(Ability.class,
					"ChooseAbility");
		context.getReferenceContext().reassociateCategory(oc, badCA);
		try
		{
			assertTrue(context.processToken(item, "CHOOSE",
				"LANG|Foo|Bar|Goo|Wow|Rev"));
			assertTrue(context.processToken(item, "MULT", "Yes"));
			assertTrue(context.processToken(badCA, "CHOOSE",
				"LANG|Foo|Bar|Goo|Wow|Rev"));
			assertTrue(context.processToken(badCA, "MULT", "Yes"));
			assertTrue(context.processToken(parent, "ADD", "FEAT|ChooseAbility"));
		}
		catch (PersistenceLayerException e)
		{
			e.printStackTrace();
			fail();
		}
		PlayerCharacter pc = new PlayerCharacter();
		Object source = UserSelection.getInstance();
		finishLoad(context);
		// Selections of each language, under various natures/categories.
		CNAbilitySelection badCACAS =
				new CNAbilitySelection(CNAbilityFactory.getCNAbility(oc,
					Nature.AUTOMATIC, badCA), "Foo");
		CNAbilitySelection fooCAS =
				new CNAbilitySelection(CNAbilityFactory.getCNAbility(AbilityCategory.FEAT,
					Nature.AUTOMATIC, item), "Foo");
		CNAbilitySelection barCAS =
				new CNAbilitySelection(CNAbilityFactory.getCNAbility(AbilityCategory.FEAT,
					Nature.VIRTUAL, item), "Bar");
		CNAbilitySelection gooCAS =
				new CNAbilitySelection(CNAbilityFactory.getCNAbility(AbilityCategory.FEAT,
					Nature.NORMAL, item), "Goo");
		CNAbilitySelection wowCAS =
				new CNAbilitySelection(CNAbilityFactory.getCNAbility(AbilityCategory.FEAT,
					Nature.NORMAL, item), "Wow");
		CNAbilitySelection wowFFCAS =
				new CNAbilitySelection(CNAbilityFactory.getCNAbility(ff,
					Nature.NORMAL, item), "Wow");
		CNAbilitySelection revCAS =
				new CNAbilitySelection(CNAbilityFactory.getCNAbility(AbilityCategory.FEAT,
					Nature.NORMAL, item), "Rev");
		CNAbilitySelection revFFCAS =
				new CNAbilitySelection(CNAbilityFactory.getCNAbility(ff,
					Nature.NORMAL, item), "Rev");
		// Nothing applied yet: every selection is still allowed.
		assertTrue(pca.allow(fooCAS, pc, false));
		assertTrue(pca.allow(barCAS, pc, false));
		assertTrue(pca.allow(gooCAS, pc, false));
		assertTrue(pca.allow(wowCAS, pc, false));
		assertTrue(pca.allow(revFFCAS, pc, false));
		pc.applyAbility(badCACAS, source);
		//Should have had no effect
		assertTrue(pca.allow(fooCAS, pc, false));
		assertTrue(pca.allow(barCAS, pc, false));
		assertTrue(pca.allow(gooCAS, pc, false));
		assertTrue(pca.allow(wowCAS, pc, false));
		assertTrue(pca.allow(revFFCAS, pc, false));
		// Each applied selection removes only that language from the pool,
		// regardless of nature (AUTOMATIC/VIRTUAL/NORMAL) or FEAT sub-category.
		pc.applyAbility(fooCAS, source);
		assertFalse(pca.allow(fooCAS, pc, false));
		assertTrue(pca.allow(barCAS, pc, false));
		assertTrue(pca.allow(gooCAS, pc, false));
		assertTrue(pca.allow(wowCAS, pc, false));
		assertTrue(pca.allow(revFFCAS, pc, false));
		pc.applyAbility(barCAS, source);
		assertFalse(pca.allow(fooCAS, pc, false));
		assertFalse(pca.allow(barCAS, pc, false));
		assertTrue(pca.allow(gooCAS, pc, false));
		assertTrue(pca.allow(wowCAS, pc, false));
		assertTrue(pca.allow(revFFCAS, pc, false));
		pc.applyAbility(gooCAS, source);
		assertFalse(pca.allow(fooCAS, pc, false));
		assertFalse(pca.allow(barCAS, pc, false));
		assertFalse(pca.allow(gooCAS, pc, false));
		assertTrue(pca.allow(wowCAS, pc, false));
		assertTrue(pca.allow(revFFCAS, pc, false));
		// Applying via the "Fighter Feat" sub-category blocks the FEAT selection too.
		pc.applyAbility(wowFFCAS, source);
		assertFalse(pca.allow(fooCAS, pc, false));
		assertFalse(pca.allow(barCAS, pc, false));
		assertFalse(pca.allow(gooCAS, pc, false));
		assertFalse(pca.allow(wowCAS, pc, false));
		assertTrue(pca.allow(revFFCAS, pc, false));
		// ...and vice versa: a FEAT application blocks the sub-category selection.
		pc.applyAbility(revCAS, source);
		assertFalse(pca.allow(fooCAS, pc, false));
		assertFalse(pca.allow(barCAS, pc, false));
		assertFalse(pca.allow(gooCAS, pc, false));
		assertFalse(pca.allow(wowCAS, pc, false));
		assertFalse(pca.allow(revFFCAS, pc, false));
	}

	/**
	 * Constructs an Ability with the given name and places it in the FEAT
	 * category of the current context.
	 */
	protected Ability construct(String one)
	{
		Ability obj =
				context.getReferenceContext().constructCDOMObject(Ability.class, one);
		context.getReferenceContext().reassociateCategory(AbilityCategory.FEAT, obj);
		return obj;
	}
}
/// Releases the fastpath lock ("pfex") stored at `lock_offset` inside the
/// VM's memory and wakes every thread queued waiting on it.
///
/// This will crash the process when the value_offset doesn't point to committed memory.
/// While somewhat extreme, it is safe.
pub extern fn pfex_release(lock_offset: u32, vmctx: &VmCtx) {
    // Translate the guest offset into a host pointer to the atomic lock word.
    let lock_ptr: *const Atomic<u32> = vmctx.fastpath_offset_ptr(lock_offset);
    // Safety: validity of the offset is the caller's responsibility (see doc
    // comment above); an invalid offset faults rather than corrupting state.
    let lock = unsafe { &*lock_ptr };
    let user_data = &vmctx.data().user_data;
    // Take the process-wide waiter-queue map lock BEFORE touching the lock
    // word, so the unlock + wake-up below cannot race with a thread enqueueing.
    let mut pfex_map = user_data.process.pfex_map().lock();
    let locked = lock.load(Ordering::Relaxed);
    if locked != 0 {
        // Release ordering publishes the critical section's writes to the
        // next acquirer of the lock.
        lock.store(0, Ordering::Release);
        if let Some(queue) = pfex_map.remove(&lock_offset) {
            unsafe {
                // Drain and resume every waiter registered for this offset.
                while let Some(thread) = queue.pop() {
                    (*thread).resume();
                }
            }
        }
    }
    // at this point, the pfex is unlocked
}
package design.boilerplate.springboot.security.dto; import design.boilerplate.springboot.model.UserRole; import lombok.Getter; import lombok.NoArgsConstructor; import lombok.Setter; import lombok.ToString; import javax.validation.constraints.Email; import javax.validation.constraints.NotEmpty; import javax.validation.constraints.NotNull; /** * * * @author <NAME> */ @Getter @Setter @ToString @NoArgsConstructor public class RegistrationRequest { // @NotEmpty(message = "{registration_name_not_empty}") // private String name; @Email(message = "{registration_email_is_not_valid}") @NotEmpty(message = "{registration_email_not_empty}") private String email; @NotEmpty(message = "{registration_username_not_empty}") private String username; @NotEmpty(message = "{registration_password_not_empty}") private String password; @NotNull(message = "{registration_role_not_empty}") private UserRole role; }
In recent years it has become increasingly popular to cultivate marine species in controlled settings. This cultivation, which is commonly referred to as aquaculture, has allowed the production of a variety of marine species for human consumption. Increasingly, many edible fishes have been produced using aquaculture. While aquaculture has shown marked technological increases, to support the growth of this industry, it is necessary to produce an artificial feed or to increase the harvest of naturally occurring foodstuff such as Artemia, Brachinus salina, Daphnia, etc. All marine life in the seas ultimately depends on microscopic algae for their growth or the growth of their food within the marine food chain. This microscopic algae, which is the first link within the marine food chain, is directly consumed by filter feeders such as shell fish, and indirectly through the complex food chain within the sea by the rest of marine life. Algae grow very slowly, however, as they divide only approximately once a day, and therefore they are not easily available. This lack of availability contributes to a significant increase in the cost of aquaculture products. Attempts to replicate or replace natural foodstuff within aquaculture have been met with limited success. In U.S. Pat. No. 5,158,788 to Lavens et al. (“Lavens”), a method is described to produce a feed for aquaculture from yeast. Lavens entails a multi-step process in which yeast cells are processed by hydrolyzing its cell wall producing a digestible feed for aquaculture. Unfortunately, the multi-step process as suggested by Lavens is labor intensive and therefore not feasible economically. Most importantly, the destruction of the cell wall that is needed to produce this artificial feed causes pollution of the aquaculture by the cell materials contained within the yeast cell. One significant natural food source within the marine food chain that feeds off microscopic algae is Artemia. 
Artemia commonly referred to as brine shrimp is an excellent foodstuff for aquaculture, because of its position within the marine food chain and its desirability as a food source for higher members of marine culture. They are an excellent food for aquaculture, because unlike prior art foods in aquaculture they do not undergo putrefaction by microorganisms and foul water used in aquaculture, but rather they clear the water of fouling micro-organisms. It is commonly known that Artemia can be used as a feed for species such as shrimp, fishes, etc. The natural harvesting of Artemia for their use in aquaculture, however, is subject to environmental factors that have recently led to shortages. Artemia grow in large saline lakes such as the Great Salt Lake in Utah. Artemia have been harvested in the Great Salt Lake for many years. Unfortunately, recent harvests have been poor and the cost of Artemia cysts has increased more than three fold. It is thought that these recent poor harvests have been caused by changing weather patterns. Severe climatic disturbances caused by the warm weather and excessive rainfall that accompanied El Nino caused production levels of the Artemia from the Great Salt Lake to decrease dramatically. The harvest of Artemia cysts in 1995-96 and 1996-97 was approximately 15 million pounds gross weight. Of this total harvest only about fifty percent is suitable for use. The 1997-98 harvest was only approximately 6 million pounds gross weight. Decreases in harvesting of Artemia cysts, such as in the case of El Nino, not only cause problems with availability, but also sharp increases in the price of Artemia cysts. This sharp increase in cost makes the use of Artemia as a feed for aquaculture economically impermissible. Although there are several other sources of Artemia throughout the world, the Great Salt Lake provides more than ninety percent of the world's Artemia cyst consumption. 
While additional sources of Artemia have been found in Russia, Turkey, and China, these additional sources have not offset the declining harvest of the Great Salt Lake. Various methods of producing Artemia within aquaculture have been explored such as in Vietnam, where Artemia have been grown in ponds having an abundance of natural algae. In Hawaii, yeast and greenwater have been used to grow Artemia and rotifers for seahorse and Asian sea bass aquaculture. Unfortunately, these various efforts have been met with limited success as these prior methods of producing Artemia are labor intensive and not economically feasible.
<gh_stars>0
/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
 */

#pragma once
#include <aws/license-manager/LicenseManager_EXPORTS.h>
#include <aws/license-manager/model/ReportFrequencyType.h>
#include <utility>

namespace Aws
{
namespace Utils
{
namespace Json
{
  class JsonValue;
  class JsonView;
} // namespace Json
} // namespace Utils
namespace LicenseManager
{
namespace Model
{

  /**
   * <p>Details on how frequently reports are generated.</p><p><h3>See Also:</h3>
   * <a
   * href="http://docs.aws.amazon.com/goto/WebAPI/license-manager-2018-08-01/ReportFrequency">AWS
   * API Reference</a></p>
   */
  class AWS_LICENSEMANAGER_API ReportFrequency
  {
  public:
    // Serialization boilerplate: construction from / conversion to JSON.
    ReportFrequency();
    ReportFrequency(Aws::Utils::Json::JsonView jsonValue);
    ReportFrequency& operator=(Aws::Utils::Json::JsonView jsonValue);
    Aws::Utils::Json::JsonValue Jsonize() const;


    /**
     * <p>Number of times within the frequency period that a report will be generated.
     * Currently only <code>1</code> is supported.</p>
     */
    inline int GetValue() const{ return m_value; }

    /**
     * <p>Number of times within the frequency period that a report will be generated.
     * Currently only <code>1</code> is supported.</p>
     */
    inline bool ValueHasBeenSet() const { return m_valueHasBeenSet; }

    /**
     * <p>Number of times within the frequency period that a report will be generated.
     * Currently only <code>1</code> is supported.</p>
     */
    inline void SetValue(int value) { m_valueHasBeenSet = true; m_value = value; }

    /**
     * <p>Number of times within the frequency period that a report will be generated.
     * Currently only <code>1</code> is supported.</p>
     */
    inline ReportFrequency& WithValue(int value) { SetValue(value); return *this;}


    /**
     * <p>Time period between each report. The period can be daily, weekly, or
     * monthly.</p>
     */
    inline const ReportFrequencyType& GetPeriod() const{ return m_period; }

    /**
     * <p>Time period between each report. The period can be daily, weekly, or
     * monthly.</p>
     */
    inline bool PeriodHasBeenSet() const { return m_periodHasBeenSet; }

    /**
     * <p>Time period between each report. The period can be daily, weekly, or
     * monthly.</p>
     */
    inline void SetPeriod(const ReportFrequencyType& value) { m_periodHasBeenSet = true; m_period = value; }

    /**
     * <p>Time period between each report. The period can be daily, weekly, or
     * monthly.</p>
     */
    inline void SetPeriod(ReportFrequencyType&& value) { m_periodHasBeenSet = true; m_period = std::move(value); }

    /**
     * <p>Time period between each report. The period can be daily, weekly, or
     * monthly.</p>
     */
    inline ReportFrequency& WithPeriod(const ReportFrequencyType& value) { SetPeriod(value); return *this;}

    /**
     * <p>Time period between each report. The period can be daily, weekly, or
     * monthly.</p>
     */
    inline ReportFrequency& WithPeriod(ReportFrequencyType&& value) { SetPeriod(std::move(value)); return *this;}

  private:

    // Reports generated per period (per the API docs above, only 1 is supported).
    int m_value;
    bool m_valueHasBeenSet;

    // Interval between reports (daily/weekly/monthly per ReportFrequencyType).
    ReportFrequencyType m_period;
    bool m_periodHasBeenSet;
  };

} // namespace Model
} // namespace LicenseManager
} // namespace Aws
<gh_stars>0 import { EventEmitterType } from './EventEmitter'; type callbackType = (param: { [data: string]: any }) => void; const EventHandler = (emitter: EventEmitterType, audioCtx?: AudioContext) => ({ ready(callback: callbackType) { emitter.listener('decoded', callback); }, start(callback: callbackType) { emitter.listener('start', callback); }, end(callback: callbackType) { emitter.listener('end', callback); }, state(callback: callbackType) { if (audioCtx) { audioCtx.onstatechange = () => callback({ data: audioCtx.state }); } }, }); export default EventHandler;
Estimation of reference intervals from small samples: an example using canine plasma creatinine. BACKGROUND According to international recommendations, reference intervals should be determined from at least 120 reference individuals, which often are impossible to achieve in veterinary clinical pathology, especially for wild animals. When only a small number of reference subjects is available, the possible bias cannot be known and the normality of the distribution cannot be evaluated. A comparison of reference intervals estimated by different methods could be helpful. OBJECTIVE The purpose of this study was to compare reference limits determined from a large set of canine plasma creatinine reference values, and large subsets of this data, with estimates obtained from small samples selected randomly. METHODS Twenty sets each of 120 and 27 samples were randomly selected from a set of 1439 plasma creatinine results obtained from healthy dogs in another study. Reference intervals for the whole sample and for the large samples were determined by a nonparametric method. The estimated reference limits for the small samples were minimum and maximum, mean +/- 2 SD of native and Box-Cox-transformed values, 2.5th and 97.5th percentiles by a robust method on native and Box-Cox-transformed values, and estimates from diagrams of cumulative distribution functions. RESULTS The whole sample had a heavily skewed distribution, which approached Gaussian after Box-Cox transformation. The reference limits estimated from small samples were highly variable. The closest estimates to the 1439-result reference interval for 27-result subsamples were obtained by both parametric and robust methods after Box-Cox transformation but were grossly erroneous in some cases. CONCLUSION For small samples, it is recommended that all values be reported graphically in a dot plot or histogram and that estimates of the reference limits be compared using different methods.
Diagnostic power of restingstate fMRI for detection of network connectivity in Alzheimer's disease and mild cognitive impairment: A systematic review Abstract Restingstate fMRI (rsfMRI) detects functional connectivity (FC) abnormalities that occur in the brains of patients with Alzheimer's disease (AD) and mild cognitive impairment (MCI). FC of the default mode network (DMN) is commonly impaired in AD and MCI. We conducted a systematic review aimed at determining the diagnostic power of rsfMRI to identify FC abnormalities in the DMN of patients with AD or MCI compared with healthy controls (HCs) using machine learning (ML) methods. Multimodal support vector machine (SVM) algorithm was the commonest form of ML method utilized. Multiple kernel approach can be utilized to aid in the classification by incorporating various discriminating features, such as FC graphs based on nodes and edges together with structural MRIbased regional cortical thickness and gray matter volume. Other multimodal features include neuropsychiatric testing scores, DTI features, and regional cerebral blood flow. Among AD patients, the posterior cingulate cortex (PCC)/Precuneus was noted to be a highly affected hub of the DMN that demonstrated overall reduced FC. Whereas reduced DMN FC between the PCC and anterior cingulate cortex (ACC) was observed in MCI patients. Evidence indicates that the nodes of the DMN can offer moderate to high diagnostic power to distinguish AD and MCI patients. Nevertheless, various concerns over the homogeneity of data based on patient selection, scanner effects, and the variable usage of classifiers and algorithms pose a challenge for MLbased image interpretation of rsfMRI datasets to become a mainstream option for diagnosing AD and predicting the conversion of HC/MCI to AD. 
| INTRODUCTION Alzheimer's disease (AD) is a neurodegenerative disorder that is characterized by a progressive decrease in cognitive function compared to baseline performance level in one or more cognitive domains that can interfere with the ability to independently carry out activities of daily living (American Psychiatric Association, 2013). Resting-state functional magnetic resonance imaging (rs-fMRI) is a neuroimaging tool used to study the aberrations in the functional activity of different brain networks, which normally occurs in AD and its prodromal condition, mild cognitive impairment (MCI). The functional connectivity (FC) of brain networks refers to inter-regional synchrony, as detected from low-frequency fluctuations in the blood oxygen level dependent (BOLD) fMRI sequence (L. Lee, Harrison, & Mechelli, 2003). FC and other functional features of AD are studied using different molecular imaging techniques such as electroencephalography (EEG), positron emission tomography-computed tomography (PET/CT), and fMRI. Various radiotracers such as glucose analogs and amyloid detecting radiotracers have been utilized for improving the diagnostic accuracy of detecting AD (Suppiah, Didier, & Vinjamuri, 2019). Of these techniques, fMRI remains the most widely used modality because of the relative simplicity of its usage, inherent safety features, noninvasive nature, and high spatial resolution (Mier & Mier, 2015). The default mode network (DMN) is the commonest brain network studied by rs-fMRI and is involved in memory consolidation tasks. It is composed of the precuneus (Prec), posterior cingulate cortex (PCC), retro-splenial cortex, medial parietal cortex (MPC), lateral parietal cortex (LPC), and inferior parietal cortex (IPC), medial prefrontal cortex (mPFC), and the medial temporal gyrus (MTG). Fundamentally, AD patients suffer from impaired DMN connectivity (Grieder, Wang, Dierks, Wahlund, & Jann, 2018). 
There has been consistent evidence of decreased FC in the DMN of AD patients in comparison with HCs, especially between the posterior part of the cerebral cortex (Prec and PCC) and anterior parts, for example, the anterior cingulate cortex (ACC) and mPFC (;;). The observed decline in FC in areas within the DMN has also been reported among MCI patients (;Ouchi & Kikuchi, 2012). This indicates that rs-fMRI detected changes in the DMN can be a noninvasive diagnostic tool for diagnosing AD. In fact, the National Institute on Aging-Alzheimer's Association (NIAAA) has listed rs-fMRI FC as a potential biomarker of neuronal injury, which is at an early stage of validation (). There are several methods to analyze rs-fMRI data, namely, the seed-based analysis (SBA), the independent component analysis (ICA), and the graph theory analysis (GTA). SBA or small region of interest (ROI) analysis enables temporal correlations to be made between hypotheses-based predefined seed regions. The SBA investigates the FC of a specific brain region by correlating the brain region's restingstate time series with the time series of all other regions resulting in the creation of a FC map that identifies the FC of the predefined brain region (T. Jiang, He, Zang, & Weng, 2004). The simplicity and straightforwardness of this seed-dependent analysis coupled with the clarity of the FC map, makes it popular among researchers (). Nevertheless, the knowledge from an FC map is restricted to the FC of a pre-defined region that requires a priori knowledge, making it hard to analyze correlations of FC in whole brain regions. In contrast to SBA, ICA is free from any predefined seed region selection, which means one does not have to pick a seed or reference area beforehand. Hence, the entire BOLD signal is broken down to produce separate time courses and related spatial maps (De Luca, Smith, De Stefano, Federico, & Matthews, 2005). 
The resultant components are assumed to be non-Gaussian signals and are statistically independent of one another. ICA extracts FC information by detecting the patterns of synchronous neural activities between nodes without an a priori knowledge or pre-existing hypothesis. Thus, the signals from various nodes are temporally filtered from a sample dataset to assess the FC between two independent nodes, which is similar to the "cocktail party effect" (Li, Wang, Chen, Cichocki, & Sejnowski, 2017). The ICA algorithm assumes a set of maximally spatially independent brain components (S), each with associated time course signals (X). The model identifies latent sources whose elements (voxels) have the same time course and thus each component can be considered a measure of the degree to which each voxel is functional connected (correlated) to the component-time course. Due to its ability to accommodate whole-brain FC analysis, ICA is favored over SBA. Nevertheless, the disadvantage of ICA is that there is often difficulty in differentiating useful signals from noise and variations in the separate components. Hence, this causes challenges in making between-group comparisons using ICA (Fox & Raichle, 2007). Interestingly, both SBA and ICA can ultimately produce similar results if they are run at different experimental set-ups. Alternatively, GTA looks at the overall brain network structure with specific spatial information. Here, the BOLD signal undergoes spatial parcellation using a topological mapping of the entire brain, and the relationships between all pairs of activated regions involving "nodes" and "edges" are determined. A "node" is a defined area in the brain, whereas "edge" signifies the direct and indirect links or FC between two defined nodes. Additionally, a "hub" is a node that has an integrative role, which reflects the diversity of a region's crossnetwork FCs. 
A "hub" is defined as a node that has a betweenness centrality or eigenvector centrality (ECi) that is larger than the mean plus two standard deviations (mean + 2 SD) across all nodes in a particular region (Hojjati, Ebrahimzadeh, & Babajani-Feremi, 2019). The assessment of the relationship of nodes versus edges of the activated regions is achievable by forming a p p square matrix. The fMRI time signal of all participants, "X" is decomposed into a set of maximally independent components, "S" such that both can be transformed to each other via the mixing matrix "A". Thus, to illustrate the concepts in fMRI, "S" or "components" is a stack of 3D images that will be "mixed" by "A" or "dimensions" that are timepoints by component. Hence, the fMRI signal of all participants, X = A S, whereby S will be the weighted sum of all the components can be calculated to achieve the series of 3D time point images. In the most common "Dual Regression" approach, first a group ICA is run to estimate the "S" for the whole sample. Then, individual analyses are run to estimate the transformation matrix, "W" for each subject. Notably, the components in "S" then represent the common resting-state networks, that is, the DMN and visual-motor networks or physiological noise signals, for example, eyes movement, as well as heart and respiratory motion (J. E. ). Finally, one of the networks can be selected to run a multivariate regression on the individual's time courses to estimate group differences for each voxel (). In this way, the brain is considered as a single complex network where several global and local network topologies such as the path length, modularity of global connectedness, and clustering coefficient can be measured (Rubinov & Sporns, 2010). In GTA, the graph is directly constructed from the Fisher transformed correlation matrix by using each atlas region as the "node" and the z-value as the "edge" weight. 
The Fisher's r-to-z transformation is applied to the elements of the matrix to improve the normality of the correlation coefficients (Thompson & Fransson, 2016 (;). Typically, aMCI and AD groups had decreased FC in the left PCC and left parahippocampal gyrus as compared to HC subjects (). Only AD patients were identified with increased FC at the right middle frontal gyrus (MFG), which was interpreted as a compensatory neural mechanism in response to the impairment of the PCC and middle temporal gyrus (MTG; ). In the PCC, MTG, and MFG regions, MMSE scores showed significant positive and negative associations with FC (;). Therefore, it is evident that most of the FC disruptions of the DMN occurs in the PCC and MTG (;). Nevertheless, as the disease progresses, the FC disturbances spread to other brain regions (Damoiseaux, Prater, Miller, & Greicius, 2012). Since the PCC and other DMN hubs are affected in AD and MCI, the DMN may serve as an important biomarker for the classification of patients with AD and MCI. A recent review paper by Badhwar et al. that was published in 2017, studied various patterns of rs-fMRI detected dysfunctions among patients with AD (). Nevertheless, this systematic review did not report on the accuracy of the test to distinguish the disease state. Subject classifications are made from the FC scores of the rs-fMRI datasets using machine learning (ML) methods. A commonly applied technique is the support vector machines (SVMs) methods that are applied in patient stratification studies to make inter-group classifications (Dyrba, Grothe, Kirste, & Teipel, 2015;A. Lee, Ratnarajah, Tuan, Chen, & Qiu, 2015;). Another approach is to use Gaussian process logistic regression (GP-LR) models (). Diagnostic accuracy can be achieved by computing various classifiers that are selected from discriminating features of the multimodal imaging after performing tests using the training dataset (). 
A popular type of supervised ML is the support vector machine (SVM) method. SVM has been utilized by various researchers to boost the diagnostic results from multimodal imaging by incorporating multiple kernels in its algorithm (;Q. Zhao, Chen, & Zhou, 2016). Apart from SVM, other more sophisticated algorithms such as convolutional neural networks have been used to discriminate between AD and HC (Qureshi, Ryu, Song, Lee, & Lee, 2019). The main goal of this review is to examine the benefits and the issues of applying ML algorithms to assess rs-fMRI datasets for improved diagnostic accuracy of discriminating AD/MCI from HC. We also discuss the limitations of multimodal and multicenter studies, as well as recommend the future direction of research in this field. To the best of the authors' knowledge, this is the only systematic review in the existing literature that is focused on studies that perform rs-fMRI-based classification to detect AD and its prodromal stage. | METHODS AND MATERIALS We first provide some basic definitions with respect to the classifier methods that are utilized in evaluating the diagnostic accuracy of rs-fMRI. This is followed by the study protocol of our systematic review. The study protocol includes the study design, search strategy used when screening the articles from the medical databases, selection criteria for identification of eligible articles, assessment of bias, and data extraction. Next, we present the data using tables in Section 3. Afterward, the technicalities of performing ML, for example, SVM, linear regression, logistic regression, and convolutional neural networks, are described in Section 3.3. We also discuss the similarities and differences among the various articles and propose recommendations for future works. | Study design The systematic review method used to formulate the study design was adopted from Campbell et al.. 
The results of this review are reported based on the Preferred Reporting Items for Systematic Reviews and Meta-Analyses (PRISMA) method (Moher, Liberati, Tetzlaff, Altman, & The PRISMA Group, 2009). We also searched the PROSPERO Database for any on-going reviews similar to this study. This review protocol has been registered with the International Prospective Register of Systematic Reviews (PROSPERO) with the registration number CRD42020181655. Scopus, PubMed, DOAJ, and Google Scholar databases were searched for articles using a combination of keywords using MeSH terms. Our search strategy in the various databases was as follows. SCOPUS: TITLE-ABS-KEY (("resting-state functional MRI" OR "resting-state fMRI" OR rs-fMRI) AND (Alzheimer's disease OR AD OR mild cognitive impairment OR MCI) AND (accuracy OR classification)) DOAJ TS = (("resting-state functional MRI" OR "resting-state fMRI" OR rs-fMRI) AND (Alzheimer's disease OR AD OR mild cognitive impairment OR MCI) AND (accuracy OR classification)) PubMed: (((((("resting-state functional MRI") OR "resting-state fMRI") OR rs-fMRI)) AND ((((Alzheimer's disease) OR AD) OR mild cognitive impairment OR MCI)) AND (((((accuracy) OR "classification")) We sourced relevant published articles through December 3, 2020. The combined articles obtained from the search were screened for duplicates and the resultant articles underwent further screening as highlighted in the subsequent sections (Figure 1). | Inclusion criteria The review paper included published original articles that met the following criteria: peer-reviewed articles written in the English language, the articles were sourced from journal publications until December 3, 2020, and the articles included were observational studies of human subjects, which included case-control, cohort, and cross-sectional studies that utilized rs-fMRI and the DMN to quantify and correlate FC between AD or MCI with HCs. 
Furthermore, the articles must have used established AD or MCI diagnostic criteria, for exam- | Exclusion criteria We excluded articles by the following criteria: (a) Review articles, (b) case reports, (c) case series, (d) articles written in foreign languages, that is, other than the English language, (e) animal studies, and (f) articles with studies using imaging tools other than rs-fMRI, for example, structural MRI, EEG, MEG, or PET. | Data extraction We conducted the literature search using the databases mentioned above. Two of the co-authors (B. I., S. S.) reviewed and independently screened the articles from the search results based on the titles and abstracts for potential inclusion into this review. Only the final screened articles agreed upon by both the authors were considered for the manuscript synthesis. In accordance with the PRISMA protocol, data extracted from each primary study included: author, year, country, number of subjects (patients and controls), age of the subjects, MMSE scores, rs-fMRI imaging protocol, and analysis method, sensitivity scores, and specificity scores. | Description of the articles included Tables 1 and 2 summarize the main characteristics of the selected articles, which had assessed the rs-fMRI diagnostic performance for detecting DMN abnormalities among AD and MCI subjects. The majority (64%) of these articles had ≤20 subjects per group. Five articles (46%) used SBA and three studies (27%) used ICA type of analysis. While one article (9%) utilized both SBA and ICA methods, two articles (18%) used GTA as their method of analysis (Table 3). Interestingly, large percentage of these studies (55%) were conducted in Asia, with China having 5 out of the 7 studies from the region, the other two being from Japan and Korea, respectively. -not rs-fMRI (n=10) -proceedings/ reviews (n=14) -methods design paper (n=8) -interventional study (n=2) -not evaluating AD (n=6) Identification No. 
of articles after full text assessment: -full text not in English (n=3) full text not available (n=10) -did not evaluate diagnostic accuracy of rs-fMRI in AD (n=9) No. of AD only articles included (n=13) No of records after removing duplicates (n=46) No of records remaining after removing: -not rs-fMRI (n=2) -proceedings/ reviews (n=9) -methods design paper (n=4) -not evaluating MCI (n=3) No of articles after full text assessment criteria: -full text not in English (n=2) -did not evaluate diagnostic accuracy of rs-fMRI (n=3) 4 8 60-80 (67.9 ± 4.5) (72.5 ± 7.9) (29.3 ± 1.6) (29.5 ± 0.8) Moderate to severe AD: (73.61 ± 4.76) Very mild to mild AD: (23.84 ± 3.90) Moderate to severe AD: 3.3 | Types of machine learning methods utilized to classify Alzheimer's disease subjects Machine learning (ML) is a form of artificial intelligence application that utilizes computer algorithms. The basis of ML is dependent upon the ability of the computer program to leverage algorithms. Hence, ML can automatically learn and improve from experience gathered on an independent training dataset based on statistical models. There are various computer programming languages for ML such as Python, Java, R, and JavaScript. These programs can perform ML in any one of the three types of ML, which are supervised learning, unsupervised learning, and reinforcement learning. Supervised learning is the basic type of ML that is frequently used in classification for diagnostics and automated image interpretation. This type of learning can also be used to predict an outcome, such as the occurrence of a disease by using regression analysis. To this end, the training dataset needs to be labeled correctly and provides the algorithm with a fundamental concept of the problem, solution, and data points to be dealt with. In rs-fMRI, the data points are the "nodes" and the FC between the "nodes" are called "edges". 
The probability of the number of connections or "edges" arising from the nodes gives the weightage of the FC. A Bayesian network (BN) is a probabilistic graphical model that represents a joint probability distribution over a set of variables. Nodes in a BN graph represent variables of interest, and edges represent the probabilistic associations among variables. In a BN, each node has a conditional-probability distribution, which quantifies the association between that variable and the variables with which it is associ- between patients with mild AD and HCs (). This indicates that even at the early stage of the AD, DMN moderately differentiates AD patients from HCs. Dai et al. Structural MRI, which was used to measure regional gray matter volume rs-fMRI, using amplitude of lowfrequency fluctuations (ALFF), regional homogeneity (ReHo), Framewise displacement (head motion) showed comparable displacements across sites, for example, cognitively impaired patients showed slightly more head motion than controls. The foreground-to-background energy ratio, the fractional ALFF in PCC, and the mean FC between PCC and anterior mPFC indicated no outlying center. tSNR was significantly different between certain centers. (). In several of these studies, rs-fMRI demonstrated a high diagnostic power in classifying abnormalities of the DMN among MCI patients compared to the HCs (Figure 2). | DISCUSSION Although the FC of DMN has been explored as a biomarker for distinguishing patients with AD and MCI from HCs (;;), no compiled review about its diagnostic power has been done before this. The impaired FC of DMN may be analyzed using SBA, ICA, and GTA methods of analyses, and all these methods can be used to classify patients with AD and MCI. To the best of our knowledge, this is the first review to determine the diagnostic power of rs-fMRI to detect impairments in the FC of the DMN, for discriminating AD and MCI subjects from HCs. 
The articles included in this review reported variable diagnostic powers of rs-fMRI in characterizing AD and MCI patients, by using a variety of protocols, that is, measurement of DMN FC alone (;), DMN FC correlated with MRI-measured cortical thickness (;), DMN FC measurements along with other resting-state measures such as DMN FC with PET/CT FC () and DMN FC with regional cerebral blood flow measurements (rCBF; ), respectively. In differentiating AD patients from HCs, most of the primary articles used SBA analysis, all of which reported that AD patients had weaker FC between the PCC and other brain regions (;;;;). This imaging biomarker, that is, the PCC, is able to provide an average sensitivity of 75.2% (ranging between 65.7 and 100%), and an average specificity of 74.9% (ranging between 70 and 95%) for distinguishing patients with AD, hence, indicating a moderate diagnostic power of DMN in differentiating AD patients from HCs. T A B L E 4 (Continued) Author ( 59 brain neural pathways based on a priori knowledge were analyzed 116 nodes were identified and the FC between nodes at paired brain regions was measured by the strength of the linear relationship depicted by r Three linear classifiers: Nave Bayesian (NB); logistic regression; and SVM One decision trees classifier: RF Diagnostic performances were evaluated on a pathway-based approach and a region-based approach SVM classification model gave the best diagnostic accuracies for discriminating MCI from HC, for both the pathwaybased approach and a regionbased approach. ROC curves were plotted The diagnostic performances of the competing methods were analyzed with HMP and without HMP. The best results were achieved with HMP in regression in the multi-spectrum analysis Neuroanatomic volumetric indices were extracted from the segmentation and parcellation output. FC analyzed based on SBA. 
Second network comprised four edges and five nodes, located bilaterally in precuneus as well as in the parahippocampal, fusiform, and superior temporal gyri in the right hemisphere. Third network comprised nine edges and eight nodes, located mostly in the left hemisphere. Khazaee et al. Network-based statistics were performed on the weighted raw rs-fMRI connectivity matrices to identify impaired sub-networks in the MCI-C and MCI-NC groups. First network had two edges and three nodes, specifically one node within the precuneus and the other two nodes within the cerebellum. Second network had three edges and four nodes within the vPFC, anterior insula, VFC, and occipital lobe. Third network had two edges and three nodes within the temporoparietal junction, occipital lobe, and lateral cerebellum. Optimal features based on sMRI data using Destrieux atlas and rs-fMRI data using the Dosenbach atlas gave the best accuracy for discriminating between MCI-C with MCI-NC. Optimal features based on sMRI data using Destrieux atlas and rs-fMRI data using the Their method achieved a high diagnostic power, with a sensitivity of 86% and specificity of 78%, respectively, with better results achieved when using the pathway-based approach compared with the region-based approach for classifying MCI from HCs. In essence, rs-fMRI can detect impairment of the DMN FC and can serve to identify important anatomical biomarkers for discriminating AD and MCI patients from HCs. When combined with other parameters such as cortical thickness, rCBF, or analyzed using combination of multivariate analysis, rs-fMRI has good diagnostic power for detecting AD and MCI. | Limitations and recommendations for future works The relatively small sample size in most of the articles leads to a reduced power of the studies. Restrictions of the studies to only include subjects with early AD had to be made due to the constraints of performing the investigation on non-cooperative patients with advanced AD. 
Furthermore, it is important to note that although MCI T A B L E 4 (Continued) Author ( may occur as a prodromal condition to AD, it can also occur in vascular dementia or even in cognitively healthy elderly persons without progressing to AD. Moreover, the conversion rate of MCI to AD is usually meager. Therefore, longitudinal studies, as opposed to identifying neural FC changes using a single time-point rs-fMRI study, can best assess whether an MCI patient will develop full-blown AD. Additionally, there is a need for further improvement and standardization of rs-fMRI patient selection criteria, acquisition, imageprocessing, and data analysis. The establishment of local populationbased database of fMRI studies involving AD subjects can also help in improving the suitability of comparison. Multicenter rs-fMRI using SBA FC has limited accuracy in the discrimination of AD and MCI cases from HC and requires careful data quality checks beyond the evaluation of global quality metrics, including visual inspection of all the data (). Furthermore, the combination and integration of multimodal imaging and clinical markers, introduce innumerable classifiers for the improved diagnostic accuracy of detecting and predicting AD. Although it appears very enticing to incorporate numerous multimodal features, nevertheless, this poses a challenge for ensuring homogeneity of datasets and hinders consistency of results. Other rs-fMRI features of engineering models that go beyond the classical Pearson correlation FC and ICA, that is, regional homogeneity (ReHo), fractional amplitude of low-frequency fluctuation ((f)ALFF), and dynamic FC need to be explored further to optimize the wealth of information available on rs-fMRI datasets. Additionally, novel computational models using convolutional neural networks that use 3D-deep learning frameworks are the way forward. 
There is potential for developing the utility of this technique by incorporating biomarker-based serial labeled data and domain transfer learning methods. | CONCLUSION The assessment of the DMN FC based on rs-fMRI analytic methods, has an excellent potential as a diagnostic tool for AD, particularly when using multivariate analysis to combine SBA and ICA methods of analyses. Nevertheless, the rs-fMRI protocols and analytical methods need to be more standardized to achieve uniformity in reporting improved diagnostic power. ACKNOWLEDGMENTS The research grant was awarded to Associate Professor Dr Subapriya Suppiah by the Malaysian Ministry of Education, that is, the CONFLICT OF INTEREST The authors declare and report no conflict of interest. Subapriya Suppiah conceptualized the study design. Buhari Ibrahim and Nisha Syed Nasser carried out the literature search, data extraction, and quality assessment. Buhari Ibrahim wrote the manuscript first draft. Subapriya Suppiah, Normala Ibrahim, Mazlyfarina Mohamad, Hasyma Abu Hassan, and M Iqbal Saripan edited the manuscript, verified the data, and provided critical feedback to help shape the research. DATA AVAILABILITY STATEMENT Data sharing is not applicable to this article as no new data were created or analyzed in this study.
<filename>src/app/@core/data/data.module.ts
import { NgModule, ModuleWithProviders } from '@angular/core';
import { CommonModule } from '@angular/common';
import { HTTP_INTERCEPTORS } from '@angular/common/http';

import { StateService } from './state.service';
import { LabService } from './lab.service';
import { TokenInterceptor } from '../utils/token.interceptor';
import { BeakerService } from './beaker.service';
import { CompoundService } from './compound.service';

// Data-layer services re-provided by this module.
const SERVICES = [
  StateService,
  LabService,
  BeakerService,
  CompoundService,
];

/**
 * Core data module.
 *
 * Import `DataModule.forRoot()` from the root/core module so the services and
 * the HTTP token interceptor are registered once; importing the bare module
 * only re-provides the services without the interceptor.
 */
@NgModule({
  imports: [
    CommonModule,
  ],
  providers: [
    ...SERVICES,
  ],
})
export class DataModule {
  static forRoot(): ModuleWithProviders<DataModule> {
    // Typed `ModuleWithProviders<DataModule>`: the non-generic form is
    // deprecated since Angular 9 and removed in Angular 10, and the object
    // literal already satisfies the interface, so the `<ModuleWithProviders>`
    // cast from the original is unnecessary.
    return {
      ngModule: DataModule,
      providers: [
        ...SERVICES,
        {
          provide: HTTP_INTERCEPTORS,
          useClass: TokenInterceptor,
          multi: true,
        },
      ],
    };
  }
}
package sqrl import ( "bytes" "context" "database/sql" "fmt" "strconv" "strings" ) // SelectBuilder builds SQL SELECT statements. type SelectBuilder struct { StatementBuilderType prefixes exprs distinct bool options []string columns []Sqlizer fromParts []Sqlizer joins []Sqlizer whereParts []Sqlizer groupBys []string havingParts []Sqlizer orderBys []string limit uint64 limitValid bool offset uint64 offsetValid bool suffixes exprs } // NewSelectBuilder creates new instance of SelectBuilder func NewSelectBuilder(b StatementBuilderType) *SelectBuilder { return &SelectBuilder{StatementBuilderType: b} } // RunWith sets a Runner (like database/sql.DB) to be used with e.g. Exec. func (b *SelectBuilder) RunWith(runner BaseRunner) *SelectBuilder { b.runWith = wrapRunner(runner) return b } // Exec builds and Execs the query with the Runner set by RunWith. func (b *SelectBuilder) Exec() (sql.Result, error) { return b.ExecContext(context.Background()) } // ExecContext builds and Execs the query with the Runner set by RunWith using given context. func (b *SelectBuilder) ExecContext(ctx context.Context) (sql.Result, error) { if b.runWith == nil { return nil, ErrRunnerNotSet } return ExecWithContext(ctx, b.runWith, b) } // Query builds and Querys the query with the Runner set by RunWith. func (b *SelectBuilder) Query() (RowsScanner, error) { return b.QueryContext(context.Background()) } // QueryContext builds and Querys the query with the Runner set by RunWith in given context. func (b *SelectBuilder) QueryContext(ctx context.Context) (RowsScanner, error) { if b.runWith == nil { return nil, ErrRunnerNotSet } return QueryWithContext(ctx, b.runWith, b) } // QueryRow builds and QueryRows the query with the Runner set by RunWith. 
func (b *SelectBuilder) QueryRow() RowScanner {
	return b.QueryRowContext(context.Background())
}

// QueryRowContext builds the query and runs QueryRow with the Runner set by
// RunWith, under the given context. Errors (no runner set, or a runner that
// does not support QueryRow with a context) are not returned directly; they
// are deferred into the err field of the returned RowScanner and surface on
// Scan.
func (b *SelectBuilder) QueryRowContext(ctx context.Context) RowScanner {
	if b.runWith == nil {
		return &Row{err: ErrRunnerNotSet}
	}
	queryRower, ok := b.runWith.(QueryRowerContext)
	if !ok {
		return &Row{err: ErrRunnerNotQueryRunnerContext}
	}
	return QueryRowWithContext(ctx, queryRower, b)
}

// Scan is a shortcut for QueryRow().Scan.
func (b *SelectBuilder) Scan(dest ...interface{}) error {
	return b.QueryRow().Scan(dest...)
}

// PlaceholderFormat sets PlaceholderFormat (e.g. Question or Dollar) for the
// query.
func (b *SelectBuilder) PlaceholderFormat(f PlaceholderFormat) *SelectBuilder {
	b.placeholderFormat = f
	return b
}

// ToSql builds the query into a SQL string and bound args.
//
// Clauses are emitted in fixed SQL order: prefixes, SELECT [DISTINCT]
// [options] columns, FROM, joins, WHERE, GROUP BY, HAVING, ORDER BY, LIMIT,
// OFFSET, suffixes. Args accumulates the bound values in the same order the
// clauses appear in the rendered string.
func (b *SelectBuilder) ToSql() (sqlStr string, args []interface{}, err error) {
	// A SELECT with no result columns cannot be rendered.
	if len(b.columns) == 0 {
		err = fmt.Errorf("select statements must have at least one result column")
		return
	}

	sql := &bytes.Buffer{}

	if len(b.prefixes) > 0 {
		// Prefix rendering errors are intentionally ignored here (historical
		// behavior); only the args are collected.
		args, _ = b.prefixes.AppendToSql(sql, " ", args)
		sql.WriteString(" ")
	}

	sql.WriteString("SELECT ")

	if b.distinct {
		sql.WriteString("DISTINCT ")
	}

	if len(b.options) > 0 {
		sql.WriteString(strings.Join(b.options, " "))
		sql.WriteString(" ")
	}

	// Always true here (guarded above); kept for symmetry with other clauses.
	if len(b.columns) > 0 {
		args, err = appendToSql(b.columns, sql, ", ", args)
		if err != nil {
			return
		}
	}

	if len(b.fromParts) > 0 {
		sql.WriteString(" FROM ")
		args, err = appendToSql(b.fromParts, sql, ", ", args)
		if err != nil {
			return
		}
	}

	if len(b.joins) > 0 {
		sql.WriteString(" ")
		args, err = appendToSql(b.joins, sql, " ", args)
		if err != nil {
			return
		}
	}

	if len(b.whereParts) > 0 {
		sql.WriteString(" WHERE ")
		args, err = appendToSql(b.whereParts, sql, " AND ", args)
		if err != nil {
			return
		}
	}

	if len(b.groupBys) > 0 {
		sql.WriteString(" GROUP BY ")
		sql.WriteString(strings.Join(b.groupBys, ", "))
	}

	if len(b.havingParts) > 0 {
		sql.WriteString(" HAVING ")
		args, err = appendToSql(b.havingParts, sql, " AND ", args)
		if err != nil {
			return
		}
	}

	if len(b.orderBys) > 0 {
		sql.WriteString(" ORDER BY ")
		sql.WriteString(strings.Join(b.orderBys, ", "))
	}

	// limit == 0 and offset == 0 are valid values, so emission is gated on
	// the limitValid/offsetValid flags rather than on the values themselves.
	if b.limitValid {
		sql.WriteString(" LIMIT ")
		sql.WriteString(strconv.FormatUint(b.limit, 10))
	}

	if b.offsetValid {
		sql.WriteString(" OFFSET ")
		sql.WriteString(strconv.FormatUint(b.offset, 10))
	}

	if len(b.suffixes) > 0 {
		sql.WriteString(" ")
		// Like prefixes, suffix rendering errors are ignored (historical
		// behavior).
		args, _ = b.suffixes.AppendToSql(sql, " ", args)
	}

	// Rewrite '?' placeholders into the configured format (e.g. $1 for Dollar).
	sqlStr, err = b.placeholderFormat.ReplacePlaceholders(sql.String())
	return
}

// Prefix adds an expression to the beginning of the query
func (b *SelectBuilder) Prefix(sql string, args ...interface{}) *SelectBuilder {
	b.prefixes = append(b.prefixes, Expr(sql, args...))
	return b
}

// Distinct adds a DISTINCT clause to the query.
func (b *SelectBuilder) Distinct() *SelectBuilder {
	b.distinct = true
	return b
}

// Options adds select option to the query
func (b *SelectBuilder) Options(options ...string) *SelectBuilder {
	for _, str := range options {
		b.options = append(b.options, str)
	}
	return b
}

// Columns adds result columns to the query.
func (b *SelectBuilder) Columns(columns ...string) *SelectBuilder {
	for _, str := range columns {
		b.columns = append(b.columns, newPart(str))
	}
	return b
}

// Column adds a result column to the query.
// Unlike Columns, Column accepts args which will be bound to placeholders in
// the columns string, for example:
//   Column("IF(col IN ("+Placeholders(3)+"), 1, 0) as col", 1, 2, 3)
func (b *SelectBuilder) Column(column interface{}, args ...interface{}) *SelectBuilder {
	b.columns = append(b.columns, newPart(column, args...))
	return b
}

// From sets the FROM clause of the query.
func (b *SelectBuilder) From(tables ...string) *SelectBuilder {
	parts := make([]Sqlizer, len(tables))
	for i, table := range tables {
		parts[i] = newPart(table)
	}
	b.fromParts = append(b.fromParts, parts...)
	return b
}

// FromSelect sets a subquery into the FROM clause of the query.
func (b *SelectBuilder) FromSelect(from *SelectBuilder, alias string) *SelectBuilder {
	b.fromParts = append(b.fromParts, Alias(from, alias))
	return b
}

// JoinClause adds a join clause to the query.
func (b *SelectBuilder) JoinClause(pred interface{}, args ...interface{}) *SelectBuilder {
	b.joins = append(b.joins, newPart(pred, args...))
	return b
}

// Join adds a JOIN clause to the query.
func (b *SelectBuilder) Join(join string, rest ...interface{}) *SelectBuilder {
	return b.JoinClause("JOIN "+join, rest...)
}

// LeftJoin adds a LEFT JOIN clause to the query.
func (b *SelectBuilder) LeftJoin(join string, rest ...interface{}) *SelectBuilder {
	return b.JoinClause("LEFT JOIN "+join, rest...)
}

// RightJoin adds a RIGHT JOIN clause to the query.
func (b *SelectBuilder) RightJoin(join string, rest ...interface{}) *SelectBuilder {
	return b.JoinClause("RIGHT JOIN "+join, rest...)
}

// Where adds an expression to the WHERE clause of the query.
//
// Expressions are ANDed together in the generated SQL.
//
// Where accepts several types for its pred argument:
//
// nil OR "" - ignored.
//
// string - SQL expression.
// If the expression has SQL placeholders then a set of arguments must be passed
// as well, one for each placeholder.
//
// map[string]interface{} OR Eq - map of SQL expressions to values. Each key is
// transformed into an expression like "<key> = ?", with the corresponding value
// bound to the placeholder. If the value is nil, the expression will be "<key>
// IS NULL". If the value is an array or slice, the expression will be "<key> IN
// (?,?,...)", with one placeholder for each item in the value. These expressions
// are ANDed together.
//
// Where will panic if pred isn't any of the above types. 
func (b *SelectBuilder) Where(pred interface{}, args ...interface{}) *SelectBuilder { b.whereParts = append(b.whereParts, newWherePart(pred, args...)) return b } // GroupBy adds GROUP BY expressions to the query. func (b *SelectBuilder) GroupBy(groupBys ...string) *SelectBuilder { b.groupBys = append(b.groupBys, groupBys...) return b } // Having adds an expression to the HAVING clause of the query. // // See Where. func (b *SelectBuilder) Having(pred interface{}, rest ...interface{}) *SelectBuilder { b.havingParts = append(b.havingParts, newWherePart(pred, rest...)) return b } // OrderBy adds ORDER BY expressions to the query. func (b *SelectBuilder) OrderBy(orderBys ...string) *SelectBuilder { b.orderBys = append(b.orderBys, orderBys...) return b } // Limit sets a LIMIT clause on the query. func (b *SelectBuilder) Limit(limit uint64) *SelectBuilder { b.limit = limit b.limitValid = true return b } // Offset sets a OFFSET clause on the query. func (b *SelectBuilder) Offset(offset uint64) *SelectBuilder { b.offset = offset b.offsetValid = true return b } // Suffix adds an expression to the end of the query func (b *SelectBuilder) Suffix(sql string, args ...interface{}) *SelectBuilder { b.suffixes = append(b.suffixes, Expr(sql, args...)) return b }
# Count the integers t in [l, r] for which at least one of the q shifted
# intervals [c + t, d + t] contains at least one point drawn from the p base
# intervals [a, b].
#
# Equivalent to the original nested scan (which rebuilt candidate lists and
# used O(n) list.remove per hit), but rewritten with sets and per-point
# difference ranges:  x in [c + t, d + t]  <=>  t in [x - d, x - c].

p, q, l, r = map(int, input().split())

# All integer moments covered by the p base intervals.
points = set()
for _ in range(p):
    a, b = map(int, input().split())
    points.update(range(a, b + 1))

# Shifts t for which some shifted interval hits some covered point.
good = set()
for _ in range(q):
    c, d = map(int, input().split())
    for x in points:
        # Intersect the admissible shift range [x - d, x - c] with [l, r].
        lo = max(l, x - d)
        hi = min(r, x - c)
        good.update(range(lo, hi + 1))

print(len(good))
Immune therapy: non-highly active antiretroviral therapy management of human immunodeficiency virus-infected patients. The complexity of human immunodeficiency virus (HIV) immunopathogenesis has prompted multiple strategic approaches to re-establish normal immune responses. Highly active antiretroviral therapy (HAART) can control viral replication, but it is unable to restore HIV-specific immunity. Newer approaches for managing HIV infection are focusing on cell-mediated immune responses, including the potential for improved immunologic control over HIV replication. Cytokines, such as interleukin (IL)-2 and IL-12, are being evaluated for their ability to enhance cell-mediated immunity, which is thought to be critical for immunologic control. Initial studies with IL-2 have demonstrated an improvement in CD4 cell counts, and large randomized trials are underway to determine the long-term clinical efficacy of IL-2 in combination with antiretroviral therapy, including HAART. Stimulating the immune response against HIV by use of exogenous (therapeutic vaccination) or endogenous (structured treatment interruption) antigens with or without immune adjuvants or cytokines, such as IL-2, is another approach currently being explored.
/** * The context for evaluation within macros. This wraps an existing {@code EvaluationContext} * but intercepts reads of the macro's parameters so that they result in a call-by-name evaluation * of whatever was passed as the parameter. For example, if you write... * <pre>{@code * #macro (mymacro $x) * $x $x * #end * #mymacro($foo.bar(23)) * }</pre> * ...then the {@code #mymacro} call will result in {@code $foo.bar(23)} being evaluated twice, * once for each time {@code $x} appears. The way this works is that {@code $x} is a <i>thunk</i>. * Historically a thunk is a piece of code to evaluate an expression in the context where it * occurs, for call-by-name procedures as in Algol 60. Here, it is not exactly a piece of code, * but it has the same responsibility. */ static class MacroEvaluationContext implements EvaluationContext { private final Map<String, Node> parameterThunks; private final EvaluationContext originalEvaluationContext; MacroEvaluationContext( Map<String, Node> parameterThunks, EvaluationContext originalEvaluationContext) { this.parameterThunks = parameterThunks; this.originalEvaluationContext = originalEvaluationContext; } @Override public Object getVar(String var) { Node thunk = parameterThunks.get(var); if (thunk == null) { return originalEvaluationContext.getVar(var); } else { // Evaluate the thunk in the context where it appeared, not in this context. Otherwise // if you pass $x to a parameter called $x you would get an infinite recursion. Likewise // if you had #macro(mymacro $x $y) and a call #mymacro($y 23), you would expect that $x // would expand to whatever $y meant at the call site, rather than to the value of the $y // parameter. 
return thunk.evaluate(originalEvaluationContext); } } @Override public boolean varIsDefined(String var) { return parameterThunks.containsKey(var) || originalEvaluationContext.varIsDefined(var); } @Override public Runnable setVar(final String var, Object value) { // Copy the behaviour that #set will shadow a macro parameter, even though the Velocity peeps // seem to agree that that is not good. final Node thunk = parameterThunks.get(var); if (thunk == null) { return originalEvaluationContext.setVar(var, value); } else { parameterThunks.remove(var); final Runnable originalUndo = originalEvaluationContext.setVar(var, value); return new Runnable() { @Override public void run() { originalUndo.run(); parameterThunks.put(var, thunk); } }; } } }
//
//  MBProgressHUDHelper.h
//  FinancePlatForm
//
//  Created by Liu Hegong on 14-3-3.
//  Copyright (c) 2014 liuhegong. All rights reserved.
//

#import <UIKit/UIKit.h>

// Convenience wrapper for showing transient HUD/toast messages.
// All members are class methods; no instance state is declared here.
//
// NOTE(review): subclassing UIBarButtonItem looks accidental for a HUD helper
// (nothing bar-button-related is declared in this interface); confirm whether
// NSObject was intended before relying on the superclass.
@interface MBProgressHUDHelper : UIBarButtonItem

// Shows a HUD with the given message; presumably auto-dismissed after
// delayTime seconds — confirm against the implementation.
+ (void)showHUD:(NSString *)msg delayTime:(NSTimeInterval)delayTime;

// Removes the HUD currently shown by this helper, if any.
+ (void)removeHUD;

// Changes the text of the currently displayed HUD.
// NOTE(review): the new text is not a parameter — presumably fixed inside the
// implementation; verify against MBProgressHUDHelper.m.
+(void)changeText;

// Shows a toast-style message; presumably auto-dismissed after delayTime
// seconds — confirm against the implementation.
+ (void) showToast:(NSString *)text delayTime:(NSTimeInterval) delayTime;

@end
An Uncertainty Measure for Incomplete Decision Tables and Its Applications Uncertainty measures can supply new viewpoints for analyzing data. They can help us in disclosing the substantive characteristics of data. The uncertainty measurement issue is also a key topic in the rough-set theory. Although there are some measures to evaluate the uncertainty for complete decision systems (also called decision tables), they cannot be trivially transplanted into incomplete decision systems. There are relatively few studies on uncertainty measurement in incomplete decision systems. In this paper, we propose a new form of conditional entropy, which can be used to measure the uncertainty in incomplete decision systems. Some important properties of the conditional entropy are obtained. In particular, two validity theorems guarantee that the proposed conditional entropy can be used as a reasonable uncertainty measure for incomplete decision systems. Experiments on some real-life data sets are conducted to test and verify the validity of the proposed measure. Applications of the proposed uncertainty measure in ranking attributes and feature selection are also studied with experiments.
<gh_stars>10-100 #include "common.h" #include "../simd/simd.h" void usimd_add(const FLOAT_T *a, const FLOAT_T *b, FLOAT_T *c, int len) { int i=0; #if V_SIMD && VEC_LT256 #ifdef DOUBLE_T int vsteps = v_nlanes_f64; int n = len & -vsteps; for (; i < n; i+=vsteps) { v_f64 v1 = v_load_f64(a+i); v_f64 v2 = v_load_f64(b+i); v_f64 v3 = v_add_f64(v1, v2); v_store_f64(c+i, v3); } #else int vsteps = v_nlanes_f32; int n = len & -vsteps; for (; i < n; i+=vsteps) { v_f32 v1 = v_load_f32(a+i); v_f32 v2 = v_load_f32(b+i); v_f32 v3 = v_add_f32(v1, v2); v_store_f32(c+i, v3); } #endif #endif for(; i < len; i++){ c[i] = a[i] + b[i]; } }
# Read a sentence from the user and print only the characters at even
# indices (0, 2, 4, ...); e.g. "HeLlO" -> "HLO".
s = input('Input a sentence: ')
print(s[::2])
Face-to-face with Brian Blunden, Pira Electronic Publisher Pira is very much like any other private sector company. We only survive if we are efficient and respond to the needs of the marketplace. We have to win members by selling ourselves. Currently, we have approximately one thousand industrial organisations backing us and a turnover of approximately £3½ million per annum. Our activities may be summarised as consultancy and investigations we undertake
Neurogenic Claudication, a Delayed Complication of a Retained Bullet Study Design. A case report is presented of a 31-year-old man who visited the authors' neurosurgical department in 1993, complaining of neurogenic claudication. History revealed a gunshot incident 11 years ago, with a bullet left in situ. Objectives. To determine whether to operate on patients who have a bullet in situ near the spinal cord without initial neurologic deficits. Summary of Background Data. In the literature, only four publications report an epidural chronic inflammatory mass as a reaction to a retained bullet, thereby causing delayed neurologic symptoms. Previous to this report, only one case is described of a patient with a bullet lodged in the paravertebral musculature. Methods. Clinically, the patient had pain radiating from his lower back to both his thighs, provoked by walking, standing, and the Valsalva maneuver. Comparison of radiographs made in 1990 and in 1993 showed the lead bullet still completely intact in 1990, whereas in 1993, a partial disintegration and displacement of the bullet, causing a chronic inflammatory reaction (extraspinal and intraspinal), as well as cyst formation, was seen. Particularly notable was the radiographic feature of a sort of "fallen leaf sign" at the level of L5-S1. Results. The preoperative complaints were still absent 1 year after surgery. Conclusions. It is argued that with regard to a retained bullet in the vicinity of the spinal canal, the presence or absence of neurologic symptoms should be the guide for further diagnostic procedures. Only if a neurologic deficit develops, which is possible after many years, should surgical intervention be considered, depending on the severity and type of the deficit, as presented in this case report.
/*
 * Copyright (C) 2018 Oracle. All Rights Reserved.
 *
 * Author: <NAME> <<EMAIL>>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <dirent.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/statvfs.h>
#ifdef HAVE_LIBATTR
# include <attr/attributes.h>
#endif
#include "xfs.h"
#include "handle.h"
#include "list.h"
#include "path.h"
#include "workqueue.h"
#include "xfs_scrub.h"
#include "common.h"
#include "inodes.h"
#include "progress.h"
#include "scrub.h"
#include "unicrash.h"

/* Phase 5: Check directory connectivity. */

/*
 * Warn about problematic bytes in a directory/attribute name. That means
 * terminal control characters and escape sequences, since that could be used
 * to do something naughty to the user's computer and/or break scripts. XFS
 * doesn't consider any byte sequence invalid, so don't flag these as errors.
 *
 * Returns true to keep scanning. Returns false only on a hard failure —
 * here, when escaping the name for display fails — which aborts the scan.
 */
static bool
xfs_scrub_check_name(
	struct scrub_ctx	*ctx,
	const char		*descr,		/* human-readable inode description */
	const char		*namedescr,	/* e.g. "directory" or "extended attribute" */
	const char		*name)		/* NUL-terminated name to vet */
{
	const char		*p;
	bool			bad = false;
	char			*errname;

	/* Complain about zero length names. */
	if (*name == '\0' && should_warn_about_name(ctx)) {
		str_warn(ctx, descr, _("Zero length name found."));
		return true;
	}

	/* control characters: ASCII 1-31 plus DEL (127) */
	for (p = name; *p; p++) {
		if ((*p >= 1 && *p <= 31) || *p == 127) {
			bad = true;
			break;
		}
	}

	if (bad && should_warn_about_name(ctx)) {
		/* Escape the name so the warning message itself is printable. */
		errname = string_escape(name);
		if (!errname) {
			str_errno(ctx, descr);
			return false;
		}
		str_info(ctx, descr,
_("Control character found in %s name \"%s\"."),
				namedescr, errname);
		free(errname);
	}

	return true;
}

/*
 * Iterate a directory looking for filenames with problematic
 * characters.
 *
 * On success the DIR stream adopts *fd: *fd is set to -1 so the caller will
 * not close the descriptor a second time (closedir closes it for us).
 */
static bool
xfs_scrub_scan_dirents(
	struct scrub_ctx	*ctx,
	const char		*descr,
	int			*fd,
	struct xfs_bstat	*bstat)
{
	struct unicrash		*uc = NULL;
	DIR			*dir;
	struct dirent		*dentry;
	bool			moveon = true;

	dir = fdopendir(*fd);
	if (!dir) {
		str_errno(ctx, descr);
		goto out;
	}
	*fd = -1; /* closedir will close *fd for us */

	moveon = unicrash_dir_init(&uc, ctx, bstat);
	if (!moveon)
		goto out_unicrash;

	/* Vet every entry: control characters, then unicode collision checks. */
	dentry = readdir(dir);
	while (dentry) {
		moveon = xfs_scrub_check_name(ctx, descr, _("directory"),
				dentry->d_name);
		if (!moveon)
			break;
		moveon = unicrash_check_dir_name(uc, descr, dentry);
		if (!moveon)
			break;
		dentry = readdir(dir);
	}
	unicrash_free(uc);

out_unicrash:
	closedir(dir);
out:
	return moveon;
}

#ifdef HAVE_LIBATTR
/* Routines to scan all of an inode's xattrs for name problems. */

/* An xattr namespace: the ATTR_* selection flag plus its display prefix. */
struct xfs_attr_ns {
	int		flags;
	const char	*name;
};

/* Namespaces to scan; the {0, NULL} entry terminates the list. */
static const struct xfs_attr_ns attr_ns[] = {
	{0,		"user"},
	{ATTR_ROOT,	"system"},
	{ATTR_SECURE,	"secure"},
	{0,		NULL},
};

/*
 * Check all the xattr names in a particular namespace of a file handle
 * for Unicode normalization problems or collisions.
 */
static bool
xfs_scrub_scan_fhandle_namespace_xattrs(
	struct scrub_ctx		*ctx,
	const char			*descr,
	struct xfs_handle		*handle,
	struct xfs_bstat		*bstat,
	const struct xfs_attr_ns	*attr_ns)
{
	struct attrlist_cursor	cur;
	char			attrbuf[XFS_XATTR_LIST_MAX];
	char			keybuf[NAME_MAX + 1];
	struct attrlist		*attrlist = (struct attrlist *)attrbuf;
	struct attrlist_ent	*ent;
	struct unicrash		*uc;
	bool			moveon = true;
	int			i;
	int			error;

	moveon = unicrash_xattr_init(&uc, ctx, bstat);
	if (!moveon)
		return false;

	memset(attrbuf, 0, XFS_XATTR_LIST_MAX);
	memset(&cur, 0, sizeof(cur));
	memset(keybuf, 0, NAME_MAX + 1);
	/* Page through the attribute list; cur tracks the listing position. */
	error = attr_list_by_handle(handle, sizeof(*handle), attrbuf,
			XFS_XATTR_LIST_MAX, attr_ns->flags, &cur);
	while (!error) {
		/* Examine the xattrs in this batch. */
		for (i = 0; i < attrlist->al_count; i++) {
			ent = ATTR_ENTRY(attrlist, i);
			/* Names are reported as "namespace.name". */
			snprintf(keybuf, NAME_MAX, "%s.%s", attr_ns->name,
					ent->a_name);
			moveon = xfs_scrub_check_name(ctx, descr,
					_("extended attribute"), keybuf);
			if (!moveon)
				goto out;
			moveon = unicrash_check_xattr_name(uc, descr, keybuf);
			if (!moveon)
				goto out;
		}

		/* al_more set means more batches remain; fetch the next one. */
		if (!attrlist->al_more)
			break;
		error = attr_list_by_handle(handle, sizeof(*handle), attrbuf,
				XFS_XATTR_LIST_MAX, attr_ns->flags, &cur);
	}
	/* ESTALE is ignored — presumably the inode went away mid-scan. */
	if (error && errno != ESTALE)
		str_errno(ctx, descr);
out:
	unicrash_free(uc);
	return moveon;
}

/*
 * Check all the xattr names in all the xattr namespaces for problematic
 * characters.
 */
static bool
xfs_scrub_scan_fhandle_xattrs(
	struct scrub_ctx	*ctx,
	const char		*descr,
	struct xfs_handle	*handle,
	struct xfs_bstat	*bstat)
{
	const struct xfs_attr_ns	*ns;
	bool				moveon = true;

	/* Walk the attr_ns table until the NULL-name terminator. */
	for (ns = attr_ns; ns->name; ns++) {
		moveon = xfs_scrub_scan_fhandle_namespace_xattrs(ctx, descr,
				handle, bstat, ns);
		if (!moveon)
			break;
	}

	return moveon;
}
#else
/* Without libattr we cannot enumerate xattrs; report success unconditionally. */
# define xfs_scrub_scan_fhandle_xattrs(c, d, h, b)	(true)
#endif /* HAVE_LIBATTR */

/*
 * Verify the connectivity of the directory tree.
 * We know that the kernel's open-by-handle function will try to reconnect
 * parents of an opened directory, so we'll accept that as sufficient.
 *
 * Check for potential Unicode collisions in names.
 */
static int
xfs_scrub_connections(
	struct scrub_ctx	*ctx,
	struct xfs_handle	*handle,
	struct xfs_bstat	*bstat,
	void			*arg)
{
	bool			*pmoveon = arg;
	char			descr[DESCR_BUFSZ];
	bool			moveon;
	xfs_agnumber_t		agno;
	xfs_agino_t		agino;
	int			fd = -1;

	/* Split the absolute inode number into (AG, inode-within-AG) for display. */
	agno = bstat->bs_ino / (1ULL << (ctx->inopblog + ctx->agblklog));
	agino = bstat->bs_ino % (1ULL << (ctx->inopblog + ctx->agblklog));
	snprintf(descr, DESCR_BUFSZ, _("inode %"PRIu64" (%u/%u)"),
			(uint64_t)bstat->bs_ino, agno, agino);
	background_sleep();

	/* Warn about naming problems in xattrs. */
	moveon = xfs_scrub_scan_fhandle_xattrs(ctx, descr, handle, bstat);
	if (!moveon)
		goto out;

	/* Open the dir, let the kernel try to reconnect it to the root. */
	if (S_ISDIR(bstat->bs_mode)) {
		fd = xfs_open_handle(handle);
		if (fd < 0) {
			/*
			 * ESTALE presumably means the inode vanished since we
			 * sampled it, so it is passed back to the caller
			 * rather than reported. NOTE(review): this early
			 * return skips progress_add() — confirm intended.
			 */
			if (errno == ESTALE)
				return ESTALE;
			str_errno(ctx, descr);
			goto out;
		}
	}

	/* Warn about naming problems in the directory entries. */
	if (fd >= 0 && S_ISDIR(bstat->bs_mode)) {
		moveon = xfs_scrub_scan_dirents(ctx, descr, &fd, bstat);
		if (!moveon)
			goto out;
	}

out:
	progress_add(1);
	if (fd >= 0)
		close(fd);
	if (!moveon)
		*pmoveon = false;
	return *pmoveon ? 0 : XFS_ITERATE_INODES_ABORT;
}

/* Check directory connectivity (phase 5 entry point). */
bool
xfs_scan_connections(
	struct scrub_ctx	*ctx)
{
	bool			moveon = true;
	bool			ret;

	/* Earlier phases found damage; connectivity results can't be trusted. */
	if (ctx->errors_found) {
		str_info(ctx, ctx->mntpoint,
_("Filesystem has errors, skipping connectivity checks."));
		return true;
	}

	ret = xfs_scan_all_inodes(ctx, xfs_scrub_connections, &moveon);
	if (!ret)
		moveon = false;
	if (!moveon)
		return false;
	xfs_scrub_report_preen_triggers(ctx);
	return true;
}
Acoustic Analysis of Phonation in Children With SmithMagenis Syndrome Complex simultaneous neuropsychophysiological mechanisms are responsible for the processing of the information to be transmitted and for the neuromotor planning of the articulatory organs involved in speech. The nature of this set of mechanisms is closely linked to the clinical state of the subject. Thus, for example, in populations with neurodevelopmental deficits, these underlying neuropsychophysiological procedures are deficient and determine their phonation. Most of these cases with neurodevelopmental deficits are due to a genetic abnormality, as is the case in the population with SmithMagenis syndrome (SMS). SMS is associated with neurodevelopmental deficits, intellectual disability, and a cohort of characteristic phenotypic features, including voice quality, which does not seem to be in line with the gender, age, and complexion of the diagnosed subject. The phonatory profile and speech features in this syndrome are dysphonia, high f0, excess vocal muscle stiffness, fluency alterations, numerous syllabic simplifications, phoneme omissions, and unintelligibility of speech. This exploratory study investigates whether the neuromotor deficits in children with SMS adversely affect phonation as compared to typically developing children without neuromotor deficits, which has not been previously determined. The authors compare the phonatory performance of a group of children with SMS (N = 12) with a healthy control group of children (N = 12) matched in age, gender, and grouped into two age ranges. The first group ranges from 5 to 7 years old, and the second group goes from 8 to 12 years old. Group differences were determined for two forms of acoustic analysis performed on repeated recordings of the sustained vowel /a/ F1 and F2 extraction and cepstral peak prominence (CPP). 
It is expected that the results will enlighten the question of the underlying neuromotor aspects of phonation in SMS population. These findings could provide evidence of the susceptibility of phonation of speech to neuromotor disturbances, regardless of their origin. INTRODUCTION The production of speech involves the oral coding of a message and its phonoarticulatory performance. It is a complex neurocognitive process that has been described from various cognitive approaches, ranging from the conception of the mind as software that processes linguistic information unidirectionally through rules of representation and transformation, to a connectionist conception that supports the existence of interconnected neuronal networks that operate in parallel in linguistic activity (). Regardless of the theoretical approach taken, the neurological basis for speech production and phonation always coincides with a set of cortical and subcortical areas specialized in the organization of the message, the generation of its propositional and grammatical structure, the identification of the phonetic and phonological correlates, and the neuromotor planning of the articulatory organs involved in speech. In this linguistic activity, complex simultaneous neuropsychophysiological mechanisms occur, whose nature is closely linked to the clinical state of the individual. The complexity of neurocognitive activity occurs successfully in healthy people. In populations with pathologies of neurological origin (ictus, tumors, dementias, among others), some of the cortical areas or part of the crucial neurocognitive mechanisms in the linguistic activity are impaired. In populations with neurodevelopmental deficits, these mechanisms are also deficient and can affect phonation, speech, language, and communication, both in comprehension and production tasks. 
From the phonetics, the physical sounds of the language have been studied through the characterization of the speech and the acoustic and articulatory particularities of the segmental and suprasegmental features of the languages; it has also allowed the acoustic and articulatory description of the voice production as a speech vehicle. This linguistic knowledge has a valuable application in the clinical field; thus, when speech and phonation are involved, whether for organic, functional, or neurological reasons, it is essential to know in detail the underlying characteristics and to describe the anomalies that are observed. The total of this abnormal speech profile is always a consequence of the patient's clinical disorder. Many studies have examined the neuromotor profile of the speech of populations with Parkinson, amyotrophic lateral sclerosis (ALS), cerebral palsy, hydrocephalus, among others, showing that speech production and phonation are compromised in the presence of diseases of neurological origin (). In the study of speech production in general and of dysarthria in particular, the starting point is the acoustic examination of speakers' emissions. The first two formants (F1 and F2) of these vocalizations are particularly important since both are part of the acoustic correlates of articulatory activity. Formants are prominent resonances resulting from the specific configuration of the vocal tract at a given moment; specifically, F1 and F2 relate to the movement of the jaw and tongue. In general, the first formant has an inverse relationship to the opening of the mouth: F1 is higher the lower the jaw and vice versa. The second formant has a direct relationship with the tongue: F2 is higher the further forward the tongue is in the oral tract and vice versa (). 
The examination of the first two formants involves knowing the activity of some essential organs in the articulation of speech, and it is important for measuring speech intelligibility (Kent and Vorperian, 2018;Kent and Rountrey, 2020). Numerous acoustic measurements have been used in studies on the articulatory characterization of individuals with neuromotor speech problems. A classic measure used in this type of studies is the vocal space area (VSA), related to the dimensions of the acoustic vowel chart formed the first two formants of a vowel. The VSA space reflects the degree of separation between the vowels of a speaker, that is, the articulatory distinction between them in the same speaker. A reduced VSA area implies less articulatory capacity and, as a consequence, less intelligibility (). In the ALS and Parkinson's population, VSA is lower when compared to the normative population (Forrest and Weismer, 1997;;Skodda et al.,, 2013. A smaller area of this intervocal space has also been observed in speakers with cerebral palsy (;DuHadway and Hustad, 2012;). Similar results have been obtained with adult populations with Down syndrome (Bunton and Leddy, 2011), and a high degree of intrasubject variability was observed in the first two formants of the vowel /a/, but not a reduced VSA, in the X-Fragile syndrome (). Another classic measure in the examination of dysarthria used as an alternative to VSA is the formant centralization ratio (FCR). This measure expresses a ratio that is extracted from the first two vowel formants, and it is expressed as: (F2u + F2a + F1i + F1u)/(F2i + F1a), where F2u is the value of the second formant of /u/, F1a is the value of the first formant of /a/, and so on (). Since FCR is a measure that expresses a relationship, the intravariability obtained with VSA is considerably reduced. 
Studies of dysarthria in patients with Parkinson's compared to healthy populations revealed that FCR was able to differentiate between the two groups and was sensitive to the effects of treatment to improve dysarthria in patients with Parkinson's ((Sapir et al.,, 2010). Likewise, this same measure could differentiate the speech of Down syndrome population from healthy speakers (). More recent studies have proposed less conventional measures related to the kinematics of the phono-articulatory organs. One such measure is absolute kinematic velocity (AKV), which is associated with the myoelectric activity of certain facial muscles that move to the jaw, tongue, and lips (;a,b). The AKV has also been used to measure the articulation stability during sustained vowels emissions and allows to observe how much the first two formants fluctuate during the prolonged sustain of a vowel (usually an /a/), which leads to an analysis of the degree of articulatory position stability (b). As far as phonation is concerned, a wide variety of acoustic parameters are used to assess dysphonia. Traditionally, parameters relating to the fundamental frequency (f0) and the classical distortion parameters-jitter, shimmer, harmonic-tonoise ratio (HNR)-based on frequency and amplitude variability in consecutive glottal cycles, and on the noise-to-harmonic ratio, respectively, have been used (Lieberman, 1963;Koike, 1969;Kitajima and Gould, 1976;;Hirano, 1981;Ladefoged and Antoanzas-Barroso, 1985;Klatt and Klatt, 1990). Long-term average spectrum (LTAS) has also been used as an acoustic parameter in the analysis of dysphonia (Formby and Monsen, 1982). To assess the degree of periodicity of a voice, the cepstral peak prominence (CPP) is used as an acoustic parameter, which shows the prominence of the cepstral peak, which varies depending on the periodicity of phonation. When the voice is more periodic and richer in harmonics, the CPP is more prominent and has a greater amplitude. 
It was first used for phonation analysis by Noll (1964Noll (, 1967, and years later, Koike pointed out the direct relationship between the CPP and the periodicity of the voice. The automatic calculation of CPP was proposed by Hillenbrand et al. and later a variant of CPP, CPP smoothed (CPPS), was suggested (Hillenbrand and Houde, 1996). The CPPS was more useful because it had higher correlations with dysphonic voices, especially breathy voices (Hillenbrand and Houde, 1996). Over the last three decades, these cepstral parameters have been used to assess dysphonia, and a strong correlation has been found between CPP, CPPS, and the degree of dysphonia (;, 2003Halberstam, 2004;;Maryn et al.,, 2010;Watts and Awan, 2011;;;, among others). In fact, some authors, such as Moers et al., found higher correlations between CPP and hoarseness than between hoarseness and classical distortion parameters. Others, such as Samlan et al., observed a clear correlation between CPP and HNR but did not consider the former to be a more robust parameter in the detection of dysphonia. Currently, most researchers consider CPP and CPPS as the strongest acoustic parameters in the assessment of dysphonia severity, both in speech and sustained voice (). The latter is relevant, as sustained vowels will be analyzed in this paper. In relation to the findings on the robustness of CPP in dysarthria, it has also been shown that this parameter is sensitive to variations in phonation resulting from a neurological condition. Specifically, in research carried out with Parkinson's patients, CPP and CPPS correlate with the degree of phonatory impairment and exhibit values below those of control groups, especially in patients with non-tremor-dominant phenotype (Burk and Watts, 2018;;;Brown and Spencer, 2020;). 
The same correlation has also been observed with phonation in patients with mild cognitive impairment (), with spasmodic dysphonia (), with Friedrich's ataxia (Jannetts and Lowit, 2014;;), and with cerebral palsy (van Brenk and Kuschmann, 2018;Kuschmann and van Brenk, 2019). Furthermore, cepstral parameters are significantly lower in voice analyses of populations with neurodevelopmental deficits diagnosed with Williams syndrome (). The latter is a syndrome of genetic origin that affects the correct development of the neurological system and coincides in this sense with Smith-Magenis syndrome (SMS), the study group of this research work. Two of the most prominent, but least studied, features that influence the daily lives of individuals diagnosed with SMS have to do with their speech and language difficulties. Most of the individuals with SMS show language disorders that mainly affect production and are focused on the morphosyntactic, pragmatic, and phonetic-phonological levels (Garayzbal ;;Garayzbal and Lens, 2013;). Their phonatory profile and speech features, both children and adults, are dysphonia, high f0, excess vocal muscle stiffness, disfluencies, tachylalia, numerous syllabic simplifications, phoneme omissions, and, in general, a high unintelligibility of utterances (Hidalgo and Garayzbal, 2019; Hidalgo-de la Gua, 2019; Hidalgo-De la ). Neuromotor Profile of Smith-Magenis Syndrome The neuromotor characteristics of SMS are the result of an altered neurological development. These neuromotor conditions of SMS patients are observed from the first year of life (;). 
Classic studies with newborn children and in the first 10 years of life have outlined the neuromotor profile of these patients that includes (a) developmental delay and little weight and height gain; (b) poor interaction with the environment and low response to external stimuli; (c) low sensitivity to pain and high temperatures; (d) generalized hypotonia; (e) late onset of standing, unstable balance, abnormal tremor in extremities, poor fine and gross motor skills (severe-moderate level); and (f) late and poor babbling, oral-motor dysfunction, and speech delay ((Gropman et al.,, 2006(Gropman et al.,, 2007;;). These neurological problems are also observed in the swallowing difficulties and in the hypotonia of the velopharyngeal and orofacial muscles of the patients (;). It has also been found that 75% of children with SMS have alterations in the peripheral nervous system (). The complex neuromotor deficits associated with SMS could affect multiple subsystems involved in voice and speech production, and therefore, it is important to investigate these potential effects using acoustic outcome measures. It is reasonable to assume that there are anomalies in the neurological mechanisms that interfere in phonation and speech and that the phono-articulatory organs will lack the tonicity and precision required in coarticulated speech and phonation. As seen in the previous section, several studies addressing the acoustic characteristics of speech in people with genetic syndromes (Down's syndrome, X-Fragile syndrome) have shown abnormalities that could be due to the neuromotor deficits that occur in these disorders (;;Bunton and Leddy, 2011). The voice and speech deficits associated with SMS are currently poorly understood and are important for the effective management of this disorder. 
In the current study, acoustic correlates of voice and speech were analyzed as a reflection of the activity of the phonoarticulatory organs and the state of the nasopharyngeal and oropharyngeal tracts. Word articulation and speech fluency have not been considered in this exploratory study since it is the first time that a research like this is carried out in SMS, and it is convenient to limit the object of analysis. The findings would evidence the susceptibility of phonation and speech articulation to neuromotor alterations, independently of their origin. The aim of this study is to explore whether the neuromotor deficits in children with SMS adversely affect phonation as compared to typically developing children without neuromotor deficits. These findings could provide evidence of the susceptibility of phonation of speech to neuromotor disturbances, regardless of their origin. Participants The study was carried out with an experimental group of 12 children with SMS (six boys and six girls) grouped in two age ranges: 5-7 years and 8-12 years. The sample analyzed constitutes the 17% of the total number of people with SMS in the Spanish Association of Smith-Magenis Syndrome (ASME). SMS has a very low prevalence and is underdiagnosed mainly due to the lack of knowledge that still exists about the syndrome. In Spain, the average age of diagnosis is 6.5 years (Hidalgo-de la Gua, 2019), and in ASME, there are currently 72 cases diagnosed at various ages. All participants were diagnosed by fluorescent in situ hybridization (FISH), which identified interstitial microdeletion in the 17p11.2 region. All children with SMS in this study come from the Spanish ASME, which has actively collaborated to facilitate our access to families and children diagnosed. The information from the experimental group can be seen in Table 1. The healthy control population sample is a set of 12 typically developing children matched in age and sex to SMS cases. 
The total number of cases of NG came from the Public School "Mara Luisa Caas" (Ciudad Real), where tutors and teachers were previously informed about this research. The specific exclusion criterion for constituting NG was vocal pathology, a condition provided by the speech therapist of the school and by the parents. All the tutors and parents of the underage participants in this research signed their informed consent. This research does not violate any rights of minors and complies with all the ethical principles set out in the Declaration of Helsinki by the World Medical Association in 1964 (Povl Riis, 2003). Types of Samples and Recording Voice Procedure The present study is of a purely exploratory nature since the studies addressed in SMS on the manifestations of neuromotor deficits in speech and phonation is non-existent. In this research, samples of sustained speech were used, specifically with the vowel /a/ sustained for approximately 1 s. A sustained vowel has been chosen instead of diadochokinetic emissions or full words because it is considered the best way to contrast the initial hypothesis. To see whether the speech and voice of children with SMS reveals part of their atypical neuromotor profile, it was necessary to examine, on the one hand, the phonation under a neuromotor framework such as that of SMS-cepstral peak prominence (CPP)-and second, the stability of the articulatory position when this is held for 1 s, considering that the people articulating the vowel have neuromotor deficits. The latter is fundamental, since it relates to the behavior of the articulatory organs during sustained emission, The number of cases and label identification is provided. Frontiers in Human Neuroscience | www.frontiersin.org specifically regarding the activity of the jaw and the tongue. Furthermore, that steady-state analysis without coarticulation effects surrounding phonemes is also important for generating valid measures of F1 and F2. 
The stable tongue and jaw position achieved in this type of speech production is optimal for phonatory analysis. It is also the most recommended way to indirectly analyze the laryngeal activity and the vocal quality. As regards the choice of the phoneme /a/ instead of any other vowel segment, this is due to phonological issues: it is the most open vowel in Spanish and the one that favors a more natural-less forced-and more standard phonation (Vihman and de Boysson-Bardies, 1994). Several samples were taken per participant, and a total of 50 emissions were obtained. These samples were collected at different times throughout the week. A cardioid clip-on microphone (Audio-Technica ATR-3350) was used for voice collection due to the age of the participants. In this type of study in which the voice is the object of analysis, it is essential to avoid any kind of distraction given the influence it has on phonation (Ramig and Dromey, 1996). The microphone could be a distracting element, especially in SMS population, so a small clip-on recording device was the best option. The recording device was always placed approximately 20 cm from the source (the participant's mouth). The files have been recorded with a sampling rate of 22,100 Hz (Smith-Magenis data set) and 48,000 Hz (normative data set), both stereo 16 bits. A small room with good acoustic conditions was the recording location for the two study groups. During the process of collecting the voice samples, only the researcher and one participant were in the recording room. To ensure that the voice samples analyzed were natural and in accordance with the phonatory characteristics of each subject, the examiner played with each participant, sang, and used the microphone for at least half an hour before collecting the voice samples. The aim was to familiarize the child with the test and the materials used. 
Preprocessing of Data All files were filtered to subtract the mean value, to make sure no continuous bias was introduced. After that, they were all normalized in amplitude. Different sampling frequencies were used for formant and f0 calculation. In the case of formant calculation, only the low frequencies of the spectrum were of interest to the authors, and a sampling frequency of 16,000 Hz was used. The reason for this was that the first five formants lay below 8 kHz, so all the files were downsampled to 16 kHz prior to formant calculation. Nevertheless, for f0 calculation, a higher sampling frequency provided a more precise f0 calculation. Thus, in this case, a sampling frequency of 48 kHz was used. This frequency up-sampling was not going to give more precision in the case of the Smith-Magenis data set but was quite convenient, as the f0 values so calculated were easier to align with formant values if window overlap was properly selected. The resampling algorithm performs an up-sampling by an integer value "p, " followed by a low-pass filtering and a downsampling by an integer value "q." The relationship between p and q equals the relationship between the original and the desired frequencies (Crochiere, 1979;Crochiere and Rabiner, 1983). Formant Calculation As previously indicated, data files were down-sampled to 16 kHz. A pre-emphasis filter with a pole at 0.95 was applied to every file to enhance higher frequencies. An all-pole model system was considered for speech. To calculate such a system, a linear predictive coding (LPC) model with a covariance algorithm was calculated in block processing. For every file, LPC models of orders 12, 14, and 16 were calculated, with order 12 being the optimum compromise in most cases. Nevertheless, older children tended to show lower formants, so a higher order was sometimes necessary to separate the first two formants in a Spanish /a/ vowel. The window length for the block processing was 256 ms (4,096 samples).
This is a very long window, but the data set consisted of sustained vowels, so slow variations in the position of the formants were expected. The window displacement was 8 ms (128 samples). That implied an overlap of 96.9% between windows. The formants were calculated as the frequencies that corresponded to the positions of the roots of the polynomial of the LPC model. Some restrictions were applied to consider a root a real formant: the minimum value for the radius of the first formant should be 0.84, and the minimum imaginary part for that first formant should be 0.05. Then, only frequencies above 150 Hz were considered for the first formant. Consecutive formants should be calculated from roots of radius higher than 0.82, 0.8, 0.78, 0.76, and 0.74. This less strict restriction as the order of the formant increased was due to the lower energy level of the formants as frequency increased. f0 Calculation For f0 calculation, files were resampled to 48 kHz. The value of f0 was searched between a minimum of 40 Hz and a maximum of 700 Hz. The periods in samples that corresponded to these two frequencies were fs/40 (T max ) and fs/700 (T min ) samples. The algorithm was based on the cepstrum. Files were block processed with a window length of 64 ms (3,072 samples) and a displacement of 8 ms (384 samples and 87.5% overlap). The time displacement was the same as in the case of the formant calculation. Every frame was multiplied by a Hamming window function, and the real cepstrum was calculated and put together in a matrix. The cepstrum matrix obtained in this way was then filtered with a 2D filter of size 19 × 9 (19 rows and nine columns). This filter was obtained as the product of two matrices: the first matrix consisted of 19 rows of Blackman window vectors of size 9, and the second one consisted of nine columns of Blackman window vectors of size 19. The resulting filtering function can be viewed in Figure 1. 
After that filtering, cepstrum vectors were compensated to enhance low frequencies. The period in the samples of every frame was calculated as the position of the maximum of every column in the cepstrum-filtered matrix between T min and T max. On some occasions, the second or the third peak of the cepstrum (situated at double and three times the samples of the first one) could be more prominent than the first one, although low frequency enhancement was accomplished. Those cases were detected, and if the first peak was over the 60% of the value of the most prominent one, the value was corrected. When the value of the first peak was even lower than the 60% of the most prominent one, we assumed that a subharmonic of the f0 was present. Cepstral Peak Prominence Calculation An estimate of the goodness of the cepstral peak was calculated for every cepstrum frame. The real cepstrum of the windowed frames was smoothed with an FIR filter of 25 coefficients with a Hamming window impulse response. The cepstrum signal was then limited between T min and T max. The maximum (cepstral peak) of the so constrained signal was calculated. Finally, the CPP was obtained as the difference between that value and the average of the rest of the cepstrum signal between T min and T max. This value was also smoothed between consecutive frames, with a filter with a window length of 50 ms. Stages of the Study and Statistical Analysis For the statistical analysis of the data and their comparison with the control population, parametric and non-parametric statistical tests were used. The analysis of the data was complex and required a thorough examination divided into different stages, which are summarized in Figure 2. In stage 1, a first comparison of F1 and F2 between the non-normative (SMS) and the normative (NG) group was carried out. 
In stage 2, due to the absence of relevant results, it was considered to carry out a second comparison between SMS and normative cases of the same age range and gender, i.e., one SMS child of rank 1 versus each of the three normative children of the same age range and gender ( Table 2) and so on. In this phase, on the one hand, the results of the F1 and F2 analysis of both groups (Stage 2.1.) were compared; on the other hand, the outcomes of the phonation analysis (CPP) (Stage 2.2.) were contrasted. As can be seen in Figure 2, the relevant results were found in the latter stage. The approach followed in the statistical analysis was conditioned by the statistical restrictions set by each test used (Figure 3). Given the two distributions, the first step was to check if these distributions followed or not a normal trend. Due to the twostage analysis process in this study, the overall distributions and age/gender subgroup distributions were tested for normality. The Shapiro-Wilk's test is used when the number of samples is <50, whereas the Kolmogorov-Smirnov test is used when samples are 50 or more. Due to the number of samples per participant (| N| ≥ 50), the Kolmogorov-Smirnov (KS) test was appropriate. When distributions were non-normal as evidenced by a KS test p < 0.05, the Mann-Whitney, or U test, was used to determine group differences. RESULTS The outcomes of the acoustic analysis are the result of a complex statistical examination of the data. It was in Stage 2.2 (Figure 2) that relevant results were obtained in terms of the values of the statistical tests used (Figure 3). Two data analysis approaches were performed. The first approach involved individual comparisons for each age and gender matched SMS and NG participant pair, using the 50 repeated samples for each participant. The second approach involved the analysis of four collective clusters-range 1 and 2 male and range 1 and 2 female, respectively. 
In both analysis approaches, the acoustic analysis of phonation was based on CPP extraction. The results of the statistical tests indicated that there were significant differences between phonation of SMS and NG groups. Likewise, in Table 2, the summary of statistical values for CPP such as cardinality, mean, and standard deviation were provided divided into range of age, gender, and group (NG and SMS). At this point, in the first analysis approach, the outcomes of the acoustic analysis of the phonation of SMS cases were compared with the age-and gender-matched NG individuals. The results of the T-and U-test are shown in Table 3. In yellow color, T-tests with not significant p value are depicted (two cases out of 36); in red color, Mann-Whitney-Wilcoxon tests (U-test) with not significant p values are illustrated (three out of 36). Finally, each cell without background represents significant p values, using U-test (31 cases out of 36). The table above shows the results of the T-and U-tests comparisons of the CPP data extracted from the acoustic analysis of /a/. The aquamarine rows show SMS group cases, and the red pale columns show the NG cases. As can be seen, the results have been grouped by age range and gender. All results rejected the null hypothesis (p < 0.05), except for the boxes highlighted in red and yellow. That is, the non-highlighted results reflect that the CPP values of SMS and NG cases are significantly different, which means that these participants have distinctly different phonations. As follows, in Figures 4-7, the three-representative kind of outcomes in Table 3 are shown; using Q-Q plots reflecting the sample distributions, where it can be seen that SMS5 vs. 517A (in yellow) and SMS6 vs. 637A (in red) cases are above the p value. Figure 4 is a Q-Q plot of SMS5 and 517A CPP distributions. In Figure 5, the Q-Q plot between SMS6 and 637A is depicted. 
As in Figure 4, the two case distributions intersect and do not show statistically significant differences (p = 0.15). However, in this case, these distributions do not follow a normal trend, and therefore, the non-parametric U-test was used. In contrast to the previous cases that were not significantly different, the majority of case comparisons were significantly different. For example, Figure 6 depicts the Q-Q for the comparison of SMS3 and 618O, with distributions that were widely separated and significantly different as tested with the U-test (p = 3.76E−16). Likewise, in Figure 7, the distributions for the comparison of SMS12 and 11AAZM are completely distinct, with statistically significant differences (p = 4.23E−45). As can be seen in the aforementioned figures, Q-Q plots provide easy-to-read behavior in participants. For instance, if populations cannot be distinguished, or, put in other words, belong to the same distribution, the set of points are located very close (see Figures 4, 5 respectively). However, if the statistical test returns a p < 0.05, there exists statistical significance between non-normative and NGs (see Figures 6, 7 respectively). Therefore, these plots depict two perfectly separated distributions. Concerning the second study, the process described in Figure 3 was carried out for each cluster—old girls/boys and young girls/boys—respectively. In all cases, the Kolmogorov-Smirnov test rejected the null hypothesis; in other words, both contrasted distributions did not follow a normal trend. For this reason, the non-parametric U-test was the inferential statistic used to determine if there was a significant difference between these two populations (NG and SMS). (Table note: the added columns provide information about cluster (range of age), gender, group (NG/SMS), the number of samples, the mean, and the standard deviation of CPP.) As shown in Table 4, p values were well below 0.05. 
Likewise, the four Q-Q plots are depicted in Figure 8. After this second analysis, the Q-Q plot further highlights the differences between the two study groups (SMS and NG). The distributions are very different, especially in the group of young boys and older girls. DISCUSSION Our initial objective was to study the phonation and speech of children with SMS by means of an acoustic analysis to detect features associated with the neuromotor deficits that result from this genetic disorder. The main features of the neurological profile of patients with SMS can be summarized as: marked developmental delay, hypotonia, hyporeflexia, high pain threshold, poor motor skills, abnormal limb tremor, among others (;Gropman et al.,, 2006Gropman et al.,, 2007). It is possible that this picture of abnormalities is reflected in speech and phonation mechanisms, so at the beginning of this study, we expected to find different acoustic features in the speech emissions of patients with SMS compared to the normative population. For this purpose, a first observation of the articulatory particularities of SMS was made through the analysis of F1 and F2 of a sustained /a/ produced by 12 children with SMS between 5 and 12 years of age, who were compared with 12 age-matched children. As discussed in the previous section, the results of these analyses were not as expected, as the statistical tests did not yield relevant results, but we cannot come to any conclusions given the small number of participants. To attempt to arrive at robust results, it would be necessary to recruit more cases with SMS with a view to continue this research work. It would also be interesting to include tasks other than sustained /a/ to see if the results of the articulation analysis reflect the differences that were assumed at the beginning of this research. After this first articulatory analysis, phonation was studied by determining the prominence of the cepstral peak. 
CPP values, unlike F1 and F2, were significantly different between the SMS group and NG. CPP is an acoustic parameter currently considered to be the most consistent parameter in the detection of dysphonia (). A strong correlation has been found between cepstral values and dysphonia severity. Consequently, it is also a widely used parameter in the analysis of dysarthria, as it has been shown to be sensitive to phonation disorders of neurological origin. [FIGURE 8 | Q-Q plot of cepstral peak prominence (CPP) distributions of gender and age groups. Notice that the young and old legend refers to the ages of the groups (ranges 1 and 2, respectively).] The outcomes of the phonation analysis indicated that there were significant differences between the SMS group and NG; it can be observed that in most of the cases with SMS, the CPP had a lower value than that in NG. This indicated that children with SMS had poorer vocal quality than their NG peers. CPP was less prominent in voices with reduced vocal quality, i.e., when their first harmonic did not stand out sufficiently from the background noise. The richer the harmonic structure of a voice, the higher the CPP; in other words, the better the quality of a voice, the more prominent the cepstral peak (Maryn et al., 2010; Watts and Awan, 2011; among others). The poor harmonic structure of a voice is linked to dysphonia, which can be organic, functional, or of a neurological nature (dysarthria). The latter are due to an alteration of the set of cortical and subcortical structures underlying the phonation mechanisms, whether degenerative, acquired, or developmental, as in the case of SMS. Thus, low CPP values have been evidenced in populations with Parkinson's disease (Burk and Watts, 2018; Brown and Spencer, 2020), ataxia (Jannetts and Lowit, 2014), cerebral palsy (van Brenk and Kuschmann, 2018; Kuschmann and van Brenk, 2019), and even neurodevelopmental deficits, such as the Williams syndrome (). 
The latter is, by far, the most similar clinical condition to SMS, as both are caused by a genetic abnormality and are conditioned by a neurodevelopmental disorder in the embryonic stages. In the first study analysis involving single-case comparisons of phonation, the results clearly showed differences in laryngeal biomechanics for the children with SMS as compared with age and gender matched typically developing children. There were only a few cases in which no statistically significant differences had been found. However, if we take a closer look at some of these cases, we can see different distributions (Figures 4, 5). The Q-Q plot showing the distributions of SMS6 and 637A (Figure 5) showed that these two cases had different trends. The distributions intersected at a certain point, which could cause the test statistic to fail to distinguish the two samples as different and caused the p value to be above 0.05. In fact, the graph showed that the distributions separated. This phenomenon occurred similarly in the other cases where the statistical tests did not reject the null hypothesis. In addition, what was noteworthy was that most of the cases assessed showed clearly different distributions (examples in Figures 6, 7). This finding would suggest that the altered neuromotor profile of SMS individuals influences the biomechanics of the structures involved in their phonation and voice quality. Apart from the neurological features described above, it has been shown that the population with SMS also has swallowing difficulties, hypotonia of the velopharyngeal and orofacial muscles (;), and alterations in the peripheral nervous system (75% of children with SMS) (). Phonological difficulties and many simplification processes related to velopharyngeal hypotonia (distortions affecting velar and, mainly, fricative consonant phonemes) have also been described (Hidalgo and Garayzbal, 2019). 
Therefore, taking these findings into account, it would seem justified to relate the results of the acoustic analysis of this paper to the neuromotor particularities of SMS. In the second study analysis involving subgroups of children divided by age and gender, children with SMS were clearly differentiated from their typically developing peers through the phonatory analysis of CPP. The power of the statistical tests was enhanced by the large number of analyzed samples, as reflected in p values that were often well below 0.05 (e.g., young boys and old girls). Likewise, the Q-Q plots obtained generally presented separate distributions. As hypothesized, these results reinforce the fact that the use of the CPP as a phonatory feature helps to distinguish between healthy control and Smith-Magenis populations. CONCLUSION This study is of an exploratory nature, but it has allowed us to identify how the phonatory characteristics of children with SMS differ from those of their typically developing peers. These phonatory differences are likely associated with the neurological deficits that characterize this syndrome. A comparative acoustic analysis of a sustained /a/ was carried out between an experimental group of 12 SMS cases aged 5-12 years and a control group of 12 typically developing children matched in age and gender. The initial aim was to determine whether the phonation and speech of the experimental group showed acoustic features that differed from the normative population. This interest stems from the altered neuromotor profile of the population with SMS and the close relationship between speech and phonation mechanisms and neurological disorders (dysarthria). Therefore, F1 and F2 were analyzed, as well as CPP, considered the most reliable acoustic parameter in the detection of dysphonia. The main findings of this work can be summarized as follows. 
For the F1 and F2 analysis, no significant differences were found between the SMS children and the normative comparison group. Although this would not imply that both samples present with equivalent articulatory features, it is also possible that the F1 and F2 analysis for a single vowel was not sensitive enough to detect possible articulatory differences. In contrast, the phonatory cepstral analysis revealed significantly lower CPP for the children with SMS as compared with the age-and gender-matched normative group. Additionally, the vocal quality of most children with SMS in this study was lower than that of the normative comparison group. These findings suggest that the neuromotor deficits that characterize children with SMS may adversely affect laryngeal biomechanics and thus vocal quality. Finally, these findings are in line with previous research addressing dysarthria populations. The current results are also consistent with the findings made by Watts, Awan, and Marler in 2008 in a population with a syndrome with similar characteristics to SMS: Williams syndrome. It is also important to point out some limitations of the present study. First, the experimental sample is small, although this is a frequent circumstance in research with rare and minority disease populations that are also underdiagnosed and relatively new. Second, another limitation has been the availability of a single speech task (a sustained /a/), since having several utterance tasks would have allowed us to test whether there are indeed no significant articulatory differences between SMS and typically developing children. In this sense, it will be interesting to examine the vowel quadrilateral for children with SMS and comparing that to a control group. In future research, authors will try to increase the population sample and the types of speech exercises to continue with the same line of study of the present work and to provide answers to unresolved questions. 
In addition, we will try to answer other questions that have arisen after this first analysis, such as the following: Is the typical neuromotor profile of SMS reflected in the same way in the phonation and speech of girls and boys? Is there a gender bias? Do adults with SMS present with a speech and voice profile that is further from normal than those diagnosed at a younger age? How do the speech and voice profiles of children with SMS compare to those of other syndromes with neurodevelopmental alterations? Our current, exploratory study addressed the initial purpose of determining whether acoustic features of speech and voice differed for children with SMS as compared with a normative group and confirmed that cepstral-based, phonatory features differed between the two groups. DATA AVAILABILITY STATEMENT The datasets presented in this article are not readily available because the law RGPD of Spain. Requests to access the datasets should be directed to DP-A, daniel.palacios@urjc.es. ETHICS STATEMENT The studies involving human participants were reviewed and approved by ASME. The parents and/or legal representatives of each participant signed an informed consent accepting the participation in the process of data gathering, custody, and treatment. All data are kept in safe custody accordingly to European and national regulations. The Spanish Association of Children with Smith-Magenis Syndrome issued a letter to the name of the researchers acknowledging and availing the purpose and safety of the recording protocols. The protocols and procedures for data gathering and treatment adhered strictly to the Declaration of Helsinki. AUTHOR CONTRIBUTIONS IH-D, EG-H, PG-V, and DP-A contributed to the conception and design of the study. RM-O organized the database and extracted all features DP-A performed the statistical analysis and plots. IH-D, EG-H, RM-O, and DP-A wrote sections of the manuscript.
Activity of key enzymes of the sphingomyelin cycle and the maintenance of its components (sphingomyelin, ceramide and sphingosine-1-phosphate) have been studied in livers of rats in the dynamics of acute toxic hepatitis caused by subcutaneous injection of an oil solution of CCl4. Sphingomyelinase activity significantly increased already at early time points and remained elevated over the whole period of observation. Activity of ceramidase did not differ significantly from the control level. The levels of sphingomyelin and sphingosine-1-phosphate did not undergo marked changes, while ceramide content significantly increased. Thus, the balance between liver content of ceramide (proapoptotic) and sphingosine-1-phosphate, the antiapoptotic factor, was shifted towards ceramide. In sphingomyelin molecules there was a significant decrease in the content of fatty acids C18: and C22:2, while in ceramide and sphingosine-1-phosphate molecules only fatty acid C22:2 changed. In spite of the significant decrease in the content of some unsaturated fatty acids, the calculated unsaturation coefficients of the fatty acid components of the sphingomyelin cycle metabolites remained essentially unchanged [sentence reconstructed — original lacked a main verb; verify against source]. Thus, our results, together with literature data, suggest involvement of ceramide-mediated apoptosis in the pathogenesis of acute toxic hepatitis. Elimination of damaged hepatocytes facilitates realization of repair processes and optimization of the cellular community of the liver.
/* * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. Oracle designates this * particular file as subject to the "Classpath" exception as provided * by Oracle in the LICENSE file that accompanied this code. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. */ package org.openjdk.jmh.annotations; import java.lang.annotation.ElementType; import java.lang.annotation.Inherited; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; /** * <p>Marks the configurable parameter in the benchmark.</p> * * <p>{@link Param} fields should be non-final fields, * and should only reside in {@link State} classes. JMH will inject * the value into the annotated field before any {@link Setup} method * is called. It is <b>not</b> guaranteed the field value would be accessible * in any initializer or any constructor of {@link State}.</p> * * <p>Parameters are acceptable on any primitive type, primitive wrapper type, * a String, or an Enum. 
The annotation value is given in String, and will be * coerced as required to match the field type.</p> * * <p>Parameters should normally provide the default values which make * benchmark runnable even without the explicit parameters set for the run. * The only exception is {@link Param} over {@link java.lang.Enum}, which * will implicitly have the default value set encompassing all enum constants.</p> * * <p>When multiple {@link Param}-s are needed for the benchmark run, * JMH will compute the outer product of all the parameters in the run.</p> */ @Inherited @Target({ElementType.FIELD}) @Retention(RetentionPolicy.RUNTIME) public @interface Param { String BLANK_ARGS = "blank_blank_blank_2014"; /** * Default values sequence for the parameter. By default, the parameter * values will be traversed during the run in the given order. * * @return values sequence to follow. */ String[] value() default { BLANK_ARGS }; }
Asian Origins of Cinderella: The Zhuang Storyteller of Guangxi Abstract Focusing on Duan Chengshi's c. 850 CE text, this paper starts with the hypothesis that Yexian's story reflects the time and place of the informant, Li Shiyuan, cited by Duan. I concentrate on Li Shiyuan's possible identity as a member of the Zhuang ethnic group in Nanning, Guangxi Province, now within the People's Republic of China near the Vietnamese border. Victor Mair's 2005 translation and footnotes stimulated my interest in Guangxi, and Katherine Kaup, who studies contemporary Zhuang politics, enabled me to interview Zhuang folklore scholars in Nanning. With some observations in Guangxi Province, but more importantly analysis of literary texts and previous scholarship, I place the Yexian story in the context of Zhuang beliefs, creativity, and history.
<reponame>toyboxman/yummy-xml-UI // // This file was generated by the JavaTM Architecture for XML Binding(JAXB) Reference Implementation, v2.2.8-b130911.1802 // See <a href="http://java.sun.com/xml/jaxb">http://java.sun.com/xml/jaxb</a> // Any modifications to this file will be lost upon recompilation of the source schema. // Generated on: 2016.03.26 at 09:53:41 AM CST // package king.flow.data; import javax.xml.bind.JAXBElement; import javax.xml.bind.annotation.XmlElementDecl; import javax.xml.bind.annotation.XmlRegistry; import javax.xml.namespace.QName; /** * This object contains factory methods for each Java content interface and Java * element interface generated in the king.flow.data package. * <p> * An ObjectFactory allows you to programatically construct new instances of the * Java representation for XML content. The Java representation of XML content * can consist of schema derived interfaces and classes representing the binding * of schema type definitions, element declarations and model groups. Factory * methods for each of these are provided in this class. 
* */ @XmlRegistry public class ObjectFactory { final static QName _Unionpayid_QNAME = new QName("", "unionpayid"); final static QName _Systemrestart_QNAME = new QName("", "systemrestart"); final static QName _Updatefile_QNAME = new QName("", "updatefile"); final static QName _Terminalstate_QNAME = new QName("", "terminalstate"); final static QName _Restart_QNAME = new QName("", "restart"); final static QName _Keyboardstate_QNAME = new QName("", "keyboardstate"); final static QName _Update_QNAME = new QName("", "update"); final static QName _Errmsg_QNAME = new QName("", "errmsg"); final static QName _Terminalid_QNAME = new QName("", "terminalid"); final static QName _Okmsg_QNAME = new QName("", "okmsg"); final static QName _Counter_QNAME = new QName("", "counter"); final static QName _Version_QNAME = new QName("", "version"); final static QName _Token_QNAME = new QName("", "token"); final static QName _Uid_QNAME = new QName("", "uid"); final static QName _Changekey_QNAME = new QName("", "changekey"); final static QName _Startid_QNAME = new QName("", "startid"); final static QName _Prtstate_QNAME = new QName("", "prtstate"); final static QName _Prscode_QNAME = new QName("", "prscode"); final static QName _Cargo_QNAME = new QName("", "cargo"); final static QName _Redirection_QNAME = new QName("", "redirection"); final static QName _Branchno_QNAME = new QName("", "branchno"); final static QName _Retcode_QNAME = new QName("", "retcode"); /** * Create a new ObjectFactory that can be used to create new instances of * schema derived classes for package: king.flow.data * */ public ObjectFactory() { } /** * Create an instance of {@link TLS } * */ public TLS createTLS() { return new TLS(); } /** * Create an instance of * {@link JAXBElement }{@code <}{@link String }{@code >}} * */ @XmlElementDecl(namespace = "", name = "unionpayid") public JAXBElement<String> createUnionpayid(String value) { return new JAXBElement<String>(_Unionpayid_QNAME, String.class, null, value); } /** * 
Create an instance of {@link JAXBElement }{@code <}{@link Integer }{@code >}
     * for the root element {@code <systemrestart>}.
     * <p>NOTE(review): this class appears to be JAXB-generated boilerplate;
     * prefer regenerating from the schema over hand-editing these factories.
     */
    @XmlElementDecl(namespace = "", name = "systemrestart")
    public JAXBElement<Integer> createSystemrestart(Integer value) {
        return new JAXBElement<Integer>(_Systemrestart_QNAME, Integer.class, null, value);
    }

    /**
     * Create an instance of {@link JAXBElement }{@code <}{@link String }{@code >}
     * for the root element {@code <updatefile>}.
     */
    @XmlElementDecl(namespace = "", name = "updatefile")
    public JAXBElement<String> createUpdatefile(String value) {
        return new JAXBElement<String>(_Updatefile_QNAME, String.class, null, value);
    }

    /**
     * Create an instance of {@link JAXBElement }{@code <}{@link Integer }{@code >}
     * for the root element {@code <terminalstate>}.
     */
    @XmlElementDecl(namespace = "", name = "terminalstate")
    public JAXBElement<Integer> createTerminalstate(Integer value) {
        return new JAXBElement<Integer>(_Terminalstate_QNAME, Integer.class, null, value);
    }

    /**
     * Create an instance of {@link JAXBElement }{@code <}{@link Integer }{@code >}
     * for the root element {@code <restart>}.
     */
    @XmlElementDecl(namespace = "", name = "restart")
    public JAXBElement<Integer> createRestart(Integer value) {
        return new JAXBElement<Integer>(_Restart_QNAME, Integer.class, null, value);
    }

    /**
     * Create an instance of {@link JAXBElement }{@code <}{@link Integer }{@code >}
     * for the root element {@code <keyboardstate>}.
     */
    @XmlElementDecl(namespace = "", name = "keyboardstate")
    public JAXBElement<Integer> createKeyboardstate(Integer value) {
        return new JAXBElement<Integer>(_Keyboardstate_QNAME, Integer.class, null, value);
    }

    /**
     * Create an instance of {@link JAXBElement }{@code <}{@link Integer }{@code >}
     * for the root element {@code <update>}.
     */
    @XmlElementDecl(namespace = "", name = "update")
    public JAXBElement<Integer> createUpdate(Integer value) {
        return new JAXBElement<Integer>(_Update_QNAME, Integer.class, null, value);
    }

    /**
     * Create an instance of {@link JAXBElement }{@code <}{@link String }{@code >}
     * for the root element {@code <errmsg>}.
     */
    @XmlElementDecl(namespace = "", name = "errmsg")
    public JAXBElement<String> createErrmsg(String value) {
        return new JAXBElement<String>(_Errmsg_QNAME, String.class, null, value);
    }

    /**
     * Create an instance of {@link JAXBElement }{@code <}{@link String }{@code >}
     * for the root element {@code <terminalid>}.
     */
    @XmlElementDecl(namespace = "", name = "terminalid")
    public JAXBElement<String> createTerminalid(String value) {
        return new JAXBElement<String>(_Terminalid_QNAME, String.class, null, value);
    }

    /**
     * Create an instance of {@link JAXBElement }{@code <}{@link String }{@code >}
     * for the root element {@code <okmsg>}.
     */
    @XmlElementDecl(namespace = "", name = "okmsg")
    public JAXBElement<String> createOkmsg(String value) {
        return new JAXBElement<String>(_Okmsg_QNAME, String.class, null, value);
    }

    /**
     * Create an instance of {@link JAXBElement }{@code <}{@link Long }{@code >}
     * for the root element {@code <counter>}.
     */
    @XmlElementDecl(namespace = "", name = "counter")
    public JAXBElement<Long> createCounter(Long value) {
        return new JAXBElement<Long>(_Counter_QNAME, Long.class, null, value);
    }

    /**
     * Create an instance of {@link JAXBElement }{@code <}{@link String }{@code >}
     * for the root element {@code <version>}.
     */
    @XmlElementDecl(namespace = "", name = "version")
    public JAXBElement<String> createVersion(String value) {
        return new JAXBElement<String>(_Version_QNAME, String.class, null, value);
    }

    /**
     * Create an instance of {@link JAXBElement }{@code <}{@link String }{@code >}
     * for the root element {@code <token>}.
     */
    @XmlElementDecl(namespace = "", name = "token")
    public JAXBElement<String> createToken(String value) {
        return new JAXBElement<String>(_Token_QNAME, String.class, null, value);
    }

    /**
     * Create an instance of {@link JAXBElement }{@code <}{@link String }{@code >}
     * for the root element {@code <uid>}.
     */
    @XmlElementDecl(namespace = "", name = "uid")
    public JAXBElement<String> createUid(String value) {
        return new JAXBElement<String>(_Uid_QNAME, String.class, null, value);
    }

    /**
     * Create an instance of {@link JAXBElement }{@code <}{@link Integer }{@code >}
     * for the root element {@code <changekey>}.
     */
    @XmlElementDecl(namespace = "", name = "changekey")
    public JAXBElement<Integer> createChangekey(Integer value) {
        return new JAXBElement<Integer>(_Changekey_QNAME, Integer.class, null, value);
    }

    /**
     * Create an instance of {@link JAXBElement }{@code <}{@link Long }{@code >}
     * for the root element {@code <startid>}.
     */
    @XmlElementDecl(namespace = "", name = "startid")
    public JAXBElement<Long> createStartid(Long value) {
        return new JAXBElement<Long>(_Startid_QNAME, Long.class, null, value);
    }

    /**
     * Create an instance of {@link JAXBElement }{@code <}{@link Integer }{@code >}
     * for the root element {@code <prtstate>}.
     */
    @XmlElementDecl(namespace = "", name = "prtstate")
    public JAXBElement<Integer> createPrtstate(Integer value) {
        return new JAXBElement<Integer>(_Prtstate_QNAME, Integer.class, null, value);
    }

    /**
     * Create an instance of {@link JAXBElement }{@code <}{@link String }{@code >}
     * for the root element {@code <prscode>}.
     */
    @XmlElementDecl(namespace = "", name = "prscode")
    public JAXBElement<String> createPrscode(String value) {
        return new JAXBElement<String>(_Prscode_QNAME, String.class, null, value);
    }

    /**
     * Create an instance of {@link JAXBElement }{@code <}{@link String }{@code >}
     * for the root element {@code <cargo>}.
     */
    @XmlElementDecl(namespace = "", name = "cargo")
    public JAXBElement<String> createCargo(String value) {
        return new JAXBElement<String>(_Cargo_QNAME, String.class, null, value);
    }

    /**
     * Create an instance of {@link JAXBElement }{@code <}{@link String }{@code >}
     * for the root element {@code <redirection>}.
     */
    @XmlElementDecl(namespace = "", name = "redirection")
    public JAXBElement<String> createRedirection(String value) {
        return new JAXBElement<String>(_Redirection_QNAME, String.class, null, value);
    }

    /**
     * Create an instance of {@link JAXBElement }{@code <}{@link String }{@code >}
     * for the root element {@code <branchno>}.
     */
    @XmlElementDecl(namespace = "", name = "branchno")
    public JAXBElement<String> createBranchno(String value) {
        return new JAXBElement<String>(_Branchno_QNAME, String.class, null, value);
    }

    /**
     * Create an instance of {@link JAXBElement }{@code <}{@link Integer }{@code >}
     * for the root element {@code <retcode>}.
     */
    @XmlElementDecl(namespace = "", name = "retcode")
    public JAXBElement<Integer> createRetcode(Integer value) {
        return new JAXBElement<Integer>(_Retcode_QNAME, Integer.class, null, value);
    }
}
WASHINGTON — Attorney General Jeff Sessions will testify at a public hearing of the Senate intelligence committee Tuesday afternoon, the committee said in a statement. This will be the first time Sessions has testified in Congress since he recused himself from the Justice Department’s probe into Russian meddling in last year’s election and the firing of FBI Director James Comey. “(Sessions) believes it is important for the American people to hear the truth directly from him and looks forward to answering the committee’s questions tomorrow,” a Justice Department spokesperson said. The announcement caps the drama that started over the weekend when Sessions canceled two appearances Thursday, citing former FBI Director Comey’s blistering testimony last week. Comey told the intelligence committee in a closed session that Sessions may have had a third, undisclosed interaction with Russia’s ambassador to the US, according to people familiar with the briefing. “In light of reports regarding Mr. Comey’s testimony before the Senate Select Committee on Intelligence, it is important that I have an opportunity to address these matters in the appropriate forum,” Sessions wrote over the weekend. But intelligence committee members at first were unaware of a planned hearing.
export interface Point { x: string; y: string; } export interface SearchResult { [index: string]: string | undefined; addressName: string; categoryGroupCode: string; categoryGroupName: string; categoryName: string; distance: string; id: string; phone: string; placeName: string; placeUrl: string; roadAddressName: string; x: string; y: string; } export interface RawResult { [index: string]: string | undefined; address_name: string; category_group_code: string; category_group_name: string; category_name: string; distance: string; id: string; phone: string; place_name: string; place_url: string; road_address_name: string; x: string; y: string; }
/** * Create a context menu if right-clicking */ @Override protected void contextMenuEvent(QContextMenuEvent event) { if(isEditable) { QPointF pos=mapToScene(event.pos()); for(LinTrack track:tracks) { if(track.contextMenuEvent(event, pos)) return; } } }
<filename>jcasts/podcasts/migrations/0084_auto_20211112_1033.py # Generated by Django 3.2.9 on 2021-11-12 10:33 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ("podcasts", "0083_podcast_hub"), ] operations = [ migrations.AddField( model_name="podcast", name="hub_exception", field=models.TextField(blank=True), ), migrations.AddField( model_name="podcast", name="last_subscribe_callback", field=models.DateTimeField(blank=True, null=True), ), migrations.AddField( model_name="podcast", name="subscribe_requested", field=models.DateTimeField(blank=True, null=True), ), migrations.AddField( model_name="podcast", name="subscribe_status", field=models.CharField( choices=[ ("unsubscribed", "Unsubscribed"), ("subscribed", "Subscribed"), ("requested", "Requested"), ("error", "error"), ], default="unsubscribed", max_length=30, ), ), migrations.AddField( model_name="podcast", name="subscribed", field=models.DateTimeField( blank=True, null=True, verbose_name="Subscribed until" ), ), ]
A Toolkit for Enhancing End-of-Life Care: An Examination of Implementation and Impact The purpose of this study was to examine the infusion of a Toolkit for Enhancing End-of-Life Care in prisons, as well as the outcome and impact on the quality of prison end-of-life care. A total of 74 front-line staff and administrators were in attendance across two post-Toolkit-infusion evaluation visits. Applying qualitative analysis, co-researcher outcome findings were related to activities, community outreach and relations, multidisciplinary team, quality improvement approach, and participatory action research team effects. Organizational outcomes included barriers and challenges, cost, organizational features, sphere of influence, readiness (for change), and sustainability.
Solution structure of the rhodanese homology domain At4g01050(175–295) from Arabidopsis thaliana The three-dimensional structure of the rhodanese homology domain At4g01050(175–295) from Arabidopsis thaliana has been determined by solution nuclear magnetic resonance methods based on 3043 upper distance limits derived from NOE intensities measured in three-dimensional NOESY spectra. The structure shows a backbone root mean square deviation to the mean coordinates of 0.43 Å for the structured residues 7–125. The fold consists of a central parallel sheet with five strands in the order 1-5-4-2-3 and arranged in the conventional counterclockwise twist, and helices packing against each side of the sheet. Comparison with the sequences of other proteins with a rhodanese homology domain in Arabidopsis thaliana indicated residues that could play an important role in the scaffold of the rhodanese homology domain. Finally, a three-dimensional structure comparison of the present noncatalytic rhodanese homology domain with the noncatalytic rhodanese domains of sulfurtransferases from other organisms discloses differences in the length and conformation of loops that could throw light on the role of the noncatalytic rhodanese domain in sulfurtransferases.
A clear sky. Low 43F. Winds NW at 10 to 20 mph.. A clear sky. Low 43F. Winds NW at 10 to 20 mph. or stop in to visit! atmosphere and friendly, accessible staff. Two Communities You Can Call Home!
#include <muduo/base/StringPiece.h> #include <map> namespace zurg { // returns file name std::string writeTempFile(muduo::StringPiece prefix, muduo::StringPiece content); void parseMd5sum(const std::string& lines, std::map<muduo::StringPiece, muduo::StringPiece>* md5sums); void setupWorkingDir(const std::string& cwd); void setNonBlockAndCloseOnExec(int fd); }
Each year about this time, mailboxes across America are filled with tax forms. Sometimes, those tax forms go straight to a tax professional, unopened. Other times, taxpayers may dutifully open those forms and type the information, box for box, into tax preparation software. In both cases, it's not unusual for taxpayers to not have an understanding of the meaning of all of the numbers, letters and other information on those forms. That's about to change. This week, I'll be dissecting some of the most basic tax forms for you. The more you know, the less scary some of these forms can be. A form W-2 is issued by an employer to an employee. That carries with it some significance and not only for tax reasons. An employer has certain reporting, withholding and insurance requirements for employees that are a bit different from those owed to an independent contractor. The threshold for issuing a form W-2 is based on dollars - nothing else matters. Not time worked. Not position held. Just dollars (or dollar equivalents) earned. The magic number is $600. Every employer who pays at least $600 in cash (or cash equivalent, including taxable benefits) must issue a form W-2. If any taxes are withheld, including those for Social Security or Medicare, a form W-2 must be issued regardless of how much was paid out to an employee. An employer prepares six copies of each form W-2 per employee. Yes, that's a lot of paperwork. Copy A is transmitted to the Social Security Administration (SSA) along with a form W-3 (the form W-3 reports the total of all of the forms W-2 for the employer). The due date for employers to get that information to SSA is February 28. Copy 1 is issued to any applicable state, city or local tax department. Copy D is retained by the employer. As an employee, you get three copies of your form W-2. Those three copies must be issued by January 31 of each year.
Copy B is for use in reporting your federal income taxes and is generally filed with your federal income tax return (unless you are e-filing in which case you have to provide it to the preparer but it is not usually forwarded to IRS). Copy 2 is for use in reporting your state, city or local income tax and is filed with the relevant taxing authorities. Copy C is for your records (you should retain Copy C for at least three years after you file or the due date of your return, whichever is later). The left side of the form is for reporting taxpayer information; the right side of the form is used to report financials and codes. The bottom of the form reports local and state tax information. Box a. Your Social Security Number (SSN) is reported in box (a). You should always double-check this to make sure it's correct. If it's not correct, you need to request a new form W-2 from your employer. An error could slow the processing of your return. Box b. Your employer's EIN is reported in box (b). An EIN is more or less the employer's equivalent of your SSN. Box c. Your employer's address is reported in box (c). This is the legal address of your employer which may or may not be where you actually work. Don't let that throw you. Box d. The control number is an internal number used by your employer or your employer's payroll department. If your employer doesn't use control numbers, box (d) will be blank. Boxes e and f. These appear as one big block on your form W-2. Your full name is reported at box (e). It's supposed to reflect the name that's actually on your Social Security card (the SSA isn't crazy about suffixes, even if you use them, so you shouldn't see one on your form W-2 unless it's on your Social Security card). If your name isn't exactly as it appears on your Social Security card, you may need a new form W-2; ask your employer if you're not sure. 
Your address is reported at box (f) and should reflect your mailing address - which could be a post office box - likely without punctuation (a USPS preference). If your address on the form W-2 isn't correct, notify your employer: you won't need a new form W-2 but your employer needs to update his or her records. Box 1 shows your total taxable wages, tips, prizes and other compensation, as well as any taxable fringe benefits. It does not include elective deferrals to retirement plans, pretax benefits or payroll deductions. Since the figure (highlighted by the red arrow in my example below) doesn't include those amounts, it's not unusual for this amount to be less than the amounts included at boxes 2 and 3. It's the number most taxpayers care about the most. Box 2 reports the total amount of federal income taxes withheld from your pay during the year. This amount (highlighted by the purple arrow in my example below) is determined by the elections on your form W-4 based on exemptions and any additional withholding. If you find that this number is too low or too high, you'll want to make an adjustment on your form W-4 for the next year. Box 3 shows your total wages subject to the Social Security tax. This figure is calculated before any payroll deductions which means that the amount in box 3 could be higher than the number reported in box 1, as in my example. It could also be less than the amount in box 1, if you're a high-wage earner, since the total of boxes 3 and 7 (see below) cannot exceed the maximum Social Security wage base. For 2013, that amount was $113,700. If you have more than one job, for Social Security tax purposes, the cap still applies. Box 4 shows the total of Social Security taxes withheld for the year. Unlike federal income taxes, Social Security taxes are calculated based on a flat rate. The rate is 6.2%. The amount in Box 4 should, then, be equal to the amount in box 3 times 6.2%. 
Since you should not have more Social Security withholding than the maximum wage base times 6.2%, the amount in box 4 should not exceed $7,049.40. In my example, the figure is $50,000 x .062, or $3,100.00. Box 5 indicates wages subject to Medicare taxes. Medicare taxes generally do not include any pretax deductions and will include most taxable benefits. That, combined with the fact that unlike Social Security wages, there is no cap for Medicare taxes, means that the figure in box 5 may be larger than the amounts shown in box 1 or box 3. In fact, it's likely the largest number on your form W-2. Box 6 shows the amount of Medicare taxes withheld for the year. Like Social Security taxes, Medicare taxes are figured based on a flat rate. The rate is 1.45%. For most taxpayers, this means that the figure in box 6 is equal to the figure in box 5 times 1.45% (as in my example indicated by the green arrow since $50,000 x 1.45% = $725). However, under a new law that kicked in beginning in 2013, an employer must withhold additional Medicare tax of .9% from wages paid to an individual earning more than $200,000, regardless of filing status or wages paid by another employer. Since your employer doesn't know your entire financial picture, it's possible under the new law that you may have to pay more additional Medicare taxes than your withholding depending on filing status, compensation and self-employment income. Tips which were reported to your employer will be found in box 7. If this box is blank, it means that you did not report tips to your employer (this doesn't mean that you don't have to report those tips to IRS). Allocated tips reported in box 8 are those that your employer has figured are attributable to you. Those tips are considered income to you. There won't be anything in box 9. The reporting requirement for that box expired a few years ago and the box hasn't yet been removed from the form (go figure). 
At box 10, your employer will report the total of any benefits paid on your behalf under a dependent care assistance program. Amounts paid out under a qualified plan which are less than $5,000 are considered non-taxable benefits. That number will report the value of all dependent care benefits, including those greater than the $5,000 exclusion (if the value exceeds $5,000, that excess will be reported in boxes 1, 3 and 5). Box 11 is used to report amounts which have been distributed to you from your employer's non-qualified deferred compensation plan (this amount is taxable). This isn't to be confused with amounts contributed by you. That shows up in box 12. In the sample form W-2, I've included three of the most popular codes. Elective deferrals (Code D) are extremely popular. As noted above, these amounts will generally be included at box 3 and box 5 even if they are excluded from wages at box 1. The cost of employer-sponsored health coverage is reported using Code DD. This amount is reportable under the Affordable Care Act but it is not taxable to you. Excludable moving expenses (Code P) for qualified costs are an example of benefits which will be reported by your employer but are not taxable to you. If reimbursements are non-qualified, they will be reported as income to you in boxes 1, 3, and 5. Box 13 really isn't one box: it's a series of three boxes. Your employer will check the applicable box if you are a statutory employee (employees whose earnings are subject to Social Security and Medicare taxes but not federal income tax withholding); if you participated in your employer's retirement plan during the year; or if you received sick pay under your employer's third-party insurance policy. Box 14 is a "catch all" box. Your employer reports anything here that doesn't fit anywhere else on the form W-2. Examples include state disability insurance taxes withheld, union dues, health insurance premiums deducted and nontaxable income.
Your state and local tax reporting can be found at the very bottom of the form W-2. Box 15 is very straightforward and includes your employer's state and state tax identification number. If you work in a state without a reporting requirement, this box (along with boxes 16 and 17) will be blank. If you had multiple withholdings in a number of states, more than one box will be filled. If you are subject to state taxes, box 16 will indicate the total amount of taxable wages for state tax purposes. If you have wages reported in box 16, box 17 will show the total amount of state income taxes withheld during the year. If you live in a state that has a flat state tax (like PA), you can double check to make sure that your withholding is correct by multiplying the amount in box 16 by the flat tax rate. If you are subject to local, city, or other state income taxes, those will be reported in box 18. If you have wages subject to withholding in more than two states or localities, your employer will furnish an additional form W-2. If you have wages in box 18 subject to local, city, or other state income taxes, any amount of withholding will be reported at box 19. Box 20 is exactly what you'd expect: the name of the local, city, or other state tax being reported at box 19. You should have received your form W-2 - with all of this information properly reported - by January 31, 2014. If you didn't, you'll want to contact your employer and possibly take more action. For more details on other tax forms, like the forms 1098 and 1099, check out the rest of the series this week. Want more taxgirl goodness? Pick your poison: receive posts by email, follow me on twitter (@taxgirl), hang out with me on Facebook or check out my YouTube channel. If you want to keep an eye on documents I've posted, check out my profile on Scribd. And finally, you can subscribe to my podcast on the site or via iTunes (it's free).
South Korea has banned all fish imports from a large area of Japan in response to growing concern over the possible environmental impact of recent leaks of highly toxic water at the Fukushima Daiichi nuclear power plant. In Japan, the row over the plant operator's handling of the leaks deepened on Friday when the head of the country's nuclear watchdog issued a stern rebuke to Tokyo Electric Power (Tepco) for causing unnecessary alarm overseas by releasing "scientifically unacceptable" information about radiation levels. Seoul's decision came as Japan's prime minister, Shinzo Abe, prepared to make a final pitch for Tokyo's bid to host the 2020 Olympics. Tokyo is the bookmakers' favourite but international concern over contamination at Fukushima Daiichi is fuelling speculation that the Japanese capital could lose out to Madrid or Istanbul. The International Olympic Committee will name the host city at a meeting in Buenos Aires on Saturday evening. South Korea said it had imposed a total ban on fish from Fukushima and seven other prefectures in response to growing public fears over the safety of produce from the region. The fisheries vice-minister, Son Jae-hak, told reporters that the ban would stay in place indefinitely, adding that Japanese authorities had failed to provide timely and detailed information about the water leaks. South Korea imported 5,000 tonnes of fish from the affected region last year, including stocks from Aomori in the far north, to Chiba, located east of Tokyo. Despite assurances by Japan that it rigorously tests food for radiation, China has also maintained a ban on dairy, vegetable and seafood imports from several prefectures, including Fukushima, since March 2011. "We have provided the South Korean government with relevant information since the contaminated water leaks," the chief cabinet secretary, Yoshihide Suga, said . "We will continue to ask South Korea to take measures based on scientific facts." 
In a statement carried by the Yonhap news agency, South Korea's fisheries ministry said the ban had been necessary "as the government concluded that it is unclear how the incident in Japan will progress in the future and that the information the Japanese government has provided so far is not enough to predict future developments". The impact from the recent water leaks was being felt as far away as Buenos Aires, where Japanese officials were besieged by questions about Fukushima ahead of the IOC's decisive vote. Hiroshi Hase, an MP and former Olympic wrestler, told reporters in the Argentinian capital that contamination from Fukushima was "not even an issue" for the health of people in Tokyo, located 150 miles south of Fukushima Daiichi. With the IOC decision imminent, Shunichi Tanaka, chairman of Japan's nuclear regulation authority, criticised Tepco for inflating fears around the world by releasing misleading data about the water leaks. Earlier this week, the utility said it had detected measured radiation of 2,200 millisieverts an hour at a hotspot near a water tank. Tanaka said the measurement was misleading, and had prompted alarmist reports in the domestic and international media. "What Tepco is talking about is the level of contamination," he said, "So to describe it with the unit 'millisieverts per hour' is scientifically unacceptable. It's like describing how much something weighs by using centimetres." He said Tepco should have used the unit becquerel, which signifies the radioactivity levels in the water itself rather than the potential human exposure levels. "I have come to think they need to be spoon fed," Tanaka said of Tepco. "It is regrettable that Tepco has caused confusion and fear in the international community by spreading misleading information." The 2,200-millisievert an hour reading, confirmed by Tepco, is accurate, however. 
The firm has been at pains to point out that most of the radiation was emitted as beta rays – as opposed to far more dangerous gamma rays – which travel only short distances and are easily blocked by protective clothing. Concern has been voiced over the danger posed to the health of nuclear workers. Until recently, only two workers were responsible for checking the water tanks, and devices used to measure radiation have had to be replaced because they could not detect radiation levels of more than 100 millisieverts an hour. The number of workers checking the tanks has since been increased to almost 100, Tepco said. Tanaka said reports that the water leaks represented another catastrophe at the plant had been overblown, adding that the quantity of radiation leaking into the Pacific Ocean would have "no meaningful effect" on the environment. Criticism of Tepco's failure to deal with the buildup of huge quantities of radioactive water at Fukushima Daiichi has intensified since its belated admission last month that the plant was leaching 300 tonnes of contaminated water into the ocean every day. The cleanup operation has been further blighted by leaks from water storage tanks and high levels of radiation. About 1,000 tanks containing a total of 335,000 tonnes of water have been built behind the plant, but efforts to remove dangerous radioactive substances from the water have been held up by technical glitches. Earlier this week, Tanaka offered assurances that the stored water would not be discharged into the sea unless radiation levels had been brought down to legally acceptable levels. Coolant water that is being pumped into the wrecked reactors becomes contaminated when it comes into contact with melted uranium fuel. It then mixes with groundwater flowing in from the hills behind the plant, requiring workers to pump out and store an estimated 400 tonnes of the toxic liquid every day.
This week, the Japanese government announced almost 50bn yen (£320m) in emergency funds to build a frozen underground barrier to prevent groundwater from leaking into the reactor basements, and to develop a reliable water treatment system.
The introduction of capillary structures in 4D simulated vascular tree for ART 3.5D algorithm further validation Several neurosurgical procedures, such as Arteriovenous Malformations (AVMs), aneurysm embolizations and StereoElectroEncephaloGraphy (SEEG) require accurate reconstruction of the cerebral vascular tree, as well as the classification of arteries and veins, in order to increase the safety of the intervention. Segmentation of arteries and veins from 4D CT perfusion scans has already been proposed in different studies. Nonetheless, such procedures require long acquisition protocols and the radiation dose given to the patient is not negligible. Hence, space is open to approaches attempting to recover the dynamic information from standard Contrast Enhanced Cone Beam Computed Tomography (CE-CBCT) scans. The algorithm proposed by our team is called ART 3.5D. It is a novel algorithm based on the postprocessing of both the angiogram and the raw data of a standard Digital Subtraction Angiography from a CBCT (DSA-CBCT) allowing arteries and veins segmentation and labeling without requiring any additional radiation exposure for the patient and neither lowering the resolution. In addition, while in previous versions of the algorithm just the distinction of arteries and veins was considered, here the capillary phase simulation and identification is introduced, in order to increase further information useful for more precise vasculature segmentation.
Evidence-based policy is a powerful notion that has entered the public lexicon in recent years. But, like another well-worn idea, agile, it has become increasingly debased as it has grown in popularity. Evidence-based policy is now invoked to provide a supposedly apolitical, scientific solution to any number of problems, many of which are primarily about ideology and values. The pursuit of data and evidence can improve our knowledge of what’s happening on the ground and inform our response, but it can’t give us the answers to everything. It’s important to remember the limits of evidence-based policy, argued Melbourne School of Government foundation director Professor Helen Sullivan at Monday’s Power to Persuade symposium in Melbourne. “We start focusing on those people who are seduced by the slogans. Not us, it’s those people. And that’s really dangerous.” Plenty of important social policy questions are difficult to evaluate and cannot be reduced to a set of numbers or a formula. “There are some things that cannot be measured, and most of the things we do in collaboration cannot be measured,” Sullivan says. Boiling things down to the data is also expensive and time-consuming. Policy experts need to think about what is appropriate to measure and what’s not. “It’s really important both to recognise that there are things that can be measured and should be measured and we should spend time doing that — but it’s also important to recognise that sometimes the pursuit of measurement leads you to more and more resources spent on things that tell you less and less,” she thinks. This trend didn’t come from nowhere. “We’ve only really become completely obsessed with measuring things over the last thirty years. It’s not coincidental that it emerged alongside the marketisation and managerialisation of our public services,” Sullivan noted. Relying only on abstracted data can hide as much as it reveals. 
Sharon Fraser, general manager of central Victorian social policy initiative Go Goldfields, highlighted the problem of policy processes ignoring the lived experience of those it impacts. It’s often assumed expertise means having letters after your name or having a theoretical framework — in practice this has seen community organisations having to fight to have survivors of family violence, for example, recognised as experts in that field and given formal input into policy development. A narrow focus on evidence can lead policy experts to play down their own ideological and personal backgrounds. Sullivan pointed to a tendency to dismiss the concerns of some sectors of the public because the “evidence” shows that they’re wrong. Polls showing low public trust in institutions suggest there’s a problem that needs to be addressed. “This isn’t just about government, this is about the fact that people don’t trust big institutions. People are frightened, people are anxious,” she explained. “One of the interesting things that happens then is we start focusing on those people who are seduced by the slogans. Not us, it’s those people. And that’s really dangerous. “I also have a real problem with the fact that ‘we’ — whoever ‘we’ are — are all about evidence, and ‘they’ — whoever they are — are all about things other than evidence.”
<filename>aswscommon/client/batch/session_scheduler.h<gh_stars>1-10 #pragma once // session_scheduler.h // // (C) Copyright 2014 <NAME> // #include "session.h" //-------------------------------------------------------------------------------- #include <list> //-------------------------------------------------------------------------------- namespace asws { namespace client { namespace batch { //-------------------------------------------------------------------------------- /* session_scheduler Performs the submission of multiple sessions supports two modes, sequential execution or round robin */ class session_scheduler : public interface_isession { public: /* Session scheduler mode */ typedef enum { round_robin = 1, sequential = 2 } mode; /* Constructor */ session_scheduler(mode m) : _mode(m) { } /* Clear up the list */ virtual ~session_scheduler() { _sessions.clear(); } /* Add the session to the scheduler */ void add_session(std::unique_ptr<session>&& sp) { _sessions.push_back(std::move(sp)); } /* Execute the sessions */ virtual void run(); private: // Scheduler mode mode _mode; // List of the sessions std::list<std::unique_ptr<session>> _sessions; }; // class session //-------------------------------------------------------------------------------- } // namespace batch } // namespace client } // namespace asws //--------------------------------------------------------------------------------
<reponame>michaelruocco/idv-context<filename>method/otp/domain/entities/src/test/java/uk/co/idv/method/entities/otp/policy/delivery/phone/VoiceDeliveryMethodConfigTest.java package uk.co.idv.method.entities.otp.policy.delivery.phone; import org.junit.jupiter.api.Test; import uk.co.idv.method.entities.eligibility.Eligibility; import uk.co.idv.method.entities.otp.delivery.phone.OtpPhoneNumber; import java.time.Instant; import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.BDDMockito.given; import static org.mockito.Mockito.mock; class VoiceDeliveryMethodConfigTest { private final OtpPhoneNumberConfig phoneNumberConfig = mock(OtpPhoneNumberConfig.class); private final VoiceDeliveryMethodConfig config = new VoiceDeliveryMethodConfig(phoneNumberConfig); @Test void shouldReturnType() { assertThat(config.getType()).isEqualTo("voice"); } @Test void shouldReturnPhoneNumberConfig() { assertThat(config.getPhoneNumberConfig()).isEqualTo(phoneNumberConfig); } @Test void shouldReturnEligibilityFromPhoneNumberConfig() { OtpPhoneNumber number = mock(OtpPhoneNumber.class); Instant now = Instant.now(); Eligibility expectedEligibility = mock(Eligibility.class); given(phoneNumberConfig.toEligibility(number, now)).willReturn(expectedEligibility); Eligibility eligibility = config.toEligibility(number, now); assertThat(eligibility).isEqualTo(expectedEligibility); } }
<reponame>jatinmankar8/xcurator
/*
 * Copyright (c) 2013, University of Toronto.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License. You may obtain
 * a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 */
package edu.toronto.cs.xml2rdf.utils;

import java.util.LinkedList;
import java.util.List;

/**
 * A simple representation of disjoint sets (union-find) with union by rank
 * and path compression. Also carries bookkeeping fields (ancestor, black,
 * used, children) that callers use for Tarjan-style offline algorithms.
 *
 * @param <T> payload type carried by each set node
 */
public class DisjointSet<T> {

  /** The resource this set represents. */
  private T data;

  /** The parent set in a union. */
  private DisjointSet<T> m_parent;

  /**
   * Nodes that have (at some point) been parented under this node.
   * NOTE(review): setParent() only ever appends here — entries are never
   * removed from a former parent, so this list is cumulative rather than a
   * snapshot of the current tree.
   */
  private List<DisjointSet<T>> m_children;

  /** Heuristic used to build balanced unions. */
  private int m_rank;

  /** The link to the distinguished member set. */
  private DisjointSet<T> m_ancestor;

  /** Set to true when the node has been processed. */
  private boolean m_black = false;

  /**
   * Set to true when we've inspected a black set, since the result is only
   * correct just after both of the sets for u and v have been marked black.
   */
  private boolean m_used = false;

  public DisjointSet(T data) {
    this.data = data;
    m_rank = 0;
    m_parent = this;
    m_children = new LinkedList<DisjointSet<T>>();
  }

  /** @return the payload this node represents */
  public T getData() {
    return data;
  }

  public DisjointSet<T> getParent() {
    return m_parent;
  }

  /**
   * Re-points this node at {@code parent} and records this node (plus its
   * accumulated children) in the parent's child list.
   */
  public void setParent(DisjointSet<T> parent) {
    m_parent = parent;
    parent.m_children.add(this);
    parent.m_children.addAll(this.m_children);
  }

  public int getRank() {
    return m_rank;
  }

  public void incrementRank() {
    m_rank++;
  }

  public DisjointSet<T> getAncestor() {
    return m_ancestor;
  }

  public void setAncestor(DisjointSet<T> anc) {
    m_ancestor = anc;
  }

  public void setBlack() {
    m_black = true;
  }

  public boolean isBlack() {
    return m_black;
  }

  public boolean used() {
    return m_used;
  }

  public void setUsed() {
    m_used = true;
  }

  public List<DisjointSet<T>> getChildren() {
    return m_children;
  }

  /**
   * The find operation collapses the pointer to the root parent (path
   * compression), one of Tarjan's standard optimisations.
   *
   * @return the representative of the union containing this set
   */
  public DisjointSet<T> find() {
    if (getParent() == this) {
      // This node is the representative of its set.
      return this;
    }
    DisjointSet<T> root = getParent().find();
    // BUG FIX: only relink when the parent actually changes. The previous
    // code called setParent(root) unconditionally, so every find() on an
    // already-compressed node appended duplicate entries to the root's
    // child list, growing it without bound.
    if (getParent() != root) {
      setParent(root);
    }
    return root;
  }

  /**
   * The union of two sets, by rank.
   *
   * @param y the set to merge with this one
   */
  public void union(DisjointSet<T> y) {
    DisjointSet<T> xRoot = find();
    DisjointSet<T> yRoot = y.find();

    if (xRoot.getRank() > yRoot.getRank()) {
      yRoot.setParent(xRoot);
    } else if (yRoot.getRank() > xRoot.getRank()) {
      xRoot.setParent(yRoot);
    } else if (xRoot != yRoot) {
      // Equal ranks: pick x as the root and bump its rank.
      yRoot.setParent(xRoot);
      xRoot.incrementRank();
    }
  }

  /**
   * @see java.lang.Object#toString()
   * @return a string representation of this set for debugging
   */
  @Override
  public String toString() {
    // StringBuilder instead of StringBuffer: no synchronization is needed
    // for a method-local buffer; output is unchanged.
    StringBuilder buf = new StringBuilder();
    buf.append("DisjointSet{node=");
    buf.append(data);
    buf.append(",anc=");
    buf.append((getAncestor() == this) ? "self"
        : (getAncestor() == null ? "null" : getAncestor().toShortString()));
    buf.append(",parent=");
    buf.append((getParent() == this) ? "self"
        : (getParent() == null ? "null" : getParent().toShortString()));
    buf.append(",rank=");
    buf.append(getRank());
    buf.append(m_black ? ",black" : ",white");
    buf.append("}");
    return buf.toString();
  }

  /** Abbreviated form of {@link #toString()} used for nested references. */
  public String toShortString() {
    StringBuilder buf = new StringBuilder();
    buf.append("DisjointSet{node=");
    buf.append(data);
    buf.append(",parent=");
    buf.append((getParent() == this) ? "self"
        : (getParent() == null ? "null" : getParent().toShortString()));
    buf.append("...}");
    return buf.toString();
  }
}
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CHROME_BROWSER_EXTENSIONS_EXTENSION_GARBAGE_COLLECTOR_H_ #define CHROME_BROWSER_EXTENSIONS_EXTENSION_GARBAGE_COLLECTOR_H_ #include <map> #include <string> #include "base/files/file_path.h" #include "base/memory/weak_ptr.h" namespace content { class BrowserContext; } class ExtensionService; namespace extensions { // The class responsible for cleaning up the cruft left behind on the file // system by uninstalled (or failed install) extensions. // The class is owned by ExtensionService, but is mostly independent. Tasks to // garbage collect extensions and isolated storage are posted once the // ExtensionSystem signals ready. class ExtensionGarbageCollector { public: explicit ExtensionGarbageCollector(ExtensionService* extension_service); ~ExtensionGarbageCollector(); #if defined(OS_CHROMEOS) // Enable or disable garbage collection. See |disable_garbage_collection_|. void disable_garbage_collection() { disable_garbage_collection_ = true; } void enable_garbage_collection() { disable_garbage_collection_ = false; } #endif // Manually trigger GarbageCollectExtensions() for testing. void GarbageCollectExtensionsForTest(); private: // Cleans up the extension install directory. It can end up with garbage in it // if extensions can't initially be removed when they are uninstalled (eg if a // file is in use). // Obsolete version directories are removed, as are directories that aren't // found in the ExtensionPrefs. // The "Temp" directory that is used during extension installation will get // removed iff there are no pending installations. void GarbageCollectExtensions(); // Garbage collects apps/extensions isolated storage, if it is not currently // active (i.e. is not in ExtensionRegistry::ENABLED). There is an exception // for ephemeral apps, because they can outlive their cache lifetimes. 
void GarbageCollectIsolatedStorageIfNeeded(); // The ExtensionService which owns this GarbageCollector. ExtensionService* extension_service_; // The BrowserContext associated with the GarbageCollector, for convenience. // (This is equivalent to extension_service_->GetBrowserContext().) content::BrowserContext* context_; // The root extensions installation directory. base::FilePath install_directory_; #if defined(OS_CHROMEOS) // TODO(rkc): HACK alert - this is only in place to allow the // kiosk_mode_screensaver to prevent its extension from getting garbage // collected. Remove this once KioskModeScreensaver is removed. // See crbug.com/280363 bool disable_garbage_collection_; #endif // Generate weak pointers for safely posting to the file thread for garbage // collection. base::WeakPtrFactory<ExtensionGarbageCollector> weak_factory_; DISALLOW_COPY_AND_ASSIGN(ExtensionGarbageCollector); }; } // namespace extensions #endif // CHROME_BROWSER_EXTENSIONS_EXTENSION_GARBAGE_COLLECTOR_H_
Field of the Invention The invention relates to a semiconductor structure and a manufacturing method thereof; particularly, the invention relates to a memory having a shallow trench isolation (STI) structure and a manufacturing method of the memory. Description of Related Art As the level of integration of semiconductor devices increases, sizes of the semiconductor devices continuously decrease, thus leading to increasing mutual influence on the semiconductor devices. Generally, isolation structures are applied to isolate the semiconductor devices from one another, so as to avoid significant influences and improve the reliability of the devices. In memory devices, the excessively small heights of the isolation structures may easily cause the mutual interference during programming actions and cause potential damages to tunneling dielectric layers, such that the reliability of the memory devices is deteriorated. If the heights of the isolation structures are excessively large, however, the gate coupling ratio (GCR) may decrease, and thus the performance of the memory devices is lowered.
import { IBestAFSRoute } from '@umijs/plugin-layout';

/**
 * 权限定义 (permission-key definitions).
 * Dot-separated keys mirror the route tree; route entries reference these
 * via their `access` property so the layout plugin can gate menu items.
 */
const Permissions = {
  template: {
    dashboard: {
      index: 'template.dashboard',
    },
    sample: {
      index: 'template.sample',
      list: {
        index: 'template.sample.list',
        edit: 'template.sample.list.edit',
        delete: 'template.sample.list.delete',
      },
    },
  },
};

// umi routes: https://umijs.org/zh/guide/router.html
const routes: IBestAFSRoute[] = [
  {
    // Account section rendered outside the main layout (login page).
    path: '/account',
    layout: false,
    routes: [
      {
        path: '/account',
        redirect: 'login',
      },
      {
        name: '登录',
        path: 'login',
        component: '@/pages/account/login/index',
      },
    ],
  },
  {
    path: '/template',
    // menu: {
    //   name: 'Wetrial',
    //   icon:''
    // },
    // flatMenu lifts the child routes into the parent menu level.
    flatMenu: true,
    routes: [
      {
        path: '/template',
        redirect: 'dashboard',
      },
      {
        path: 'dashboard',
        name: '看板',
        icon: 'dashboard',
        access: Permissions.template.dashboard.index,
        component: '@/pages/template/dashboard/index',
      },
      {
        path: 'sample',
        name: '案例',
        // NOTE(review): this uses the literal 'isAdmin' access rule rather
        // than a Permissions key like its siblings — confirm intended.
        access: 'isAdmin',
        icon: 'smile',
        routes: [
          {
            path: 'list',
            name: '列表',
            access: Permissions.template.sample.list.index,
            menu: {
              // Hide child routes (index/edit) from the side menu.
              hideChildren: true,
            },
            routes: [
              {
                path: '/template/sample/list',
                redirect: 'index',
              },
              {
                path: 'index',
                access: Permissions.template.sample.list.index,
                component: '@/pages/template/sample/list/index',
              },
              {
                // Optional :id — the same page serves create and edit.
                path: 'edit/:id?',
                component: '@/pages/template/sample/list/edit',
                access: Permissions.template.sample.list.edit,
              },
            ],
          },
          {
            path: 'tabs-share',
            name: '共享Tab',
            access: Permissions.template.sample.list.index,
            component: '@/pages/template/sample/tabs-share/index',
          },
          {
            path: 'tree-form',
            name: 'tree-form',
            access: Permissions.template.sample.list.index,
            component: '@/pages/template/sample/tree-form/index',
          },
          {
            path: 'dict',
            name: '数据字典',
            access: Permissions.template.sample.list.index,
            component: '@/pages/template/sample/dict/index',
          },
        ],
      },
      {
        // Fallback for unmatched /template routes.
        component: '@/pages/exception/404',
      },
    ],
  },
  {
    path: '/',
    redirect: '/template',
  },
  {
    // Global fallback route.
    component: '@/pages/exception/404',
  },
];

export default routes;
export { Permissions };
#!/usr/bin/env python
"""Packaging script for the memsource distribution (Python bindings for
Memsource)."""

import setuptools

# Single-source the version from the package itself.
from memsource import version

# Use the repository README as the long description shown on PyPI.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setuptools.setup(
    name="memsource",
    version=version.__version__,
    author="<NAME>",
    author_email="<EMAIL>",
    description="Python bindings for Memsource",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/Spredzy/python-memsource",
    packages=setuptools.find_packages(),
    # Runtime dependency: only `requests` is required.
    install_requires=[
        "requests"
    ],
    classifiers=[
        "Programming Language :: Python :: 3",
        "Environment :: Console",
        "Intended Audience :: Developers",
        "Intended Audience :: Information Technology",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: POSIX :: Linux",
    ],
    python_requires=">=3.6",
)
<reponame>kruschk/the-c-programming-language #include <stdio.h> int main(void) { float celsius, fahr; short int lower, upper, step; lower = -100; upper = 100; step = 10; celsius = (float)lower; printf("\nFahrenheit to Celsius Conversion Table\n\n"); printf("Celsius | Fahrenheit\n"); printf("--------|-----------\n"); while (celsius <= upper) { fahr = ((float)9.0 / (float)5.0) * celsius + (float)32.0; printf("%7.0f | %6.1f\n", celsius, fahr); celsius = celsius + step; } return 0; }
Q: Minimum drop on washer drain? I would like to add a diverter valve to my washer's drain line to facilitate a graywater system. I'm thinking of shortening the washer drain so I can relocate the P-trap higher up and add the diverter valve beneath it on the main stack. If I were to do this, what's the shortest that I could make the washer drain? What does the code say, and what does sanity say? If necessary, I can just move the washer box higher up; it's a little low as it is. Here's a picture to show my thoughts: The dryer ducting is all going to be removed, and the 220 V outlet is going to be relocated, so ignore those. And needless to say, that cast iron is going to be replaced with ABS.

A: The International Residential Code says that a standpipe must be no less than 18" above the trap weir.

International Residential Code 2012, Chapter 27 - Plumbing Fixtures, Section P2706 Waste Receptors: "P2706.2 Standpipes. Standpipes shall extend not less than 18 inches (457 mm) but not greater than 42 inches (1067 mm) above the trap weir. Access shall be provided to standpipe traps and drains for rodding."

Check local codes to determine what's acceptable in your area.

A: Practically speaking, what you want to do is fine. The minimum horizontal slope is 1/4 inch per foot and the maximum is 3 inches per foot unless the run is vertical. The inlet of the washer drain must be above the height of the washer. This is usually 36 to 38 inches minimum for floor-seated washers. Obviously, if your washer is set on a platform, you will need to adjust the height of the drain inlet to be above the washer. I like to see the inlet at least a foot to 18 inches higher than the washer so that the drain can keep up with the washer pump and not overflow. I caution you, however: if the diverter valve slows the drain flow at the current drain diameter, you may need to increase the pipe/valve diameter to avoid back-up and overflow.
Long-term therapy with sustained-release theophylline. Twenty patients with partially reversible bronchial obstruction due to chronic obstructive lung disease participated in a study comparing serum levels, clinical and side-effects of a sustained-release formulation of theophylline with placebo. Prior to the study, theophylline dosages were individually adjusted to give serum levels of 55 to 75 mumol/l 4 hours after tablet intake. Theophylline or placebo was then administered every 12 hours with crossover after 6 weeks. During the study, patients were examined in the morning every second week and lung function tests carried out before and after salbutamol inhalation. Doses required to achieve the desired serum concentration showed great inter-individual variations, but the obtained levels were stable during the whole study. Lung function tests were significantly better in the theophylline period. After inhalation of salbutamol, values were also better in the theophylline period but the differences were less marked and of no statistical significance. Subjective improvement from theophylline was not observed. Side-effects reported were mild and caused no withdrawals.
1. Field of the Invention The present invention generally relates to magnetic recording/reproducing apparatus for recording/reproducing signals of high density onto/from the magnetic tape at a high transfer rate by guiding a magnetic tape at high speed and, more particularly, relates to configurations of magnetic heads and magnetic tape wrap angles relative to the magnetic heads. 2. Description of the Related Art In a field of magnetic recording/reproducing in which signals of high density are recorded/reproduced on/from a magnetic tape guided at high speed, one example of magnetic recording/reproducing apparatus was disclosed in Japanese Patent Publication No. 63-49308, which is effective to maintain a good contact condition between a magnetic tape and a slide face of a magnetic head. What was disclosed in the patent will now be described with reference to FIG. 16. There is a magnetic gap 100 at the top of a magnetic head 101 in the magnetic recording/reproducing apparatus. A magnetic tape 110 contacts with a magnetic head 101 on a slide face 102 in a sliding manner. In the figure, .alpha.'s represent angles between a direction of a chord joining both ends 104, 105 on slide face 102 and the directions of tangents at ends 104, 105, and .beta..sub.in, .beta..sub.out represent angles between the chordal direction above and the direction of magnetic tape 110 coming in and out of contact with the ends 104, 105 on slide face 102. The angles .beta..sub.in, .beta..sub.out will be hereinafter referred to as "tape wrap angles". Tape wrap angle .beta..sub.in represents a tape wrap angle with which magnetic tape 110 approaches slide face 102 (the side of end 104) (therefore hereinafter referred to as approaching tape wrap angle), and .beta..sub.out represents a tape wrap angle with which magnetic tape 110 leaves slide face 102 (the side of end 105) (hereinafter referred to as leaving tape wrap angle). 
Tape wrap angles $\beta_{in}$, $\beta_{out}$ are determined by positions of tape guides 108, 109, for example. Force restricting magnetic tape 110 is generated at both ends 104, 105 of slide face 102 by setting the values of $\beta_{in}/\alpha$, $\beta_{out}/\alpha$ to larger than 1 in the arrangement above. Magnitudes of the restricting forces $F_{in}$, $F_{out}$ (where "in" and "out" indicate the approaching and leaving sides, respectively) per unit width of magnetic tape 110, controlled as stated above, are given by the following equations:

$$F_{in} = T \sin(\beta_{in} - \alpha) \quad (1)$$
$$F_{out} = T \sin(\beta_{out} - \alpha) \quad (2)$$

where $T$ is the tape tension per unit width of magnetic tape 110. A pressing force is generated on slide face 102 by the tape tension $T$ per unit width above, which is given by the following equation:

$$f = T/R \quad (3)$$

where $R$ is the radius of curvature of slide face 102. In this case, when magnetic tape 110 is guided in the direction indicated by the arrow A in the figure, magnetic tape 110 moves in a stable manner under the condition that the restricting forces $F_{in}$, $F_{out}$ and the pressing force $f$ are in harmony with the air film pressure between slide face 102 of magnetic head 101 and magnetic tape 110. FIG. 17 is a diagram of a spacing distribution when the values of $\beta_{in}/\alpha$, $\beta_{out}/\alpha$ are 1.2-2.5, as described in Japanese Patent Publication No. 63-49308. The spacing distribution is a distribution of the gap (hereinafter referred to as spacing) between magnetic tape 110 on slide face 102 of magnetic head 101 and slide face 102. As shown in FIG. 17, the spacing is virtually constant and small over slide face 102. This means that the spacing at magnetic gap 100 on slide face 102 of magnetic head 101 is hardly affected by the moving direction of magnetic tape 110 or by disturbances such as vibrations, and a good electromagnetic converting characteristic can be provided at magnetic gap 100.
The effect described above is due to the restricting forces F.sub.in, F.sub.out. Appropriate setting of the values of .beta..sub.in /.alpha., .beta..sub.out /.alpha. is disclosed, for example, in U.S. Pat. No. 4,888,657, U.S. Pat. No. 4,875,129, and Japanese Patent Laying-Open No. 59-16119. According to the references, an air bearing face is formed on the same circumference as that of the slide face having the magnetic gap. As a representative of the prior art, a magnetic head configuration according to U.S. Pat. No. 4,888,657 is shown in FIG. 18. In FIG. 18, a magnetic head 101 includes a slide face 102 having a cylindrical shape with a magnetic gap 100. Slots 103a, 103b are formed on slide face 102 at both sides of magnetic gap 100, extending at right angles with the moving direction of magnetic tape 110. Slide face 102 is divided into three raised faces 102a, 102b and 102c by slots 103a and 103b. In this case, when magnetic tape 110 is guided as far as raised faces 102b and 102c of slide face 102 by tape guides 108 and 109, magnetic tape 110 is restricted by both ends 104, 105 of raised face 102a having the magnetic gap 100 and ends 106, 107 of raised faces 102b, 102c to be air bearing faces. As a result, the magnetic tape wrap angles, .beta..sub.in, .beta..sub.out on raised face 102a are set stably. That is, end 106 of raised face 102b and end 107 of raised face 102c serve as guides, which correspond to tape guides 108, 109 of FIG. 16, respectively. In the conventional technique (FIGS. 16 and 18), however, even if the values of .beta..sub.in /.alpha., .beta..sub.out /.alpha. are set to 1.2 to 2.5, a good spacing characteristic is not necessarily obtained for some tape tension. Here, the spacing characteristic means a characteristic of a spacing and spacing distribution. This problem will now be described with reference to FIG. 19. FIG. 
19 is a diagram showing a spacing distribution relative to tape tension when the tape wrap angles are set so that .beta..sub.in / .alpha.= .beta..sub.out /.alpha.= 1.2 to 2.5, and magnetic tape 110 is guided in the direction of the arrow A in FIGS. 16 and 18. In the figure, the solid line indicates a spacing distribution when the tape tension is optimized. The dot-and-dash line indicates a spacing distribution when the tape tension is rather low. The dotted line indicates a spacing distribution when the tape tension is rather high. That is, when the tape tension is low, magnetic tape 110 flies by a fluid lubrication effect as a restriction effect on magnetic tape 110 on end 104 of slide face 102 of FIG. 16 or raised slide face 102a of FIG. 18 is small. When the tape tension is high, the restricting force of the tape on the both ends 104, 105 of slide face 102 of FIG. 16 or raised face 102a of FIG. 18 is large and stiffness of magnetic tape 110 is increased, so that magnetic tape 110 flies due to deformation of tape 110 at magnetic gap 100. A second problem in the conventional technique will now be described. This problem is due to the difference between the approaching tape wrap angle .beta..sub.in and the leaving tape wrap angle .beta..sub.out in FIGS. 16 and 18. FIG. 20 is a diagram showing a change of a spacing distribution with the leaving tape wrap angle .beta..sub.out being constant while the approaching tape wrap angle .beta..sub.in being changed. In this case, the values of .beta..sub.in /.alpha., .beta..sub.out /.alpha. are in the range of 1.2 to 2.5. The spacing distribution in this case varies with tape tension. In the figure, the solid line, the dot-and-dash line, and the dotted line indicate spacing distributions when .beta..sub.in = .beta..sub.out, .beta..sub.in &lt;&lt; .beta..sub.out, and .beta..sub.in &gt;&gt; .beta..sub.out, respectively. 
That is, for some tape tensions set, when there is a large difference between the approaching tape wrap angle .beta..sub.in and the leaving tape wrap angle .beta..sub.out, a good contact condition cannot be created between magnetic tape 110 and slide face 102 of magnetic head 101 (FIG. 16), or magnetic tape 110 and raised slide face 102a of magnetic head 101 (FIG. 18). A third problem in the conventional technique will now be described. This problem is related to a distance between guides for setting the tape wrap angles .beta..sub.in, .beta..sub.out . A spacing characteristic changes depending on distance between guides. For example, a spacing characteristic with a relatively long distance between the guides, such as, the distance between tape guides 108, 109 shown in FIG. 16 is different from that with a short distance, such as, the distance between end 106 of raised face 102b and end 107 of raised face 102c in FIG. 18. FIG. 21 is a diagram for describing the problem above. In this case, the tape tension and the tape wrap angles are so set that a good spacing characteristic is obtained with a short distance between the guides. In the figure, the solid line and the dot-and-dash line indicate spacing distributions with a short distance and a long distance between the guides, respectively. As shown in the figure, since the spacing characteristic varies with the distance between the guides, the tape tension and the tape wrap angles must be set appropriately according to the distance in order to obtain a good spacing characteristic. As described above, the first, second and third problems indicate that the spacing characteristic is determined by a correlation between the guide distance, the approaching tape wrap-angle .beta..sub.in, and the leaving tape wrap angle .beta..sub.out and if these parameters are set inappropriately, a good spacing characteristic cannot be obtained. 
In this case, magnetic gap 100 of magnetic head 101 cannot display a full electromagnetic converting characteristic. This becomes more significant with shorter recording wavelength on magnetic tape 110.
The invariabilities in the free vibrations of carbon nanotube networks with identical boundary conditions Two kinds of planar, well-ordered carbon nanotube (CNT) networks, Super-Graphene and Super-Square CNT networks, are theoretically constructed from straight single-walled carbon nanotubes (SWNTs) and the corresponding CNT junctions. The free vibrations of these CNT networks are explored by the molecular structural mechanics method. The natural frequencies of these CNT networks with all four edges fully clamped are found to depend only on the side lengths and to be independent of the geometries and topologies of the networks. Owing to the similarity between a continuum plate and these CNT networks, the vibration theory of plates can be successfully applied to explain the geometry- and topology-independent behavior of these networks. Such independent behavior persists when biaxial pre-strain is introduced into the CNT networks.
// All returns a snapshot of every Stat currently held. Each entry is
// dereferenced into a value copy, so the returned slice is safe to use
// after the lock is released and cannot mutate the tracked stats.
func (s *Stats) All() []Stat {
	s.Lock()
	defer s.Unlock()
	// Preallocate to the known size to avoid re-allocation while appending.
	stats := make([]Stat, 0, len(s.stats))
	for _, stat := range s.stats {
		stats = append(stats, *stat)
	}
	return stats
}
#!/usr/bin/env python3
"""Post-deployment health checks: verifies that the deployed web services
respond over HTTP and that no docker swarm service has zero replicas.

Author: <NAME> <<EMAIL>>
"""
import os
import re
import sys
import subprocess
from urllib.request import Request, urlopen
from urllib.error import URLError, HTTPError

# Fail early with a usage hint instead of an IndexError when the two
# required positional arguments are missing.
if len(sys.argv) < 3:
    sys.exit("Usage: {} <protocol> <server-ip>".format(sys.argv[0]))

protocol = sys.argv[1]
serverIP = sys.argv[2]

# Service name -> health-check URL.
mydictionary = {
    "MONIT": "{}://{}:2812".format(protocol, serverIP),
    "PROMETHEUS": "{}://{}:9090".format(protocol, serverIP),
    "Alertmanager": "{}://{}:9093/alertmanager".format(protocol, serverIP),
    "KEYCLOAK": "{}://{}/auth".format(protocol, serverIP),
    "SUNBIRD PORTAL": "{}://{}".format(protocol, serverIP),
    "GRAFANA": "{}://{}/grafana".format(protocol, serverIP),
}


def checkStatus(url, name):
    """Fetch ``url`` and report whether service ``name`` is reachable.

    HTTPError is caught before URLError because it is a subclass of it;
    both are treated as "not working".
    """
    try:
        urlopen(url)
    except HTTPError:
        print("{} is not Working".format(name))
    except URLError:
        print("{} is not Working".format(name))
    else:
        print("{} is working".format(name))


def checkAvailibility():
    """Run checkStatus for every configured service.

    (Name spelling kept for backward compatibility with existing callers.)
    """
    for name, url in mydictionary.items():
        checkStatus(url, name)


def checkContainerReplication():
    """List docker swarm services that currently have zero running replicas.

    Raises subprocess.CalledProcessError if the docker pipeline fails
    (e.g. docker not installed or sudo not permitted).
    """
    print("Checking Container Replication:-\n")
    # The pipeline is a fixed string (no user input interpolated), so
    # shell=True is acceptable here.
    reslt = subprocess.check_output(
        "sudo docker service ls | grep \" 0/\" | awk '{ print $2 }'",
        shell=True,
    ).splitlines()
    for val in reslt:
        print("Container " + val.decode("utf-8") + " Failed to replicate")


print("\n-----------------------------------------\n")
print("Checking The service Working Status:-\n")
checkAvailibility()
print("\n-----------------------------------------\n")
checkContainerReplication()
print("\n-----------------------------------------\n")
# print("\nThe King Never Fails To Win His Destiny\n")
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.shortcuts import render

import stripe

stripe.api_key = settings.STRIPE_SECRET_KEY


# Create your views here.
@login_required
def checkout(request):
    """Render the checkout page and, on POST, charge the user's card.

    Attaches the submitted card token to the user's existing Stripe
    customer (``request.user.userstripe.stripe_id``) and creates a fixed
    $10.00 charge. Card failures are surfaced to the template via the
    ``error`` context key instead of being silently swallowed.
    """
    publishKey = settings.STRIPE_PUBLISHABLE_KEY
    customer_id = request.user.userstripe.stripe_id
    error = None
    if request.method == 'POST':
        token = request.POST['stripeToken']
        try:
            customer = stripe.Customer.retrieve(customer_id)
            # Attach the new card source before charging the customer.
            customer.sources.create(source=token)
            stripe.Charge.create(
                amount=1000,  # amount in cents ($10.00); TODO: derive from order
                currency="usd",
                customer=customer,
                description="Example charge",
            )
        except stripe.error.CardError as e:
            # Bug fix: the original `pass` hid declined cards from the user.
            # Prefer Stripe's human-readable message when available.
            error = getattr(e, 'user_message', None) or str(e)
    context = {'publishKey': publishKey, 'error': error}
    template = 'checkout.html'
    return render(request, template, context)
The present invention relates to electron multiplier devices, and more specifically relates to dynode arrays, methods of making the same and components incorporating the same. Photomultiplier tubes (PMT) are versatile, sensitive detectors of radiant energy in the ultraviolet, visible, and near infrared regions of the electromagnetic spectrum. A photomultiplier tube consists of a photoemissive photocathode, an electron multiplier device based on secondary electron emission and an anode to collect the signal electrons, all housed inside a vacuum envelope. Radiant energy such as light incident on the photocathode causes the photocathode to emit electrons. In the electron multiplier device, these electrons are accelerated by an electric field towards an electrode referred to as a xe2x80x9cdynodexe2x80x9d. As the electrons impinge on the dynode, they cause the dynode to emit a larger number of secondary electrons which are in turn accelerated to another dynode producing more secondary electrons. This process continues for several stages, with progressively larger numbers of electrons being emitted at each successive stage. The electrons from the last dynode stage are collected on an anode which is connected to an external circuit, outside of the vacuum envelope. The dynodes may be arranged to provide a tortuous path which changes direction at each dynode. This helps to assure that electrons from each dynode will impinge on the next dynode, and also protects the photocathode against positive ions which may be emitted from the anode or from the dynodes. PMT""s are used in industrial and scientific apparatus as detectors in systems for measuring the intensity of a beam of radiant energy. For a large number of applications, the PMT is the most sensitive detector available. 
The superiority of the PMT arises from the secondary electron emission amplification, which makes it possible for the device to approach xe2x80x9cidealxe2x80x9d device performance limited only by the statistics of photoemission. The electron gain of a PMTxe2x80x94the ratio of the number of electrons provided by the last stage to the number of electrons provided by the photocathodexe2x80x94typically ranges from 103 to as high as 108. Thus, even when the radiant energy to be detected is extremely weak, a PMT can provide output signals at levels which are easily measured by auxiliary electronic equipment. PMTs can also have extremely fast time response (xcx9c100 ps), which provides the capability for measuring radiant energy varying at a rapid rate. Stated another way, the combination of gain and bandwidth provided by PMTs is unmatched by any other detector. PMTs have very low quiescent power when the individual dynodes are powered separate from an active power supply circuit. A dynode set can also be used to amplify a stream of electrons or ions from a source other than a photocathode, and can provide similar advantages in these applications. The dynode sets used in measurement devices typically provide only one channel or set of cascaded dynode stages, and amplify only one stream of electrons. Thus, in a light-sensing PMT, the electrons emitted by the entire photocathode are amplified together in one stream of electrons so that the device provides a single output signal representing the light incident on the entire photocathode. Imaging devices typically must process a separate signal for each of many picture elements or xe2x80x9cpixelsxe2x80x9d in a two-dimensional array of pixels constituting an image. For example, a monochrome (black and white) image can be represented by a set of signals, each representing the brightness of the image within a pixel at a particular position. 
Many common imaging devices, such as the charge-coupled-device or xe2x80x9cCCDxe2x80x9d imaging devices in home video cameras and in electronic still cameras incorporate a two-dimensional array of detectors incorporating a separate detector for each pixel in the image. A lens focuses the image onto the array, and each detector provides a signal representing the brightness of one pixel in the image. These signals can be reconstructed to provide an image, such as a television or still picture representing the original image. To provide reasonable resolution in the resulting image, the imaging device should include a large number of detectors. Even a medium-quality imaging system such as a consumer video camera requires tens of thousands of pixels; high quality imaging requires hundreds of thousands of pixels. However, with common CCD technology, there is a direct relationship between the size of each detector and the sensitivity of the device, and a similar relationship between the size of each detector and immunity to random electronic noise. Thus, the spatial resolution of the devicexe2x80x94the number of detectors which can be provided in a device of a given sizexe2x80x94is limited. CCD technology has branched into two major classes. One class provides low cost sensors for large consumer markets such as camcorders, line scanners, etc. whereas the other class provides very high quality CCDs for scientific imaging. The low cost sensors are capable of achieving high data rates (xcx9c60-100 MHz for certain line scanners), they suffer in image quality and are not satisfactory for high frame rate scanning arrays. The high quality CCD sensors, while providing excellent low noise performance, cannot provide that performance at high frame rates. Thus, while Si based CCD technology has made great progress, there is still a large gap between what is desired for high quality imaging and performance of the present generation of CCD sensors. 
The CCD devices do not provide the high gain, bandwidth and response time of dynode devices. Attempts have been made to fabricate plural-channel dynode arrays heretofore. Ehrfeld et al., U.S. Pat. No. 4,990,827 and Shimabukuro et al. U.S. Pat. No. 5,329,110 propose making arrays of small electron multipliers by certain microfabrication techniques. However, the techniques and structures taught by these references are suitable for making linear arrays of electron multipliers; they are not well suited to fabrication of a two-dimensional array of dynode channels. Comby et al., Nuc. Inst. Meth. Phys. Res. A 343, 263 describe an all ceramic multichannel electron multiplier in a PMT having four imaging pixels of 0.6 mm diameter employing a five stage dynode structure. The dynodes are provided as metallic plates arranged along a channel. Openings in the plates are offset from one another to form a tortuous path. According to the reference, the results of gain measurements from these devices demonstrated that machined channels can be built with high gain. Using a Agxe2x80x94Oxe2x80x94Cs coated dynode material, they were able to achieve gains of about 100 for the five stage multiplier, amplifying photoelectrons from a CS3Sb photocathode. As set forth in Comby et al, Proceedings. International Conference On Inorganic Scintillators and Their Applications, SCINT95, DELFT Univ. of Tech. The Netherlands, September (1995), by treating Au dynodes with Sbxe2x80x94Cs, gains in excess of 103 were demonstrated in an all ceramic PMT with 0.6 mm pixels in a 4xc3x974 array. These articles propose that it may be possible to fabricate a 256-pixel device. Thus, dynode array devices available heretofore do not provide the spatial resolution needed for high-quality imaging. Another electron multiplying device is known as a microchannel plate or xe2x80x9cMCPxe2x80x9d. MCP""s typically have numerous continuous channels extending through an insulating layer. 
A coating of a material having high electron emissivity is applied on the interior of each channel. The coating has a high electrical resistance. A voltage applied through electrically conductive layers extending on opposite sides of the insulating layer creates a potential gradient between opposite ends of the coating. Electrons entering each channel are accelerated along the channel by the potential gradient, and impinge on the walls of the channel. Such collisions yield secondary electrons which are also accelerated and provide further collisions. Although MCPs provide advantages such as fast rise times, high spatial resolution and low cross-talk between adjacent channels, the gain of a MCP deteriorates at relatively low electron currents. After electrons are emitted from each portion of the electron-emissive channel lining, that part of the layer must be recharged by conduction through the lining. The high resistance of the lining limits the rate of recharging. Thus, the gain of a typical MCP deteriorates significantly at electron currents of about 0.1 Coulomb per cm² of plate area. Moreover, because current continually flows through the resistive coatings on the channel walls, MCPs typically draw appreciable power at all times. This limits their use in battery-powered devices. One aspect of the present invention provides a multichannel microdynode device. A device in accordance with this aspect of the invention includes a porous structure defining an entry side and an exit side. The structure incorporates a plurality of dynode layers and a plurality of electrically insulating spacer layers. These layers are disposed in alternating sequence between the entry side and the exit side. Each microchannel has an entrance aperture at the entry side of the porous structure and an exit aperture at the exit side of the porous structure. Each channel has a lengthwise direction between the entrance and exit apertures. 
As used herein with reference to a channel, the term "forward direction" means the lengthwise direction from the entrance aperture to the exit aperture, whereas the "reverse" direction is the opposite direction. Each microchannel preferably has a mean diameter less than about 150 microns. The structure defines walls surrounding each microchannel and substantially segregating each microchannel from the other microchannels. The device further includes an electron emissive material in the microchannels within the dynode layers and means for connecting the dynode layers to biasing voltages. The term "electron-emissive material" as used herein refers to a material having a high coefficient of secondary electron emission. Most preferably, the dynode layers and the spacer layers are bonded to one another and form a monolithic structure. In a particularly preferred arrangement, the dynode layers and the spacer layers have confronting surfaces bonded to one another over substantially the entire extent of these surfaces other than the areas occupied by the microchannels. The confronting surfaces of the dynode layers and the spacer layers may be bonded directly to one another or else may be bonded to one another by layers of bonding material interposed between these layers. The dynode layers may be fabricated from a semiconductive or nonconductive structural material such as undoped silicon and may have via liners formed from an electrically conductive material such as a metal overlying the structural material on the dynode regions of the microchannel walls, within the dynode layers. In this instance, the dynode layer may include, or may be contiguous with, a conductive layer such as a metallic layer which extends to the conductive walls of the holes. 
Alternatively, the dynode layer may be formed from a conductive material such as a metal, and the metal of the dynode layers may define the interior walls of the holes in the dynode layer. In either case, the conductive layers act to connect the dynode layers to biasing voltage and provide a direct, conductive connection to the interiors of the holes in the dynode layer. The conductive layer in or contiguous with each dynode layer may be connected to a source of voltage at a potential different from the potentials connected to the other dynode layers. Thus, a potential gradient is maintained along the length of each microchannel by the different potentials, with more positive potential toward the exit aperture. Electrons entering the entrance aperture of each microchannel will be accelerated along the channel in the forward direction and will impinge upon the dynode layers, causing secondary electron emissions. The secondary electrons in turn are accelerated and pass along the channel where they impinge upon the walls in further dynode layers, and the process continues to provide electron multiplication or gain. In particularly preferred structures according to this aspect of the present invention, the microchannels have mean diameters less than about 100 microns, more preferably less than about 25 microns, and most preferably between about 5 microns and about 10 microns. The small diameter of the microchannels provides several significant effects. A substantial number of electrons will collide with the walls of the channel even if the channel is straight or only gently curved. Likewise, any positively charged ions entering the channels at the exit end or generated within the channels will have a high probability of collision with the walls of the channels. Accordingly, the probability of an ion being accelerated along the channel and passing out of the channel in the reverse direction will be very small. 
Thus, although the channel may be curved, it is not necessary to provide a tortuous path. Thus, the central axis of each microchannel may be substantially straight or else may extend in a smooth curve, desirably with two or fewer changes of direction of curvature between the entry side of the structure and the exit side. The microchannels may have essentially any cross sectional shape. A cross sectional shape which is a circle or a regular polygon, such as a square, is particularly preferred. In a particularly preferred arrangement, the walls of the microchannel slope inwardly towards one another within each dynode layer in the direction through the dynode layer towards the exit end of the structure. This further enhances the probability of electron collisions with the walls of the dynode layer and hence further enhances the gain of the system. In a particularly preferred arrangement, the center-to-center distance between the central axes of adjacent microchannels ranges from about 1.01 to about 2 times the mean diameter of each microchannel. Thus, the microchannels occupy a substantial portion of the area of the porous structure. Stated another way, the open area or combined cross sectional areas of the microchannels measured at the entrance apertures of the microchannels desirably constitutes at least about 50% and more preferably at least about 75% of the area of the porous structure. Still higher open area percentages, in some cases up to 98%, at the entrance apertures are attainable where the microchannels taper in the forward direction. In a further embodiment of the invention, the dynode layers may be provided with mesh structures subdividing each microchannel at each dynode layer. Each such mesh structure has nanoscale passages, substantially smaller than the microchannel, extending through it in the lengthwise direction. The walls of these passages have the electron emissive material thereon. 
Such a mesh structure provides an even greater probability of collisions between electrons passing lengthwise along the channel, and an even greater probability of collision for any positive ions passing in the reverse direction along the channel. The preferred electron multiplier structures in accordance with the foregoing aspects of the invention provide all of the advantages of a dynode structure, including high gain, low current consumption and high frequency response, and can also provide very high spatial resolution. Thus, the preferred structures in accordance with the foregoing aspects of the invention provide closely spaced microchannels. Moreover, the microchannels are effectively isolated from one another, so that the structure provides low crosstalk between adjacent channels. In effect, the preferred structures in accordance with this aspect of the invention combine the best advantages of microchannel plates with the best advantages of dynode structures. Electron multiplier structures in accordance with the foregoing aspects of the invention desirably are used in conjunction with a cathode structure capable of emitting electrons overlying the entry side of the structure so that regions of the cathode structure are exposed to entrance apertures of the microchannels, and an anode structure overlying the exit side of the porous structure. Most preferably, the cathode structure and the anode structure are sealingly connected to the porous structure, so that the anode structure, cathode structure and porous structure cooperatively maintain vacuum within the microchannels. The anode structure and cathode structure may be formed with the porous structure, or bonded to the porous structure, to provide a single monolithic device. Such a monolithic device provides a compact, rugged unit which can be employed without any external shell or vacuum envelope. 
The anode structure may include conductors extending to the exterior of the monolithic device, which eliminates the need for any separate feed-throughs. The cathode structure may incorporate a photocathode adapted to emit electrons in response to light, whereas the anode structure may incorporate a plurality of separate anodes overlying the exit apertures of the microchannels. Preferably, an individual anode is aligned with the exit aperture of each microchannel. In this case, the electrons impinging on the individual anode associated with each microchannel will represent light impinging on the particular region of the photocathode overlying the entry aperture of that microchannel. Thus, the device will provide a plurality of separate signals, each representing the brightness of light in a single pixel. These signals can be handled and processed in a microelectronic circuit. The microelectronic circuit may be formed as part of the same monolithic structure with the other elements of the device. Such a monolithic device can be used instead of a CCD sensor and can be made with comparable spatial resolution to a CCD sensor. However, the device in accordance with these embodiments of the invention can provide markedly superior signal output levels and bandwidth. In other embodiments, the anode structure may incorporate a phosphor layer adapted to emit light in response to electrons impinging on the phosphor layer. Where the cathode structure includes a photocathode, the device will act as a light amplifier; the light emitted by the anode phosphor will be far brighter than that impinging on the photocathode. These devices can be incorporated in night vision systems. In still other embodiments, the cathode structure may include plural individual cathodes adapted to emit electron currents. An appropriate circuit may be provided for selectively energizing individual cathodes to cause these individual cathodes to emit. 
This will cause individual portions of the anode structure phosphor layer to be illuminated. Such a device may be used as a flat panel display. Further aspects of the present invention provide methods of making microdynode devices. A method in accordance with one embodiment of the invention includes the steps of providing a plurality of electrically insulating spacer layers having holes therein and providing a plurality of dynode layers also having holes therein. These steps are performed so that the spacer layers and dynode layers are stacked in alternating sequence, with at least one of the dynode layers being sandwiched between two of the spacer layers and so that the holes in the dynode layers are aligned with the holes in the spacer layers to form continuous microchannels extending through the stack. A method in accordance with this aspect of the present invention desirably includes the step of providing an electron emissive material in the holes of each dynode layer before that dynode layer is sandwiched between spacer layers. The step of providing an electron emissive material in the holes of each dynode layer may include the step of depositing either the electron emissive material itself or a precursor adapted to form an electron emissive material into the holes of each dynode layer. For example, the step of providing these plural layers may include the step of forming the layers sequentially, one above the other, by selectively depositing the materials of the dynode layers and spacer layers. In accordance with a further aspect of the invention, a microdynode device may be made by a method including the steps of first providing a set of elongated mandrels extending codirectionally with one another and desirably parallel to one another and then depositing an electrically insulating material over the mandrels to form the spacer layers and a second material to form the dynode layers. 
These materials are deposited in alternating sequence to form a stack including the dynode layers and the spacer layers in alternating sequence. The method further includes the step of removing the mandrels so as to leave elongated microchannels extending through the stack and including holes extending through the various layers. This method may include the step of depositing an electron emissive material, or a precursor adapted to form such a material, onto the mandrels adjacent the previously deposited layers of the stack before depositing the second material to form a new dynode layer. Thus, the deposited emissive material will form a lining in the holes of the newly formed dynode layer. The second material deposited to form a dynode layer desirably is an electrically conductive material such as a metal. The mandrels may be formed by a molding process as further discussed below. A method according to a further aspect of the invention is performed by making one or more dual layer structures. Each dual layer structure is made by providing a spacer layer of an electrically insulating first material, forming depressions in a top surface of this spacer layer and then depositing an electrically conductive material on the top surface to form a dynode layer. The depositing step desirably is performed so that the conductive material extends into the depressions on the top surface so as to form hollow conductive via liners in the depressions as part of the dynode layer. An electron emissive layer is provided on the interior walls of the via liners and holes are formed extending from the depressions through the spacer layer to the bottom surface of the spacer layer. These steps are repeated so as to form a plurality of dual layer structures and thus form a stack of a plurality of spacer layers and dynode layers. Thus, the dynode layer on the top surface of each spacer layer faces the bottom surface of the next higher spacer layer in the stack. 
The steps are performed so that the holes and via liners form continuous microchannels extending through the stack. The step of providing each spacer layer may be performed by depositing the insulating first material on the top surface of a previously formed dynode layer so as to form a new spacer layer. The remaining steps of forming depressions, depositing the conductive material, providing the electron emissive layer and forming the holes may be performed on each new spacer layer after the material of that layer is deposited. Thus, the stack continually grows by addition of new layers. The via liners of each dynode layer may be filled temporarily with a sacrificial plug before depositing the insulating first material to form the next higher spacer layer. The step of forming the holes in the various spacer layers may be performed after the stack is formed and after the spacer layers have been deposited, by etching the stack so as to form the holes in all or several of the spacer layers in a single operation. To facilitate etching of holes in the spacer layer, the step of providing the electron emissive material in the via liners desirably includes the step of depositing the electron emissive material so that the emissive material does not coat the bottoms of the depressions. Thus, the electron emissive material may be applied by sputtering or other processes which direct the material along directions oblique to the top surface, so that the material is deposited on the interior walls of the vias, but not on the bottom surfaces of the depressions. Yet another method of making a microdynode device includes the step of forming an insulating spacer layer and providing a dynode layer on a top surface of the insulating layer. The dynode layer is selectively treated in a plurality of spots so as to form a mesh in each spot, with a plurality of nanoscale passages extending through the dynode layer. 
For example, where the dynode layers are formed from aluminum, the step of selectively treating the dynode layer may include the step of anodizing the aluminum in the spots. Where the dynode layers are formed from silicon the step of selectively treating the dynode layer in the spots may include the step of anisotropically etching the dynode layer in the spots. An electron emissive material is provided on the interior surfaces of the passages holes are formed in the spacer layer in alignment with the spots so that each hole is in communication with a multiplicity of passages. These steps are repeated and a stack including a plurality of spacer layers and a plurality of dynode layers is formed so that the dynode layer on the top surface of each spacer layer faces the bottom surface of the next higher spacer layer and so that the holes and the mesh spots form continuous microchannels extending through the stack, with the mesh of each spot on a particular dynode layer extending across the microchannel. Desirably, the step of providing a spacer layer is performed by providing a layer of a curable material such as a photoimagable polymer and selectively curing this material to leave a plurality of uncured spots extending through the layer of curable material. The selective curing steps and the steps of selectively treating the dynode layers desirably are performed so that the mesh spots in the dynode layers are disposed in alignment with the uncured spots of the spacer layers. The steps of forming the holes in these spacer layers may be performed by removing the uncured material in the spots of each spacer layer after formation of the mesh in the dynode layer atop that spacer layer. The uncured material may be left in place while additional layers are deposited and the uncured material in the spots of several spacer layers may be removed simultaneously as by directing a washing solution through the microchannels. 
Yet another method of making a microdynode device includes the step of providing plural layers of silicon having holes therein and having silicon dioxide layers. A plurality of dynode layers of silicon are also provided. These have holes and a layer of electron emissive material in the holes. Each dynode layer has a layer of an electrically conductive material on a top or bottom surface of the dynode layer. The spacer layers and the dynode layers are stacked so that the holes in the layers are aligned with one another and form continuous microchannels and the stacked layers are then bonded to one another as by anodic bonding to form a monolithic structure. Thus, particularly preferred forms of the present invention provide a fully integrated, very compact, monolithic, high pixel density, imaging electron multiplier with comparable pixel size and spatial resolution to CCD detectors, but with considerably higher sensitivity, improved signal to noise, faster readout, and lower manufacturing cost. This integrated electron multiplier technology will enable a new generation of compact, rugged, high resolution imaging electron detectors that are expected to find widespread applications in scientific instrumentation, medical imaging, document transmission and reproduction, digital video and still cameras, telecommunications and machine vision. These and other objects, features and advantages of the present invention will be more readily apparent from the detailed description set forth below, taken in conjunction with the accompanying drawings.
"I Felt like My Practice Was Catching up with My Beliefs": A Longitudinal Cognitive Study of Seven Early Career Literacy Teachers and Their Praxis ABSTRACT Using a qualitative approach, this article reports findings of a longitudinal study of seven successful elementary educators from the inception of their final preservice field experience through the first seven years of their independent teaching. The research centers the development of teachers' literacy-related instructional practices over the course of their early teaching careers, as well as the factors that influenced and impacted their instructional choices. Through repeated surveys and a culminating reflective interview, the researchers examined patterns of literacy beliefs and practices reported by these teachers over time, as well as the extent to which they internalized and used those beliefs and practices. The authors share what impacted teachers' perceptions and practice of reading instruction throughout the formative stages of their teaching careers and suggest implications for teacher preparation programs in attending to pre- and early-career teachers' praxis.
Convection-Enhanced Delivery for the Treatment of Pediatric Neurologic Disorders Direct perfusion of specific regions of the central nervous system by convection-enhanced delivery is becoming more widely used for the delivery of compounds in the research and treatment of various neural disorders. In contrast to other currently available central nervous system delivery techniques, convection-enhanced delivery relies on bulk flow for distribution of solute. This allows for safe, targeted, reliable, and homogeneous delivery of small-molecular-weight and large-molecular-weight substances over clinically relevant volumes in a manner that bypasses the blood-central nervous system barrier. Recent studies have also shown that coinfused imaging surrogate tracers can be used to monitor and control the convective distribution of therapeutic agents in vivo. The unique features of convection-enhanced delivery, including the ability to monitor distribution in real time, provide an opportunity to develop new research and treatment paradigms for pediatric patients with a variety of intrinsic central nervous system disorders.
A lawyer for 180 victims said the move will block their efforts to learn “who knew about (Larry) Nassar’s criminal conduct and failed to stop it." Facing a mountain of lawsuits over the Larry Nassar sexual abuse scandal, USA Gymnastics filed for bankruptcy Wednesday. The beleaguered organization has been beset by financial struggles and leadership turnover as it has sought to contain a scandal sparked by Nassar, the longtime team doctor accused of molesting hundreds of young women and girls. USA Gymnastics said it is currently facing 100 lawsuits representing more than 350 Nassar victims around the country. By filing for bankruptcy, the organization is able to put on hold all litigation, including ongoing discovery and depositions of key figures, multiple sources told NBC News. Kathryn Carson, who was recently elected chair of the USA Gymnastics Board of Directors, said the cases will be consolidated before a bankruptcy judge — a move that will allow the organization to resolve claims more quickly. "We owe it to the survivors to resolve, fully and finally, claims based on the horrific acts of the past and, through this process, seek to expedite resolution and help them move forward," Carson said in a statement. USA Gymnastics said the victims' claims are covered by insurance but the organization has “no other significant assets" to fund other expenses. The organization is "not looking to close its doors," Carson added in a phone call with reporters. Lawyer John Manly, who represents 180 alleged victims of Nassar, said the legal move will block their "ongoing efforts to discover the truth about who at USA Gymnastics and the U.S. Olympic Committee knew about Nassar’s criminal conduct and failed to stop it." "The leadership of USA Gymnastics has proven itself to be both morally and financially bankrupt," Manly said. "They have inflicted and continue to inflict unimaginable pain on survivors and their families." 
The organization has lost many of its sponsors over its handling of the Nassar scandal, including Proctor and Gamble, Kellogg and AT&T. According to a 2017 tax filing, USA Gymnastics' revenue dropped 26% from the previous year. The U.S. Olympic Committee, which has started the process of revoking USA Gymnastics' status as the sports' governing body, said it is reviewing the filing. "Financial stability and viability are essential for a national governing body to operate in the best interests of the athletes," said spokesman Patrick Sandusky. Nassar, who also worked for Michigan State University, was sentenced in January to up to 125 years in prison after he pleaded guilty to molesting 10 girls. Then in October, Steve Penny, who resigned last year as president and chief executive of USA Gymnastics, was arrested on felony charges of tampering with evidence in connection with a Texas investigation into Nassar. Penny's arrest came six months after an NBC News investigation revealed that he had reached out to several top U.S. gymnasts in what they believe was an attempt to silence them as the Nassar scandal was unfolding. Text messages, emails and other materials supported the claims by athletes and parents that Penny and others at USA Gymnastics stressed discretion above all else, even as the gymnasts and their parents pushed to meet with law enforcement officials. The gymnasts told NBC News that they felt that not following Penny's warnings would jeopardize their potential spots on the Olympic team. Last December, NBC News reported that Olympic gymnast McKayla Maroney filed a lawsuit alleging that USA Gymnastics tried to silence her by making her sign a non-disclosure agreement as part of a financial settlement she needed to pay for psychological treatment. USA Gymnastics said the "concept of confidentiality" was initiated by Maroney's attorney at the time of the settlement.
#!/usr/bin/env python3.7
# -*- coding: utf-8 -*-
"""
@File        : guess_age.py
@Author      : <NAME>
@Email       : <EMAIL>
@IDE         : PyCharm
@Create_Time : 2019-01-18 15:33
"""

# The age the player has to guess.
REAL_AGE = 33


def judge_age(real_age, guess_age):
    """Compare a guessed age to the real one and return a Chinese verdict.

    :param real_age: the true age (int)
    :param guess_age: the player's guess (int)
    :return: "猜小了" if the guess is too small, "猜大了" if too big,
             "猜对了" if exactly right.
    """
    if guess_age < real_age:
        return "猜小了"  # guessed too small
    if guess_age > real_age:
        # Bug fix: original printed "猜到了" ("guessed it") here,
        # but this branch means the guess was too big.
        return "猜大了"
    # Only remaining possibility: the guess is exact.
    return "猜对了"


if __name__ == "__main__":
    # Prompt fixed: original said "Aeg:" (typo for "Age:").
    guess = int(input("Age:"))
    print(judge_age(REAL_AGE, guess))
On March 31, 2008 MARIE CLAIRE FERN loving wife of the late Charles J. Fern; beloved daughter of the late Ernst and Myrtle Utz; dear mother of Carrie Engel, Mary J. Florian and her husband Bob, Charles J. Fern, Jr. and his wife Molly, Janet Wiser and her husband Rich, Joe E. Fern and his wife Laura, Susan J. Smith and her husband Ray; cherished grandmother of Matt, Katie and Vicky Florian, Charlie and Will Fern, Maria and Bobby Amoruso, Danny and Sara Wiser, Joe and Maddy Fern, Kevin, Brittany and Becca Smith and the late Jackie Florian; great-grandmother of Cameron and Jaylen Amoruso. The family will receive friends in the LEMMON FUNERAL HOME OF DULANEY VALLEY, INC., 10 W. Padonia Road (at York Road), Timonium, MD 21093 on Wednesday, 2-4 and 7-9 p.m. A Funeral Mass will be celebrated in Our Lady of Grace, 18310 Middletown Road, Parkton, MD 21120 on Thursday, April 3 at 1 p.m. Interment Dulaney Valley Memorial Gardens. Expressions of sympathy may be directed in Marie's name to St. Joseph Medical Center, 7401 Osler Drive, Towson, MD 21286.
#include "InputParameters.hpp"

#include <algorithm>
#include <iostream>
#include <fstream>
#include <iterator>
#include <string>
#include <math.h>
#include <sstream>
#include <vector>

using namespace GlassBR;

using std::string;
using std::vector;
using std::ifstream;
using std::ofstream;

// Default constructor: initialises every member of the parameter set.
// Most fields start at zero (presumably overwritten later from an input
// source -- TODO confirm against the caller); the remaining fields are
// given fixed numeric values here.
InputParameters::InputParameters()
{
  // Fields zero-initialised (or given small defaults) up front.
  a = 0.0;
  b = 0.0;
  t = 2.5;
  gt = 1;
  w = 0.0;
  tnt = 0.0;
  sdx = 0.0;
  sdy = 0.0;
  sdz = 0.0;
  pbtol = 0.0;
  asprat = 0.0;
  sd = 0.0;
  h = 0.0;
  gtf = 0.0;
  ldf = 0.0;
  wtnt = 0.0;

  // Fixed numeric constants.  The pow() expressions are kept exactly as
  // written so the stored floating-point values are bit-identical to the
  // original (E = 7.17e7, k = 2.86e-53 up to rounding of the product).
  E = 7.17 * (pow(10.0, 7.0));
  td = 3.0;
  m = 7.0;
  k = 2.86 * (pow(10.0, -53.0));
  lsf = 1.0;
}

// Destructor: no dynamically allocated resources are visible in this
// translation unit, so there is nothing to release here.
InputParameters::~InputParameters()
{
}
Amplitude analysis of the $B_{(s)} \to K^{*0} \overline{K}^{*0}$ decays and measurement of the branching fraction of the $B \to K^{*0} \overline{K}^{*0}$ decay The $B^0 \to K^{*0} \overline{K}^{*0}$ and $B^0_s \to K^{*0} \overline{K}^{*0}$ decays are studied using proton-proton collision data corresponding to an integrated luminosity of 3fb$^{-1}$. An untagged and time-integrated amplitude analysis of $B^0_{(s)} \to (K^+\pi^-)(K^-\pi^+) $ decays in two-body invariant mass regions of 150 MeV$/c^2$ around the $K^{*0}$ mass is performed. A stronger longitudinal polarisation fraction in the ${B^0 \to K^{*0} \overline{K}^{*0}}$ decay, ${f_L = 0.724 \pm 0.051 \,({\rm stat}) \pm 0.016 \,({\rm syst})}$, is observed as compared to ${f_L = 0.240 \pm 0.031 \,({\rm stat}) \pm 0.025 \,({\rm syst})}$ in the ${B^0_s\to K^{*0} \overline{K}^{*0}}$ decay. The ratio of branching fractions of the two decays is measured and used to determine $\mathcal{B}(B^0 \to K^{*0} \overline{K}^{*0}) = (8.0 \pm 0.9 \,({\rm stat}) \pm 0.4 \,({\rm syst})) \times 10^{-7}$. Introduction The B 0 → K * 0 K * 0 decay is a Flavour-Changing Neutral Current (FCNC) process. 1 In the Standard Model (SM) this type of processes is forbidden at tree level and occurs at first order through loop penguin diagrams. Hence, FCNC processes are considered to be excellent probes for physics beyond the SM, since contributions mediated by heavy particles, contemplated in these theories, may produce effects measurable with the current sensitivity. Evidence of the B 0 → K * 0 K * 0 decay has been found by the BaBar collaboration with a measured yield of 33.5 +9. 1 −8.1 decays. An untagged time-integrated analysis was presented finding a branching fraction of B = (1.28 +0. 35 −0.30 ±0.11)10 −6 and a longitudinal polarisation fraction of f L = 0.80 +0.11 −0.12 ± 0.06. 
In untagged time-integrated analyses the distributions for B 0 and B 0 decays are assumed to be identical and summed, so that they can be fitted with a single amplitude. However, if CP -violation effects are present, the distribution is given by the incoherent sum of the two contributions. The Belle collaboration also searched for this decay and a branching fraction of B = (0.26 +0.33+0.10 −0.29−0.07 ) 10 −6 was measured, disregarding S-wave contributions. There is a 2.. Perturbative QCD predicts B = (0.64 +0. 24 −0.23 ) 10 −6. 2 These theoretical predictions agree with the experimental results within the large uncertainties. The measurement of f L agrees with the nave hypothesis, based on the quark helicity conservation and the V −A nature of the weak interaction, that charmless decays into pairs of vector mesons (V V ) should be strongly longitudinally polarised. See, for example, the Polarization in B Decays review in Ref.. The B 0 s → K * 0 K * 0 decay was first observed by the LHCb experiment with early LHC data. A later untagged time-integrated study, with data corresponding to 1 fb −1 of integrated luminosity, measured B = (10.8 ± 2.1 ± 1.5) 10 −6 and f L = 0.201 ± 0.057 ± 0.040. More recently, a complete CP -sensitive time-dependent analysis of B 0 s → (K + − )(K − + ) decays in the (K) mass range from 750 to 1600 MeV/c 2 has been published by LHCb, with data corresponding to 3 fb −1 of integrated luminosity. A determination of f L = 0.208 ± 0.032 ± 0.046 was performed as well as the first measurements of the mixing-induced CP -violating phase dd s and of the direct CP asymmetry parameter ||. These LHCb analyses of B 0 s → (K + − )(K − + ) decays lead to three conclusions: firstly, within their uncertainties, the measured observables are compatible with the absence of CP violation; secondly, a low polarisation fraction is found; finally, a large S-wave contribution, as much as 60%, is measured in the 150 MeV/c 2 window around the K * 0 mass. 
The low longitudinal polarisation fraction shows a tension with the prediction of QCDF (f L = 0.63 +0.42 −0.29 ) and disfavours the hypothesis of strongly longitudinally polarised V V decays. Theoretical studies try to explain the small longitudinal polarisation with mechanisms such as contributions from annihilation processes. It is intriguing that the two channels B 0 → K * 0 K * 0 and B 0 s → K * 0 K * 0, which are related Figure 2: Definition of the helicity angles, employed in the angular analysis of the B 0 (s) → K * 0 K * 0 decays. Each angle is defined in the rest frame of the decaying particle. of the K +(−) meson and the direction opposite to the B-meson momentum in the rest frame of the K * 0 (K * 0 ) resonance, and, the angle between the decay planes of the two vector mesons in the B-meson rest frame. From angular momentum conservation, three relative polarisations of the final state are possible for V V final states that correspond to longitudinal (0 or L), or transverse to the direction of motion and parallel ( ) or perpendicular (⊥) to each other. For the two-body invariant mass of the (K + − ) and (K − + ) pairs, noted as m 1 ≡ M (K + − ) and m 2 ≡ M (K − + ), a range of 150 MeV/c 2 around the known K * 0 mass is considered. Therefore, (K) pairs may not only originate from the spin-1 K * 0 meson, but also from other spin states. This justifies that, besides the helicity angles, a phenomenological description of the two-body invariant mass spectra, employing the isobar model, is adopted in the analytic model. In the isobar approach, the decay amplitude is modelled as a linear superposition of quasi-two-body amplitudes. For the S-wave (J = 0), the K * 0 0 resonance, the possible K * 0 0 (or ) and a non-resonant component, (K) 0, need to be accounted for. This is done using the LASS parameterisation, which is an effective-range elastic scattering amplitude, interfering with the K * 0 0 meson, where represents the K * 0 0 width. In Eq. and Eq. 
q is the (K) centre-of-mass decay momentum, and M 0, 0 and q 0 are the K * 0 0 mass, width and centre-of-mass decay momentum at the pole, respectively. The effective-range elastic scattering amplitude component depends on where a is the scattering length and b the effective range. For the P-wave (J = 1), only the K * 0 resonance is considered. Other P-wave resonances, such as K * 0 or K * 0, with pole masses much above the fit region, are neglected. Resonances with higher spin, for instance the D-wave K * 2 0 meson, are negligible in the considered two-body mass range and are also disregarded. The K * 0 amplitude is parameterised with a spin-1 relativistic Breit-Wigner amplitude, The mass-dependent width is given by where M 1 and 1 are the K * 0 mass and width, r is the interaction radius parameterising the centrifugal barrier penetration factor, and q 1 corresponds to the centre-of-mass decay momentum at the resonance pole. The values of the mass propagator parameters are summarised in Table 1. The differential decay rate for B 0 (s) mesons 3 at production is given by, where 4 is the four-body phase space factor. The index i runs over the first column of Table 2 where the different decay amplitudes, A i ≡ |A i |e i i, and the angular-mass functions, g i, are listed. The angular dependence of these functions is obtained from spherical harmonics as explained in Ref.. For CP -studies, the CP -odd, A + S, and CP -even, A − S, eigenstates of the S-wave polarisation amplitudes are preferred to the vector-scalar (V S) and scalar-vector (SV ) helicity amplitudes, to which they are related by The remaining amplitudes, except for A ⊥, correspond to CP -even eigenstates. The contributions can be quantified by the terms F ij, defined as which are normalised according to This condition ensures that 6 i=1 |A i | 2 = 1. The polarisation fractions of the V V amplitudes are defined as where A 0, A and A ⊥ are the longitudinal, parallel and transverse amplitudes of the P-wave. 
Therefore, f L is the fraction of B 0 (s) → K * 0 K * 0 longitudinally polarised decays. The polarisation fractions are preferred to the amplitude moduli since they are independent of the considered (K) mass range. The P-wave amplitudes moduli can always be recovered as The phase of all propagators is set to be zero at the K * 0 mass. In addition, a global phase can be factorised without affecting the decay rate setting 0 ≡ 0. The last two requirements establish the definition of the amplitude phases (, ⊥, − S, + S and SS ) as the phase relative to that of the longitudinal P-wave amplitude at the K * 0 mass. Since B 0 (s) mesons oscillate, the decay rate evolves with time. The time-dependent amplitudes are obtained replacing A i → A i (t) and i → i (t) in Eq. being where In this analysis, no attempt is made to identify the flavour of the initial B 0 (s) meson and time-integrated spectra are considered. Consequently, the selected candidates correspond to untagged and time-integrated decay rates and there is no sensitivity to direct and mixing-induced CP violation. Moreover, since the origin of phases is set in a CP -even eigenstate ( 0 = 0), for the CP -odd eigenstates, the untagged time-integrated decay is only sensitive to the phase difference ⊥ − + S. The present experimental knowledge is compatible with small CP violation in mixing and with the absence of direct CP violation in the B 0 s → (K + − )(K − + ) system. The dependence of the decay rate in an untagged and time-integrated analysis of a B 0 (s) meson can be expressed as where the A i amplitudes account for the the average of B 0 (s) and B 0 (s) decays and N is a normalisation constant. For the B 0 meson, a further simplification of the decay rate is considered, since ∆/ = −0.002 ± 0.010 the light and heavy mass eigenstate widths can be assumed to be equal, and this factor can be extracted as part of the normalisation constant in Eq.. 
For the B 0 s meson the central values H = 0.618 ps −1 and L = 0.708 ps −1 are considered. Detector and simulation The LHCb detector is a single-arm forward spectrometer covering the pseudorapidity range 2 < < 5, designed for the study of particles containing b or c quarks. The detector includes a high-precision tracking system consisting of a siliconstrip vertex detector surrounding the pp interaction region, a large-area silicon-strip detector located upstream of a dipole magnet with a bending power of about 4 Tm, and three stations of silicon-strip detectors and straw drift tubes placed downstream of the magnet. The tracking system provides a measurement of the momentum, p, of charged particles with a relative uncertainty that varies from 0.5% at low momentum to 1.0% at 200 GeV/c. The minimum distance of a track to a primary vertex (PV), the impact parameter (IP), is measured with a resolution of (15 + 29/p T ) m, where p T is the component of the momentum transverse to the beam, in GeV/c. Different types of charged hadrons are distinguished using information from two ring-imaging Cherenkov detectors. Photons, electrons and hadrons are identified by a calorimeter system consisting of scintillating-pad and preshower detectors, an electromagnetic and a hadronic calorimeter. Muons are identified by a system composed of alternating layers of iron and multiwire proportional chambers. The magnetic field deflects oppositely charged particles in opposite directions and this can lead to detection asymmetries. Periodically reversing the magnetic field polarity throughout the data-taking almost cancels the effect. The configuration with the magnetic field pointing upwards (downwards), MagUp (MagDown), bends positively (negatively) charged particles in the horizontal plane towards the centre of the LHC ring. 
The online event selection is performed by a trigger, which consists of a hardware stage, based on information from the calorimeter and muon systems, followed by a software stage, which applies a full event reconstruction. In the offline selection, trigger signatures are associated with reconstructed particles. Since the trigger system uses the p T of the charged particles, the phase-space and time acceptance is different for events where signal tracks were involved in the trigger decision (called trigger-on-signal or TOS throughout) and those where the trigger decision was made using information from the rest of the event only (noTOS). Simulated samples of the B 0 → K * 0 K * 0 and B 0 s → K * 0 K * 0 decays with longitudinal polarisation fractions of 0.81 and 0.64, respectively, are primarily employed in these analyses, particularly for the acceptance description as explained in Sect. 6. Simulated samples of the main peaking background contributions, In the simulation, pp collisions are generated using Pythia with a specific LHCb configuration. Decays of hadronic particles are described by EvtGen, in which final-state radiation is generated using Photos. The interaction of the generated particles with the detector, and its response, are implemented using the Geant4 toolkit as described in Ref.. Signal selection Both data and simulation are filtered with a preliminary selection. Events containing four good quality tracks with p T > 500 MeV/c are retained. In events that contain more than one PV, the B 0 (s) candidate constructed with these four tracks is associated with the PV that has the smallest 2 IP, where 2 IP is defined as the difference in the vertex-fit 2 of the PV reconstructed with and without the track or tracks in question. Each of the four tracks must fulfil 2 IP > 9 with respect to the PV and originate from a common vertex of good quality ( 2 /ndf < 15, where ndf is the number of degrees of freedom of the vertex). 
To identify kaons and pions, a selection in the difference of the log-likelihoods of the kaon and pion hypothesis (DLL K ) is applied. This selection is complemented with fiducial constraints that optimise the particle identification determination: the pion and kaon candidates are required to have 3 < p < 100 GeV/c and 1.5 < < 4.5 and be inconsistent with muon hypothesis. The final state opposite charge (K) pairs are combined into K * 0 and K * 0 candidates with a mass within 150 MeV/c 2 of the K * 0 mass. The K * 0 and K * 0 candidates must have p T > 900 MeV/c and vertex 2 /ndf < 9. The intermediate resonances must combine into B 0 (s) candidates within 500 MeV/c 2 of the B 0 s mass, with a distance of closest approach between their trajectories of less than 0.3 mm. To guarantee that the B 0 (s) candidate originates in the interaction point, the cosine of the angle between the B 0 (s) momentum and the direction of flight from the PV to the decay vertex is required to be larger than 0.99 and the 2 IP with respect to the PV has to be smaller than 25. A multivariate selection based on a Boosted Decision Tree with Gradient Boost (BDTG) is employed. It relies on the aforementioned variables and on the B 0 (s) candidate flight distance with respect to the PV and its p T. Simulated B 0 → K * 0 K * 0 decays with tracks matched to the generator particles and filtered with the preliminary selection are used as signal sample, whereas the four-body invariant-mass sideband 5600 < M (K + − K − + ) < 5800 MeV/c 2, composed of purely combinatorial (K + − )(K − + ) combinations, is used as background sample for the BDTG training. The number of events in the signal training sample of the BDTG is determined using the ratio between the B 0 s and the B 0 yields from Ref. and the B 0 s yield obtained with a four-body mass fit to the data sample after the preliminary selection. decays, are strongly suppressed by the requirement in the (K) mass. 
Resonances in three-body combinations (K + K − + ) and (K + + − ) are also explored. In the case of the former, the three-body invariant mass in the data sample is above all known charm resonances. For the latter, no evidence of candidates originated in is found. Three-body combinations with a pion misidentified as a kaon are reconstructed, mainly searching for All of them are suppressed to a negligible level by the applied selection. A search of three-body combinations with a proton misidentified as a kaon is performed, finding no relevant contribution from decays involving a + c baryon. Decays into five final-state particles are also investigated. Contributions of the B 0 → ( + − )K * 0 decay can be neglected due to the small misidentification probability and the four-body mass distribution whereas the B 0 s → ( 0 + − )(K + K − ) decay is negligible due to the requirement on the (K) mass. Four-body mass spectrum The signal and background yields are determined by means of a simultaneous extended maximum-likelihood fit to the invariant-mass spectra of the four final-state particles in the 2011 and 2012 data samples. The B 0 (s) → (K + − )(K − + ) signal decays are parameterised with double-sided Hypatia distributions with the same parameters except for their means that are shifted by the difference between the B 0 and B 0 s masses, 87.13 MeV/c 2. contributions are described with the sum of a Crystal Ball function and a Gaussian distribution which shares mean with the Crystal Ball core. The parameters of these distributions are obtained from simulation, apart from the mean and resolution values which are free to vary in the fit. Whereas the distribution mean values are constrained to be the same in the 2011 and 2012 data, the resolution is allowed to have different values for the two samples. The small contributions from decays have a broad distribution in the four-body mass and are the object of specific treatment. 
The contribution from B 0 → 0 K * 0 decays has an expected yield of 3.5 ± 1.3 (6.6 ± 2.3) in the 2011 sample. It is estimated from the detection and selection efficiency measured with simulation, the collected luminosities, the cross section for bb production, the hadronisation fractions of B 0 and B 0 s mesons and the known branching fraction of the mode. Simulated events containing this decay mode are added with negative weights to the final data sample to subtract its contribution. The contribution of 0 b → (p − )(K − + ) decays in the 2011 sample is determined to be 36 ± 16 (120 ± 28) from a fit to the (p − K − + ) four-body mass spectrum of the selected data. In this study the four-body invariant mass is recomputed assigning the proton mass to the kaon with the largest DLL pK value. In these fits the 0 b component is described with a Gaussian distribution and the dominant B 0 s → (K + − )(K − + ) background is described with a Crystal Ball function. The parameters of both lineshapes are obtained from simulation. The remaining contributions, mainly B 0 → (K + − )(K − K + ) and partially reconstructed events, are parameterised with a decreasing exponential with a free decay constant. The 0 b → (p − )(K − + ) decay angular distribution is currently unknown and its contribution can not be subtracted with negatively weighted simulated events. Its subtraction is commented further below. Finally, contributions from partially reconstructed b-hadron decays and combinatorial background are also considered. The former is composed of Band B 0 s -meson decays containing neutral particles that are not reconstructed. Because of the missing particle, the measured four-body invariant mass of these candidates lies in the lower sideband of the spectrum. All contributions to this background are jointly parameterised with an ARGUS function convolved with a Gaussian resolution function, with the same width as the signal. 
The endpoint of the distribution is also fixed to the B 0 s mass minus the 0 mass. The combinatorial background is composed of charged tracks that are not originating from the signal decay chain. It is modelled with a linear distribution, with a free slope parameter, separate for 2011 and 2012 data samples. The results of the fit to the four-body mass spectrum are shown in Fig. 3 and the yields are reported in Table 3. In total, about three hundred B 0 → (K + − )(K − + ) signal candidates are found, a factor seven larger than previous analyses. To perform a background-subtracted amplitude analysis, the sPlot technique is applied to isolate, the dotted yellow line to B 0 → (K + − )(K − K + ) and the dotted cyan line represents the partially reconstructed background. The tiny combinatorial background contribution is not represented. The black points with error bars correspond to data to which the B 0 → 0 K * 0 contribution has been subtracted with negatively weighted simulation, and the overall fit is represented by the thick blue line., for which the yield is fixed, is treated using extended weights according to Appendix B.2 of Ref.. The sPlot method suppresses the background contributions using their relative abundance in the four-body invariant mass spectrum and, therefore, no assumption is required for their phase-space distribution. Amplitude analysis Each of the background-subtracted samples of B 0 → (K + − )(K − + ) and B 0 s → (K + − )(K − + ) decays is the object of a separate amplitude analysis based on the model described in Sect. 2. As a first step, the effect of a non-uniform efficiency, depending on the helicity angles and the two-body invariant masses, is examined. For this purpose, four categories are defined according to the hardware trigger decisions (TOS or noTOS) and data-taking period (2011 and 2012). 
The efficiency is accounted for through the complex integrals where is the total phase-space dependent efficiency, k is the sample category and F ij are defined in Eq.. The integrals of Eq. are determined using simulated signal samples of each of the four categories, selected with the same criteria applied to data. A single set of integrals is used for both the B 0 s and the B 0 amplitude analyses. A probability density function (PDF) for each category is built where A i and i are given in Table 2. Candidates from all categories are processed in a simultaneous unbinned maximumlikelihood fit, separately for each signal decay mode, using the PDFs in Eq.. To avoid nonphysical values of the parameters during the minimisation, some of them are redefined as, where x f, x |A + S | 2 and x |A SS | 2 are used in the fit, together with f L, |A − S | 2,, ⊥ − + S, − S and SS. The former three variables are free to vary within the range, ensuring that the sum of all the squared amplitudes is never greater than 1. The fit results are corrected for a small reducible bias, originated in discrepancies between data and simulation, as explained in Sect. 7. The final results are shown in Table 4. Figs. 4 and 5 show the one-dimensional projections of the amplitude fit to the B 0 → (K + − )(K − + ) and B 0 s → (K + − )(K − + ) signal samples in which the background is statistically subtracted by means of the sPlot technique. Three contributions are shown: V V, produced by (K + − ) (K − + ) pairs originating in a K * 0 K * 0 decay; V S, accounting for amplitudes in which only one of the (K) pairs originates in a K * 0 decay; and SS, where none of the two (K) pairs originate in a K * 0 decay. The fraction of V V decays, or purity at production, of the B 0 → K * 0 K * 0 signal, f P B 0, is estimated from the amplitude analysis and found to be.050 (stat) ± 0.017 (syst). Table 4: Results of the amplitude analysis of B 0 → (K + − )(K − + ) and B 0 s → (K + − )(K − + ) decays. 
The observables above the line are directly obtained from the maximum-likelihood fit whereas those below are obtained from the former, as explained in the text, with correlations accounted for in their estimated uncertainties. For each result, the first quoted uncertainty is statistical and the second systematic. The estimation of the latter is described in Sect. 7. 0.023 ± 0.014 ± 0.004 0.087 ± 0.011 ± 0.011 S-wave fraction 0.408 ± 0.050 ± 0.017 0.694 ± 0.016 ± 0.010 The significance of this magnitude, computed as its value over the sum in quadrature of the statistical and systematic uncertainty, is found to be 10.8 standard deviations. This significance corresponds to the presence of B 0 → K * 0 K * 0 V V decays in the data sample. The S-wave fraction of the decay is equal to 0.408 = 1 − f P B 0. For the B 0 s → K * 0 K * 0 mode the S-wave fraction is found to be 0.694 ± 0.016 (stat) ± 0.010 (syst). Systematic uncertainties of the amplitude analysis Several sources of systematic uncertainty that affect the results of the amplitude analysis are considered and discussed in the following. Fit method. Biases induced by the fitting method are evaluated with a large ensemble of pseudoexperiments. For each signal decay, samples with the same yield of signal observed in data (see Table 3) are generated according to the PDF of Eq. with inputs set to the results summarised in Table 4. The use of the weights defined in Eq. to account the detector acceptance would require a full simulation and, instead, a parametric efficiency is considered. For each observable, the mean deviation of the result from the input value is assigned as a systematic uncertainty. Description of the kinematic acceptance. The uncertainty on the signal efficiency relies on the coefficients of Eq. that are estimated with simulation. 
To evaluate its impact on the amplitude analysis results, the fit to data is repeated several times with alternative coefficients varied according to their covariance matrix. The standard deviation of the distribution of the fit results for each observable is assigned as a systematic uncertainty. Resolution. The fit performed assumes a perfect resolution on the phase-space variables. The impact of the detector resolution on these variables is estimated with sets of pseudoexperiments adding per-event random deviations according to the resolution estimated from simulation. For each observable, the mean deviation of the result from the measured value is assigned as a systematic uncertainty. P-wave mass model. The amplitude analysis is repeated with alternative values of the parameters that define the P-wave mass propagator, detailed in Table 1, randomly sampled from their known values. The standard deviation of the distribution of the amplitude fit results for each observable is assigned as a systematic uncertainty. S-wave mass model. In addition to the default S-wave propagator, described in Sect. 2, two alternative models are used: the LASS lineshape with the parameters of Table 5, obtained with B 0 → J/K + − decays within the analysis of Ref., and the propagator proposed in Ref.. The amplitude fit is performed with these two alternatives and, for each observable, the largest deviation from the baseline result Table 5: Alternative parameters of the LASS mass propagator used in the S-wave systematic uncertainty estimation. is assigned as a systematic uncertainty. Differences between data and simulation. An iterative method, is used to weight the simulated events and improve the description of the track multiplicity and B 0 (s) -meson momentum distributions. 
The procedure is repeated multiple times and, for each observable, the mean bias of the amplitude fit result is corrected for in the results of Table 4 while its standard deviation is assigned as a systematic uncertainty. Background subtraction. The data set used in the amplitude analysis is background subtracted using the sPlot method that relies in the lineshapes of the fourbody mass fit discussed in Sect. 5. The uncertainty related to the determination of the signal weights is evaluated repeating the amplitude analysis fits with weights obtained fitting the four-body invariant-mass with two alternative models. In the first case, the model describing the signal is defined by the sum of two Crystal Ball functions with a common, free, peak value and the resolution parameter fixed from simulation. In the second case, the model describing the combinatorial background is assumed to be an exponential function. The amplitude fit is performed with the sPlot-weights obtained with the two alternatives and, for each observable, the largest deviation from the baseline result is assigned as a systematic uncertainty. This procedure is also used when addressing the systematic uncertainties in the measured yields of the different subsamples, as discussed in Sect. 8. Peaking backgrounds. The uncertainty related to the fluctuations in the yields of the 0 b → (p − )(K − + ) and B 0 → 0 K * 0 background contributions are estimated repeating the amplitude-analysis fit with the yield values varied by their uncertainties reported in Sect. 5. For each observable, the largest deviation from the default result is assigned as a systematic uncertainty. This procedure is also used when addressing the systematic uncertainties of the four-body invariant mass yields in Sect. 8. Time acceptance. The amplitude analysis does not account for possible decay-time dependency of the efficiency, however, the trigger and the offline selections may have an impact on it. 
This effect only affects B 0 s -meson decays and is accounted for by estimating effective shifts: H = 0.618 → 0.598 ps −1 and L = 0.708 → 0.732 ps −1, which are obtained with simulation. For each observable, the variation of the result of the fit after introducing these values in the amplitude analysis is considered as a systematic uncertainty. The resulting systematic uncertainties and the corrected biases, originated in the differences between data and simulation, are detailed in Table 6 for the parameters of the amplitude-analysis fit. The corresponding values for the derived observables are summarised in Table 7. The total systematic uncertainty is computed as the sum in quadrature of the different contributions. Determination of the ratio of branching fractions In this analysis, the B 0 → K * 0 K * 0 branching fraction is measured relative to that of B 0 s → K * 0 K * 0 decays. Since both decays are selected in the same data sample and share a common final state most systematic effects cancel. However, some efficiency corrections, eg. those originated from the difference in phase-space distributions of events of the two modes, need to be accounted for. The amplitude fit provides the relevant information to tackle the differences between the two decays. This branching-fraction ratio is obtained as : Systematic uncertainties for the parameters of the amplitude-analysis fit of the B 0 (s) → (K + − )(K − + ) decay. The bias related to differences between data and simulation is included in the results shown in Table 4. Decay mode Table 2. Also in this case, for the Table 7: Systematic uncertainties for the derived observables of the amplitude-analysis fit of the B 0 (s) → (K + − )(K − + ) decay. The bias related to differences between data and simulation is included in the results shown in Table 4. Decay mode The detection efficiency is determined from simulation for each channel separately for the different categories discussed in Sect. 
6: year of data taking, trigger type and, in addition, the LHCb magnet polarity. An exception is applied to the particle-identification selection whose efficiency is determined from large control samples of D * + → D 0 +, D 0 → K − + decays. Differences in kinematics and detector occupancy between the control samples and the signal data are accounted for in this particle-identification efficiency study. The different sources of systematic uncertainty in the branching fraction determination are discussed below. Systematic uncertainties in the factor. The uncertainties on the parameters of the amplitude analysis fit described in Sect. 7 affect the determination of the factors defined in Eq. as summarised in Table 8. Table 8: Systematic uncertainties in the factor defined in Eq. split in categories. The bias originated in differences between data and simulation is corrected for in the results shown in Table 9. Systematic uncertainties in the signal yields. As discussed in Sect. 7 uncertainties on the signal yields arise from the model used to fit the four-body invariant mass. The uncertainties from the different proposed alternative signal and background lineshapes are summed in quadrature to compute the final systematic uncertainty. Decay mode Systematic uncertainty in the efficiencies. A dedicated data method is employed to estimate the uncertainty in the signal efficiency originated in the PID selection. The inputs employed for measuring the relative branching fraction are summarised in Table 9. The factor is different for the two decay modes because of two main reasons: firstly, the discrepancy between the polarisation assumed in simulation and its measurement is larger for the B 0 s → K * 0 K * 0 than for the B 0 → K * 0 K * 0 decay. Secondly, the different S-wave fraction of the decays. Also, the efficiency ratio of the two modes deviating from one is explained upon the different polarisation of the simulation samples. 
The LHCb detector is less efficient for values of cos 1 (cos 2 ) close to unity because of slow pions emitted in K * 0 (K * 0 ) decays and these are more frequent the larger is the longitudinal polarisation. The final result of the branching-fraction ratio is obtained as the weighted mean of the per-category result obtained with Eq. for the eight categories of Table 9, and found to be Considering that B(B 0 s → K * 0 K * 0 ) = (1.11 ± 0.22 (stat) ± 0.12 (syst)) 10 −5, from Ref., the absolute branching fraction for the B 0 → K * 0 K * 0 mode is found to be B(B 0 → K * 0 K * 0 ) = (8.0 ± 0.9 (stat) ± 0.4 (syst)) 10 −7. Table 9: Parameters used to determine B(B 0 → K * 0 K * 0 )/B(B 0 s → K * 0 K * 0 ). When two uncertainties are quoted, the first is statistical and the second systematic. The value of f s /f d is taken from Ref.. It is worth noticing that, since the B 0 s → K * 0 K * 0 branching fraction was determined with the B 0 → K * 0 decay as a reference, the uncertainty on f s /f d, which appears in the ratio of Eq., does not contribute to the absolute branching fraction measurement. Summary and final considerations The first study of B 0 → (K + − )(K − + ) decays is performed with a data set recorded by the LHCb detector, corresponding to an integrated luminosity of 3.0 fb −1 at centre-ofmass energies of 7 and 8 TeV. The B 0 → K * 0 K * 0 mode is observed with 10.8 standard deviations. An untagged and time-integrated amplitude analysis is performed, taking into account the three helicity angles and the (K + − ) and (K − + ) invariant masses in a 150 MeV/c 2 window around the K * 0 and K * 0 masses. Six contributions are included in the fit: three correspond to the B 0 → K * 0 K * 0 P-wave, and three to the S-wave, along with their interferences. A large longitudinal polarisation of the B 0 → K * 0 K * 0 decay, f L = 0.724 ± 0.051 (stat) ± 0.016 (syst), is measured. The S-wave fraction is found to be 0.408 ± 0.050 (stat) ± 0.023 (syst). 
A parallel study of the B 0 s → (K + − )(K − + ) mode within 150 MeV/c 2 of the K * 0 mass is performed, superseding a previous LHCb analysis. A small longitudinal polarisation, f L = 0.240 ± 0.031 (stat) ± 0.025 (syst), and a large S-wave contribution of 0.694 ± 0.016 (stat) ± 0.012 (syst) are measured for the B 0 s → K * 0 K * 0 decay, confirming the previous LHCb results of the time-dependent analysis of the same data. This result is inconsistent with the prediction of R sd = 16.4 ± 5.2. Within models such as QCDF or the soft-collinear effective theory, which are based on the heavy-quark limit, predictions for longitudinal observables, such as the one in Eq., have reduced theoretical uncertainties as compared to parallel and perpendicular ones. The heavy-quark limit also implies the polarisation hierarchy $f_L \gg f_{\parallel,\perp}$. The measured value for R sd and the f L result of the B 0 s → K * 0 K * 0 decay call this hierarchy into question. The picture is even more intriguing since, contrary to its U-spin partner, the B 0 → K * 0 K * 0 decay is confirmed to be strongly polarised.
Institute of entrepreneurship as a tool for economic development Purpose of the article. The article is devoted to the role of entrepreneurship in the economic development of the country. Materials and methods. Approaches to the definition of the concept of entrepreneurship and the institution of entrepreneurship are considered, the special creative and subjective nature of entrepreneurial activity is emphasized. Results. The distinctive features of entrepreneurial activity are characterized, a scheme is proposed that reflects the structure of economic factors of profit arising because of entrepreneurial activity. The system of conditions for the development of entrepreneurial structures is analyzed, which reflects the role of the state in entrepreneurial activity. The concept of state entrepreneurship and its main characteristics are considered. Conclusion. The conclusion is drawn that the institution of entrepreneurship contributes to the formation of a market structure of the national and regional economy.
Molecular biology of P2Y purinoceptors: expression in rat heart. 1. Application of molecular biology to the study of P2Y purinoceptors has led to the identification of seven such receptors. Here we briefly review their properties and investigate qualitatively the expression of four rat receptor transcripts in heart. 2. The reverse transcriptase-polymerase chain reaction was used to ascertain whether the rat P2Y1, P2Y2, P2Y4 and P2Y6 receptor transcripts were expressed in whole heart, neonatal cardiac fibroblasts, neonatal cardiac myocytes and adult cardiac myocytes. 3. All receptor sequences could be amplified from neonatal rat whole heart, with P2Y6 appearing the most abundant transcript of the four. P2Y1 is expressed at higher levels in comparison to P2Y2, P2Y4 and P2Y6 in the neonatal myocyte. In the adult myocyte P2Y1, P2Y2 and P2Y6 could be amplified but P2Y4 could not be detected. In the neonatal fibroblast, P2Y1 and P2Y6 appear to be expressed at higher levels than P2Y2 and P2Y4. 4. In summary, it is concluded that multiple P2Y receptor subtypes are expressed in heart and that the expression in myocytes changes from the neonate to the adult.
Published: March 19, 2013 at 04:32 p.m. Updated: March 19, 2013 at 08:42 p.m. Nine-time Pro Bowl safety Ed Reed is coming off a season in which he earned $7.2 million from the Baltimore Ravens. Whether it comes from the Ravens, Houston Texans or another team, Reed wants a similar annual figure in his next deal, according to The Baltimore Sun. Reed's inability to find that explains why he's still on the market one full week into free agency. The Sun also reported Tuesday that the Texans' offer to Reed last week was in the three-year, $4 million annual range. The Houston Chronicle reported the Texans and Reed's agent remain in contact at the NFL Annual Meeting, but a deal is not close. The Ravens have an offer out to Reed, but like the Texans, it's one that's kept earthbound by their tight salary-cap situation. Both teams continue to wait on Reed, but his time to decide isn't infinite. Eventually one team, or both, will simply move on.
// Calls HandlePosition for all generators at an index in the vm func HandlePositionAtIndex(m *vm.Machine, idx int, gens ...CodeGenerator) error { for _, x := range gens { if err := HandlePosition(m.Positions[idx], x); err != nil { return err } } return nil }
Lethal cause of abdominal pain: a diagnosis not to be missed. A 77-year-old woman was brought to the accident and emergency department with an 11-day history of abdominal pain. She was in shock and required resuscitation. She had attended the accident and emergency department a few days before and had been discharged home with a diagnosis of non-specific abdominal pain. The previous abdominal X-ray is shown in Figure 1. Her past medical history included bilateral below knee amputation for peripheral vascular disease, pacemaker for heart block and hypertension. On examination, post-resuscitation, she was confused and apyrexial with a heart rate of 100 beats/minute and a blood pressure of 110/70 mmHg. Her abdomen was diffusely distended with minimal tenderness in the epigastrium and supra-umbilical region. There was no rigidity or guarding. Bowel sounds were normal. Digital rectal examination showed soft faeces. Blood test showed a haemoglobin of 6.6 g/dl (down from 11 g/dl during her previous accident and emergency visit). There was slight impairment of renal function with a ure...
Turning a normal microscope into a super-resolution instrument using a scanning microlens array We report dielectric microsphere array-based optical super-resolution microscopy. A dielectric microsphere that is placed on a sample is known to generate a virtual image with resolution better than the optical diffraction limit. However, a limitation of such type of super-resolution microscopy is the restricted field-of-view, essentially limited to the central area of the microsphere-generated image. We overcame this limitation by scanning a micro-fabricated array of ordered microspheres over the sample using a customized algorithm that moved step-by-step a motorized stage, meanwhile the microscope-mounted camera was taking pictures at every step. Finally, we stitched together the extracted central parts of the virtual images that showed super-resolution into a mosaic image. We demonstrated 130 nm lateral resolution (~λ/4) and 5×10⁵ μm² scanned surface area using a two by one array of barium titanate glass microspheres in oil-immersion environment. Our findings may serve as a basis for widespread applications of affordable optical super-resolution microscopy. placed underneath the microsphere matters, so that the microsphere-based imaging resolution may become sample-dependent [36]. Therefore, in certain cases the super-resolution capability of a system can be only slightly better than the diffraction limit. Because of the geometrical optics properties of the microsphere, which acts as a lens, a virtual image will be projected about half the microsphere diameter distance below the sample plane. This virtual image plane can be placed in the focus of the microscope objective. An image, recorded while observing this plane will contain information about the sub-diffraction features, therefore enables super-resolution microscopy. The major drawback of such an imaging is that the field-of-view is limited to size of the central part of the microsphere. 
To overcome this limitation we established a scanning mechanism, with which we could restore the field-of-view to the full size of the microscope objective. Our setup consists of two major components as shown in Fig. 2. The first is a metal frame, which is composed from 30 mm cage system parts (Thorlabs, Germany) including an SM1Z, Z-axis translator that are fixed to the microscope objective (Fig. 2a,b). An in-house designed and fabricated aluminum element is attached to the inside thread of the SM1Z translator (Fig. 2c), the aim of which is to fix a glass-based microsphere array chip onto the objective (Fig. 2d). The role of the Z-axis translator between the objective revolver and the chip holder is to enable focus adjustment along the Z-axis, as needed for positioning the chip in the right focal plane prior to imaging based on our previous study in this topic 37. The array chip was fabricated in the clean room using negative photoresist-based photolithography (Fig. 2e). The dimensions of the chip substrate is 22 22 0.15 mm 3 and it is made of D 263 M borosilicate glass (Menzel-Glser, Germany). After oxygen plasma cleaning, it was coated with 20 m 3025 type SU8 (MicroChem, USA). The glass-chromium mask used for the lithography consisted of an array of 40 m diameter wells with a pitch of 60 m. After development, a 4 l droplet of Norland Optical Adhesive 63 (NOA63, Norland Products, USA) was spread on the top of the well array. Then the chip was placed in a vacuum chamber for 20 minutes to remove the air bubbles stuck in the wells of the SU8 layer. Subsequently, we placed 38-45 m diameter BTG (Cospheric, USA) microspheres on the NOA63 layer and swiped them over the surface until they were located in the wells. The excess amount of microspheres was removed to prevent them acting as a spacer during imaging. Finally, the chip was exposed to UV light until an accumulated dose of 4.5 Joules/cm 2 was reached, which is required for curing the NOA63 glue. 
We placed our sample on a motorized microscope stage (Axio Imager M2m with HAL100 halogen light source, Zeiss, Germany) which was controlled by our custom algorithm. The scanning protocol was established as follows: after an initial focus setting along the Z-axis, the microscope-attached camera (AxioCam MRm, Zeiss, Germany) took a picture, when focused on the virtual image plane of the sample. To make a single scanning step, the stage moved 5 m downwards along the Z-axis to prevent scratching the sample and took one step along either the X-or the Y-axis, where the in-plane step-size was set by the user before the scanning. Finally, it moved back to the original Z-axis position and was ready for taking the next picture. This scanning process was repeated until the pre-set sample area was fully scanned. Hereafter, the saved pictures were cropped to the region of interest (ROI) and subsequently stitched together to create a big field-of-view, super-resolution image. We implemented Figure 1. Operation principle of the imaging system. (a) Excitation: light approaches through the microscope objective towards the dielectric microsphere with diameter d. In absence of an object to be imaged in the light path, the dielectric microsphere generates a photonic nanojet on its shadow side, as is shown on the finite element simulation of the electric field in the inset. (b) If an object is present underneath the microsphere, reflection occurs: the simulation shows reflection from a sample consisting of a modulated pattern of eleven lines and spaces with dimensions below the diffraction limit. The modulation is preserved and the near-field information of the diffraction-limited sample is propagated into the far-field within the microsphere. At the same time, the microsphere acts as a lens and generates a virtual image at d/2 distance below the sample plane, as illustrated by the green cone. 
a stitching algorithm that overlapped the regions in the image that were just outside the ROIs, to keep the useful amount of super-resolution pixels at maximum. To achieve that, we used the fact that the scanning went along a predefined path and that the useful area of a taken photograph was always at the same position, so that its size show super-resolution. At every step of the scanning process, the inside squares (marked with yellow for the first and with blue for the second microsphere, respectively) are retained for generating the final image. (b) Schematics of the step-by-step scanning that is carried out using a motorized stage, controlled by an in-house developed scanning algorithm. (c) Final image at the end of the process. First, individual tiles, two of which are indicated by the white squares, are extracted from the center of the microsphere images and are stitched together to form a mosaic image. Next, the thus-generated mosaic images of the individual microspheres, indicated by yellow and blue tiles, are combined. Since the pitch of the microsphere array is smaller than the scanned area, overlap between the yellow tiles from the first microsphere and blue tiles from the second microsphere occurs. Scale bar 5 m. could be calculated in advance. Because of this, we did not have to use the conventional stitching algorithms where the edges of the tiles are compared pixel-to-pixel for stitching. During experiments we used a 63, oil immersion, NA = 1.4 objective, which limited the field-of-view to a 2 2 array of microspheres. Therefore, we had up to four ROIs per picture. Since each ROI was limited by the central part of the microsphere, we could not use conventional stitching algorithms. Results and Discussion In Fig. 3a, one can see a typical image captured from the virtual image plane. Technically, up to four microspheres could fit into the field-of-view of the camera. 
Practically, because of the size distribution of the microspheres and the dependence of the sensitivity of the detection principle on the local distance between the sample and the microsphere surface, we chose to use two microspheres for easy simultaneous imaging. In the center of the two microspheres (marked with the green dashed circles in Fig. 3a) super-resolution imaging is enabled. The yellow and the blue rectangles mark the ROI that will be extracted for the final image. During imaging, the microspheres have a fixed position on the pictures, while the sample is scanned (Fig. 3b). In Fig. 3c a composed image of a silicon-based microscope calibration target (MetroBoost, USA) is shown. The calibration target shows L-shaped line-space patterns with 130, 140 and 150 nm line width, from the left to right, respectively. The patterns are repeated in every row; therefore, the patterns in row nine (marked as R9 S) are nominally the same as the ones in row eight (marked as R8 S). One can observe the individual tiles that were used for stitching (yellow and blue corresponds to the two microspheres) and the overlap between the two scanned areas. The reason for this overlap is the pre-set scanning parameters, as the step-size was set to 5 m along both X-and Y-axis, meanwhile the full scanned area was 100 100 m. Since the pitch distance of the microspheres is 60 m, this resulted in a 40 m-wide overlap area. Based on these results, it is possible to see the two major advantages of implementing scanning with multiple microspheres. With such a configuration, the scanning time could be reduced or the imaged area could be increased. The gain is proportional to the number of microspheres used during the process in both cases. To determine the imaging performance of our system, we measured the modulation of line-space patterns with different lateral dimensions as shown in Fig. 4. 
Figure 4a is a typical example of an image of 140 nm line-space pattern, showing that lines are better resolved towards the center of the microsphere and less sharp image is generated for increasing radial distance r. Experimentally we placed a 524-565 nm band-pass filter (AHF, Germany) in the optical path and we quantified the imaging performance by measuring the variation of the pixel intensity along the seven dashed lines of a width of 2 m, corresponding to 22 pixels. Hereby, we repositioned the line-space pattern so that the complete range 0 < r < 12 m could be studied. The extracted pixel gray values were normalized, taking as hundred percent the lighter region outside of the line pattern and zero percent the darkest pixel intensity of the micro-patterned structures. The peak-to-valley distances of the thus obtained curves were measured and marked as modulation. The graphs of Fig. 4b were constructed by placing line-space patterns with 260 nm, 280 nm and 300 nm pitch, respectively, in the center of a single microsphere. Seven measurement lines were placed along the horizontal axis (shown on Fig. 4a) of the images, starting from the center with 2 m increments. The modulation rapidly decreases as the local distance between the sample and the microsphere surface increases, in good agreement with theoretical calculations of the evanescent behavior of sub-diffraction-sized nanostructures 37. In Fig. 4c the modulation performance of the microscope objective with (yellow) and without (purple) the presence of the microsphere array was compared by imaging line-space patterns with different pitch in the 240-400 nm range. Data analysis showed that there is a significant gain due to the use of the microsphere when the lateral dimension of the sample is below 180 nm, i.e. exactly in the diffraction limited region. 
To benchmark the performance of our approach, we compared the composed picture to the image that was taken by the microscope camera without using a microsphere (Fig. 5). In Fig. 5a, we see the line-space patterns of row nine from the sample of Fig. 3 in the upper part, and the line-space patterns of row eight in the lower part. The white dashed rectangle shows a single field-of-view of the microscope mounted camera. To be able to make fair comparison with our composed image, we took two photographs from the microscope and stitched them together. In the insets, enlarged images of the line-space patterns are shown, clearly indicating that the microscope cannot resolve features below the diffraction limit. To further support this statement, we drew five pixel-wide measurement lines on the taken photographs (blue lines correspond to patterns of row nine, while orange lines correspond to patterns of row eight), on which we evaluated the pixel gray values. We positioned these lines on exactly the same spot for every pattern, except for the 150 nm wide lines where they are shifted up by a few microns, because of a damaged region in the pattern of row eight. To exclude the shift caused by eventual different brightness of the light source, we normalized all pixel gray values, resulting in a modulation pattern as discussed already in Fig. 4a. Therefore, on the plots in the center of Fig. 5a, the zero value corresponds to the darkest pixel and the one value corresponds to the lightest region next to the line-space pattern, i.e. the down-pointing peaks correspond to the dark lines in the pattern. One can observe that the peaks are distinguishable on the most right side plot (evaluating the 150 nm wide lines), but that they disappear as the line width is decreased to 140 nm (center plot) and finally to 130 nm (left side plot). In Fig. 5b we show the image of the same area, but in this case, the picture was created with our microsphere array. 
We applied yellow and blue colors on the picture to show which part of it was created by the first and which by the second microsphere in our array. The insets show enlarged stitched images of the line-space patterns, with markings of the positions of our measurement lines. Just by eye observation, it is already clear that the lines, independently of their size, are more visible than in Fig. 5a. For evaluating the gray values along the measurement lines, we used the same method as described in the previous paragraph. On the plots in the center of Fig. 5b, one can observe that the peaks corresponding to the black lines on the sample are sharper and that the modulation amplitude is bigger. It is important to note, that the modulation did not change significantly between the biggest (150 nm) and the smallest (130 nm) line width, i.e. our imaging system could well resolve down to 130 nm wide features using a halogen light source. Finally, to demonstrate the robustness and full possibility of our imaging technique, we show in Fig. 6 a super-resolution imaging corresponding to a large surface area (0.5 mm 1.0 mm). During scanning, 20 301 individual pictures were collected using our custom algorithm, resulting in ~60 GB of raw data. Our stitching algorithm composed the final image that had ~175 MPixel and ~530 MB file size. One can observe that due to the shear stress generated during the scanning, a slight systematic tilt occurred on the picture, which was corrected by our image reconstruction algorithm. The shadow effect at the edge of the tiles could not be compensated by our algorithm, therefore the quality of the stitching could be improved, e.g. by using seamless stitching in ImageJ, but it is important to note, that our solution completed the stitching ~100 faster than the ImageJ algorithm. As the insets in Fig. 6 show, the 130 nm lateral resolution was preserved over the total area of the scanned surface. 
Conclusion We demonstrated an advanced implementation of an optical microscopy super-resolution imaging technique, using an ordered array of dielectric microspheres. The imaging principle was explained to be related to the existence of a photonic nanojet upon illumination of a microsphere and the near-field interactions between the sample and the microsphere. We showed that it is possible to overcome some of the field-of-view limitations of previously published microsphere-based super-resolution imaging techniques by implementing a scanning and stitching process. Our simple but smart system achieved a 240 nm pitch lateral resolution in static mode. Furthermore, 260 nm pitch and simultaneously a much bigger total field-of-view than the one of the microscope-mounted camera was demonstrated. To show the robustness of the system, a surface scan of 5 10 5 m 2 was presented. However, we believe that even bigger areas can be imaged, since there are no intrinsic limits in our process. Later, the scanning system could eventually be optimized for mass production with the help of 3D printing, as this technique enables very flexible microfabrication of customized parts, as was shown earlier 31. We therefore hope that our findings will help repositioning dielectric microsphere-based optical super-resolution microscopy beyond the proof-of-concept stage towards a fully operational real-life application. Data availability. The data that support the plots within this paper and other findings of this study are available from the corresponding author upon reasonable request.
Authorities in the northern Spanish city of Zaragoza have come under fire for a controversial booklet which advises readers how to best rack up lines of cocaine. The 31-page pamphlet, titled “Drugs: the world, the neighborhood,” is filled with tips on how to use both legal intoxicants, like alcohol and tobacco, as well as more unsavory mind-altering substances like speed, cannabis and cocaine, in a way that avoids unwanted consequences. The guide also advises about choosing different strains of marijuana, and to make speed lines smaller than those of cocaine because amphetamines are generally more potent. The pamphlet was released by Zaragoza’s city hall, led by a coalition of left-wing parties including the populist party Podemos, and handed out among a network of neighborhood associations and youth houses. But opposition groups including the conservative Popular Party (PP) are outraged at what they see as Zaragoza’s leftist leadership endorsing drug use. On Thursday, the PP filed an emergency motion calling for an immediate withdrawal of the pamphlet and for mayor Pedro Santisteve to be reprimanded for supporting it. Azcon also criticized the booklet for equating illegal drugs with legal substances or medicines, and for suggesting drug use might be fun. Opposition to the city’s pamphlet has come from both sides of the political spectrum. The center-left party Ciudadanos announced on Tuesday that its legal department was looking into possible action against the Zaragoza authorities, which spokeswoman Sara Fernandez said was an attempt to "normalize” drug use, undermining years of zero-tolerance policy. The booklet argues that societies throughout history have always used mind-altering, potentially dangerous substances, from caffeine to alcohol and marijuana. According to the paper, people are likely to take drugs regardless of the law, just as they drank alcohol during Prohibition, so it makes sense to lower the risks involved. 
For example, a lot of deaths caused by illegal drugs stem from additives thrown in by unscrupulous dealers rather than the substances themselves. The Zaragoza pamphlet is backed by Consumo ConCiencia, a local harm reduction group which offers to test users’ drugs for purity. Drug policy reformers have argued that anti-drug campaigns aimed at scaring young people away by stressing their life-destroying properties, such as Nancy Reagan’s Just Say No, are overly simplistic and moralistic without addressing the complex issues surrounding the problem. Consumo ConCiencia’s founder Javier Sanchez countered that the booklet “in no way promotes drug use” but rather provides objective information to “reduce the risks involved in drug taking.” Spain is one of the key entry points for cocaine into Europe, as well as hashish from Morocco, and has one of the highest rates of drug use on the continent.
from datetime import date

import pytest

from schemas import ProductionRecord, ProductionWell, ProductionWellSet


@pytest.fixture
def production():
    """Ten identical raw monthly production records.

    Every value is a string; the schemas under test are expected to coerce
    them to typed values (see TestProdRecord.test_convert_aliases).
    """
    yield [
        {
            "prod_date": "2020-01-01",
            "days_in_month": "31",
            "oil": "10",
            "oil_uom": "BBL",
            "gas": "10",
            "gas_uom": "BBL",
            "water": "10",
            "water_uom": "BBL",
            "gor": "10",
            "gor_uom": "BBL",
            "water_cut": "10",
        }
        for x in range(0, 10)
    ]


@pytest.fixture
def well(production):
    """A raw well payload embedding the ``production`` fixture records."""
    yield {
        "entity12": "123456789102",
        "entity": "123456789102404",
        "api10": "1234567891",
        "api14": "12345678910000",
        "status": "ACTIVE",
        "provider": "data_provider",
        "last_update_at": "2020-03-12T20:44:16.992142",
        "perf_upper_min": 1000,
        "perf_lower_max": 3000,
        "perf_ll": 2000,
        "products": "O",
        "production": production,
    }


class TestProdRecord:
    """Tests for the ProductionRecord schema."""

    def test_convert_aliases(self):
        """Aliased input keys (first_date, last_day, liquid, liquid_uom) map to
        canonical field names, and string values are coerced to typed ones."""
        record = {
            "first_date": "2020-01-01",
            "last_day": "31",
            "liquid": "10",
            "liquid_uom": "BBL",
        }
        actual = ProductionRecord(**record).dict(exclude_none=True)
        expected = {
            "prod_date": date(2020, 1, 1),
            "days_in_month": 31,
            "oil": 10,
            "oil_uom": "BBL",
        }
        assert expected == actual


class TestProdWell:
    """Tests for the ProductionWell schema."""

    def test_records(self, well):
        # records() yields one flattened record per entry in the well's
        # production list.
        actual = ProductionWell(**well).records()
        assert len(actual) == len(well["production"])


class TestProdWellSet:
    """Tests for the ProductionWellSet container schema."""

    def test_records(self, well):
        # records() concatenates the production records of every well.
        wells = [well for x in range(0, 5)]
        actual = ProductionWellSet(wells=wells).records()
        expected = sum([len(x["production"]) for x in wells])
        assert len(actual) == expected

    def test_df(self, well):
        # Without an index, the DataFrame columns are the union of the
        # well-level fields (minus the nested "production" list) and the
        # record-level fields.
        wells = [well for x in range(0, 5)]
        actual = {
            *ProductionWellSet(wells=wells).df(create_index=False).columns.tolist()
        }
        expected = {
            *[x for x in ProductionWell.__fields__.keys() if x not in ["production"]],
            *ProductionRecord.__fields__.keys(),
        }
        assert actual == expected

    def test_df_with_index(self, well):
        # With create_index=True the frame is indexed by (api10, prod_date).
        wells = [well for x in range(0, 5)]
        df = ProductionWellSet(wells=wells).df(create_index=True)
        assert {*df.index.names} == {"api10", "prod_date"}
<filename>src/main.rs use std::io; use std::io::Write; use tconv::*; fn print_help() { println!("Enter temperature with a unit (C for Celsius, F for Fahrenheit)."); println!("Type `help` to display this message"); println!("Type `quit` to leave the program."); } fn print_unrecognized() { println!("Unrecognized input. Type `help` to get some hints."); } fn print_result(temperature: &Temperature, result: &Temperature) { println!("{:?} => {:?}", temperature, result); } fn main() { print_help(); loop { let mut input = String::new(); print!("> "); io::stdout().flush().unwrap(); io::stdin() .read_line(&mut input) .expect("Failed to read line"); let action = Action::parse(&input); match action { Action::Unrecognized => print_unrecognized(), Action::Quit => break, Action::Help => print_help(), Action::Convert(temperature, mode) => { print_result(&temperature, &temperature.convert(mode)); } }; } }
def convertDate(d):
    """Parse an ISO-8601 UTC timestamp string into a ``datetime``.

    Bug fix: the previous format string ``"%Y-%m-%dT%H:%M:%fZ"`` omitted the
    seconds field (``%S``), so a full timestamp such as
    ``"2020-03-12T20:44:16.992142Z"`` raised ``ValueError``, and a
    second-precision one (``"...T20:44:16Z"``) mis-parsed the seconds as
    microseconds. Both forms are now accepted.

    :param d: timestamp like ``"2020-03-12T20:44:16.992142Z"`` or
        ``"2020-03-12T20:44:16Z"``.
    :returns: the parsed (naive) ``datetime``.
    :raises ValueError: if ``d`` matches neither format.
    """
    for fmt in ("%Y-%m-%dT%H:%M:%S.%fZ", "%Y-%m-%dT%H:%M:%SZ"):
        try:
            return datetime.strptime(d, fmt)
        except ValueError:
            continue
    raise ValueError(f"Unrecognized timestamp format: {d!r}")
#include<bits/stdc++.h>
using namespace std;
#define ll long long
const int mod =1e9+7;

// (a^b) mod `mod` via binary exponentiation, O(log b). (Unused template helper.)
ll power(ll a,ll b){
    ll res=1;
    while(b){
        if(b&1)res=res*a%mod;
        b>>=1;
        a=a*a%mod;
    }
    return res;
}

// Modular inverse by Fermat's little theorem; valid because `mod` is prime.
// (Unused template helper.)
ll inv(ll x){
    return power(x,mod-2);
}

// Recursive Euclid; precondition b != 0. (Unused template helper.)
ll gcd(ll a,ll b){
    if(a%b==0)return b;
    return gcd(b,a%b);
}

vector<int>g[211111]; // reversed graph: for each input edge x -> y we store g[y] += x
int a[211111];        // the fixed route a[0..k-1]; a[k-1] is the destination
int visited[211111];  // BFS visit flags
int dis[211111];      // dis[v] = shortest distance from v to a[k-1] along original edges
int mark[211111];     // mark[v] = 1 iff v has >= 2 distinct shortest-path next hops

// Reads a directed graph and a fixed route, then prints the minimum and
// maximum possible number of navigation "rebuilds" along the route.
int main(){
    ll n,m,t,i,j,k,x,y,ma=0,mi=1e18,cnt=0,res=0,sum=0;
    cin>>n>>m;
    while(m--){
        // BUG FIX: x and y are `ll` (long long); the original read them with
        // "%d", which writes only 32 bits and is undefined behaviour.
        scanf("%lld%lld",&x,&y);
        g[y].push_back(x);
    }
    cin>>k;
    for(i=0;i<k;i++){
        scanf("%d",&a[i]);
    }
    // BFS over the reversed graph from the destination, so dis[] holds
    // forward shortest distances to a[k-1].
    queue<int>q;
    q.push(a[k-1]);
    visited[a[k-1]]=1;
    while(!q.empty()){
        int temp=q.front();
        for(i=0;i<g[temp].size();i++){
            if(!visited[g[temp][i]]){
                visited[g[temp][i]]=1;
                dis[g[temp][i]]=dis[temp]+1;
                q.push(g[temp][i]);
            }
            else{
                // Re-reached at the same BFS depth: this node has another
                // shortest-path successor besides `temp`.
                if(dis[g[temp][i]]==dis[temp]+1)mark[g[temp][i]]=1;
            }
        }
        q.pop();
    }
    // Walk the route. A step that does not reduce dis by exactly 1 leaves
    // every shortest path and forces a rebuild (counts toward min and max);
    // a shortest step from a node with alternative shortest successors may
    // trigger a rebuild (counts toward max only).
    for(i=k-2;i>=0;i--){
        if(dis[a[i]]-dis[a[i+1]]!=1)res++;
        else cnt+=mark[a[i]];
    }
    cout<<res<<" "<<res+cnt;
}
package database.filehelpers;

import util.PropertiesReader;

import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.InvalidPathException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;

/**
 * A class for managing the storage and retrieval of files
 * located within the subfolders of /data/filedata/
 *
 * @author <NAME>
 */
public class FileDataManager {

    /* Instance Variables */
    // Absolute/relative prefix of the folder this manager operates in.
    private final String baseFolder;

    /**
     * Enum defining the different folders in the data folder of the
     * project where user saved files can be read and written from by the
     * FileDataManager class
     */
    public enum FileDataFolders {
        IMAGES ("images"),
        SOUNDS ("sounds");

        private final String filepath;

        FileDataFolders(String path) {
            // Resolved once at enum-load time from the project properties.
            this.filepath = PropertiesReader.path("db_files") + path + "/";
        }

        /**
         * @return A {@code String} representing the path of the folder within the project
         */
        String path() {
            return filepath;
        }
    }

    /**
     * Creates a new FileDataManager that is able to manipulate
     * files within the given folder within the data folder
     * @param folder is a {@code FileDataFolders} enum value that
     *               specifies which of the subfolders in data
     *               where you want to manipulate user saved files
     */
    public FileDataManager(FileDataFolders folder) {
        baseFolder = folder.path();
    }

    /**
     * Creates an InputStream of bytes corresponding to the passed in file name
     * @param filename is a {@code String} that represents the name of the file
     *                 to return a stream for
     * @return A {@code InputStream} that can then be used to create an image, sound,
     *         etc. If the file is not found, then the stream will just be one for
     *         an empty byte array
     */
    public InputStream readFileData(String filename) {
        byte[] fileBytes;
        try {
            Path fileLocation = Paths.get(baseFolder + filename);
            fileBytes = Files.readAllBytes(fileLocation);
        } catch(InvalidPathException | IOException e) {
            // Missing/unreadable file degrades to an empty stream by design.
            fileBytes = new byte[0];
        }
        return new ByteArrayInputStream(fileBytes);
    }

    /**
     * Retrieves all the files contained below the path specified. Includes files within subfolders
     * of the specified paths.
     * @param path is {@code String} specifying the subfolder to retrieve files from
     * @return A {@code List<InputStream>} that contains the InputStreams of each file in the subfolder
     */
    public List<InputStream> retrieveSubfolderFiles(String path) {
        List<InputStream> fileStreams = new ArrayList<>();
        File base = new File(baseFolder + path);
        // Bug fix: listFiles() returns null when the path exists but is not a
        // directory (or on an I/O error); the previous exists() check allowed
        // a NullPointerException in that case.
        if(base.isDirectory()) {
            File[] subfiles = base.listFiles();
            if(subfiles != null) {
                for(File subfile : subfiles) {
                    if(subfile.isDirectory()) {
                        // Recurse into nested folders.
                        fileStreams.addAll(retrieveSubfolderFiles(path + "/" + subfile.getName()));
                    } else {
                        InputStream fileStream = readFileData(path + "/" + subfile.getName());
                        fileStreams.add(fileStream);
                    }
                }
            }
        }
        return fileStreams;
    }

    /**
     * Deletes the specified User file
     * @param filename is a {@code String} representing the name of the file to
     *                 be deleted within the base folder
     * @return {@code true} if the file was successfully deleted, and {@code false}
     *         otherwise
     */
    public boolean deleteFileData(String filename) {
        // Get the path to the file
        Path p = (new File(baseFolder + filename)).toPath();
        try {
            return Files.deleteIfExists(p);
        } catch(IOException e) {
            return false;
        }
    }

    /**
     * Writes the given array of bytes to the file specified.
     * @param fileBytes is a {@code byte[]} that represents the bytes of the file
     *                  to be written
     * @param filename is a {@code String} that represents the name of the file to
     *                 save the bytes as
     * @return {@code true} if the file successfully saves, and {@code false} otherwise
     */
    public boolean writeFileData(byte[] fileBytes, String filename) {
        try {
            Files.write(Paths.get(baseFolder + filename), fileBytes);
            return true;
        } catch (IOException e) {
            return false;
        }
    }
}
package br.com.forumbrabo.model;

import java.io.Serializable;
import java.text.SimpleDateFormat;
import java.util.Date;

import javax.faces.bean.ManagedBean;
import javax.faces.bean.RequestScoped;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.JoinColumn;
import javax.persistence.ManyToOne;

/**
 * JPA entity representing a forum message ("mensagens" table), also exposed
 * as a request-scoped JSF managed bean.
 *
 * <p>NOTE(review): mixing {@code @Entity} with {@code @ManagedBean} couples
 * the persistence model to the view layer — confirm this is intentional.</p>
 */
@SuppressWarnings("deprecation")
@ManagedBean
@RequestScoped
@Entity(name = "mensagens")
public class Mensagem implements Serializable {

    private static final long serialVersionUID = 1L;

    // Shared display pattern for message timestamps, e.g. "14:05 (12/03/2020)".
    private static final String DATE_PATTERN = "HH:mm (dd/MM/yyyy)";

    @Id
    @GeneratedValue(strategy = GenerationType.SEQUENCE)
    private int id;

    private String mensagem;

    // Creation timestamp, captured when the entity is instantiated.
    private Date dataMsg = new Date();

    @ManyToOne
    @JoinColumn(name = "idUsuario")
    Usuario usuario;

    public Mensagem() {
    }

    public Mensagem(String mensagem, Usuario usuario) {
        this.mensagem = mensagem;
        this.usuario = usuario;
    }

    public int getId() {
        return id;
    }

    public String getMensagem() {
        return mensagem;
    }

    public void setMensagem(String mensagem) {
        this.mensagem = mensagem;
    }

    /**
     * @return the creation timestamp formatted with the shared pattern
     *         {@code HH:mm (dd/MM/yyyy)}
     */
    public String getDataMsg() {
        return formatDataMsg();
    }

    public Usuario getUsuario() {
        return usuario;
    }

    public void setUsuario(Usuario usuario) {
        this.usuario = usuario;
    }

    /**
     * Formats {@link #dataMsg}. A new SimpleDateFormat is created per call
     * because SimpleDateFormat is not thread-safe; this also removes the
     * formatting logic previously duplicated in getDataMsg() and toString().
     */
    private String formatDataMsg() {
        return new SimpleDateFormat(DATE_PATTERN).format(dataMsg);
    }

    @Override
    public String toString() {
        if (mensagem == null) {
            return "";
        }
        return usuario.getUsuario() + " disse: \"" + mensagem + "\" às " + formatDataMsg();
    }
}
The Dodo in the Long Eighteenth Century: An Exploration of the Gray Ghost Outside of the English Sentimental Eye This article examines the specific type of monster that developed in eighteenth-century British culture arising from early attempts to reconstruct the extinct dodo. Specifically, discussions about the absent dodo took shape in early ornithological texts, where it was universally depicted as a monster, a lumbering, clumsy, gluttonous animal whose survival was unquestionably doomed by its ungainly morphology. This iteration of the dodo was constructed in the absence of productive empirical information about the real bird, largely from cultural prejudices and conjecture extracted from studying old, inaccurate paintings. This reconstructed monster-dodo shares little with the real dodo, about which we know only from fragmentary seventeenth-century accounts by witnesses who actually saw the living bird and recent studies drawn from its fossil record. This article explores why English culture took the relative blank slate left by this extinct bird and, contrary to emerging notions of sentimentality, re-fashioned the dodo as a monster.
Prosodic Effects on Spoken Word Recognition in Second Language: Processing of Lexical Stress by Korean-Speaking Learners of English The present study explores how language-specific variation in prosodic structure affects L2 learners' processing of prosodic categories during L2 spoken word recognition. Specifically, the study examines the processing of English lexical stress over the time course of spoken word recognition by native English speakers and Korean-speaking learners of English. English employs word-level stress reliably, either with trochaic or iambic stress patterns. Lexically stressed syllables are often pitch-accented in utterances, and most lexically unstressed syllables contain reduced vowels. The prominence of stressed syllables over unstressed syllables is manifested in the acoustic fundamental frequency (f0) peak, duration, and intensity. In contrast, Korean has neither lexical stress nor reduced vowels as the segmental correlates of lexical stress. Instead, it has phrase-level prosodic structure ((T)HLH%) (Jun 1993). Utterances often align the phrase-initial tone (T) before a high tone (H) with the beginning of each content word. The phrase-initial tone is realized as either high (H) or low (L) tone depending on the phonation type of the initial consonant—H for tense/aspirated and L for lax. H is realized with a relatively high fundamental frequency (f0) and L with a relatively low f0. The Perceptual Assimilation Model (PAM; Best 1995) predicts that if relevant L2 sounds have no corresponding categories in L1, as is the case with the English lexical stress contrast for Korean-speaking learners of English, discrimination of the two sounds should be good because the discrimination is less strongly affected by native phonological equivalence classes.
However, this prediction was not borne out in a preliminary eye-tracking study (Shin & Speer, 2009). In Shin & Speer, native English speakers were able to use lexical stress information during spoken word recognition. Their processing of words was facilitated when they encountered a stressed syllable in a word, and thus the processing benefit was pronounced in the recognition of trochaic words over iambic words. Korean-speaking learners of English, however, didn't make much use of English lexical stress for spoken word recognition, showing delayed recognition of target words compared to native English speakers. Post hoc analyses showed that the Korean-speaking learners of English were good at identifying high-pitch-accented trochaic words but were poor at identifying iambic words that began with what they heard as aspirated sounds. The finding suggests that Korean phrase-level prosodic structure might have constrained the processing of English lexical stress in Korean learners of English. In this dissertation, two experiments further investigated whether L1 phrase-level prosody constrains the use of L2 word-level prosodic information during L2 spoken word recognition, with a controlled number of aspirated versus lax word-initial consonants. First, a cross-modal gating experiment was conducted to focus on the effects of word-initial phonation type on the perception of English lexical stress. The result was similar to that of the preliminary study. When the first
The oldest LGBT community center in the Bay Area, the Pacific Center for Human Growth, is losing its longtime executive director, Leslie Ewing, to retirement. She has been with the Berkeley mental health and wellness center that serves Alameda County for a little over a decade. Her last day will be July 31. "My hope is that the new executive director will love the job as much as I do," Ewing, a lesbian, told the Bay Area Reporter Thursday (March 28). "Perhaps someone very different than me. It's time for the next 10 years and important to foster a new generation to do the work and time for me to step back." Ewing, 70, said it's a personal decision to retire and that it's time for new leadership at the center. Once Ewing departs, Jared Fields, its deputy director, will handle the day-to-day operations until an interim director is chosen. The Oakland resident said among some of her proudest accomplishments during her time with the center is its expansion of programs. Today, the center offers youth programs, peer groups, counseling and psychotherapy, and serves more than 3,000 people at four locations throughout the county. "Small community centers are really the heart and soul of the LGBT civil rights movements," Ewing said. "Our organization has really evolved into the LGBT support organization for all of Alameda County, not just Berkeley." Ewing has formerly been board president of the AIDS Emergency Fund and associate executive director at Lyon-Martin Health Services. She is looking forward to her next chapter and what the future holds for her. "I'll always say yes until I have a reason to say no," she said. Interested candidates for the executive director position are asked to send their cover letter and resume to boardpresident@pacificcenter.org. The Bay Area Reporter will have an expanded article in next week's paper.
<gh_stars>1000+ import React from "react"; import "./FormHeader.scss"; const CLASS_NAME = "amp-form-header"; export type Props = { title?: string; children?: React.ReactNode; }; export const FormHeader = ({ title, children }: Props) => { return ( <div className={CLASS_NAME}> <div className={`${CLASS_NAME}__strip`}> <h1 className={`${CLASS_NAME}__title`}>{title}</h1> <div className={`${CLASS_NAME}__controls`}>{children}</div> </div> </div> ); };
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author: Zheng <<EMAIL>>
# Date: 2019-05-07
# Desc: object-conversion helper functions


def dict_to_str(dict_obj):
    """Return a copy of ``dict_obj`` with every key and value cast to str.

    :param dict_obj: dictionary to convert
    :return: a new dict of stringified keys/values, or ``None`` when the
        argument is not a dict
    """
    if not isinstance(dict_obj, dict):
        return None
    converted = {}
    for key, value in dict_obj.items():
        converted[str(key)] = str(value)
    return converted


def list_to_str(list_obj):
    """Render ``list_obj`` as a bracketed, space-separated string.

    Commas are intentionally replaced by single spaces, e.g. ``[1 2 3]``.

    :param list_obj: iterable of items to render
    :return: the formatted string
    """
    stringified = [str(item) for item in list_obj]
    return "[{}]".format(" ".join(stringified))
# Copyright 2018 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# setup.py — packaging metadata for the `eyws` AWS CLI tool.

import setuptools

# Project metadata lives in eyws/about.py so it can be shared with the
# runtime package. Note: `__name__` here is the imported distribution name
# from that module, not the module builtin.
from eyws.about import __version__, __author__, __license__, __email__, __description__, __name__, __url__

# Runtime dependencies. NOTE(review): boto3/botocore are pinned to exact
# versions while jinja2/python-dateutil use minimum bounds — confirm the
# exact pins are intentional.
requires = [
    "jinja2>=2.10",
    "python-dateutil>= 2.7.3",
    "boto3==1.8.1",
    "botocore==1.11.1",
]


def readme():
    # Read README.md for use as the long_description shown on PyPI.
    with open("README.md") as f:
        return f.read()


setuptools.setup(
    name=__name__,
    version=__version__,
    description=__description__,
    long_description=readme(),
    author=__author__,
    author_email=__email__,
    url=__url__,
    license=__license__,
    keywords="aws cli",
    install_requires=requires,
    python_requires=">=3.1",
    include_package_data=False,
    # Expose the `eyws` console command, dispatching to eyws.parser:execute.
    entry_points={
        "console_scripts": [
            "eyws = eyws.parser:execute"
        ]
    },
    zip_safe=False,
    packages=setuptools.find_packages(),
    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "Intended Audience :: System Administrators",
        "License :: OSI Approved :: Apache Software License",
        "Natural Language :: English",
        "Operating System :: POSIX :: Linux",
        "Operating System :: MacOS :: MacOS X",
        "Operating System :: Microsoft :: Windows",
        "Programming Language :: Python :: 3.1",
        "Programming Language :: Python :: 3.2",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: Utilities"]
)
<gh_stars>1-10 /* * Copyright (C) 2014 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package nuclei.media.playback; import android.content.BroadcastReceiver; import android.content.Context; import android.content.Intent; import android.content.IntentFilter; import android.media.AudioManager; import android.net.Uri; import android.net.wifi.WifiManager; import android.os.Build; import android.os.Handler; import android.os.PowerManager; import android.support.v4.media.MediaMetadataCompat; import android.support.v4.media.session.PlaybackStateCompat; import android.text.TextUtils; import android.view.Surface; import com.google.android.exoplayer2.ExoPlaybackException; import com.google.android.exoplayer2.ExoPlayer; import com.google.android.exoplayer2.ExoPlayerFactory; import com.google.android.exoplayer2.Format; import com.google.android.exoplayer2.PlaybackParameters; import com.google.android.exoplayer2.SimpleExoPlayer; import com.google.android.exoplayer2.Timeline; import com.google.android.exoplayer2.extractor.DefaultExtractorsFactory; import com.google.android.exoplayer2.source.AdaptiveMediaSourceEventListener; import com.google.android.exoplayer2.source.ExtractorMediaSource; import com.google.android.exoplayer2.source.MediaSource; import com.google.android.exoplayer2.source.TrackGroupArray; import com.google.android.exoplayer2.source.hls.HlsMediaSource; import com.google.android.exoplayer2.trackselection.AdaptiveTrackSelection; import 
com.google.android.exoplayer2.trackselection.DefaultTrackSelector; import com.google.android.exoplayer2.trackselection.TrackSelectionArray; import com.google.android.exoplayer2.upstream.DataSource; import com.google.android.exoplayer2.upstream.DataSpec; import com.google.android.exoplayer2.upstream.DefaultBandwidthMeter; import com.google.android.exoplayer2.upstream.DefaultHttpDataSourceFactory; import com.google.android.exoplayer2.upstream.FileDataSourceFactory; import com.google.android.exoplayer2.upstream.HttpDataSource; import com.google.android.exoplayer2.util.Util; import java.io.IOException; import nuclei.media.MediaId; import nuclei.media.MediaMetadata; import nuclei.media.MediaProvider; import nuclei.media.MediaService; import nuclei.logs.Log; import nuclei.logs.Logs; public class ExoPlayerPlayback extends BasePlayback implements Playback, AudioManager.OnAudioFocusChangeListener, ExoPlayer.EventListener, ExtractorMediaSource.EventListener, AdaptiveMediaSourceEventListener { static final Log LOG = Logs.newLog(ExoPlayerPlayback.class); // The volume we set the media player to when we lose audio focus, but are // allowed to reduce the volume instead of stopping playback. private static final float VOLUME_DUCK = 0.2f; // The volume we set the media player when we have audio focus. 
private static final float VOLUME_NORMAL = 1.0f; // we don't have audio focus, and can't duck (play at a low volume) private static final int AUDIO_NO_FOCUS_NO_DUCK = 0; // we don't have focus, but can duck (play at a low volume) private static final int AUDIO_NO_FOCUS_CAN_DUCK = 1; // we have full audio focus private static final int AUDIO_FOCUSED = 2; protected static final DefaultBandwidthMeter BANDWIDTH_METER = new DefaultBandwidthMeter(); private final Handler mHandler; final MediaService mService; private final WifiManager.WifiLock mWifiLock; private int mState; private boolean mPlayOnFocusGain; private Callback mCallback; private volatile boolean mAudioNoisyReceiverRegistered; private volatile long mCurrentPosition; private volatile MediaId mCurrentMediaId; private volatile MediaMetadata mMediaMetadata; private boolean mPrepared; private boolean mRestart; private boolean mPlayWhenReady = true; // Type of audio focus we have: private int mAudioFocus = AUDIO_NO_FOCUS_NO_DUCK; private final AudioManager mAudioManager; private SimpleExoPlayer mMediaPlayer; private long mSurfaceId; private Surface mSurface; private PlaybackParameters mPlaybackParams; private final PowerManager.WakeLock mWakeLock; private final IntentFilter mAudioNoisyIntentFilter = new IntentFilter(AudioManager.ACTION_AUDIO_BECOMING_NOISY); private int mIllegalStateRetries; private final BroadcastReceiver mAudioNoisyReceiver = new BroadcastReceiver() { @Override public void onReceive(Context context, Intent intent) { if (AudioManager.ACTION_AUDIO_BECOMING_NOISY.equals(intent.getAction())) { LOG.d("Headphones disconnected."); if (isPlaying()) { Intent i = new Intent(context, MediaService.class); i.setAction(MediaService.ACTION_CMD); i.putExtra(MediaService.CMD_NAME, MediaService.CMD_PAUSE); mService.startService(i); } } } }; public ExoPlayerPlayback(MediaService service) { mService = service; mHandler = new Handler(); final Context ctx = service.getApplicationContext(); mAudioManager = 
(AudioManager) ctx.getSystemService(Context.AUDIO_SERVICE); PowerManager powerManager = (PowerManager) ctx.getSystemService(Context.POWER_SERVICE); WifiManager wifiManager = (WifiManager) ctx.getSystemService(Context.WIFI_SERVICE); // Create the Wifi lock (this does not acquire the lock, this just creates it) mWifiLock = wifiManager.createWifiLock(WifiManager.WIFI_MODE_FULL, "nuclei_media_wifi_lock"); mWakeLock = powerManager.newWakeLock(PowerManager.PARTIAL_WAKE_LOCK, "nuclei_media_cpu_lock"); } @Override public void start() { } @Override public void stop(boolean notifyListeners) { LOG.d("stop"); if (mMediaMetadata != null) mMediaMetadata.setTimingSeeked(false); mState = PlaybackStateCompat.STATE_STOPPED; if (notifyListeners && mCallback != null) { mCallback.onPlaybackStatusChanged(mState); } //mCurrentPosition = getCurrentStreamPosition(); // Give up Audio focus giveUpAudioFocus(); unregisterAudioNoisyReceiver(); // Relax all resources relaxResources(true); } @Override public void temporaryStop() { LOG.d("stop"); mState = PlaybackStateCompat.STATE_STOPPED; if (mCallback != null) { mCallback.onPlaybackStatusChanged(mState); } if (mMediaPlayer != null) mMediaPlayer.stop(); relaxResources(false); } @Override public void updateLastKnownStreamPosition() { mCurrentPosition = getCurrentStreamPosition(); } public void stopFully() { stop(true); } @Override public void setState(int state) { mState = state; if (state == PlaybackStateCompat.STATE_ERROR) { try { mCurrentPosition = getCurrentStreamPosition(); } catch (Exception err) { LOG.e("Error capturing current pos", err); } } } @Override public int getState() { return mState; } @Override public boolean isConnected() { return true; } @Override public boolean isPlaying() { return mPlayOnFocusGain || isStatePlaying(); } private boolean isStatePlaying() { return isMediaPlayerPlaying() || mState == PlaybackStateCompat.STATE_PLAYING || mState == PlaybackStateCompat.STATE_BUFFERING || mState == 
PlaybackStateCompat.STATE_CONNECTING; } @Override protected long internalGetCurrentStreamPosition() { if (mMediaPlayer != null) { return mMediaPlayer.getCurrentPosition(); } return mCurrentPosition; } @Override protected void internalPlay(MediaMetadata metadataCompat, Timing timing, boolean seek) { mPlayOnFocusGain = true; tryToGetAudioFocus(); registerAudioNoisyReceiver(); boolean mediaHasChanged = mCurrentMediaId == null || !TextUtils.equals(metadataCompat.getDescription().getMediaId(), mCurrentMediaId.toString()); if (mediaHasChanged || mRestart) { mRestart = false; mCurrentPosition = getStartStreamPosition(); mMediaMetadata = metadataCompat; mMediaMetadata.setCallback(mCallback); mCurrentMediaId = MediaProvider.getInstance().getMediaId(metadataCompat.getDescription().getMediaId()); } mPlayWhenReady = true; if (mState == PlaybackStateCompat.STATE_PAUSED && !mediaHasChanged && mMediaPlayer != null) { if (!mWakeLock.isHeld()) mWakeLock.acquire(); if (!mWifiLock.isHeld()) mWifiLock.acquire(); configMediaPlayerState(false, true); } else { mState = mMediaPlayer != null ? mState == PlaybackStateCompat.STATE_STOPPED ? 
PlaybackStateCompat.STATE_STOPPED : PlaybackStateCompat.STATE_PAUSED : PlaybackStateCompat.STATE_STOPPED; relaxResources(false); // release everything except MediaPlayer setTrack(metadataCompat); } if (timing != null && seek) internalSeekTo(timing.start); } @Override protected void internalPrepare(MediaMetadata metadataCompat, Timing timing) { boolean mediaHasChanged = mCurrentMediaId == null || !TextUtils.equals(metadataCompat.getDescription().getMediaId(), mCurrentMediaId.toString()); if (mediaHasChanged) { stop(true); mCurrentPosition = getStartStreamPosition(); mMediaMetadata = metadataCompat; mMediaMetadata.setCallback(mCallback); mCurrentMediaId = MediaProvider.getInstance().getMediaId(metadataCompat.getDescription().getMediaId()); if (mCallback != null) mCallback.onMetadataChanged(mMediaMetadata); mPlayWhenReady = false; setTrack(metadataCompat); if (timing != null) internalSeekTo(timing.start); } } private void setTrack(MediaMetadata track) { track.setTimingSeeked(false); @SuppressWarnings("ResourceType") String source = track.getString(MediaProvider.CUSTOM_METADATA_TRACK_SOURCE); @SuppressWarnings("ResourceType") int type = (int) track.getLong(MediaProvider.CUSTOM_METADATA_TRACK_TYPE); if (LOG.isLoggable(Log.INFO)) LOG.i("setTrack=" + source + ", type=" + type); createMediaPlayer(source, type); if (mPlayWhenReady) mState = PlaybackStateCompat.STATE_BUFFERING; if (mCallback != null) mCallback.onPlaybackStatusChanged(mState); } private boolean isMediaPlayerPlaying() { if (mMediaPlayer == null || !mMediaPlayer.getPlayWhenReady()) return false; int state = mMediaPlayer.getPlaybackState(); return state == ExoPlayer.STATE_READY || state == ExoPlayer.STATE_BUFFERING; } @Override public void pause() { LOG.d("pause"); mPlayWhenReady = false; if (isPlaying()) { // Pause media player and cancel the 'foreground service' state. 
if (isMediaPlayerPlaying()) { mCurrentPosition = getCurrentStreamPosition(); mMediaPlayer.setPlayWhenReady(false); } } // while paused, retain the MediaPlayer but give up audio focus relaxResources(false); giveUpAudioFocus(); mState = PlaybackStateCompat.STATE_PAUSED; if (mCallback != null) { mCallback.onPlaybackStatusChanged(mState); } unregisterAudioNoisyReceiver(); } @Override protected long internalGetDuration() { return mMediaPlayer == null ? -1 : mMediaPlayer.getDuration(); } @Override protected void internalSeekTo(long position) { if (LOG.isLoggable(Log.INFO)) LOG.d("internalSeekTo"); mCurrentPosition = position; if (mMediaPlayer == null) { if (mCallback != null) { mCallback.onPlaybackStatusChanged(mState); } } else { if (isPlaying()) { mState = PlaybackStateCompat.STATE_BUFFERING; } mMediaPlayer.seekTo(position); if (mCallback != null) { mCallback.onPlaybackStatusChanged(mState); } } } @Override public void setCallback(Callback callback) { this.mCallback = callback; } @Override public void setCurrentStreamPosition(long pos) { this.mCurrentPosition = pos; } @Override protected void internalSetCurrentMediaMetadata(MediaId mediaId, MediaMetadata metadata) { mCurrentMediaId = mediaId; mMediaMetadata = metadata; } @Override public MediaId getCurrentMediaId() { return mCurrentMediaId; } @Override public MediaMetadata getCurrentMetadata() { return mMediaMetadata; } /** * Try to get the system audio focus. */ private void tryToGetAudioFocus() { LOG.d("tryToGetAudioFocus"); if (mAudioFocus != AUDIO_FOCUSED) { int result = mAudioManager.requestAudioFocus(this, AudioManager.STREAM_MUSIC, AudioManager.AUDIOFOCUS_GAIN); if (result == AudioManager.AUDIOFOCUS_REQUEST_GRANTED) { mAudioFocus = AUDIO_FOCUSED; } } } /** * Give up the audio focus. 
*/ private void giveUpAudioFocus() { LOG.d("giveUpAudioFocus"); if (mAudioFocus == AUDIO_FOCUSED) { if (mAudioManager.abandonAudioFocus(this) == AudioManager.AUDIOFOCUS_REQUEST_GRANTED) { mAudioFocus = AUDIO_NO_FOCUS_NO_DUCK; } } } /** * Reconfigures MediaPlayer according to audio focus settings and * starts/restarts it. This method starts/restarts the MediaPlayer * respecting the current audio focus state. So if we have focus, it will * play normally; if we don't have focus, it will either leave the * MediaPlayer paused or set it to a low volume, depending on what is * allowed by the current focus settings. This method assumes mPlayer != * null, so if you are calling it, you have to do so from a context where * you are sure this is the case. */ private void configMediaPlayerState(boolean updateMetaData, boolean forcePlay) { if (LOG.isLoggable(Log.DEBUG)) LOG.d("configMediaPlayerState. mAudioFocus=" + mAudioFocus); if (mAudioFocus == AUDIO_NO_FOCUS_NO_DUCK) { // If we don't have audio focus and can't duck, we have to pause, if (isPlaying()) { pause(); } } else { // we have audio focus: if (mAudioFocus == AUDIO_NO_FOCUS_CAN_DUCK) { if (mMediaPlayer != null) { mMediaPlayer.setVolume(VOLUME_DUCK); // we'll be relatively quiet } } else { if (mMediaPlayer != null) { mMediaPlayer.setVolume(VOLUME_NORMAL); // we can be loud again } // else do something for remote client. } // If we were playing when we lost focus, we need to resume playing. if (mPlayOnFocusGain && mPlayWhenReady) { if (!isMediaPlayerPlaying()) { if (LOG.isLoggable(Log.INFO)) LOG.d("configMediaPlayerState startMediaPlayer. 
seeking to " + mCurrentPosition);
        // Tail of configMediaPlayerState (method head precedes this point):
        // either resume playback from a paused/stopped state or re-seek an
        // already-active player to mCurrentPosition.
        if (mState == PlaybackStateCompat.STATE_PAUSED || mState == PlaybackStateCompat.STATE_STOPPED) {
            if (forcePlay || mCurrentPosition != mMediaPlayer.getCurrentPosition()) {
                // Hold CPU and Wi-Fi awake while we buffer/seek so playback
                // is not interrupted by the device sleeping.
                if (!mWakeLock.isHeld()) mWakeLock.acquire();
                if (!mWifiLock.isHeld()) mWifiLock.acquire();
                mState = PlaybackStateCompat.STATE_BUFFERING;
                mMediaPlayer.seekTo(mCurrentPosition);
                mMediaPlayer.setPlayWhenReady(true);
            } else mState = PlaybackStateCompat.STATE_PLAYING; // already at position: just flip the state
        } else {
            // Player is in some other state (e.g. playing): seek and report buffering.
            mMediaPlayer.seekTo(mCurrentPosition);
            mState = PlaybackStateCompat.STATE_BUFFERING;
        }
    }
    mPlayOnFocusGain = false; // the deferred-resume request has now been honored (or discarded)
    }
    }
    // Notify the registered callback of any state/metadata change.
    if (mCallback != null) {
        if (updateMetaData) mCallback.onMetadataChanged(mMediaMetadata);
        mCallback.onPlaybackStatusChanged(mState);
    }
    }

    /** No-op: playback-parameter changes reported by ExoPlayer are ignored here. */
    @Override
    public void onPlaybackParametersChanged(PlaybackParameters playbackParameters) {
    }

    /**
     * Called by AudioManager on audio focus changes.
     * Implementation of {@link AudioManager.OnAudioFocusChangeListener}
     *
     * Updates {@code mAudioFocus} (full / duck / no-duck) and, when full focus is
     * lost while playing, records {@code mPlayOnFocusGain} so playback resumes on
     * the next AUDIOFOCUS_GAIN. Always ends by re-applying player state via
     * {@code configMediaPlayerState(false, false)}.
     */
    @Override
    public void onAudioFocusChange(int focusChange) {
        if (LOG.isLoggable(Log.INFO)) LOG.d("onAudioFocusChange. focusChange=" + focusChange);
        if (focusChange == AudioManager.AUDIOFOCUS_GAIN) {
            // We have gained focus:
            mAudioFocus = AUDIO_FOCUSED;
        } else if (focusChange == AudioManager.AUDIOFOCUS_LOSS || focusChange == AudioManager.AUDIOFOCUS_LOSS_TRANSIENT || focusChange == AudioManager.AUDIOFOCUS_LOSS_TRANSIENT_CAN_DUCK) {
            // We have lost focus. If we can duck (low playback volume), we can keep playing.
            // Otherwise, we need to pause the playback.
            boolean canDuck = focusChange == AudioManager.AUDIOFOCUS_LOSS_TRANSIENT_CAN_DUCK;
            mAudioFocus = canDuck ? AUDIO_NO_FOCUS_CAN_DUCK : AUDIO_NO_FOCUS_NO_DUCK;
            // If we are playing, we need to reset media player by calling configMediaPlayerState
            // with mAudioFocus properly set.
            if (mState == PlaybackStateCompat.STATE_PLAYING && !canDuck) {
                // If we don't have audio focus and can't duck, we save the information that
                // we were playing, so that we can resume playback once we get the focus back.
                mPlayOnFocusGain = true;
            }
        } else {
            LOG.e("onAudioFocusChange: Ignoring unsupported focusChange: " + focusChange);
        }
        configMediaPlayerState(false, false);
    }

    /**
     * Returns a new DataSource factory.
     *
     * @param context           Context used to build the HTTP factory's user agent.
     * @param useBandwidthMeter Whether to set {@link #BANDWIDTH_METER} as a listener to the new
     *                          DataSource factory.
     * @param http              {@code true} for an HTTP-backed factory, {@code false} for a
     *                          local-file-backed factory.
     * @return A new DataSource factory.
     */
    protected DataSource.Factory buildDataSourceFactory(Context context, boolean useBandwidthMeter, boolean http) {
        return http ? buildHttpDataSourceFactory(context, useBandwidthMeter) : buildFileDataSourceFactory(useBandwidthMeter);
    }

    /**
     * Builds the HTTP data-source factory with a "NucleiPlayer" user agent and
     * 15-second connect/read timeouts; cross-protocol redirects are disabled
     * (final {@code false} argument).
     */
    protected HttpDataSource.Factory buildHttpDataSourceFactory(Context context, boolean useBandwidthMeter) {
        final String userAgent = Util.getUserAgent(context, "NucleiPlayer");
        return new DefaultHttpDataSourceFactory(userAgent, useBandwidthMeter ? BANDWIDTH_METER : null, 15000, 15000, false);
    }

    /** Builds a local-file data-source factory, optionally metered by {@link #BANDWIDTH_METER}. */
    protected DataSource.Factory buildFileDataSourceFactory(boolean useBandwidthMeter) {
        return new FileDataSourceFactory(useBandwidthMeter ? BANDWIDTH_METER : null);
    }

    /**
     * Creates the underlying ExoPlayer instance with an adaptive track selector.
     * {@code url} and {@code type} are unused here but kept so subclasses can
     * specialize the player per media item.
     */
    protected SimpleExoPlayer newMediaPlayer(Context context, String url, int type) {
        return ExoPlayerFactory.newSimpleInstance(context, new DefaultTrackSelector(new AdaptiveTrackSelection.Factory(BANDWIDTH_METER)));
    }

    /**
     * Chooses the MediaSource implementation for {@code url}:
     * local files and plain audio go through an extractor-based source, while
     * video or ".m3u8" URLs are treated as HLS streams.
     */
    protected MediaSource newMediaSource(Context context, String url, int type, Handler handler) {
        boolean hls = false;
        boolean localFile = url.startsWith("file://");
        if (!localFile) {
            try {
                // Any video, or any URL whose path ends in .m3u8, is HLS.
                hls = type == MediaId.TYPE_VIDEO || Uri.parse(url).getPath().endsWith(".m3u8");
            } catch (Exception ignore) {
                // Unparseable URL: fall through with hls == false (treated as progressive).
            }
        }
        // expecting MP3 here ... otherwise HLS
        if ((localFile || type == MediaId.TYPE_AUDIO) && !hls) {
            return new ExtractorMediaSource(Uri.parse(url), buildDataSourceFactory(context, true, !localFile), new DefaultExtractorsFactory(), handler, this);
        } else {
            return new HlsMediaSource(Uri.parse(url), buildDataSourceFactory(context, true, true), handler, this);
        }
    }

    /**
     * Makes sure the media player exists and has been reset. This will create
     * the media player if needed, or reset the existing media player if one
     * already exists.
     */
    private void createMediaPlayer(String url, int type) {
        if (mMediaPlayer != null) {
            // NOTE(review): listener is removed after release(); removing it before
            // release() would avoid callbacks during teardown — confirm intent.
            mMediaPlayer.release();
            mMediaPlayer.removeListener(this);
        }
        mPrepared = false; // set true again in onPlayerStateChanged once STATE_READY arrives
        mMediaPlayer = newMediaPlayer(mService.getApplicationContext(), url, type);
        mMediaPlayer.addListener(this);
        MediaSource mediaSource = newMediaSource(mService.getApplicationContext(), url, type, mHandler);
        mMediaPlayer.prepare(mediaSource);
        // Make sure the media player will acquire a wake-lock while
        // playing. If we don't do that, the CPU might go to sleep while the
        // song is playing, causing playback to stop.
        if (!mWakeLock.isHeld()) mWakeLock.acquire();
        if (!mWifiLock.isHeld()) mWifiLock.acquire();
    }

    /** No-op: loading-state changes are not tracked. */
    @Override
    public void onLoadingChanged(boolean isLoading) {
    }

    /** No-op: track changes are not tracked. */
    @Override
    public void onTracksChanged(TrackGroupArray trackGroups, TrackSelectionArray trackSelections) {
    }

    /**
     * Core ExoPlayer state callback. On first STATE_READY it finishes the
     * deferred setup started in createMediaPlayer (locks, surface, seek,
     * play-when-ready); otherwise it snapshots the current position, keeps the
     * cached metadata's duration in sync, maps ExoPlayer states onto
     * PlaybackStateCompat values, and notifies the callback.
     */
    @Override
    public void onPlayerStateChanged(boolean playWhenReady, int playbackState) {
        if (LOG.isLoggable(Log.DEBUG)) LOG.d("onStateChanged=" + playbackState + ", " + playWhenReady);
        if (!mPrepared && playbackState == ExoPlayer.STATE_READY && mMediaPlayer != null) {
            // First READY after prepare(): complete the deferred start sequence.
            mPrepared = true;
            if (!mWakeLock.isHeld()) mWakeLock.acquire();
            if (!mWifiLock.isHeld()) mWifiLock.acquire();
            configMediaPlayerState(true, false);
            setSurface(mSurfaceId, mSurface);
            mMediaPlayer.seekTo(mCurrentPosition);
            mMediaPlayer.setPlayWhenReady(mPlayWhenReady);
        } else if (mMediaPlayer != null && mState != PlaybackStateCompat.STATE_ERROR && mState != PlaybackStateCompat.STATE_BUFFERING)
            // Keep mCurrentPosition fresh so it survives player teardown/recreation.
            mCurrentPosition = mMediaPlayer.getCurrentPosition();
        if (mMediaPlayer != null && mMediaMetadata != null) {
            final long duration = getDuration();
            // NOTE(review): setDuration implies mMediaMetadata is a mutable wrapper
            // around MediaMetadataCompat — confirm against its declaration.
            if (mMediaMetadata.getLong(MediaMetadataCompat.METADATA_KEY_DURATION) != duration)
                mMediaMetadata.setDuration(duration);
        }
        // Map ExoPlayer playback states onto PlaybackStateCompat for the session.
        switch (playbackState) {
            case ExoPlayer.STATE_BUFFERING:
                mState = PlaybackStateCompat.STATE_BUFFERING;
                mIllegalStateRetries = 0;
                break;
            case ExoPlayer.STATE_ENDED:
                mState = PlaybackStateCompat.STATE_NONE;
                mIllegalStateRetries = 0;
                break;
            case ExoPlayer.STATE_IDLE:
                // Preserve an ERROR state so callers can observe it; otherwise idle == none.
                if (mState != PlaybackStateCompat.STATE_ERROR)
                    mState = PlaybackStateCompat.STATE_NONE;
                break;
            case ExoPlayer.STATE_READY:
                mIllegalStateRetries = 0;
                if (isMediaPlayerPlaying()) {
                    mState = PlaybackStateCompat.STATE_PLAYING;
                    // Playback speed/pitch params are only supported on API 23+.
                    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
                        if (mPlaybackParams != null) mMediaPlayer.setPlaybackParameters(mPlaybackParams);
                    }
                } else mState = PlaybackStateCompat.STATE_PAUSED;
                break;
            default:
                mState = PlaybackStateCompat.STATE_NONE;
                break;
        }
        if (mCallback != null)
            mCallback.onPlaybackStatusChanged(mState);
        if (playbackState == ExoPlayer.STATE_ENDED) {
            mRestart = true; // next play() must restart from the top
            if (mCallback != null) mCallback.onCompletion();
        }
    }

    /** No-op: timeline changes are not tracked. */
    @Override
    public void onTimelineChanged(Timeline timeline, Object manifest) {
    }

    /** Routes ExoPlayer errors into the shared {@link #onError(Exception)} handler. */
    @Override
    public void onPlayerError(ExoPlaybackException error) {
        onError(error);
    }

    /** No-op: position discontinuities are not tracked. */
    @Override
    public void onPositionDiscontinuity() {
    }

    /** No-op media-source load callback. */
    @Override
    public void onLoadStarted(DataSpec dataSpec, int dataType, int trackType, Format trackFormat, int trackSelectionReason, Object trackSelectionData, long mediaStartTimeMs, long mediaEndTimeMs, long elapsedRealtimeMs) {
    }

    /** No-op media-source load callback. */
    @Override
    public void onLoadCompleted(DataSpec dataSpec, int dataType, int trackType, Format trackFormat, int trackSelectionReason, Object trackSelectionData, long mediaStartTimeMs, long mediaEndTimeMs, long elapsedRealtimeMs, long loadDurationMs, long bytesLoaded) {
    }

    /** No-op media-source load callback. */
    @Override
    public void onLoadCanceled(DataSpec dataSpec, int dataType, int trackType, Format trackFormat, int trackSelectionReason, Object trackSelectionData, long mediaStartTimeMs, long mediaEndTimeMs, long elapsedRealtimeMs, long loadDurationMs, long bytesLoaded) {
    }

    /** Routes media-source load errors into the shared {@link #onError(Exception)} handler. */
    @Override
    public void onLoadError(DataSpec dataSpec, int dataType, int trackType, Format trackFormat, int trackSelectionReason, Object trackSelectionData, long mediaStartTimeMs, long mediaEndTimeMs, long elapsedRealtimeMs, long loadDurationMs, long bytesLoaded, IOException error, boolean wasCanceled) {
        onError(error);
    }

    /** No-op media-source callback. */
    @Override
    public void onUpstreamDiscarded(int trackType, long mediaStartTimeMs, long mediaEndTimeMs) {
    }

    /** No-op media-source callback. */
    @Override
    public void onDownstreamFormatChanged(int trackType, Format trackFormat, int trackSelectionReason, Object trackSelectionData, long mediaTimeMs) {
    }

    /** Routes playlist/manifest load errors into the shared {@link #onError(Exception)} handler. */
    @Override
    public void onLoadError(IOException error) {
        onError(error);
    }

    /**
     * Central error handler. ExoPlaybackExceptions caused by IllegalStateException
     * are retried up to 4 times by tearing down and re-playing the current item.
     * IOExceptions anywhere in the cause chain stop playback but keep the stream
     * position so it can be resumed; all other errors stop playback outright.
     * Either way the callback is notified via onError(e, false).
     */
    private void onError(Exception e) {
        LOG.e("onError", e);
        if (e instanceof ExoPlaybackException && e.getCause() instanceof IllegalStateException) {
            final int maxRetries = 4;
            if (mIllegalStateRetries < maxRetries) {
                mIllegalStateRetries++;
                // Full teardown, then restart the same media item.
                pause();
                relaxResources(true);
                if (mMediaMetadata != null) {
                    play(mMediaMetadata);
                    return;
                }
            }
        }
        // Walk the cause chain looking for an I/O failure (likely network).
        Throwable err = e;
        do {
            if (err instanceof IOException) {
                if (mCallback != null) {
                    mCallback.onError(e, false);
                    // stop(true) resets position; capture and restore it so a
                    // later retry resumes where the stream failed.
                    long pos = getCurrentStreamPosition();
                    stop(true);
                    mCurrentPosition = pos;
                }
                return;
            }
            err = err.getCause();
        } while (err != null);
        // Non-I/O failure: stop and report.
        if (mCallback != null) {
            stop(true);
            mCallback.onError(e, false);
        }
    }

    /**
     * Releases resources used by the service for playback. This includes the
     * "foreground service" status, the wake locks and possibly the MediaPlayer.
     *
     * @param releaseMediaPlayer Indicates whether the Media Player should also
     *                           be released or not
     */
    private void relaxResources(boolean releaseMediaPlayer) {
        if (LOG.isLoggable(Log.DEBUG)) LOG.d("relaxResources. releaseMediaPlayer=" + releaseMediaPlayer);
        mService.stopForeground(true);
        // stop and release the Media Player, if it's available
        if (releaseMediaPlayer && mMediaPlayer != null) {
            mSurface = null;
            mMediaPlayer.setVideoSurface(null);
            mMediaPlayer.release();
            mMediaPlayer.removeListener(this);
            mMediaPlayer = null;
            mPrepared = false;
        }
        // Always drop the locks, even when the player instance is kept.
        if (mWifiLock.isHeld()) mWifiLock.release();
        if (mWakeLock.isHeld()) mWakeLock.release();
    }

    /** Registers the becoming-noisy receiver once; guarded by a flag to avoid double registration. */
    private void registerAudioNoisyReceiver() {
        if (!mAudioNoisyReceiverRegistered) {
            mService.registerReceiver(mAudioNoisyReceiver, mAudioNoisyIntentFilter);
            mAudioNoisyReceiverRegistered = true;
        }
    }

    /** Unregisters the becoming-noisy receiver if it is currently registered. */
    private void unregisterAudioNoisyReceiver() {
        if (mAudioNoisyReceiverRegistered) {
            mService.unregisterReceiver(mAudioNoisyReceiver);
            mAudioNoisyReceiverRegistered = false;
        }
    }

    /** @return the identifier of the surface most recently set via {@link #setSurface}. */
    @Override
    public long getSurfaceId() {
        return mSurfaceId;
    }

    /** @return the video surface most recently set via {@link #setSurface}, or null. */
    @Override
    public Surface getSurface() {
        return mSurface;
    }

    /**
     * Attaches (or detaches, when {@code surface} is null) the video output surface.
     * A null surface is ignored unless it carries the currently-active surfaceId,
     * so a stale owner cannot clear another owner's surface.
     */
    @Override
    public void setSurface(final long surfaceId, final Surface surface) {
        if (surface == null && mSurfaceId != surfaceId) return;
        mSurfaceId = surfaceId;
        mSurface = surface;
        if (mMediaPlayer != null) mMediaPlayer.setVideoSurface(surface);
    }

    /**
     * Stores the desired playback parameters (speed/pitch), applies them to the
     * live player if one exists, and re-notifies the callback of the current state.
     */
    @Override
    public void setPlaybackParams(PlaybackParameters playbackParams) {
        mPlaybackParams = playbackParams;
        if (mMediaPlayer != null) mMediaPlayer.setPlaybackParameters(playbackParams);
        if (mCallback != null) mCallback.onPlaybackStatusChanged(mState);
    }
}