package com.jay.car.impl;

import com.jay.car.Car;
import com.jay.car.enums.ActionEnum;
import com.jay.car.enums.OrientationEnum;
import org.apache.commons.lang3.StringUtils;

/**
 * Driverless car that moves on a bounded square grid (the parking lot).
 */
public class DriverLessCar implements Car {

    // Upper boundary of the parking lot grid
    private int endPoint;

    // Lower boundary of the parking lot grid
    private int startingPoint;

    // Abscissa
    private int positionX;

    // Ordinate
    private int positionY;

    // North by default
    private OrientationEnum orientation;

    public DriverLessCar() {
        this.endPoint = 4;
        this.startingPoint = 1;
        this.positionX = startingPoint;
        this.positionY = startingPoint;
        this.orientation = OrientationEnum.NORTH;
    }

    public DriverLessCar(OrientationEnum orientation, int positionX, int positionY) {
        this(4, 1, positionX, positionY, orientation);
    }

    public DriverLessCar(int endPoint, int startingPoint, int positionX, int positionY,
                         OrientationEnum orientation) {
        this.endPoint = endPoint;
        this.startingPoint = startingPoint;
        this.positionX = positionX;
        this.positionY = positionY;
        this.orientation = orientation;
    }

    @Override
    public void move(String command) {
        try {
            if (StringUtils.isBlank(command)) {
                // An Error type was thrown here originally; an unchecked exception is the idiomatic choice
                throw new IllegalArgumentException("The command must not be blank");
            }
            if (command.equalsIgnoreCase(ActionEnum.RUN.getCommand())) {
                // run
                moveByRun();
            } else if (command.equalsIgnoreCase(ActionEnum.CLOCKWISE_ROTATION.getCommand())) {
                // clockwise rotation
                moveByClockwiseRotation();
            } else if (command.equalsIgnoreCase(ActionEnum.COUNTER_CLOCKWISE_ROTATION.getCommand())) {
                // counterclockwise rotation
                moveByCounterClockwiseRotation();
            } else {
                throw new IllegalArgumentException("Unknown command: " + command);
            }
        } finally {
            checkOutOfBoundary();
        }
    }

    protected void moveByRun() {
        switch (orientation) {
            case EAST:
                moveByPositionX(1);
                break;
            case WEST:
                moveByPositionX(-1);
                break;
            case NORTH:
                moveByPositionY(1);
                break;
            case SOUTH:
                moveByPositionY(-1);
                break;
        }
    }

    protected void moveByPositionX(int posX) {
        this.positionX = this.positionX + posX;
    }

    protected void moveByPositionY(int posY) {
        this.positionY = this.positionY + posY;
    }

    protected void moveByClockwiseRotation() {
        switch (orientation) {
            case EAST:
                orientation = OrientationEnum.SOUTH;
                break;
            case SOUTH:
                orientation = OrientationEnum.WEST;
                break;
            case WEST:
                orientation = OrientationEnum.NORTH;
                break;
            case NORTH:
                orientation = OrientationEnum.EAST;
                break;
        }
    }

    protected void moveByCounterClockwiseRotation() {
        switch (orientation) {
            case EAST:
                orientation = OrientationEnum.NORTH;
                break;
            case SOUTH:
                orientation = OrientationEnum.EAST;
                break;
            case WEST:
                orientation = OrientationEnum.SOUTH;
                break;
            case NORTH:
                orientation = OrientationEnum.WEST;
                break;
        }
    }

    /**
     * Checks whether the car is outside the parking lot boundary; if so, an exception is thrown.
     */
    protected void checkOutOfBoundary() {
        if (this.positionX < startingPoint || this.positionY < startingPoint
                || this.positionX > endPoint || this.positionY > endPoint) {
            throw new IllegalStateException("You can't leave the parking lot");
        }
    }

    @Override
    public int getPositionX() {
        return positionX;
    }

    @Override
    public int getPositionY() {
        return positionY;
    }

    @Override
    public String getOrientation() {
        return orientation.getName();
    }

    @Override
    public String toString() {
        return String.format("At present, it is located at x = %d and y = %d, and the direction is %s",
                positionX, positionY, orientation.getName());
    }
}
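For illustration, a minimal sketch of how the class above might be exercised. The demo class is an assumption (it is not part of the original source), and the positions noted in the comments follow from the movement rules above:

package com.jay.car.impl;

import com.jay.car.enums.ActionEnum;

// Hypothetical demo class, for illustration only.
public class DriverLessCarDemo {
    public static void main(String[] args) {
        // Starts at (1, 1) facing north on the default 4x4 lot
        DriverLessCar car = new DriverLessCar();
        car.move(ActionEnum.RUN.getCommand());                // north to (1, 2)
        car.move(ActionEnum.CLOCKWISE_ROTATION.getCommand()); // now facing east
        car.move(ActionEnum.RUN.getCommand());                // east to (2, 2)
        System.out.println(car);
    }
}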
/**
 * Test of getWind_dir method, of class Path.
 */
@Test
public void testGetWind_dir() {
    System.out.println("getWind_dir");
    double expResult = 20;
    double result = path.getWind_dir();
    assertEquals(expResult, result, 0.0);
}
/*
 * Copyright 2015 <NAME>
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package ch.dissem.bitmessage;

import ch.dissem.bitmessage.entity.valueobject.NetworkAddress;
import ch.dissem.bitmessage.ports.NodeRegistry;

import java.util.LinkedList;
import java.util.List;

/**
 * Empty {@link NodeRegistry} that doesn't do anything, but shouldn't break things either.
 */
class TestNodeRegistry implements NodeRegistry {
    private List<NetworkAddress> nodes = new LinkedList<>();

    public TestNodeRegistry(int... ports) {
        for (int port : ports) {
            nodes.add(
                    new NetworkAddress.Builder()
                            .ipv4(127, 0, 0, 1)
                            .port(port)
                            .build()
            );
        }
    }

    @Override
    public List<NetworkAddress> getKnownAddresses(int limit, long... streams) {
        return nodes;
    }

    @Override
    public void offerAddresses(List<NetworkAddress> addresses) {
        // Ignore
    }
}
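The stub simply echoes back whatever peers it was constructed with, regardless of the limit or stream arguments. A minimal sketch of its use (the demo class and port numbers are assumptions, not part of the original repository):

package ch.dissem.bitmessage;

import ch.dissem.bitmessage.entity.valueobject.NetworkAddress;
import ch.dissem.bitmessage.ports.NodeRegistry;

import java.util.List;

// Hypothetical demo class, for illustration only.
class TestNodeRegistryDemo {
    public static void main(String[] args) {
        // Two fake peers on localhost
        NodeRegistry registry = new TestNodeRegistry(6001, 6002);
        // limit and streams are ignored by this stub; both peers come back
        List<NetworkAddress> known = registry.getKnownAddresses(10, 1L);
        System.out.println(known.size()); // prints 2
    }
}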
/*
 * Less painful Android development with Scala
 *
 * http://scaloid.org
 *
 * Copyright 2013 <NAME> and Scaloid team
 *
 * Sung-Ho Lee and Scaloid team licenses this file to you under the Apache License,
 * version 2.0 (the "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 */
package org.scaloid;

import android.provider.ContactsContract;

/**
 * ## Static fields on protected interfaces
 * <p/>
 * The Android API has some protected interfaces which have static fields, and these are
 * inherited by public classes. For example, `android.provider.ContactsContract.Contacts`
 * inherits a protected interface `android.provider.ContactsContract.ContactsColumns`, which
 * defines a static field `ContactsColumns.DISPLAY_NAME`. In Java code, you can access it with
 * `Contacts.DISPLAY_NAME`. However, Scala does not support accessing it in this way (please
 * refer to [this](https://issues.scala-lang.org/browse/SI-1806) and
 * [this](http://www.scala-lang.org/faq/4)). This is bad news for the Android-Scala programmer,
 * so we provide a workaround implementation for this problem. Just copy-and-paste
 * `Workaround.java` and declare `import org.scaloid.Workarounds._`. Then you can use these
 * interfaces publicly, even though they were originally defined as protected.
 */
public interface Workarounds {

    interface BaseColumns {
        String _ID = ContactsContract.RawContacts._ID;
        String _COUNT = ContactsContract.RawContacts._COUNT;
    }

    interface RawContactsColumns {
        String CONTACT_ID = ContactsContract.RawContacts.CONTACT_ID;
        String AGGREGATION_MODE = ContactsContract.RawContacts.AGGREGATION_MODE;
        String DELETED = ContactsContract.RawContacts.DELETED;
    }

    interface ContactsColumns {
        String DISPLAY_NAME = ContactsContract.Contacts.DISPLAY_NAME;
        String PHOTO_ID = ContactsContract.Contacts.PHOTO_ID;
        String IN_VISIBLE_GROUP = ContactsContract.Contacts.IN_VISIBLE_GROUP;
        String HAS_PHONE_NUMBER = ContactsContract.Contacts.HAS_PHONE_NUMBER;
        String LOOKUP_KEY = ContactsContract.Contacts.LOOKUP_KEY;
    }

    interface ContactStatusColumns {
        String CONTACT_PRESENCE = ContactsContract.Contacts.CONTACT_PRESENCE;
        // Not available in API Level 10
        // String CONTACT_CHAT_CAPABILITY = ContactsContract.Contacts.CONTACT_CHAT_CAPABILITY;
        String CONTACT_STATUS = ContactsContract.Contacts.CONTACT_STATUS;
        String CONTACT_STATUS_TIMESTAMP = ContactsContract.Contacts.CONTACT_STATUS_TIMESTAMP;
        String CONTACT_STATUS_RES_PACKAGE = ContactsContract.Contacts.CONTACT_STATUS_RES_PACKAGE;
        String CONTACT_STATUS_LABEL = ContactsContract.Contacts.CONTACT_STATUS_LABEL;
        String CONTACT_STATUS_ICON = ContactsContract.Contacts.CONTACT_STATUS_ICON;
    }

    interface ContactOptionsColumns {
        String TIMES_CONTACTED = ContactsContract.RawContacts.TIMES_CONTACTED;
        String LAST_TIME_CONTACTED = ContactsContract.RawContacts.LAST_TIME_CONTACTED;
        String STARRED = ContactsContract.RawContacts.STARRED;
        String CUSTOM_RINGTONE = ContactsContract.RawContacts.CUSTOM_RINGTONE;
        String SEND_TO_VOICEMAIL = ContactsContract.RawContacts.SEND_TO_VOICEMAIL;
    }

    interface ContactNameColumns {
        // Not available in API Level 10
        // String DISPLAY_NAME_SOURCE = ContactsContract.RawContacts.DISPLAY_NAME_SOURCE;
        // String DISPLAY_NAME_PRIMARY = ContactsContract.RawContacts.DISPLAY_NAME_PRIMARY;
        // String DISPLAY_NAME_ALTERNATIVE = ContactsContract.RawContacts.DISPLAY_NAME_ALTERNATIVE;
        // String PHONETIC_NAME_STYLE = ContactsContract.RawContacts.PHONETIC_NAME_STYLE;
        // String PHONETIC_NAME = ContactsContract.RawContacts.PHONETIC_NAME;
        // String SORT_KEY_PRIMARY = ContactsContract.RawContacts.SORT_KEY_PRIMARY;
        // String SORT_KEY_ALTERNATIVE = ContactsContract.RawContacts.SORT_KEY_ALTERNATIVE;
    }

    interface BaseSyncColumns {
        String SYNC1 = ContactsContract.RawContacts.SYNC1;
        String SYNC2 = ContactsContract.RawContacts.SYNC2;
        String SYNC3 = ContactsContract.RawContacts.SYNC3;
        String SYNC4 = ContactsContract.RawContacts.SYNC4;
    }

    interface SyncColumns extends BaseSyncColumns {
        String ACCOUNT_NAME = ContactsContract.RawContacts.ACCOUNT_NAME;
        String ACCOUNT_TYPE = ContactsContract.RawContacts.ACCOUNT_TYPE;
        String SOURCE_ID = ContactsContract.RawContacts.SOURCE_ID;
        String VERSION = ContactsContract.RawContacts.VERSION;
        String DIRTY = ContactsContract.RawContacts.DIRTY;
    }

    interface DataColumns {
        String MIMETYPE = ContactsContract.Data.MIMETYPE;
        String RAW_CONTACT_ID = ContactsContract.Data.RAW_CONTACT_ID;
        String IS_PRIMARY = ContactsContract.Data.IS_PRIMARY;
        String IS_SUPER_PRIMARY = ContactsContract.Data.IS_SUPER_PRIMARY;
        String DATA_VERSION = ContactsContract.Data.DATA_VERSION;
        String DATA1 = ContactsContract.Data.DATA1;
        String DATA2 = ContactsContract.Data.DATA2;
        String DATA3 = ContactsContract.Data.DATA3;
        String DATA4 = ContactsContract.Data.DATA4;
        String DATA5 = ContactsContract.Data.DATA5;
        String DATA6 = ContactsContract.Data.DATA6;
        String DATA7 = ContactsContract.Data.DATA7;
        String DATA8 = ContactsContract.Data.DATA8;
        String DATA9 = ContactsContract.Data.DATA9;
        String DATA10 = ContactsContract.Data.DATA10;
        String DATA11 = ContactsContract.Data.DATA11;
        String DATA12 = ContactsContract.Data.DATA12;
        String DATA13 = ContactsContract.Data.DATA13;
        String DATA14 = ContactsContract.Data.DATA14;
        String DATA15 = ContactsContract.Data.DATA15;
    }
}
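A short sketch of how these public aliases might be consumed from Java (illustrative only; the demo class is an assumption and would need to run on an Android device or emulator, since ContactsContract is part of the Android framework). From Scala, the same constants become reachable after `import org.scaloid.Workarounds._`, which is the use case the comment above describes:

package org.scaloid;

// Hypothetical demo, for illustration only.
class WorkaroundsDemo {
    static String displayNameColumn() {
        // Reachable through the public nested interface, even though the
        // original ContactsColumns interface is protected in the Android API.
        return Workarounds.ContactsColumns.DISPLAY_NAME;
    }
}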
package org.suifeng.baseframework.api.service;

import org.suifeng.baseframework.model.dto.AccessLogDTO;

/**
 * System log service interface.
 * @author luoxc
 * @since 1.0.0
 */
public interface SystemLogService {

    void addAccessLog(AccessLogDTO accessLogDTO);
}
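As a minimal sketch of what an implementation could look like (entirely hypothetical; the original source only shows the interface, and a real implementation would presumably persist the entry to a database or log store):

package org.suifeng.baseframework.api.service;

import org.suifeng.baseframework.model.dto.AccessLogDTO;

import java.util.ArrayList;
import java.util.List;

// Hypothetical in-memory implementation, for illustration only.
public class InMemorySystemLogService implements SystemLogService {

    private final List<AccessLogDTO> accessLogs = new ArrayList<>();

    @Override
    public void addAccessLog(AccessLogDTO accessLogDTO) {
        // Store the access record; a production service would write it
        // to persistent storage instead.
        accessLogs.add(accessLogDTO);
    }
}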
The Importance of Genetic Counseling, DNA Diagnostics, and Cardiologic Family Screening in Left Ventricular Noncompaction Cardiomyopathy

Background: Left ventricular (LV) noncompaction (LVNC) is a distinct cardiomyopathy featuring a thickened, bilayered LV wall consisting of a thick endocardial layer with prominent intertrabecular recesses and a thin, compact epicardial layer. Similar to hypertrophic and dilated cardiomyopathy, LVNC is genetically heterogeneous and was recently associated with mutations in sarcomere genes. To contribute to the genetic classification of LVNC, a systematic cardiological family study was performed in a cohort of 58 consecutively diagnosed and molecularly screened patients with isolated LVNC (49 adults and 9 children).

Methods and Results: Combined molecular testing and cardiological family screening revealed that 67% of LVNC is genetic. Cardiological screening with electrocardiography and echocardiography of 194 relatives from 50 unrelated LVNC probands revealed familial cardiomyopathy in 32 families (64%), including LVNC, hypertrophic cardiomyopathy, and dilated cardiomyopathy. Sixty-three percent of the relatives newly diagnosed with cardiomyopathy were asymptomatic. Of 17 asymptomatic relatives with a mutation, 9 had noncompaction cardiomyopathy. In 8 carriers, nonpenetrance was observed. This may explain why 44% (14 of 32) of familial disease remained undetected by ascertainment of family history before cardiological family screening. Molecular screening of 17 genes identified mutations in 11 genes in 41% (23 of 56) of tested probands: 35% (17 of 48) of adults and 6 of 8 children. In 18 families, single mutations were transmitted in an autosomal dominant mode. Two adults and 2 children were compound or double heterozygous for 2 different mutations. One adult proband had 3 mutations. In 50% (16 of 32) of familial LVNC, the genetic defect remained inconclusive.

Conclusion: LVNC is predominantly a genetic cardiomyopathy with variable presentation ranging from asymptomatic to severe. Accordingly, the diagnosis of LVNC requires genetic counseling, DNA diagnostics, and cardiological family screening.
Analyzing Students' Learning Progressions Throughout a Teaching Sequence on Acoustic Properties of Materials with a Model-Based Inquiry Approach

The study we have carried out aims to characterize 15- to 16-year-old students' learning progressions throughout the implementation of a teaching-learning sequence on the acoustic properties of materials. Our purpose is to better understand students' modeling processes about this topic and to identify how the instructional design and actual enactment influence students' learning progressions. This article presents the design principles which elicit the structure and types of modeling and inquiry activities designed to promote students' development of three conceptual models. Some of these activities are enhanced by the use of ICT, such as sound level meters connected to data capture systems, which facilitate the measurement of the intensity level of sound emitted by a sound source and transmitted through different materials. Framing this study within the design-based research paradigm, it consists of the experimentation of the designed teaching sequence with two groups of students (n = 29) in their science classes. The analysis of students' written productions, together with classroom observations of the implementation of the teaching sequence, allowed us to characterize students' development of the conceptual models. Moreover, we could evidence the influence of different modeling and inquiry activities on students' development of the conceptual models, identifying those that have a major impact on students' modeling processes. Having evidenced different levels of development of each conceptual model, our results have been interpreted in terms of the attributes of each conceptual model, the distance between students' preliminary mental models and the intended conceptual models, and the instructional design and enactment.

Introduction

This study aims to characterize 15- to 16-year-old secondary school students' learning progressions on the acoustic properties of materials (APM) throughout the implementation of an innovative teaching-learning sequence (TLS). Our purpose is to better understand the students' modeling processes when learning about this topic and to identify salient modeling and inquiry activities that influence students' learning progressions. The teaching sequence is largely aimed at promoting students' development of three conceptual models that explain sound attenuation in materials in terms of energy distribution (CM1); the acoustic behavior of materials in terms of their physical properties (CM2); and the acoustic behavior of materials in terms of their internal structure (CM3). We are particularly interested in understanding the process of students' construction and use of the aforementioned conceptual models as a result of their engagement in the activities that are part of a designed TLS on APM. From the perspective of the design-based research paradigm (DBR), we analyze students' learning progressions throughout the designed TLS in order to provide research-based insights for teaching the topic of APM.

Theoretical Framework

On Models and Modeling in Science and Science Education

With the purpose of using a working definition of the term model throughout this article, we explain here our understanding of different terms such as scientific model, conceptual model, and mental model.
As defined by Bunge, a scientific model is a representation of a real or conjectured system, consisting of a set of objects with their outstanding properties listed, and a set of law statements that declare the behaviors of these objects. The essential functions of a scientific model, as agreed by many authors (Oh and Oh 2011), are description, explanation, and prediction. Two types of scientific models can be distinguished from the above definition: theoretical models and empirical models. A theoretical model is seen as a scientific model which defines idealized objects (Giere 1999), whereas an empirical model (or 'model of data') is seen as a scientific model which describes patterns or regularities inferred from observable behaviors of real-world entities or systems (Koponen 2007). By conceptual model, we refer to a representation of physical objects, phenomena, or processes which is not contradictory to scientifically accepted knowledge and is shared by a given community (researchers, teachers, etc.) (Greca and Moreira 2000). In science education, a conceptual model is seen as a scientific model that has been didactically transposed to facilitate the understanding of a specific group of students. Norman emphasizes the distinction between conceptual models and mental models, understanding the latter as incomplete and unstable representations that correspond to 'what people really have in their heads and what guides their use of things' (p. 12). The literature has shown that students' mental models can be identical to, similar to, or quite different from the conceptual models that are intended to be taught in science classes. Currently, many authors in philosophy and history of science and in cognitive studies of science consider that the model-based views of scientific knowledge construction and of scientific reasoning are valid for depicting the practice of science (e.g., Giere 1999; Nersessian 1995, 1999). These model-based views state that the development of scientific knowledge consists of the progressive or cyclical construction, evaluation, and revision of models. Given this model-based philosophical stance toward scientific knowledge development, science education researchers have highlighted the need to promote model-based pedagogical approaches to teaching and learning science in schools (e.g., Clement 2000; Gilbert and Boulter 1998; Gobert and Buckley 2000; Izquierdo-Aymerich and Adúriz-Bravo 2003; Tiberghien 1994). Thus, these pedagogical approaches are grounded on the idea that the particular practices which are integral to the core work of science, and which can consequently offer an authentic scientific experience to learners, are organized around the development and use of conceptual models explaining how the natural world works.

Model-Based Teaching Approach and Modeling-Based Teaching Approach

The model-based teaching approach is a polysemic term in science education: some authors refer to it when talking about teaching and learning the knowledge content of conceptual models; others focus on teaching modeling processes as a scientific practice. We consider that teaching and learning science as a modeling process is essentially different from teaching and learning scientific models in the science classroom. The so-called modeling-based teaching and learning approach is more focused on students' construction and refinement of mental models.
We agree with Rea-Ramirez et al., who suggest that any model-based teaching and learning approach in the science classroom should engage students in a modeling process that allows them to reflect on and progressively improve their own mental models through recurring cycles of generation, evaluation, and modification, in order to accord with their own thinking and with the data obtained from the external world. The Two Worlds framework described by Buty et al. also recognizes that modeling processes play a central part in understanding science by relating descriptions of objects and events in the material world to the world of theories and models. According to these authors, everyday knowledge and scientific knowledge offer ideas and languages for describing objects and events of the material world, and these are linked via modeling processes. As stated by Tiberghien, 'the distinction between the world of theories/models and the world of objects/events serves to make explicit the modeling processes that establish relationships between them' (p. 335).

Model-Based Inquiry Teaching and Learning Approach

The model-based teaching approach as a modeling process can be put into practice through different modeling activities, such as the ones described in the literature by Mellar and Bliss. These authors distinguish between two types of modeling activities: exploratory modeling, in which students investigate the properties of conceptual models which are explicitly or implicitly introduced; and expressive modeling, in which students create models to express their own conceptions about particular targets (phenomena, events, mechanisms). Schwarz et al. propose another classification of modeling instructional activities: exploring phenomena that may necessitate using a model to figure them out, constructing a model, empirically or conceptually testing the model, evaluating the model, revising the model, and using the model to explain and predict. Within the literature devoted to modeling processes in science education, we can find some authors (e.g., Justi and Gilbert 2002) who include certain inquiry practices (e.g., designing and performing experiments) as part of the modeling process that students carry out in the science class. On the other hand, within the literature devoted to inquiry-based science education, we can also find references to the development of conceptual models as one of the goals of learning by inquiry. For instance, we can highlight the contribution made by Löhner et al., who added a third type of modeling activity to the two described by Mellar and Bliss: inquiry modeling, in which students construct models that allow them to interpret and to predict outcomes from experimenting with phenomena. We also consider that many so-called inquiry activities, such as those highlighted by Minner et al. (generating/reflecting on scientifically oriented questions, designing experiments, collecting data, drawing conclusions, and communicating findings), might also be conceived as modeling activities, since these two types of activities are often addressed to promote the development of conceptual models. According to all these authors, at the heart of inquiry there is the careful collection of data, the observation of patterns in the data, and the generation of explanations for those patterns.
They claim that the goal of teaching science is twofold: to make students understand the conceptual models that have been generated by the scientific enterprise, and to foster students' abilities to use their knowledge to engage in inquiry and to understand how scientific knowledge is generated and justified. Thus, several authors (e.g., Khan 2007) link modeling and inquiry as two related teaching approaches. This integrated teaching approach is usually called model-based inquiry (MBI). One argument for incorporating this approach in teaching science is put forward by Buckley, who highlights that students' mental models are not only a source of comprehension but also a source of new questions. She argues that students' evaluation of their own mental models leads them to generate new questions which sustain them in an inquiry cycle of question generation, investigation, and model revision. In line with this, Schwarz and Gwekwerere propose an instructional framework based on modeling and inquiry activities called EIMA, standing for Engage-Investigate-Model-Apply. These authors state that one of the major tasks in this instructional framework is to explore phenomena and to construct and reconstruct models in the light of the results of inquiry activities. However, it is common to find inquiry activities disconnected from science content learning that involve students investigating relationships without considering the possible cause of those relationships. We agree with Campbell et al. that MBI might foster a further connection between inquiry and science content or conceptual models. We are aware that, as stated by Chinn and Malhotra, modeling-based and inquiry-based teaching could also become epistemologically non-authentic teaching approaches. In particular, many scientific inquiry tasks given to students in schools do not reflect the core attributes of authentic scientific reasoning (Viennot 2010). Simple inquiry tasks may not only fail to help students learn to reason scientifically, but they may also foster a view of scientific reasoning as simple, certain, algorithmic, and focused at a surface level of observation, and science may be viewed as a process of accumulating simple facts about the world. The same could be said of some model-based approaches, which only aim at teaching the knowledge content of scientifically accepted models but not the process of developing them. In sum, we consider that model-based teaching approaches that do not include inquiry activities such as questioning or the design of experiments might result in non-authentic practices that fail to make students aware of relevant aspects of the nature of science. Similarly, some inquiry-based teaching approaches lack activities devoted to students' development of core scientific knowledge. This has led us to develop a teaching sequence on APM with a MBI approach. We understand the MBI approach as a modeling-based teaching approach which integrates inquiry and modeling school activities that, despite their simplicity, are intended to capture core components of scientific practices.

How is this Theoretical Framework Applied to the Design of the Teaching-Learning Sequence on APM?

Taking into account the model of educational reconstruction proposed by Duit et al., the design of the TLS on APM involved several stages, such as the critical analysis of the subject matter to be taught and the analysis of students' preconceptions of sound attenuation and the properties of sound insulating materials.
This preliminary analysis contributed to our decisions on the order of and connections among the three conceptual models to be taught (CM1, CM2, CM3). Furthermore, we designed the TLS on APM with a MBI approach, integrating the theoretical underpinnings of the frameworks suggested by several authors, such as the Two Worlds framework, the types of modeling activities described by Schwarz et al., the types of inquiry activities described in the literature, and the emphasis on inquiry activities focused on understanding scientific content (e.g., Viennot 2010). Therefore, the design principles of the TLS on APM are related to:

The organization of and connections among the conceptual models to be taught: As discussed in the paper published by Hernández et al., the analysis of students' preconceptions of sound attenuation and the properties of sound insulating materials gave us insight into how to organize the content to be taught in the TLS in order to facilitate students' development of a more coherent conceptual framework. According to the findings reported in that research study, we decided to organize the teaching of conceptual models starting from the phenomenon of sound attenuation (CM1), followed by the analysis of physical (macro-) properties of sound reflectors and sound absorbers (CM2) and, finally, by the analysis of the internal (or micro-) structure of these materials (CM3).

The types of activities selected to configure the MBI approach of the TLS on APM: The teaching sequence integrates modeling and inquiry activities, with a focus on making students develop, elicit, revise, and use conceptual models while, at the same time, they answer scientifically oriented questions, design experiments, collect data, discuss them, draw conclusions, and communicate findings.

The structure of each sequence of tasks: We proposed a common structure in order to design each sequence of tasks intended to contribute to students' development of each conceptual model. This common structure includes the aforementioned modeling and inquiry activities to promote:

1. Students' elicitation of a preliminary mental model,
2. Students' revision of their mental models in agreement with new evidence obtained in hands-on or thought experiments,
3. Students' revision of their mental models in agreement with the scientific perspective,
4. Students' use of their revised mental models in a new task.

Tables 1, 2, and 3 show how these design principles are applied throughout the TLS and how modeling and inquiry activities are integrated to contribute to students' development of the three conceptual models CM1, CM2, and CM3. Table 1 summarizes the sequence of tasks intended to contribute to students' development of the conceptual model of sound attenuation in materials (CM1), which allows them:

To explain how sound attenuation is produced in materials in terms of energy distribution (energy of incident sound, energy of reflected sound, energy of transmitted sound, and absorbed energy), and

To predict the acoustic behavior (sound reflector or sound absorber) of materials in terms of the intensity level that is measured inside a closed space which has been covered with sound-attenuating materials.
Table 2 summarizes the sequence of tasks intended to contribute to students' development of the conceptual model of the acoustic behavior of materials in terms of their physical properties (CM2), which allows them:

To explain how sound-attenuating materials behave when exposed to sound, taking into account their (acoustic) physical properties (density, rigidity, and porosity), and

To predict the acoustic behavior of materials in terms of their acoustic physical properties (i.e., sound reflectors have high density, high rigidity, and no porosity, whereas sound absorbers have low density, low rigidity, and porosity).

Table 3 summarizes the sequence of tasks intended to contribute to students' development of the conceptual model of the acoustic behavior of materials in terms of their internal structure (CM3), which allows them:

To explain the mechanisms of sound attenuation in materials using the particle model of matter, in terms of more or less vibration of the particles that form each material, and

To relate the acoustic behavior of materials to their physical properties, and to relate these properties to the mass and arrangement of their particles and the strength of their bonds.

On the one hand, research in this field no longer only asks whether this kind of 'teaching is effective and has become focused more on understanding the dynamics of such teaching and how it can be brought about.' On the other hand, we also recognize that crucial information is still missing in published research studies regarding the process that students follow throughout a TLS with a model-based pedagogical approach, as stated by Louca et al. The same could be said about MBI as an instructional strategy to teach science, since 'there remains little published research investigating the nuances and outcomes of this (pedagogical) approach when implemented in secondary science classrooms' (p. 261). Taking into account the issues highlighted above and framing our research within the DBR, this study will devote particular attention to experimenting with the designed TLS on APM in order to explain: the dynamics of students' development of conceptual models on sound attenuation and the APM throughout the implementation of the designed teaching sequence, and the influence of the modeling and inquiry activities of the TLS on students' learning progressions. In particular, we will try to answer the following research questions:

1. How do students progress from their preliminary mental models of sound attenuation in materials and of the acoustic behavior of materials toward the intended conceptual models throughout the implementation of the TLS on APM?

2. How do the modeling and inquiry activities of the designed TLS contribute to the students' development of the intended conceptual models?

The answers to these questions will help us to better understand how the designed teaching sequence works in two real classrooms. This will allow us to analyze the principles (DBR Collective 2003) that guided the design of the structure of the TLS on APM with a MBI approach on the basis of collected evidence.

Context of Research

The research presented here was carried out within the context of the implementation of the designed TLS on APM. The design and iterative development of this TLS (Hernández and Pintó, in press) was carried out during three consecutive years by three researchers in science education and six experienced secondary school teachers (one physics graduate and five chemistry graduates) from four different schools.
The development of the teaching sequence was based on a strong university-school collaboration emphasizing a participatory view of curriculum design (Couso, in press) in order to promote learning on the part of all members and to avoid critical transformations (Pintó 2005) of the innovation when it was implemented. The designed TLS on APM was planned to be implemented in ordinary schools with tenth-graders (15- to 16-year-old students) within the science subject 'physics and chemistry.' In the Spanish educational context, tenth grade is the last compulsory academic year for students under 16 years of age, and it is also the first grade in which the study of physics and chemistry is optional. The official physics and chemistry syllabus for the last year of compulsory secondary school, which suggests a qualitative and phenomenological study of the contents, includes the following key topics: sound waves, and the structure and properties of matter, among others. These particular topics were studied by students before the implementation of the innovative sequence on APM. Regarding the implementation of the TLS, it is important to mention that all the teachers involved in the implementation in our context were used to teaching by questioning rather than by telling and to encouraging students to work in groups. However, most of these teachers were not very familiar with the MBI pedagogical approach and were interested in developing more teaching strategies to move toward this approach. For this reason, they were involved in the design, implementation, and later refinement of the TLS and, during all this process, they were supported inside and outside the classroom by the researchers involved in the development of the TLS. After participating in the implementation and refinement of consecutive versions of the TLS during 3 years, researchers could observe that the pedagogical approach and the content of the TLS did not present a major challenge to these teachers when implementing the third (and most refined) version of the TLS. The conditions under which this third version of the TLS was implemented and the research data were collected for this study correspond to the ordinary context of these teachers' science classes.

Table note (drawing conclusions from provided data): This experiment consists of using a sound level meter connected to a data capture system in order to measure the sound intensity level produced by a sound source (e.g., a buzzer) that has been placed inside a cardboard box whose walls have been covered with a certain material. The box represents the structure of a room or closed space where there is a sound source, and the material that covers the walls represents the material used to soundproof that room. This measurement is compared with the reference value, measured within the box when the box is not covered with any material. If the sound intensity level measured within the box covered with a material is higher than the reference value, then we can conclude that the material behaves as a sound reflector. If the measured value is lower than the reference value, we can conclude that the material behaves as a sound absorber. For more details about the experiment, see Hernández et al.
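The decision rule in this note is simple enough to state in code. The following sketch is illustrative only; the class, method names, and dB readings are assumptions, not part of the TLS materials:

// Hypothetical sketch of the reflector/absorber decision rule described above.
class AcousticBehaviorClassifier {

    static String classify(double measuredDb, double referenceDb) {
        if (measuredDb > referenceDb) {
            return "sound reflector"; // more sound remains inside the box
        }
        if (measuredDb < referenceDb) {
            return "sound absorber"; // part of the sound energy is absorbed
        }
        return "inconclusive"; // no measurable difference
    }

    public static void main(String[] args) {
        double reference = 74.5; // hypothetical uncovered-box reading in dB
        System.out.println(classify(78.0, reference)); // sound reflector
        System.out.println(classify(69.0, reference)); // sound absorber
    }
}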
As not all the teachers involved in the design could implement this third and last version (resulting from several refinements) of the designed TLS at the same academic level, for the purposes of the research presented here we reduced our sample to 29 secondary school students (15- to 16-year-olds), who belong to two different class groups. The two teachers who could implement the whole sequence at this academic level devoted a similar number of sessions (12-15 sessions of 1 h each, 3-4 h a week) and followed the written teaching and learning material as it was structured. Both of them tended to interact with the whole class group at the beginning of each session, to present the aims of the session, and at the end of it, to summarize together with students the conclusions of the lesson. During most of each session, both teachers tended to promote collaborative work and active discussion among their students while they approached each group to listen to them and to ask further questions. Table 4 presents a general description of each of the schools to which these class groups belong as well as the number of students who constitute our sample.

Table note (scientifically oriented questioning): This analogy compares the particles that form each medium or material with pool balls connected by means of springs. According to this analogy, density is related to the mass of the balls and rigidity is related to the elastic constant of the springs connecting the balls. According to the ball-and-spring model, porosity is related to the presence of air particles inside the pores of a material, which is also formed by bonded particles with a different mass.

Data Collection

With the purpose of analyzing students' development of conceptual models, different learning and assessment tasks were collected. These tasks are domain-specific, since they allow collecting data in the domain of APM. The collected data correspond to students' written answers to several learning tasks, included in the worksheets that students completed during the implementation of the sequence, and to students' responses to several questions included in the final assessment. All students answered each of the learning tasks in their worksheets individually, although some of these tasks required collaborative work among students and/or class discussions with the teacher, and so we assume that some written answers are the result of these student-teacher and student-student interactions. Students' responses to the questions included in the final assessment were all individual. As this research study is framed within the design-based research paradigm, we planned to collect data in naturalistic settings. Therefore, the learning and assessment tasks were part of the designed teaching sequence. As described before, this TLS had already been implemented and refined twice before the third implementation took place, when the data for the present research study were collected. Therefore, the tasks and questions used to collect data had already been tested with 15- to 16-year-old students in real contexts and, in some cases, they had been reviewed accordingly to avoid misinterpretations and to collect relevant data. This procedure contributed to increasing the validity of the instruments of data collection.
Reliability of findings and measures was promoted not only through the creation of refined and validated instruments, but also through triangulation on the part of the three authors of this study, who used the same categories to analyze students' responses, discussed them, and refined them to make them more operational and clearer. The authors' agreement on the findings after analysis and discussion contributed to enhancing the inter-rater reliability of these findings. Finally, data coming from classroom observations on how the tasks were carried out in class also contributed to increasing the reliability of findings. Although the analyzed data came from students' written answers, observations served to collect evidence on the conditions under which the implementation of the tasks took place in class, with regard to the structure of the designed teaching sequence. The data collected through observations took into account the types and content of interactions among teacher and students and among students throughout each lesson, and modifications, if any, to the structure or content of the teaching sequence introduced by a teacher during her/his interventions. Table 5 specifies the tasks embedded in the TLS that were used to collect evidence on the development of each of the three conceptual models.

Data Analysis

In order to answer our research questions, we decided to explore students' learning progressions throughout the designed TLS using an interpretative qualitative approach. Tracking students' progressions all through the teaching-learning process of specific teaching-learning interventions has gained prominence in science education over the last decades, as many scholars agree that learning ought to be coordinated and sequenced along learning progressions (e.g., Niedderer and Goldberg 1995; Scott 1992; Talanquer 2009; Viennot and Rainson 1999). These learning progressions are generally viewed by researchers as conjectural or hypothetical model pathways of learning over periods of time that need to be empirically validated in light of research on students' progress. Before any implementation took place, each conceptual model that was intended to be developed by students throughout the designed TLS on APM was expressed in terms of a set of learning objectives (LO). Each set of LO is therefore an expression of a conceptual model in a very specific and observable format. Table 6 shows the list of LO associated with each conceptual model to be developed by the students throughout the implementation of the TLS on APM. From the collected data, we proceeded to analyze students' achievement of each learning objective at different moments of the implementation of the TLS with a twofold aim: to evaluate the degree of students' development of each conceptual model at the end of the implementation compared with their starting mental models, and to evidence the intermediate models that were built by students at each stage of the implementation. Thus, we did not expect to describe the effectiveness of the implementation of the TLS but several stages of progress (p. 15), which would characterize significant intermediate steps or stepping-stones in the development of increasingly elaborate students' mental models. These stepping-stones allow students to move from 'lower anchors,' which represent the knowledge students bring with them to school, toward 'upper anchors,' which represent our expectations of what students should know and be able to do at the end of the instruction.
With the purpose of characterizing students' intermediate models toward the intended conceptual models, we carried out the following analysis protocol:

1. Coding of students' answers to each task from their worksheets and each question from the exam selected for data collection (Table 5). A different number was assigned to each student and to each task or question.
2. Interpretation of students' achievement (or not) of each learning objective from students' answers to each task/question. As shown in Table 5, each task/question is associated with a certain phase of the implementation of the TLS and a certain type of activity (i.e., elicitation of preliminary models; revision of the mental model to be in agreement with new evidence obtained in hands-on or thought experiments; revision of the mental model to be in agreement with the scientific perspective; use of revised models).
3. Analysis of the percentage of students' achievement of each learning objective associated with each intended conceptual model.
4. Grouping of the subsets of LO that had been achieved by most students at each phase of the implementation of the TLS.
5. Description of students' most representative mental model at each stage in terms of the subsets of LO acquired by most students (second column of Table 7).
6. Elaboration of empirically based categories developed to characterize students' mental models toward each intended conceptual model (first column of Table 7).
7. Analysis of the percentage of each student's mental model at each phase of the implementation of the TLS.

Tables 7, 8, and 9 show the description of these emergent categories that characterize students' stages of development of conceptual models CM1, CM2, and CM3. These tables also include students' answers to illustrate mental models which are progressively more elaborate. Furthermore, the analysis of each student's development of each intended conceptual model at each stage of the implementation of the TLS allowed us to infer a bottom-up learning progression that describes the most representative students' learning progression toward the development of each conceptual model. We tracked the evolution of each student's development of each conceptual model to illustrate whether each of them experienced progression, regression, or no evolution as a result of their engagement in each activity of the TLS. Then, we analyzed the percentage of each type of students' evolution, comparing their mental models before and after each activity during the implementation of the TLS. That is to say, the predominant learning progression is grounded in and built on the analysis of the evidence of students' learning that has been obtained, instead of simply being based on a logical task analysis of content domains and personal experiences with teaching.

Excerpts from the category and learning-objective tables (bold text in the original tables indicates the key terms that characterize each category):

Sample answer from the CM1 category table (Table 7): 'To increase sound attenuation in the neighbors' house, the energy and the intensity of sound must decrease. To do so, the walls need to be made of good sound absorbers and insulators so that absorption and reflection of sound are higher.'

Learning objectives associated with CM2 (Table 6): LO2.1, to identify properties related to density (e.g., density, compactness) of materials as one of the properties that influence the acoustic behavior of materials; LO2.2, to identify properties related to rigidity (e.g., hardness, elasticity) of materials as one of the properties that influence the acoustic behavior of materials; LO2.3, to identify porosity as one of the properties that influence the acoustic behavior of materials; LO2.4, to relate density appropriately to the acoustic behavior of materials (i.e., the denser they are, the more sound they reflect; the less dense they are, the more sound they absorb); LO2.5, to relate rigidity appropriately to the acoustic behavior of materials (i.e., the more rigid they are, the more sound they reflect; the less rigid they are, the more sound they absorb); LO2.6, to relate porosity appropriately to the acoustic behavior of materials (i.e., the more porous they are, the more sound they absorb; the less porous they are, the more sound they reflect); LO2.7, to explain the acoustic behavior of materials uniquely in terms of their density, rigidity, and porosity (not other properties) and relate them appropriately to the acoustic behavior of materials.

Sample answers from the stages-of-development table for CM2 (Table 8): from an intermediate stage, 'Aluminum would behave as a sound reflector because it is not porous, it is dense, and its surface is smooth. Polyurethane would behave as a sound absorber because it is not very dense and it has holes'; and from S4, in which students adequately explain and predict the acoustic behavior of materials uniquely in terms of intensive properties, such as density, rigidity, and porosity, using these terms appropriately (LO2.7), 'Material A would be the best sound reflector as it is dense, rigid, and non-porous. Material C would be the best sound absorber because it is less dense, it is flexible, and it has pores which facilitate sound propagation.'

Stages of development of CM3 (Table 9), with sample answers: for the preliminary stage (S1), 'If the material is porous, it will behave as a sound absorber as its particles are more separated, and so the material has empty spaces through which sound can enter'; for S2, a hybrid stage between S1 and S3 in which students use both the preliminary (S1) and the more elaborate (S3) version of the model to explain the influence of certain characteristics of the material on their acoustic behavior, 'The fact that this material has low density means that its particles are more separated, and therefore, sound can enter this material; that is to say, sound can be absorbed. The material is also very flexible, and therefore, its particles can move more'; and for S3, in which students explain the acoustic behavior of materials using the particle model of matter and appropriately explaining mechanisms of sound attenuation in materials (LO3.1, LO3.4, and LO3.5), 'As the material is porous, not very dense and flexible, its particles can vibrate a lot, and so the material absorbs sound.'

Finally, we relate students' learning outcomes to specific features of the designed TLS in order to analyze how specific instruction factors help students progress from lower to higher levels of understanding. As Duschl et al. observe, details about the instructional interventions that might influence how students progress are often missing in published research studies of students' learning. In this sense, the characterization of students' evolution of their mental models allows us to analyze the influence of each modeling and inquiry activity of the designed TLS, as it was implemented in class, on students' development of the three conceptual models described above.

Students' Development of the Theoretical Conceptual Model of Sound Attenuation in Materials in Terms of Energy (CM1)

As described earlier, the analysis of the students' development of each conceptual model took into account the results of a preliminary analysis of students' achievement of specific LO, in order to later characterize students' mental models in terms of their learning outcomes.

Students' Achievement of Each Learning Objective Associated with the Conceptual Model of Sound Attenuation in Materials in Terms of Energy (CM1)

The findings of the analysis of students' achievement of LO associated with the conceptual model of sound attenuation in materials in terms of energy (CM1) at different moments of the implementation of the TLS are illustrated in Table 10. As shown in Table 10, at the beginning of the implementation of the TLS on APM (T1.1.2.a and T1.1.2.c), more than 90 % of students were already able to recognize that sound is distributed in different components when reaching an object such as a wall (LO1.1). Nevertheless, most students only identified transmitted sound and reflected sound as the components in which incident sound is distributed (LO1.2). Only 23 % of students identified absorption as a mechanism of sound attenuation (LO1.3). On the other hand, while almost two-thirds of the students already associated sound attenuation in materials with the decrease in the intensity level of the incident sound when it is transmitted through a material (LO1.5), only 12 % of students associated sound attenuation with the distribution of energy (LO1.6). The analysis of students' responses to the intermediate task T1.3.5, after performing an experiment and discussing it in class, indicated that the number of students who recognized absorption within the medium as a possible mechanism of sound attenuation (LO1.3) increased (42 %), whereas the percentage of students who identified reflection decreased (58 %). Nevertheless, these results also illustrate that most students (75 %) were not able to identify both reflection and absorption as mechanisms of sound attenuation (LO1.4), and none of them interpreted sound attenuation produced through a material in terms of energy distribution (LO1.6). After T1.3.5, students were faced with a text and a visual representation that (verbally and diagrammatically) explained how science interprets the process of sound attenuation when sound propagating through air interacts with a solid material.
After discussing the text and the visual representation with their teacher and peers, students proceeded to perform other tasks (T1.3.6, T1.3.7, and T1.3.9). The answers to these tasks give evidence of an increase, up to about 90 %, in the number of students who were able to identify both reflection and absorption as mechanisms of sound attenuation (LO1.4) and to associate sound attenuation appropriately with the decrease in the intensity level of the incident sound when transmitted (LO1.5). Moreover, the analysis of students' responses also illustrated that more than two-thirds of students (68 %) were able to explain or interpret that sound attenuation involves distribution of energy (LO1.6). Finally, the questions posed in the final assessment reveal similar levels of students' achievement of the LO associated with CM1. At that point, about 90 % of students were able to identify both reflection and absorption as mechanisms of sound attenuation (LO1.4), about 80 % of students were able to associate sound attenuation appropriately with the decrease in the intensity level of the incident sound when transmitted (LO1.5), and almost 70 % of students were able to explain or interpret that sound attenuation involves distribution of energy (LO1.6).

Students' Stages of Development of the Conceptual Model of Sound Attenuation in Materials in Terms of Energy (CM1) at Each Phase of the TLS

Using the categories presented in Table 7 to characterize students' stages of development of CM1, we analyzed the distribution of students in each stage of development of CM1 throughout the implementation of the TLS on APM. This distribution is represented in Fig. 1. Moreover, Fig. 1 shows the most representative students' learning progression toward the construction of the intended conceptual model of sound attenuation in materials (CM1) throughout the implementation of the TLS on APM. The most representative learning progression is highlighted with a straight line. The diameter of each circle represents the percentage of students in each stage of development of CM1 at different moments of the implementation of the TLS. Therefore, Fig. 1 also shows that most students followed a certain learning progression, whereas other students followed different learning progressions. In Fig. 1, we can see that at the beginning of the implementation of the teaching sequence, most students (77 %) elicited a mental model that explains sound attenuation in materials as the decrease in the intensity level of incident sound resulting from the distribution of sound in different components such as transmitted sound and reflected sound. We consider this preliminary mental model as the first stage (S1), or starting point, that is most representative in the process of students' development of CM1. After the experiment (T1.3.3) in which students measured the effects of distance on sound intensity level, students' worksheets included some data corresponding to measurements of incident and transmitted sound inside and outside a house (T1.3.4). Analyzing students' answers to T1.3.5, in which they interpreted how sound is attenuated, some progression could be noticed, as a higher number of students seemed to hold a mental model that recognizes absorption as a mechanism of sound attenuation (S2 or S3). These results suggest that the experiment that students performed and the discussions that took place in class contributed to the revision of their mental models to fit the new empirical evidence.
In spite of this progression of students' mental models of sound attenuation in materials, our results also illustrate that most of their mental models (47 %) still belonged to the first stage (S1) of development of CM1. Therefore, these results also emphasize the need for further activities to contribute to students' learning progression toward the construction of CM1. In this context, after students had expressed and revised their preliminary mental models, they were introduced to the scientific perspective and discussed it with their teacher and classmates. This activity seemed to have a strong positive effect in terms of promoting the revision of students' mental models, as our results indicate that most of the students (approximately 60 %) reached the fourth stage (S4) of the learning progression (i.e., the intended conceptual model of sound attenuation in materials). Finally, the questions posed in the final assessment reveal that almost 40 % of students seemed to hold a conceptual model of sound attenuation which corresponds to the third stage of development (S3) of CM1, whereas most of the students (about 60 %) were found to have been able to develop the intended conceptual model of sound attenuation CM1 (S4) at the end of the teaching sequence.

Influence of the Activities of the TLS on Students' Learning Progressions Toward the Construction of the Conceptual Model of Sound Attenuation in Materials in Terms of Energy (CM1)

With the purpose of inferring the most representative students' learning progression toward the construction of CM1, we tracked each student's evolution throughout the TLS. Table 11 shows the types of evolution experienced by students while developing their mental models toward the intended conceptual model of sound attenuation in materials. In general terms, these results highlight that the activities that students carried out to obtain new evidence from a hands-on experiment and to draw conclusions from it, and also the students' use of their revised mental models before the final assessment, promoted slight learning progress (29 and 26 %, respectively). In contrast, the activity that had a greater impact on students' development of CM1 (84 %) was their engagement in the discussion with the teacher and classmates of the scientific perspective on sound attenuation in materials, which was introduced to students by means of a written text about how science interprets this phenomenon and a diagram representing how the energy of the incident sound is distributed when sound reaches an object.

Students' Development of the Empirical Conceptual Model of the Acoustic Behavior of Materials in Terms of Their Physical Properties (CM2)

The findings of the analysis of students' achievement of LO associated with the conceptual model of the acoustic behavior of materials in terms of their properties (CM2) at different moments of the implementation of the TLS are illustrated in Table 12 (bold values in the table indicate the highest percentages). After students had gone through the first chapter of the TLS, intended to promote their development of the conceptual model of sound attenuation in materials in terms of energy (CM1), the second chapter started with an open-ended question to elicit their previous ideas on the acoustic behavior of materials (T2.1.1). As shown in Table 12, in the open-ended question we found that more than two-thirds of students identified properties of materials roughly related to density (e.g., compactness) and to rigidity (e.g., hardness, elasticity) as APM, i.e., as properties influencing the acoustic behavior of materials (LO2.1 and LO2.2).
Nevertheless, as reported in previous studies (e.g., Linder 1993), students often use similar scientific terms to refer to different properties of materials, attributing the same meaning to them. Such is the case with the term dense used as a synonym for compact or heavy, and rigid used as a synonym for plastic, non-elastic, or hard. Moreover, at this point, students attributed both extensive (e.g., thickness) and intensive (e.g., density) properties to the acoustic behavior of materials, using the word material as a synonym for object. Later in the implementation, students discussed with their classmates the new terminology and perspectives introduced by the statements included in T2.1.2, and they were asked to formulate a consensus model of the acoustic behavior of materials in terms of their properties. As a result of this activity, most students (79 %) started attributing other properties to the acoustic behavior of materials, such as porosity (LO2.3). Nevertheless, in some cases, students conflated the terms dense and little porous, as if the density of materials were uniquely related to their porosity. When students predicted the acoustic behavior of samples of specific materials in T2.1.3, most students (more than 75 %) identified properties related to density, rigidity, and porosity of materials, among other characteristics, as influencing the acoustic behavior of materials (LO2.1, LO2.2, and LO2.3). Their difficulty with the accurate use of terminology (e.g., flexible as a synonym for elastic or soft) was again widely evidenced, however. Later, students used a data-logging system connected to a sound level meter to test the acoustic behavior of the materials empirically. Next, in T2.1.6.b, students were asked to describe the properties that all the tested sound reflectors have in common and also the properties that all the tested sound absorbers have in common. At this point, most of the students (about 90 %) not only were more accurate in using the specific terms that refer to properties of materials influencing their acoustic behavior (LO2.4, LO2.5, and LO2.6), but they also reduced the number of properties that they associated with the acoustic behavior of materials. Thus, 95 % of them mentioned only the three so-called APM: density, rigidity, and porosity (LO2.7). After discussing the scientific meaning of these properties and analyzing more accurately the properties of the tested materials, students wrote down their conclusions again on the properties of materials that characterize their acoustic behavior (T2.1.8). At that point, all the students appropriately related density, rigidity, and porosity of materials to their acoustic behavior (LO2.4, LO2.5, and LO2.6). In the final assessment, when students were asked to predict the acoustic behavior of certain materials in Q4.a, about 90 % of them did so uniquely in terms of density, rigidity, and porosity, relating them appropriately to the acoustic behavior of materials (LO2.7). In Q5.a, in which students were asked to identify in an advertisement for a product the properties that characterized it as a good sound absorber, most of them (about 90 %) mentioned density, rigidity, and porosity. Some of them (about 30 %) also highlighted other characteristics of the material that make it a good product (e.g., durability).
We interpret that the demand of Q5.a might not have been clear enough, as many students did not distinguish the properties that made the material a good product from those that actually influenced its acoustic behavior and made it a good sound absorber.

Students' Stages of Development of the Conceptual Model of the Acoustic Behavior of Materials in Terms of Their Physical Properties (CM2) at Each Phase of the TLS

We analyzed the distribution of students in each stage of development of CM2 throughout the implementation of the teaching sequence. This distribution is represented in Fig. 2. In order to analyze students' learning progressions at a finer grain size, we have also considered students' development of mental models in two additional phases of the implementation of the TLS, between students' elicitation of their preliminary mental models and students' revision of their mental models to be in agreement with the evidence obtained in the experiment. These two additional phases correspond to students' revision of their mental models after discussing new terminology and students' construction of an agreed preliminary model. Furthermore, Fig. 2 shows the most representative students' learning progression toward the construction of the conceptual model of the acoustic behavior of materials in terms of their physical properties throughout the implementation of the TLS on APM; this progression is highlighted with a straight line. The diameter of each circle represents the number of students in each stage of development of CM2 at different moments of the implementation of the TLS. Fig. 2 thus also shows that most students followed one particular learning progression, whereas other students followed different ones. In Fig. 2, we can see that as a starting point most students (75 %) related the acoustic behavior of materials to several intensive and extensive properties, such as density and rigidity, although these terms were not used appropriately (S1). After new terminology and perspectives were introduced and discussed (T2.1.2), more students started recognizing porosity as another acoustic property. In spite of mentioning these three key APM, most students (more than 70 %) still tended to conflate certain scientific terms and also to associate many other characteristics with the acoustic behavior of materials (S2). In T2.1.3, after discussing the influence of certain properties on the acoustic behavior of materials and having reached a consensus model, most students seemed to hold a mental model that corresponded to the second stage of development (S2) of CM2. The results from the analysis of students' answers to T2.1.6 show that, after students carried out the experiment to test the acoustic behavior of specific materials, almost all of them (95 %) adequately explained the acoustic behavior of materials uniquely in terms of intensive properties such as density, rigidity, and porosity, using these terms appropriately (S4). In the final assessment, most students (76 %) had developed the most elaborate version (S4) of CM2.

Influence of the Activities of the TLS on Students' Learning Progressions Toward the Construction of the Conceptual Model of the Acoustic Behavior of Materials in Terms of Their Physical Properties (CM2)

With the purpose of inferring the most representative students' learning progression toward the construction of CM2, we tracked each student's evolution throughout the TLS. Table 13 shows the types of evolution experienced by students while developing their mental models toward the intended conceptual model of the acoustic behavior of materials in terms of their physical properties (CM2).
In general terms, these results highlight that students' discussion of new terminology and perspectives, introduced by means of certain statements (T2.1.2), and also students' engagement in performing and drawing conclusions from an experiment (T2.1.4, T2.1.5, and T2.1.6), had the greatest impact on students' development of CM2, as 75 and 79 % of students, respectively, progressed from their mental models toward a more elaborate version after engaging in each of these activities. After students had gone through the activities intended to promote the development of CM2, they were engaged in other activities intended to promote their construction of CM3. The findings of the analysis of students' achievement of the LO associated with the conceptual model of the acoustic behavior of materials in terms of their internal structure (CM3) at different moments of the implementation of the TLS are illustrated in Table 14. As shown in Table 14, the results illustrate that all students related the acoustic behavior of materials to the internal structure of materials (LO3.1) only when they were asked to express their agreement or disagreement with certain statements about the influence of some characteristics, such as the separation between particles, on the acoustic behavior of materials (T2.1.2). Conversely, when the question was completely open (T2.1.1), students did not mention any characteristics related to the internal structure of materials as influencing their acoustic behavior. When the students discussed with their classmates the new terminology and new perspectives introduced by the statements included in T2.1.2, more than half were able to relate density, rigidity, or porosity of materials to their internal structure (LO3.2) and to use the particle model of matter to explain mechanisms of sound attenuation in materials (LO3.3). Nevertheless, almost half described density and rigidity in terms of distance between particles. Moreover, most of these students considered sound-attenuating materials as sound barriers that prevent the passage of sound through them. After the thought experiment in which students used an analogy to investigate the influence of the internal structure of materials on their acoustic behavior, about 80 % of them appropriately described density, rigidity, or porosity of materials in terms of their microstructure (LO3.4), and 75 % used the particle model of matter to appropriately explain mechanisms of sound attenuation in materials (LO3.5) in T2.2.2, T2.2.4, and T2.2.5. Thus, these students described the internal structure of materials in terms of the mass of their particles or the strength of the bonds between particles, to explain sound attenuation in terms of more or less vibration of the particles that form each material. Later in the implementation (T2.2.6 and T2.2.7), after discussion of the scientific perspective on how the properties of sound absorbers affect their acoustic behavior in terms of their microstructure, almost 75 % of students used the particle model of matter appropriately to explain mechanisms of sound attenuation in materials (LO3.5). A lower number of students (62 %) described density, rigidity, or porosity of materials in terms of their microstructure (LO3.2), and more than two-thirds of these students did so appropriately (LO3.4). Similarly, in the final assessment (Q5.b), about 60 % of students were able to explain some mechanism of sound attenuation in materials using the particle model of matter and describing the internal structure of materials (LO3.2 and LO3.3).
Almost 90 % of these students appropriately described some APM in terms of their microstructure (LO3.4), and about 70 % appropriately explained sound attenuation in materials in terms of the difficulty or ease of particle vibration (LO3.5).

Students' Stages of Development of the Conceptual Model of the Acoustic Behavior of Materials in Terms of Their Internal Structure (CM3) at Each Phase of the TLS

After characterizing each stage of development of CM3, we analyzed the distribution of students in each stage of development of CM3 throughout the implementation of the TLS on APM. This distribution is represented in Fig. 3. Furthermore, Fig. 3 shows the most representative students' learning progression toward the construction of the conceptual model of the acoustic behavior of materials in terms of their internal structure throughout the implementation of the TLS on APM; this progression is highlighted with a straight line. The diameter of each circle represents the number of students in each stage of development of CM3 at different moments of the implementation of the TLS. Fig. 3 also shows that most students followed one particular learning progression, whereas other students followed different ones. As shown in Fig. 3, when students elicited their preliminary mental models in T2.1.2, more than 90 % of students explained the acoustic behavior of materials using the particle model of matter while describing inadequate mechanisms of sound attenuation in materials. The model expressed by these students (S1) was not consistent with the scientific perspective, as it dealt with sound as an entity instead of as a process; consequently, sound attenuation was conceived as a process of hindering the passage of sound or capturing sound, instead of as a process of energy dissipation that involves the vibration of particles. After students had carried out the thought experiment described above, more than 80 % started explaining the acoustic behavior of materials using the particle model of matter and appropriately explaining mechanisms of sound attenuation in materials (S3). Even though these students used the conceptual model (S3) to explain the influence of certain properties on the acoustic behavior of materials in terms of their internal structure, most of them also continued using the preliminary version of the model (S1) to explain the influence of other properties. That is to say, most of them used a hybrid mental model (S2). Similar results were evidenced after students were introduced to, and discussed, the scientific perspective in class with their teacher and classmates. In the final assessment, about half of the students answered in terms of S1, whereas the other half were found to use the more sophisticated version of the model (S3) or the hybrid version (S2). With the purpose of inferring the most representative students' learning progression toward the construction of CM3, we tracked each student's evolution throughout the TLS. Table 15 shows the types of evolution experienced by students while developing their mental models toward the intended conceptual model of the acoustic behavior of materials in terms of their internal structure (CM3). The results in Table 15 highlight that students' carrying out and discussing with the teacher and classmates the thought experiment using an analogy was the activity that had the greatest impact (72 %) in promoting the progression of students' mental models.
The students' engagement in discussing the scientific perspective did not turn out to be a significant activity in terms of promoting progression, as the models of almost half of the students remained the same after this activity. Finally, about 50 % of students elicited a weaker version of their mental models in the final assessment.

Discussion of Results

The reported findings support the fact that experimenting with a TLS leads to two types of results (Méheut and Psillos 2004): results in terms of research validity and results in terms of pragmatic value. Consequently, to answer our two research questions, we will discuss the results described earlier in terms of their research validity (e.g., understanding modeling processes) and in terms of their pragmatic value (e.g., implications for teaching).

On Students' Learning Progressions from Their Preliminary Mental Models Toward the Intended Conceptual Models

To answer our first research question, we discuss the learning processes that students underwent when developing their mental models for each of the three intended conceptual models dealt with throughout the designed TLS on APM. Concerning the stages of students' development of these three conceptual models, our results show that more than half of the students reached the (theoretical) conceptual model of sound attenuation in materials in terms of energy (CM1), most of them reached the (empirical) conceptual model of the acoustic behavior of materials in terms of their physical properties (CM2), and about half of them reached the (theoretical) conceptual model of the acoustic behavior of materials in terms of their internal structure (CM3) or a hybrid version of this model. The remaining students reached lower stages of development for each of the three intended conceptual models. That is to say, all the students progressed through several stages of development of their mental models, and some of them reached the most elaborate version of each conceptual model. In short, the conceptual model of the acoustic behavior of materials in terms of their physical properties (CM2) was developed by more students than the conceptual model of sound attenuation in materials in terms of energy (CM1) and, in turn, more students developed CM1 than the conceptual model of the acoustic behavior of materials in terms of their internal structure (CM3). These differences can be interpreted in terms of the attributes of each conceptual model, in terms of the distance between students' preliminary mental models and the intended conceptual models, and in terms of the quality of the instruction that took place. In the words of Duschl et al. (2011, p. 152), 'if the learning goals are too sophisticated or if the teaching sequence is ill conceived, then the intended learning outcomes run the risk of being too abstract or beyond the "boundaries" of outcome learning expectations for the targeted students.' At this point, we will comment on the first two possible interpretations to account for these differences, and we will discuss the instructional issue later. As discussed earlier, the conceptual model of the acoustic behavior of materials in terms of their physical properties (CM2), as an empirical model, involves real entities (i.e., materials) and their observable properties.
Taking into account that students' preliminary mental models often include macroscopic descriptions of natural objects or events that are easily visible or related to their everyday experience, we consider that in the case of this conceptual model (CM2) students' preliminary mental models could act as productive intuitions for understanding, which can easily become more sophisticated through instruction. By contrast, the theoretical models CM1 and CM3 consist of descriptions of unobservable events in terms of abstract entities such as energy and particles. Several research studies (e.g., Harrison and Treagust 2002; Millar 2005) have reported a variety of difficulties in students' understanding of such concepts. The differences between students' development of the conceptual models CM1 and CM3 can be explained in terms of the distance between these conceptual models and students' preliminary mental models. In the case of CM1, students' preliminary mental models of sound attenuation reflect an intuitive view of the phenomenon of sound attenuation that is not inconsistent with the most elaborate version of the conceptual model. On the contrary, this intuition can be considered a simpler version of the intended conceptual model, based on measurable magnitudes (e.g., sound intensity level) that students can later relate to an abstract entity such as energy. In the case of CM3, students' preliminary mental models already included abstract entities such as particles, but these models conflicted with the conceptual model, as they corresponded to different models of sound and to different models of the structure of matter (i.e., properties described in terms of distance between particles vs. properties described in terms of mass of particles and strength of bonds between particles). That would explain the high percentage of students who, at the end of the implementation of the teaching sequence, used their preliminary mental models or a hybrid version of the model (i.e., simultaneously using their preliminary mental model and the most elaborate version of the conceptual model). The fact that about 50 % of students elicited their preliminary mental models of the acoustic behavior of materials in terms of their microstructure in the final assessment can be interpreted from other perspectives. This conceptual model (CM3) might be too demanding for students at this level, as it implies using the particle model of matter together with two other models: the model of sound attenuation in materials in terms of energy (CM1) and the model of the acoustic behavior of materials in terms of their physical properties (CM2).

On the Salient Modeling and Inquiry Activities of the TLS on APM

Our second research question raises the issue of the role played by the modeling and inquiry activities of the TLS on APM, as implemented in class, in promoting students' evolution of their mental models toward the intended conceptual models. The results showed that each of the three intended conceptual models entails different learning difficulties. This suggests that the design of the activities to promote the development of each of these conceptual models should differ from one to another. Looking at the influence on students' learning progressions of the activities that they carried out (Figs. 1, 2, and 3), we found that different types of modeling and inquiry activities played a decisive role in promoting students' development of each conceptual model.
In the case of the theoretical model of sound attenuation in materials (CM1), our results indicate that the activity that seemed to contribute most to the development of students' mental models was the task in which students discussed with their teacher and classmates the scientific perspective introduced by means of a text and a diagram (i.e., 'What does science tell us?'). However, we suggest that the influence of this activity cannot be interpreted in an isolated way. Rather, we assume that this activity had a greater impact on students' development of CM1 given the attributes of this conceptual model as a theoretical model and the activities that students had previously carried out (i.e., eliciting their preliminary mental models, obtaining new evidence from an experiment, and drawing conclusions from it). In the case of the empirical model of the acoustic behavior of materials in terms of their physical properties (CM2), the activity that seemed to contribute most to a positive development of students' mental models was the task in which students carried out an experiment and drew conclusions from the new evidence obtained. Another activity that also seemed to have a positive impact on students' development of mental models was the task in which they were asked to reach a consensus model, once they had individually elicited their preliminary mental models and had explored new terminology and different perspectives from the provided statements. Finally, in the case of the theoretical model of the acoustic behavior of materials in terms of their internal structure (CM3), the activity that seemed to contribute most to the development of students' mental models was the task in which students carried out a thought experiment using an analogy and drew conclusions from it. Comparing students' evolution of CM3 to students' evolution of CM1, which we have also considered to be a theoretical conceptual model, one might wonder about their differences: most students made positive progress after discussing the scientific perspective for CM1, whereas only a small portion of students did so for CM3. One possible interpretation is that, regarding CM3, most students had already progressed after performing the thought experiment, so that after the later discussion of the scientific perspective most students continued to hold the mental models they had already developed. In the case of CM1, the situation is different: after the empirical task, most of the students' mental models remained the same as before, and later, after discussing the scientific perspective, most students' mental models evolved toward more elaborate ones. Finally, the poor results of students' development of CM3 make us wonder whether a different approach to the use of the analogy would have resulted in a more positive learning progression. Another possible interpretation of the fact that about 50 % of students elicited their preliminary mental models of the acoustic behavior of materials in terms of their microstructure in the final assessment might be the lack of application of this conceptual model (CM3) to different situations throughout the teaching sequence, which might have contributed to the lack of consolidation of this model at the end of the implementation of the TLS.
Conclusions and Implications for Design, Research, and Instruction

Tracking students' learning progressions throughout the TLS has turned out to be a very useful methodological procedure for studying students' development of conceptual models and the influence of the activities of the designed TLS on APM. Thus, we consider it an appropriate method for carrying out specific research studies within the design-based research paradigm. We have found that the different intended conceptual models involve different learning demands (Leach and Scott 2002), which we have interpreted in terms of the attributes (theoretical or empirical) of each model, in terms of the distance between students' preliminary mental models and the intended conceptual models, and in terms of the instructional activities. Moreover, in this study, we have characterized students' development of mental models toward the intended conceptual models through different stepping-stones or stages of development of each conceptual model. This result supports the importance of intermediary steps in scaffolding student understanding. One important implication for teaching and design is that the empirically based students' learning progressions, expressed as increasingly sophisticated versions of each conceptual model, can help teachers monitor or assess how students progress in what they are learning in real contexts and adapt their instruction in response to students' evolution and needs in order to support student learning. Thus, these learning progressions should be further investigated in classroom settings in order to empirically validate or adapt them to other educational conditions in light of research. In any case, we cannot forget that there is no single learning progression that leads students to develop the intended conceptual models, but multiple ways in which they can reach understanding. In line with this, we recognize that students' learning outcomes at the end of the designed teaching sequence are not to be seen as an end point but as an intermediate one, and they require further support and instruction so that students continue learning. Regarding the design principles related to the types of activities of the MBI approach of the TLS on APM, we have gained insight into the role played by the different types of modeling and inquiry activities in promoting students' development of conceptual models throughout the TLS. According to our results and the research design, we cannot generalize any pattern of influence of the inquiry and modeling activities of the designed teaching sequence on students' learning progressions. We have found evidence that the tasks involving class group discussion of the scientific perspective and the tasks involving thought experiments seem to play a more significant role in contributing to students' development of theoretical models. On the other hand, the tasks involving ICT-supported hands-on experiments and class group discussions of other classmates' perspectives, introducing new terminology and ideas, seem to have a greater impact in promoting students' development of empirical models. By tasks involving class group discussion of the scientific perspective, we do not mean transmissive teaching in which the scientific perspective is introduced by the teacher or the material as one-way communication. Rather, we refer to guided or oriented activities in which teachers and students discuss and try to reconcile different perspectives and meanings of terms.
Regarding the ICT-supported hands-on experiments, it is important to emphasize that these experiments were feasible because of the use of data-capture systems such as the sound level meter. These experiments are thus good examples of how ICT tools can contribute to the development of an empirical conceptual model that otherwise might not have been developed through experimental work. In any case, we consider that tasks involving students' elicitation of their preliminary or revised mental models are also essential contributors to students' learning progressions throughout the TLS on APM. Regarding the design principles related to the structure of each sequence of tasks, our results support the claim that a sequence of activities in which students first elicit their own prior knowledge, then engage in inquiry tasks, and later discuss and compare their ideas with the scientific perspective has a positive impact in terms of contributing to students' development of their mental models. Therefore, we highlight the importance of including such different kinds of tasks prior to any classroom discussion of the scientific perspective when a TLS is designed. That is to say, students should be put in the situation of eliciting their own mental models and feeling the need to revise them in agreement with the new (empirical or non-empirical) evidence obtained (from real or thought experiments) before the scientific perspective or intended conceptual model is introduced, discussed, and compared to their preliminary mental models. In short, these results highlight that the relationship between modeling and inquiry is complex: different modeling and inquiry activities seem to facilitate students' development of a certain type of conceptual model, but they need to be combined and appropriately sequenced to become effective. Further research is necessary to better understand the interplay between modeling and inquiry processes in teaching and learning science and about science.
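As a complement to the point about ICT-supported experiments, the sketch below illustrates the kind of data handling a logged sound level meter enables: comparing incident and transmitted levels to rank materials by attenuation. It is illustrative only; the material names and readings are invented, and real data-logging systems expose vendor-specific APIs that are not shown here.

import java.util.Map;

/**
 * Illustrative sketch of processing logged sound-level readings to rank
 * materials by attenuation, in the spirit of the ICT-supported experiments
 * described above. All numbers and material names are invented.
 */
public class AttenuationRanking {
    public static void main(String[] args) {
        double incidentDb = 85.0; // assumed level logged with no sample in place
        // Hypothetical averaged transmitted levels per tested material.
        Map<String, Double> transmittedDb = Map.of(
                "cork", 62.0,
                "steel", 55.0,
                "foam", 58.0);

        // Attenuation = incident level minus transmitted level; sort descending.
        transmittedDb.entrySet().stream()
                .sorted((a, b) -> Double.compare(incidentDb - b.getValue(),
                                                 incidentDb - a.getValue()))
                .forEach(e -> System.out.printf("%-6s %.1f dB attenuation%n",
                                                e.getKey(), incidentDb - e.getValue()));
    }
}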
/**
 * This class is automatically generated.
 * DO NOT EDIT THIS FILE DIRECTLY.
 */
@Generated
public final class TopicFileExportPAO implements StoreServices {
    private final TaskManager taskManager;

    /**
     * Constructor.
     * @param taskManager the task manager
     */
    @Inject
    public TopicFileExportPAO(final TaskManager taskManager) {
        Assertion.check().isNotNull(taskManager);
        //-----
        this.taskManager = taskManager;
    }

    /**
     * Creates a taskBuilder.
     * @param name the name of the task
     * @return the builder
     */
    private static TaskBuilder createTaskBuilder(final String name) {
        final TaskDefinition taskDefinition = Node.getNode().getDefinitionSpace().resolve(name, TaskDefinition.class);
        return Task.builder(taskDefinition);
    }

    /**
     * Executes the TkGetTopicFileExport task.
     * @param botId Long
     * @param tcaId Long
     * @return DtList of TopicFileExport topics
     */
    @io.vertigo.datamodel.task.proxy.TaskAnnotation(
            name = "TkGetTopicFileExport",
            request = "select top.code,"
                    + " top.tto_cd as type_topic,"
                    + " top.title,"
                    + " tca.code as category,"
                    + " top.description,"
                    + " null as tag,"
                    + " null as date_start,"
                    + " null as date_end,"
                    + " CASE "
                    + " WHEN top.is_enabled THEN 'ACTIVE'"
                    + " ELSE 'INACTIVE'"
                    + " END as active,"
                    + " sin.script,"
                    + " tph.agg as training_phrases,"
                    + " res.agg as response,"
                    + " buttons.doublons as buttons,"
                    + " CASE "
                    + " WHEN smt.is_end THEN 'TRUE'"
                    + " ELSE 'FALSE'"
                    + " END as is_end,"
                    + " string_agg(tpl.label, ',') as labels"
                    + " from topic top"
                    + " left join topic_topic_label ttl on (ttl.top_id = top.top_id)"
                    + " left join topic_label tpl on (tpl.label_id = ttl.label_id)"
                    + " left join ("
                    + " select nts.top_id,"
                    + " string_agg(nts.text,'|') agg "
                    + " from nlu_training_sentence nts"
                    + " join topic top on top.top_id = nts.top_id "
                    + " group by (nts.top_id)"
                    + " ) as tph on top.top_id = tph.top_id"
                    + " left join topic_category tca on tca.top_cat_id = top.top_cat_id "
                    + " left join script_intention sin on sin.top_id = top.top_id"
                    + " left join small_talk smt on smt.top_id = top.top_id "
                    + " left join ("
                    + " select smt.top_id,"
                    + " string_agg(utt.text,'|') agg "
                    + " from utter_text utt"
                    + " join small_talk smt on smt.smt_id = utt.smt_id"
                    + " join topic top on smt.top_id = top.top_id "
                    + " group by (smt.top_id)"
                    + " ) as res on top.top_id = res.top_id "
                    + " left join ("
                    + " select t.top_id, string_agg(concat('[',rbu.text,'¤',tre.code,']'),'|') as doublons"
                    + " from response_button rbu"
                    + " join topic tre on tre.top_id = rbu.top_id_response"
                    + " join small_talk st on st.smt_id = rbu.smt_id "
                    + " join topic t on t.top_id = st.top_id "
                    + " group by (t.top_id)"
                    + " ) buttons on buttons.top_id = top.top_id"
                    + " where top.bot_id = #botId#"
                    + " and tca.is_technical = false"
                    + " <%if (tcaId != null) { %>"
                    + " and tca.top_cat_id = #tcaId#"
                    + " <% } %>"
                    + " group by top.code,"
                    + " type_topic,"
                    + " top.title,"
                    + " category,"
                    + " top.description,"
                    + " tag,"
                    + " date_start,"
                    + " date_end,"
                    + " active,"
                    + " sin.script,"
                    + " training_phrases,"
                    + " response,"
                    + " buttons,"
                    + " is_end"
                    + " order by top.code",
            taskEngineClass = io.vertigo.basics.task.TaskEngineSelect.class)
    @io.vertigo.datamodel.task.proxy.TaskOutput(smartType = "STyDtTopicFileExport")
    public io.vertigo.datamodel.structure.model.DtList<io.vertigo.chatbot.commons.domain.topic.TopicFileExport> getTopicFileExport(@io.vertigo.datamodel.task.proxy.TaskInput(name = "botId", smartType = "STyId") final Long botId, @io.vertigo.datamodel.task.proxy.TaskInput(name = "tcaId", smartType = "STyId") final Optional<Long> tcaId) {
        final Task task = createTaskBuilder("TkGetTopicFileExport")
                .addValue("botId", botId)
                .addValue("tcaId", tcaId.orElse(null))
                .build();
        return getTaskManager()
                .execute(task)
                .getResult();
    }

    private TaskManager getTaskManager() {
        return taskManager;
    }
}
KABUL, Afghanistan - An Afghan soldier turned his weapon against foreign and Afghan troops in a southern province, killing one British soldier in the latest attack by a member of Afghanistan's military against its foreign allies, officials said Tuesday. The Taliban claimed responsibility for the shooting, the first insider attack of 2013. Several British soldiers were also reported wounded. Such "insider attacks" by Afghan soldiers and police, or men wearing their uniforms, rose dramatically last year. The attacks come as NATO and Afghan forces are in closer contact, with foreign troops handing over security to the Afghans and training them before an almost total withdrawal by the end of 2014. NATO command spokesman Brig. Gen. Gunter Katz identified the dead soldier in Monday's shooting as British; his name was not released. "Yesterday, a suspected member of the Afghan national army shot and killed a British (NATO) soldier," Katz told a news conference. He said the shooting occurred at a patrol base in Nahri Sarraj district of Helmand province and that the shooter fired at both Afghan and British troops. He said the incident is under investigation. An Afghan Defense Ministry official said the shooter was an enlisted soldier and that six British soldiers were wounded. The official spoke anonymously because he was not authorized to brief reporters. A Taliban spokesman, Qari Yousef Ahmadi, said in an email that "an infiltrator" staged the attack and managed to escape from the scene but was then shot and killed after opening fire on a checkpoint. The Taliban have used the term "infiltrator" in the past to refer to members who have enlisted in the military to conduct such attacks. They identified the assailant as Mohammad Qasim Faroq. In London, the Ministry of Defense said the soldier, who was attached to the 21 Engineer Regiment, was killed by small-arms fire at Patrol Base Hazrat. Several similar attacks have occurred in Helmand, the country's most violent province, where almost all British forces have been concentrated. Capt. Walter Reid Barrie was shot and killed in Nad Ali district of Helmand on Nov. 11, the last British soldier to die before Monday's incident. Two British soldiers were killed by an Afghan policeman last October, and in the same month a police officer and militants poisoned their colleagues and shot others, leaving six Afghans dead. British Prime Minister David Cameron's spokesman said Tuesday that in light of the increase in insider attacks, measures have been taken to increase security in Afghanistan - including better vetting and screening of recruits and bolstering counterterrorism efforts. "These are clearly very, very serious incidents," spokesman Jean-Christophe Gray said. "We have taken a number of measures, and the military always keeps force protection measures under review." Insider attacks killed 61 people in 45 incidents last year, compared with 35 killed in 21 attacks a year earlier, according to NATO. This tally does not include the Dec. 24 killing of an American civilian adviser by a female member of the Afghan police, because an investigation of the reportedly mentally unstable woman is continuing. In some cases, militants have donned Afghan army or police uniforms to attack foreign troops. A number of attacks have also been carried out by members of Afghan security forces against their own comrades.
A total of 439 British forces personnel and Ministry of Defense civilians have died during the 11-year war, the second highest toll in the NATO-led coalition after the United States. Of these, 396 were killed as a result of hostile action, according to official British death tolls. Associated Press writers Cassandra Vinograd in London and Amir Shah in Kabul contributed.
declare module 'part:*';

declare module '*.module.css' {
  const content: {
    [identifier: string]: any;
  };
  export = content;
}
from flask_appbuilder import BaseView, expose
from config import APP_ICON, APP_NAME
from flask import g


def get_user():
    """Return the user attached to the current request context."""
    return g.user


def custom_template():
    """Return the application name and version used by custom templates."""
    app_name = "GEA"
    app_version = "1.2"
    return app_name, app_version


class someView(BaseView):
    """A simple view that implements the index for the site."""

    route_base = ''
    default_view = 'index'
    index_template = 'appbuilder/index.html'

    @expose('/')
    def index(self):
        # Imported here rather than at module level to avoid a circular
        # import between the app factory and this view module.
        from app import db
        from .models import Partner, Unit, Application, Doctype

        session = db.session
        partner = session.query(Partner).count()
        unit = session.query(Unit).count()
        application = session.query(Application).count()
        doctype = session.query(Doctype).count()
        self.update_redirect()
        # Note: the Application count is exposed to the template as "material".
        return self.render_template(self.index_template,
                                    appbuilder=self.appbuilder,
                                    partner=partner,
                                    unit=unit,
                                    material=application,
                                    doctype=doctype,
                                    user=g.user)


class MyIndexView(someView):
    index_template = 'index.html'
/*
 * Copyright 2019 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
 * in compliance with the License. You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the
 * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.google.cloud.hadoop.io.bigquery.hive;

import java.io.IOException;
import java.util.Properties;

import com.google.cloud.hadoop.io.bigquery.BigQueryConfiguration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter;
import org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.util.Progressable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/*
 * Class {@link WrappedBigQueryAvroOutputFormat} serializes Hive records as Avro records and writes to file on GCS
 */
public class WrappedBigQueryAvroOutputFormat extends AvroContainerOutputFormat {

  private static final Logger LOG = LoggerFactory.getLogger(WrappedBigQueryAvroOutputFormat.class);

  /**
   * Create Hive Record writer.
   *
   * @param jobConf Hadoop Job Configuration
   * @param path Hadoop File Path
   * @param valueClass Class representing record (in this case AvroGenericRecordWritable)
   * @param isCompressed Indicates whether records are compressed or not
   * @param properties Job Properties
   * @param progressable Instance to represent the task progress
   * @return Instance of Hive Record Writer
   * @throws IOException
   */
  @Override
  public RecordWriter getHiveRecordWriter(
      JobConf jobConf,
      Path path,
      Class<? extends Writable> valueClass,
      boolean isCompressed,
      Properties properties,
      Progressable progressable)
      throws IOException {
    Path actual = new Path(getTempFilename(jobConf));
    LOG.info("Set temporary output file to {}", actual.getName());
    return super.getHiveRecordWriter(
        jobConf, actual, valueClass, isCompressed, properties, progressable);
  }

  /**
   * Checks output Spec and sets HiveBigQueryOutputCommitter
   *
   * @param ignored Hadoop File System
   * @param jobConf Job Configuration
   * @throws IOException
   */
  @Override
  public void checkOutputSpecs(FileSystem ignored, JobConf jobConf) throws IOException {
    // can perform various checks
    LOG.info("Setting HiveBigQueryOutputCommitter..");
    jobConf.setOutputCommitter(HiveBigQueryOutputCommitter.class);
  }

  /**
   * Generate a temporary file name that stores the temporary Avro output from the
   * AvroContainerOutputFormat. The file will be loaded into BigQuery later.
   *
   * @param jobConf Hadoop Job Configuration
   * @return Fully Qualified temporary table path on GCS
   */
  public static String getTempFilename(JobConf jobConf) {
    String tempOutputPath = jobConf.get(BigQueryConfiguration.TEMP_GCS_PATH_KEY);
    String tempDataset = jobConf.get(BigQueryConfiguration.OUTPUT_DATASET_ID_KEY);
    String outputTableId = jobConf.get(BigQueryConfiguration.OUTPUT_TABLE_ID_KEY);
    String uniqueID = jobConf.get(HiveBigQueryConstants.UNIQUE_JOB_KEY);
    Path tempFilePath =
        new Path(
            tempOutputPath,
            String.format("%s_%s_%s", tempDataset, outputTableId.replace("$", "__"), uniqueID));
    return tempFilePath.toString();
  }
}
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this
// file except in compliance with the License. You may obtain a copy of the
// License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.

#include "flare/base/buffer.h"

#include <cstddef>  // std::byte
#include <cstdint>  // std::uint64_t, std::uint8_t
#include <cstring>  // memcpy
#include <string>
#include <vector>

#include "gtest/gtest.h"

#include "flare/init/override_flag.h"
#include "flare/testing/main.h"

using namespace std::literals;

FLARE_OVERRIDE_FLAG(flare_buffer_block_size, BUFFER_BLOCK_SIZE);

namespace flare {
namespace {

PolymorphicBuffer MakeNativeBuffer(std::string_view s) {
  auto buffer = MakeNativeBufferBlock();
  memcpy(buffer->mutable_data(), s.data(), s.size());
  return PolymorphicBuffer(buffer, 0, s.size());
}

}  // namespace

TEST(CreateBufferSlow, All) {
  static const auto kData = "sadfas234sadf-/+8sdaf sd f~!#"s;
  auto nb = CreateBufferSlow(kData);
  ASSERT_EQ(kData, nb.FirstContiguous().data());
  ASSERT_EQ(kData, FlattenSlow(nb));
}

TEST(NoncontiguousBuffer, Cut) {
  NoncontiguousBuffer nb;
  nb.Append(CreateBufferSlow("asdf"));
  auto r = nb.Cut(3);
  ASSERT_EQ(1, nb.ByteSize());
  ASSERT_EQ("f", FlattenSlow(nb));
  ASSERT_EQ(3, r.ByteSize());
  ASSERT_EQ("asd", FlattenSlow(r));
}

TEST(NoncontiguousBuffer, Cut1) {
  NoncontiguousBuffer nb;
  nb.Append(CreateBufferSlow("asdf"));
  auto r = nb.Cut(4);
  ASSERT_TRUE(nb.Empty());
  ASSERT_EQ(4, r.ByteSize());
}

TEST(NoncontiguousBuffer, Cut2) {
  NoncontiguousBuffer nb;
  nb.Append(MakeNativeBuffer("asdf"));
  nb.Append(MakeNativeBuffer("asdf"));
  auto r = nb.Cut(4);
  ASSERT_EQ(4, nb.ByteSize());
  ASSERT_EQ(4, r.ByteSize());
}

TEST(NoncontiguousBuffer, Cut3) {
  NoncontiguousBuffer nb;
  nb.Append(MakeNativeBuffer("asdf"));
  nb.Append(MakeNativeBuffer("asdf"));
  auto r = nb.Cut(8);
  ASSERT_TRUE(nb.Empty());
  ASSERT_EQ(8, r.ByteSize());
}

TEST(NoncontiguousBuffer, Cut4) {
  auto nb = CreateBufferSlow("asdfasf2345sfsdfdf");
  auto nb2 = nb;
  ASSERT_EQ(FlattenSlow(nb), FlattenSlow(nb2));
  NoncontiguousBuffer splited;
  splited.Append(nb.Cut(1));
  splited.Append(nb.Cut(2));
  splited.Append(nb.Cut(3));
  splited.Append(nb.Cut(4));
  splited.Append(std::move(nb));
  ASSERT_EQ(FlattenSlow(nb2), FlattenSlow(splited));
}

TEST(NoncontiguousBuffer, Skip) {
  NoncontiguousBuffer splited;
  splited.Append(CreateBufferSlow("asdf"));
  splited.Append(CreateBufferSlow("asdf"));
  splited.Append(CreateBufferSlow("asdf"));
  splited.Append(CreateBufferSlow("asdf"));
  splited.Append(CreateBufferSlow("asdf"));
  splited.Append(CreateBufferSlow("asdf"));
  splited.Append(CreateBufferSlow("asdf"));
  splited.Append(CreateBufferSlow("asdf"));
  splited.Skip(32);
  ASSERT_EQ(0, splited.ByteSize());
}

TEST(NoncontiguousBuffer, Skip2) {
  NoncontiguousBuffer buffer;
  EXPECT_TRUE(buffer.Empty());
  buffer.Skip(0);  // Don't crash.
  EXPECT_TRUE(buffer.Empty());
}

TEST(NoncontiguousBuffer, FlattenSlow) {
  NoncontiguousBuffer nb;
  nb.Append(MakeNativeBuffer("asd4234"));
  nb.Append(MakeNativeBuffer("aXsdfsadfasdf2342"));
  ASSERT_EQ("asd4234aXs", FlattenSlow(nb, 10));
}

TEST(NoncontiguousBuffer, FlattenToSlow) {
  struct C {
    std::uint64_t ll;
    int i;
    bool f;
  };
  NoncontiguousBuffer nb;
  nb.Append(MakeNativeBuffer("\x12\x34\x56\x78\x9a\xbc\xde\xf0"s));
  nb.Append(MakeNativeBuffer("\x12\x34\x56\x78"s));
  nb.Append(MakeNativeBuffer("\x1"s));
  nb.Append(MakeNativeBuffer("\x00\x00\x00"s));  // Padding
  C c;
  FlattenToSlow(nb, &c, sizeof(C));
  ASSERT_EQ(0xf0debc9a78563412, c.ll);  // TODO(luobogao): Endianness.
  ASSERT_EQ(0x78563412, c.i);
  ASSERT_EQ(true, c.f);
}

TEST(NoncontiguousBuffer, FlattenSlowUntil) {
  NoncontiguousBuffer nb;
  nb.Append(MakeNativeBuffer("asd4234"));
  nb.Append(MakeNativeBuffer("aXsdfsadfasdf2342"));
  ASSERT_EQ("asd4234aX", FlattenSlowUntil(nb, "aX"));
  ASSERT_EQ("asd4", FlattenSlowUntil(nb, "4"));
  ASSERT_EQ("asd4234aXsdfsadfasdf2342", FlattenSlowUntil(nb, "2342"));
  ASSERT_EQ("asd42", FlattenSlowUntil(nb, "z", 5));
  ASSERT_EQ("asd42", FlattenSlowUntil(nb, "3", 5));
  ASSERT_EQ("asd42", FlattenSlowUntil(nb, "2", 5));
  ASSERT_EQ("asd4", FlattenSlowUntil(nb, "4", 5));
}

TEST(NoncontiguousBuffer, FlattenSlowUntil2) {
  auto nb = CreateBufferSlow(
      "HTTP/1.1 200 OK\r\nRpc-SeqNo: 14563016719\r\nRpc-Error-Code: "
      "0\r\nRpc-Error-Reason: The operation completed "
      "successfully.\r\nContent-Type: "
      "application/x-protobuf\r\nContent-Length: 0\r\n\r\nHTTP/1.1 200 "
      "OK\r\nRpc-Seq");
  ASSERT_EQ(
      "HTTP/1.1 200 OK\r\nRpc-SeqNo: 14563016719\r\nRpc-Error-Code: "
      "0\r\nRpc-Error-Reason: The operation completed "
      "successfully.\r\nContent-Type: "
      "application/x-protobuf\r\nContent-Length: 0\r\n\r\n",
      FlattenSlowUntil(nb, "\r\n\r\n"));
}

TEST(NoncontiguousBuffer, FlattenSlowUntil3) {
  NoncontiguousBuffer nb;
  nb.Append(MakeNativeBuffer("asd4234"));
  nb.Append(MakeNativeBuffer("aXsdfsadfasdf2342"));
  ASSERT_EQ("asd4234aX", FlattenSlowUntil(nb, "aX"));
  ASSERT_EQ("asd4", FlattenSlowUntil(nb, "4"));
  ASSERT_EQ("asd4234aX", FlattenSlowUntil(nb, "4aX"));
}

TEST(NoncontiguousBuffer, FlattenSlowUntil4) {
  NoncontiguousBuffer nb;
  nb.Append(MakeNativeBuffer("AB"));
  nb.Append(MakeNativeBuffer("CDEFGGGGHHHH"));
  ASSERT_EQ("ABCDEFGGGG", FlattenSlowUntil(nb, "GGGG"));
}

TEST(NoncontiguousBufferBuilder, Append) {
  NoncontiguousBufferBuilder nbb;
  nbb.Append(MakeForeignBuffer(""));
  nbb.Append(MakeForeignBuffer("small"));
  nbb.Append(MakeForeignBuffer(std::string(8192, 'a')));
  nbb.Append(CreateBufferSlow(""));
  nbb.Append(CreateBufferSlow("small"));
  nbb.Append(CreateBufferSlow(std::string(8192, 'a')));
  EXPECT_EQ("small" + std::string(8192, 'a') + "small" + std::string(8192, 'a'),
            FlattenSlow(nbb.DestructiveGet()));
}

TEST(NoncontiguousBufferBuilder, Reserve) {
  auto temp_block = MakeNativeBufferBlock();
  auto max_bytes = temp_block->size();
  NoncontiguousBufferBuilder nbb;
  auto ptr = nbb.data();
  auto ptr2 = nbb.Reserve(10);
  ASSERT_EQ(ptr, ptr2);
  ASSERT_EQ(ptr + 10, nbb.data());
  nbb.Append(std::string(max_bytes - 10 - 1, 'a'));
  ptr = nbb.data();
  ptr2 = nbb.Reserve(1);  // Last byte in the block.
  ASSERT_EQ(ptr, ptr2);
  ASSERT_EQ(max_bytes, nbb.SizeAvailable());
  nbb.Append(std::string(max_bytes - 1, 'a'));
  ptr = nbb.data();
  ptr2 = nbb.Reserve(2);
  ASSERT_NE(ptr, ptr2);
  ASSERT_EQ(ptr2 + 2, nbb.data());
}

TEST(NoncontiguousBufferBuilder, DestructiveGet1) {
  NoncontiguousBufferBuilder nbb;
  nbb.Append("asdf1234", 6);
  nbb.Append("1122", 4);
  ASSERT_EQ(
      "asdf12"
      "1122",
      FlattenSlow(nbb.DestructiveGet()));
}

TEST(NoncontiguousBufferBuilder, DestructiveGet2) {
  NoncontiguousBufferBuilder nbb;
  nbb.Append("aabbccd");
  ASSERT_EQ("aabbccd", FlattenSlow(nbb.DestructiveGet()));
}

TEST(NoncontiguousBufferBuilder, DestructiveGet3) {
  NoncontiguousBufferBuilder nbb;
  nbb.Append(std::string(1000000, 'A'));
  ASSERT_EQ(std::string(1000000, 'A'), FlattenSlow(nbb.DestructiveGet()));
}

TEST(NoncontiguousBufferBuilder, DestructiveGet4) {
  NoncontiguousBufferBuilder nbb;
  nbb.Append('c');
  ASSERT_EQ("c", FlattenSlow(nbb.DestructiveGet()));
}

TEST(NoncontiguousBufferBuilder, DestructiveGet5) {
  NoncontiguousBufferBuilder nbb;
  nbb.Append(CreateBufferSlow("c"));
  ASSERT_EQ("c", FlattenSlow(nbb.DestructiveGet()));
}

TEST(NoncontiguousBufferBuilder, DestructiveGet6) {
  NoncontiguousBufferBuilder nbb;
  nbb.Append("11"sv, "2"sv, "3"s, "45"s, "6"sv);
  nbb.Append("1122", 4);
  ASSERT_EQ("11234561122", FlattenSlow(nbb.DestructiveGet()));
}

TEST(MakeReferencingBuffer, Simple) {
  NoncontiguousBufferBuilder nbb;
  nbb.Append(MakeReferencingBuffer("abcdefg", 7));
  EXPECT_EQ("abcdefg", FlattenSlow(nbb.DestructiveGet()));
}

TEST(MakeReferencingBuffer, WithCallbackSmallBufferOptimized) {
  int x = 0;
  NoncontiguousBufferBuilder nbb;
  nbb.Append("aaa", 3);
  // Small buffers are copied by `Append` and freed immediately.
  nbb.Append(MakeReferencingBuffer("abcdefg", 7, [&] { ++x; }));
  // Therefore the callback should have fired on return of `Append`.
  EXPECT_EQ(1, x);
  auto buffer = nbb.DestructiveGet();
  EXPECT_EQ("aaaabcdefg", FlattenSlow(buffer));
}

TEST(MakeReferencingBuffer, WithCallback) {
  static const std::string kBuffer(12345, 'a');
  int x = 0;
  {
    NoncontiguousBufferBuilder nbb;
    nbb.Append("aaa", 3);
    nbb.Append(MakeReferencingBuffer(kBuffer.data(), 1024, [&] { ++x; }));
    EXPECT_EQ(0, x);
    auto buffer = nbb.DestructiveGet();
    EXPECT_EQ(0, x);
    EXPECT_EQ("aaa" + kBuffer.substr(0, 1024), FlattenSlow(buffer));
  }
  EXPECT_EQ(1, x);
}

TEST(MakeForeignBuffer, String) {
  NoncontiguousBufferBuilder nbb;
  nbb.Append(MakeForeignBuffer("abcdefg"s));
  EXPECT_EQ("abcdefg", FlattenSlow(nbb.DestructiveGet()));
}

TEST(MakeForeignBuffer, VectorOfChar) {
  std::vector<char> data{'a', 'b', 'c', 'd', 'e', 'f', 'g'};
  NoncontiguousBufferBuilder nbb;
  nbb.Append(MakeForeignBuffer(std::move(data)));
  EXPECT_EQ("abcdefg", FlattenSlow(nbb.DestructiveGet()));
}

TEST(MakeForeignBuffer, VectorOfBytes) {
  std::vector<std::byte> data;
  data.resize(7);
  memcpy(data.data(), "abcdefg", 7);
  NoncontiguousBufferBuilder nbb;
  nbb.Append(MakeForeignBuffer(std::move(data)));
  EXPECT_EQ("abcdefg", FlattenSlow(nbb.DestructiveGet()));
}

TEST(MakeForeignBuffer, VectorOfUInt8) {
  std::vector<std::uint8_t> data;
  data.resize(7);
  memcpy(data.data(), "abcdefg", 7);
  NoncontiguousBufferBuilder nbb;
  nbb.Append(MakeForeignBuffer(std::move(data)));
  EXPECT_EQ("abcdefg", FlattenSlow(nbb.DestructiveGet()));
}

}  // namespace flare

FLARE_TEST_MAIN
Would the world be a better place if everyone smoked weed? Well, we won't know until it's been decriminalized, so saying it won't isn't accurate or true either. As mentioned, it has made a difference where it has been decriminalized. Time will tell, because it will, at the very least, be decriminalized here. Trying it at least once should be mandatory. Then, if you don't like it, don't use it. It should also be legalized worldwide.
package space.nyuki.questionnaire.service;

import org.apache.shiro.authz.AuthorizationInfo;
import org.apache.shiro.authz.SimpleAuthorizationInfo;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.mongodb.core.MongoTemplate;
import org.springframework.data.mongodb.core.query.Criteria;
import org.springframework.data.mongodb.core.query.Query;
import org.springframework.data.mongodb.core.query.Update;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import space.nyuki.questionnaire.exception.UserExistsException;
import space.nyuki.questionnaire.pojo.User;
import space.nyuki.questionnaire.utils.MD5Util;
import space.nyuki.questionnaire.utils.MapUtil;

import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.Objects;

@Service
public class UserService {

    @Autowired
    private MongoTemplate mongoTemplate;

    @Transactional
    public void createUser(User user) {
        User userByUserName = getUserByUserName(user.getUsername());
        if (Objects.nonNull(userByUserName)) {
            throw new UserExistsException();
        }
        user.setCreatedTime(new Date());
        user.setPasswd(MD5Util.saltMd5(user.getPasswd()));
        mongoTemplate.save(user);
    }

    @Transactional
    public void deleteUser(String id) {
        // Soft delete: mark the record instead of removing it.
        Update update = new Update();
        update.set("is_delete", 1);
        mongoTemplate.findAndModify(
                Query.query(Criteria.where("_id").is(id)),
                update,
                User.class
        );
    }

    @Transactional
    public void updateUser(User user) {
        String id = user.getId();
        Map<String, Object> stringObjectMap = MapUtil.objectToMap(user);
        Update update = new Update();
        String passwd = (String) stringObjectMap.get("passwd");
        if (Objects.nonNull(passwd)) {
            stringObjectMap.put("passwd", MD5Util.saltMd5(passwd));
        }
        stringObjectMap.forEach(update::set);
        mongoTemplate.findAndModify(
                Query.query(Criteria.where("_id").is(id)),
                update,
                User.class
        );
    }

    @Transactional
    public User getLoginInfo(String username) {
        User user = this.getUserByUserName(username);
        Update update = new Update();
        update.set("last_login", new Date());
        mongoTemplate.findAndModify(
                Query.query(Criteria.where("_id").is(user.getId())),
                update,
                User.class
        );
        return user;
    }

    public List<User> getUsers() {
        return mongoTemplate.find(
                Query.query(Criteria.where("is_delete").is(0)),
                User.class
        );
    }

    public User getUserByUserName(String username) {
        return mongoTemplate.findOne(
                Query.query(Criteria.where("username").is(username).and("is_delete").is(0)),
                User.class);
    }

    /**
     * Get a user's authorization info (permissions).
     *
     * @param username the username to look up
     * @return the Shiro authorization info for that user
     */
    public AuthorizationInfo getAuthorizationInfo(String username) {
        User user = this.getUserByUserName(username);
        SimpleAuthorizationInfo simpleAuthorizationInfo = new SimpleAuthorizationInfo();
        MapUtil.objectToMap(user.getPermission()).forEach((k, v) -> {
            simpleAuthorizationInfo.addStringPermission(k + ":" + v);
        });
        return simpleAuthorizationInfo;
    }

    public User getUserById(String id) {
        return mongoTemplate.findOne(Query.query(Criteria.where("_id").is(id)), User.class);
    }
}
A Performance Evaluation of Load Balancing and QoS-aware Gateway Discovery Protocol for VANETs Recently we have witnessed growing interest in Vehicular Ad hoc Networks (VANETs) from the ITS community. Several potential applications of vehicular networks aiming at integrating service discovery mechanisms and the latest Internet access technologies are envisioned. However, several challenging issues, such as gateway discovery among others, remain to be resolved before VANET technology becomes a commodity. Indeed, it is essential to provide clients with gateway services that best suit their requests while balancing the load on the gateways, in order to reduce the gateway congestion problem. In this paper, we evaluate the performance of a load-balanced gateway discovery protocol (LAGAD) and introduce a new QoS-aware gateway discovery protocol (QoSLAGAD) that permits connection to heterogeneous wireless networks. The protocol balances the load at the gateways as well as over the routing paths between gateways and gateway requesters. It also permits gateway requesters to choose and connect to the gateways that satisfy the QoS required by the vehicles. We present our protocol and discuss its performance evaluation using an extensive set of simulation experiments. Our results indicate that significant improvements were achieved using the QoSLAGAD protocol when compared to LAGAD, while guaranteeing QoS and balancing the gateways' load.
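The abstract does not publish the protocol's actual selection metric, so the following is only a plausible sketch of QoS-aware, load-balanced gateway selection: each vehicle filters out gateways that cannot meet its bandwidth and delay requirements and, among the remainder, picks the least-loaded one. All class, field, and value names are invented for illustration.

import java.util.Comparator;
import java.util.List;
import java.util.Optional;

/** Illustrative only: not the paper's published algorithm. */
public class GatewaySelector {
    /** Hypothetical gateway descriptor; the fields are invented for the example. */
    public record Gateway(String id, double availableBandwidthMbps, double pathDelayMs, int load) {}

    /** Hypothetical per-vehicle QoS requirement. */
    public record QosRequest(double minBandwidthMbps, double maxDelayMs) {}

    /**
     * Keep only the gateways satisfying the requested QoS, then pick the
     * least-loaded one so that requesters spread across gateways.
     */
    public static Optional<Gateway> select(List<Gateway> gateways, QosRequest qos) {
        return gateways.stream()
                .filter(g -> g.availableBandwidthMbps() >= qos.minBandwidthMbps()
                        && g.pathDelayMs() <= qos.maxDelayMs())
                .min(Comparator.comparingInt(Gateway::load));
    }

    public static void main(String[] args) {
        List<Gateway> gateways = List.of(
                new Gateway("gw1", 10.0, 40.0, 12),
                new Gateway("gw2", 6.0, 25.0, 3),
                new Gateway("gw3", 2.0, 10.0, 1));
        QosRequest qos = new QosRequest(5.0, 50.0);
        // gw3 fails the bandwidth filter; gw2 wins over gw1 on load.
        System.out.println(select(gateways, qos));
    }
}

The design choice in this sketch, filtering on QoS before comparing load, mirrors the abstract's two stated goals: satisfying the QoS required by the vehicles first, and balancing the gateways' load second.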
Kentucky Sen. Rand Paul criticized President Obama on Sunday for his administration's repeated dire warnings over the approaching debt limit deadline, saying "It's irresponsible of the president and his men to even talk about default" and insisting there is no chance the government will miss a payment on its debt. Appearing on NBC's "Meet the Press," Paul noted that the government takes in more in revenue each month than it pays out in interest on Treasury bonds, and that President Obama has not acted on a bill passed by House Republicans that would have instructed the Treasury to prioritize payments on the debt over other government obligations, ensuring that Treasury bonds would not fail. "This is kind of like closing the World War II Memorial," Paul said of the Obama administration. "They all get out on TV and they say, 'oh, we're going to default.' They're the ones scaring the marketplace." But when asked by host Savannah Guthrie whether delaying government payments other than debt service could also harm the economy, Paul avoided answering the question directly. Instead, he mentioned that the 2011 debt ceiling standoff resulted in a credit downgrade for U.S. debt because the ratings agency Standard & Poor's claimed the resulting deficit deal did not do enough to lower the debt. In the interview, Paul also blamed the president for the ongoing government shutdown, claiming that the House GOP's approach of passing small funding bills for different government functions is the same as or even better than voting for a larger bill that would fund the government with no strings attached. Passing appropriations bills to fund the NIH, veterans' benefits, and other programs is "actually a much better way to run government," said Paul, "because right now you're sticking everything into one bill, and that's why the leverage of shutting the government down occurs. But if you did things appropriately and you passed appropriation bills one at a time, no one would be able to shut down government ever."
This week, we almost had a Cheap Eat$ disaster. We were on our way to hunt out a restaurant for the column when Katrina mentioned she really needed to eat something soon, else she was going to faint. I stepped on the gas so fast, I almost ran a red light. I wish I had some flashing lights and a siren to affix to my vehicle in times of need like that. Therefore, we went to the first place we could think of that was cheap enough: the Avalon Diner. The Avalon Diner offers great diner fare at Cheap Eat$ prices. Besides, burgers and the like always sound good to me. Upon arriving, Jorg mentioned he was quite famished as well. "You know you're really hungry when you go to a diner and everything (on the menu) looks good," he said. On the menu were the diner usuals -- burgers, sandwiches and fried dishes -- and some not-so-usual plates. "Hey, they have Vienna sausages. It's like diner Beanie Weenie," Jorg said. He ended up getting the Chicken Fried Chicken Breast ($5.75), which came with fries or mashed potatoes (potatoes picked here), salad and Texas toast. "The chicken was juicy and well-spiced," Jorg said. The plate also came with a rather large cup of white gravy on the side for the diner to administer to the food, so much that "you could swim in it," according to Jorg. He then related a story to us about a person he knew from Montreal who tried chicken fried steak once and complained about the lard on top of it. "I can't imagine anybody who doesn't like cream gravy -- that's un-American," Jorg said. "Let me say that the gravy was a perfect complement to the chicken," he added. Katrina ordered one of the day's specials, Chicken and Dumplings ($5.95). Along with some dinner rolls, she picked sides of mashed potatoes, collard greens and okra to accompany it. "My blood sugar is going up," Katrina mentioned after a few bites. She looked much relieved now that she wasn't going to faint. I had a hard time deciding what to order on this particular day. I knew the burgers were tasty, having eaten there before, but I thought maybe I should try something different. I ended up getting a burger. But it wasn't just a burger. It was a Mushroom Cheeseburger ($3.95), a beefy treat topped with cheese (of course), lettuce, tomato, pickles, onions, mustard and mayonnaise on a lightly toasted bun. I'm pleased with a burger when the meat isn't pink and the veggies are crisp, so this burger met my qualifications. It wasn't nearly as big or juicy as the one I had at Lankford Grocery, but not every burger can taste like theirs. Although I felt full enough not to need to eat again for a while, I urged my Cheap Eat$ buddies to order some shakes for dessert. "I wouldn't think of dining here without having a shake," Jorg said. So we ordered a round of chocolate ones ($2.95 each, any flavor) from our waitress, Velma. Thick and milky, I thought I died and went to shake heaven. There's nothing like a good rush of cocoa to your brain to complete your day (well, OK, your meal). Also on the menu is a variety of breakfast items, available all day. Included are eggs and omelettes, french toast, waffles and pancakes. A children's menu for breakfast and lunch is also provided. Total cost: $8.84 per person, but don't forget that we had dessert. Without it, the average would be less than $6. Either way, it's a good way to fill your tummy. Besides, you can enjoy viewing plenty of photos from Houston's earlier days, prominently displayed near the booths. My favorite is the one of the old Alabama Theater (which is now a Bookstop).
So belly up to the counter or grab a booth for some good cookin'. And just smile politely when the wait staff refers to you as "baby."
/*! \file bcmltx_tnl_mpls_policy_sel.c
 *
 * TNL_DEFAULT_POLICY.POLICY_CMD_SELECT/POLICY_OBJ_SELECT Transform Handler
 *
 * This file contains field transform information for
 * TNL_DEFAULT_POLICY.POLICY_CMD_SELECT/POLICY_OBJ_SELECT.
 *
 * Element 'N' of the logical field array corresponds to the field
 * arg->rfield[N] (forward transform), arg->field[N] (reverse transform).
 * The array index should be
 * between 0 and arg->rfields - 1 (forward transform)
 * between 0 and arg->fields - 1 (reverse transform).
 */
/*
 * This license is set out in https://raw.githubusercontent.com/Broadcom-Network-Switching-Software/OpenBCM/master/Legal/LICENSE file.
 *
 * Copyright 2007-2020 Broadcom Inc. All rights reserved.
 */

#include <shr/shr_debug.h>
#include <bsl/bsl.h>
#include <bcmltd/chip/bcmltd_id.h>
#include <bcmltx/bcmtnl/bcmltx_tnl_mpls_policy_sel.h>

#define BSL_LOG_MODULE BSL_LS_BCMLTX_TNL

/*
 * \brief TNL_DEFAULT_POLICY.POLICY_CMD_SELECT/POLICY_OBJ_SELECT array forward transform
 *
 * \param [in]  unit  Unit number.
 * \param [in]  in    Logical policy select field array.
 * \param [out] out   Physical policy select fields, one per array index.
 * \param [in]  arg   Handler arguments.
 *
 * Transform direction is logical to physical.
 *
 * \retval SHR_E_NONE  OK
 * \retval !SHR_E_NONE ERROR
 */
int
bcmltx_tnl_mpls_policy_sel_transform(int unit,
                                     const bcmltd_fields_t *in,
                                     bcmltd_fields_t *out,
                                     const bcmltd_transform_arg_t *arg)
{
    size_t i; /* logical in field index */
    uint32_t arr_idx;
    int rv = SHR_E_NONE;

    SHR_FUNC_ENTER(unit);

    LOG_VERBOSE(BSL_LOG_MODULE,
                (BSL_META_U(unit,
                            "\t bcmltx_tnl_mpls_policy_sel_transform\n")));

    out->count = 0;
    for (i = 0; i < in->count; i++) {
        /* Scatter array element 'idx' to the matching physical field. */
        arr_idx = in->field[i]->idx;
        out->field[out->count]->id = arg->rfield[arr_idx];
        out->field[out->count]->data = in->field[i]->data;
        out->count++;
    }

    SHR_ERR_EXIT(rv);
exit:
    SHR_FUNC_EXIT();
}

/*
 * \brief TNL_DEFAULT_POLICY.POLICY_CMD_SELECT/POLICY_OBJ_SELECT array reverse transform
 *
 * \param [in]  unit  Unit number.
 * \param [in]  in    Physical policy select fields.
 * \param [out] out   Logical policy select field array.
 * \param [in]  arg   Handler arguments.
 *
 * Transform direction is physical to logical.
 *
 * \retval SHR_E_NONE  OK
 * \retval !SHR_E_NONE ERROR
 */
int
bcmltx_tnl_mpls_policy_sel_rev_transform(int unit,
                                         const bcmltd_fields_t *in,
                                         bcmltd_fields_t *out,
                                         const bcmltd_transform_arg_t *arg)
{
    size_t idx;
    int rv = SHR_E_NONE;

    SHR_FUNC_ENTER(unit);

    LOG_VERBOSE(BSL_LOG_MODULE,
                (BSL_META_U(unit,
                            "\t bcmltx_tnl_mpls_policy_sel_rev_transform\n")));

    for (idx = out->count = 0; idx < in->count; idx++) {
        if (idx < arg->fields) {
            /* Gather each physical field back into the logical array. */
            out->field[out->count]->id = arg->rfield[0];
            out->field[out->count]->idx = idx;
            out->field[out->count]->data = in->field[idx]->data;
            out->count++;
        } else {
            rv = SHR_E_FAIL;
            break;
        }
    }

    SHR_ERR_EXIT(rv);
exit:
    SHR_FUNC_EXIT();
}
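Read outside the C plumbing, the index mapping above is simple: the forward transform scatters element N of the logical array to the physical field arg->rfield[N], and the reverse transform gathers the physical fields back into one logical array indexed by position. A schematic Python rendering, with invented field ids:

# Invented physical field ids standing in for arg->rfield[]
rfield = ["POLICY_CMD_SELECT_PHYS", "POLICY_OBJ_SELECT_PHYS"]

def forward(logical):
    # logical: list of (array_index, data); scatter by array index
    return [(rfield[idx], data) for idx, data in logical]

def reverse(physical):
    # physical: values in physical-field order; gather into one array field
    return [("LOGICAL_FIELD", idx, data) for idx, data in enumerate(physical)]

print(forward([(0, 0x3), (1, 0x1)]))
print(reverse([0x3, 0x1]))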
Update operations In this paper we review several knowledge base update operations that have recently been proposed in order to handle disjunctive updates and updates under integrity constraints. Updating with disjunctive information is problematic for minimal change semantics, in particular for Winslett's Possible Models Approach (PMA). Zhang and Foo have recently defined the MCD semantics to overcome this drawback. In this paper we show that the MCD is still problematic, and propose a correction. Then we address a second important problem plaguing the PMA and its variations, viz. the handling of integrity constraints. We investigate a dependence-based semantics, which Liberatore has shown to be coNP-complete. We give a simple automated deduction method, show that integrity constraints as well as disjunctive updates are treated correctly in that semantics, and discuss the Katsuno-Mendelzon postulates.
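As a concrete illustration of the disjunction problem mentioned above, here is a small Python sketch of the PMA over two atoms (our toy encoding, not the paper's): each model of the knowledge base moves to the closest models of the update formula, closeness being the number of changed atoms, so updating a world where p and q are both false with p ∨ q can never reach the world where both are true.

from itertools import product

atoms = ("p", "q")

def models(formula):
    # All truth assignments (as tuples) satisfying the formula
    return [m for m in product((False, True), repeat=len(atoms))
            if formula(dict(zip(atoms, m)))]

def pma_update(kb_models, formula):
    updated = set()
    for w in kb_models:
        candidates = models(formula)
        # PMA distance: number of atoms whose value changed w.r.t. w
        dist = lambda m: sum(a != b for a, b in zip(m, w))
        dmin = min(dist(m) for m in candidates)
        updated |= {m for m in candidates if dist(m) == dmin}
    return updated

kb = [(False, False)]                    # single model: p = q = False
update = lambda v: v["p"] or v["q"]      # disjunctive update: p or q
print(pma_update(kb, update))
# {(True, False), (False, True)} -- (True, True) is unreachable, which is
# the behaviour the MCD semantics was designed to revisit.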
/** * Initialize all the values * * @param ss SignalStrength as ASU value * @param ber is Bit Error Rate * * @hide */ public void initialize(int ss, int ber) { mSignalStrength = ss; mBitErrorRate = ber; mTimingAdvance = Integer.MAX_VALUE; }
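For reference, ASU in snippets like the one above usually follows the GSM convention in which signal strength maps linearly to dBm. A hedged Python helper; the -113 + 2·ASU formula and the 99 = unknown sentinel are the widely used convention, assumed here rather than taken from this file:

def gsm_asu_to_dbm(asu):
    # Common GSM convention: dBm = -113 + 2 * ASU; 99 means "unknown"
    if asu == 99:
        return None
    return -113 + 2 * asu

print(gsm_asu_to_dbm(16))  # -81 (dBm)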
package org.embulk.output.orc; import com.amazonaws.auth.AWSCredentials; import com.google.common.base.Throwables; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.LocalFileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch; import org.apache.hadoop.util.VersionInfo; import org.apache.orc.CompressionKind; import org.apache.orc.OrcFile; import org.apache.orc.TypeDescription; import org.apache.orc.Writer; import org.embulk.config.ConfigDiff; import org.embulk.config.ConfigSource; import org.embulk.config.TaskReport; import org.embulk.config.TaskSource; import org.embulk.spi.Column; import org.embulk.spi.Exec; import org.embulk.spi.OutputPlugin; import org.embulk.spi.Page; import org.embulk.spi.PageReader; import org.embulk.spi.Schema; import org.embulk.spi.TransactionalPageOutput; import org.embulk.spi.time.TimestampFormatter; import org.embulk.spi.type.Type; import org.embulk.spi.util.Timestamps; import org.embulk.util.aws.credentials.AwsCredentials; import java.io.IOException; import java.util.List; public class OrcOutputPlugin implements OutputPlugin { @Override public ConfigDiff transaction(ConfigSource config, Schema schema, int taskCount, OutputPlugin.Control control) { PluginTask task = config.loadConfig(PluginTask.class); // retryable (idempotent) output: // return resume(task.dump(), schema, taskCount, control); // non-retryable (non-idempotent) output: control.run(task.dump()); return Exec.newConfigDiff(); } @Override public ConfigDiff resume(TaskSource taskSource, Schema schema, int taskCount, OutputPlugin.Control control) { throw new UnsupportedOperationException("orc output plugin does not support resuming"); } @Override public void cleanup(TaskSource taskSource, Schema schema, int taskCount, List<TaskReport> successTaskReports) { } @Override public TransactionalPageOutput open(TaskSource taskSource, Schema schema, int taskIndex) { PluginTask task = taskSource.loadTask(PluginTask.class); if (task.getOverwrite()) { AWSCredentials credentials = AwsCredentials.getAWSCredentialsProvider(task).getCredentials(); OrcOutputPluginHelper.removeOldFile(buildPath(task, taskIndex), task); } final PageReader reader = new PageReader(schema); Writer writer = createWriter(task, schema, taskIndex); return new OrcTransactionalPageOutput(reader, writer, task); } private String buildPath(PluginTask task, int processorIndex) { final String pathPrefix = task.getPathPrefix(); final String pathSuffix = task.getFileNameExtension(); final String sequenceFormat = task.getSequenceFormat(); return pathPrefix + String.format(sequenceFormat, processorIndex) + pathSuffix; } private TypeDescription getSchema(Schema schema) { TypeDescription oschema = TypeDescription.createStruct(); for (int i = 0; i < schema.size(); i++) { Column column = schema.getColumn(i); Type type = column.getType(); switch (type.getName()) { case "long": oschema.addField(column.getName(), TypeDescription.createLong()); break; case "double": oschema.addField(column.getName(), TypeDescription.createDouble()); break; case "boolean": oschema.addField(column.getName(), TypeDescription.createBoolean()); break; case "string": oschema.addField(column.getName(), TypeDescription.createString()); break; case "timestamp": oschema.addField(column.getName(), TypeDescription.createTimestamp()); break; default: System.out.println("Unsupported type"); break; } } return oschema; } private Configuration 
getHadoopConfiguration(PluginTask task) { Configuration conf = new Configuration(); // see: https://stackoverflow.com/questions/17265002/hadoop-no-filesystem-for-scheme-file conf.set("fs.hdfs.impl", DistributedFileSystem.class.getName()); conf.set("fs.file.impl", LocalFileSystem.class.getName()); // see: https://stackoverflow.com/questions/20833444/how-to-set-objects-in-hadoop-configuration AwsCredentials.getAWSCredentialsProvider(task); if (task.getAccessKeyId().isPresent()) { conf.set("fs.s3a.access.key", task.getAccessKeyId().get()); conf.set("fs.s3n.awsAccessKeyId", task.getAccessKeyId().get()); } if (task.getSecretAccessKey().isPresent()) { conf.set("fs.s3a.secret.key", task.getSecretAccessKey().get()); conf.set("fs.s3n.awsSecretAccessKey", task.getSecretAccessKey().get()); } if (task.getEndpoint().isPresent()) { conf.set("fs.s3a.endpoint", task.getEndpoint().get()); conf.set("fs.s3n.endpoint", task.getEndpoint().get()); } return conf; } private Writer createWriter(PluginTask task, Schema schema, int processorIndex) { final TimestampFormatter[] timestampFormatters = Timestamps .newTimestampColumnFormatters(task, schema, task.getColumnOptions()); Configuration conf = getHadoopConfiguration(task); TypeDescription oschema = getSchema(schema); // see: https://groups.google.com/forum/#!topic/vertx/lLb-slzpWVg Thread.currentThread().setContextClassLoader(VersionInfo.class.getClassLoader()); Writer writer = null; try { // Make writerOptions OrcFile.WriterOptions writerOptions = createWriterOptions(task, conf); // see: https://stackoverflow.com/questions/9256733/how-to-connect-hive-in-ireport // see: https://community.hortonworks.com/content/kbentry/73458/connecting-dbvisualizer-and-datagrip-to-hive-with.html writer = OrcFile.createWriter( new Path(buildPath(task, processorIndex)), writerOptions.setSchema(oschema) .version(OrcFile.Version.V_0_12) ); } catch (IOException e) { Throwables.propagate(e); } return writer; } private OrcFile.WriterOptions createWriterOptions(PluginTask task, Configuration conf) { final Integer bufferSize = task.getBufferSize(); final Integer stripSize = task.getStripSize(); final Integer blockSize = task.getBlockSize(); final String kindString = task.getCompressionKind(); CompressionKind kind = CompressionKind.valueOf(kindString); return OrcFile.writerOptions(conf) .bufferSize(bufferSize) .blockSize(blockSize) .stripeSize(stripSize) .compress(kind); } class OrcTransactionalPageOutput implements TransactionalPageOutput { private final PageReader reader; private final Writer writer; public OrcTransactionalPageOutput(PageReader reader, Writer writer, PluginTask task) { this.reader = reader; this.writer = writer; } @Override public void add(Page page) { synchronized (this) { try { // int size = page.getStringReferences().size(); final TypeDescription schema = getSchema(reader.getSchema()); final VectorizedRowBatch batch = schema.createRowBatch(); // batch.size = size; reader.setPage(page); while (reader.nextRecord()) { final int row = batch.size++; reader.getSchema().visitColumns( new OrcColumnVisitor(reader, batch, row) ); if (batch.size >= batch.getMaxSize()) { writer.addRowBatch(batch); batch.reset(); } } if (batch.size != 0) { writer.addRowBatch(batch); batch.reset(); } } catch (IOException e) { e.printStackTrace(); } } } @Override public void finish() { try { writer.close(); } catch (IOException e) { Throwables.propagate(e); } } @Override public void close() { } @Override public void abort() { } @Override public TaskReport commit() { return Exec.newTaskReport(); } } 
}
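For a quick way to inspect or reproduce such ORC files outside the Java toolchain, the third-party Python package pyorc can write an equivalent struct schema. The sketch below mirrors the long/double/boolean/string mapping in getSchema() above; compression and stripe settings are left at pyorc's defaults, since the plugin-specific writer options do not carry over.

import pyorc

# Schema string mirrors the type mapping used by getSchema() above.
with open("example.orc", "wb") as data:
    with pyorc.Writer(data, "struct<id:bigint,score:double,ok:boolean,name:string>") as writer:
        writer.write((1, 0.5, True, "first"))
        writer.write((2, 1.5, False, "second"))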
def create(party_id):
    party = _get_party_or_404(party_id)

    contest = bungalow_contest_service.find_contest_by_party_id(party.id)
    if contest is not None:
        flash_error('A contest already exists for this party.')
        return redirect_to('.view', party_id=party.id)

    bungalow_contest_service.create_contest(party.id, ATTRIBUTE_TITLES)

    flash_success('The bungalow contest for this party has been created.')
    return redirect_to('.view', party_id=party.id)
// SetFile sets the log file output. When archive is true, the log file is
// additionally rotated into an archive once a day.
func (log *Logger) SetFile(filepath string, archive ...bool) {
    log.DisableConsoleColor()
    logArchive := len(archive) > 0 && archive[0]
    if logArchive {
        c := cron.New()
        // "0 0 * * *" fires at 00:00 every day and re-opens the log file,
        // archiving the previous day's output.
        _, _ = c.Add("0 0 * * *", func() {
            log.setLogfile(filepath, logArchive)
        })
        c.Run()
    }
    log.setLogfile(filepath, logArchive)
}
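For comparison, Python's standard library expresses the same archive-at-midnight behaviour directly: logging.handlers.TimedRotatingFileHandler rolls the log file over at midnight and keeps a bounded number of archives, which is the effect the cron job above schedules by hand.

import logging
from logging.handlers import TimedRotatingFileHandler

logger = logging.getLogger("app")
# Roll "app.log" over at midnight, keeping the last 7 archived files.
handler = TimedRotatingFileHandler("app.log", when="midnight", backupCount=7)
logger.addHandler(handler)
logger.warning("log lines are archived daily, like the cron job above")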
package tqs.domus.restapi.exception; import org.springframework.http.HttpStatus; import org.springframework.web.bind.annotation.ResponseStatus; /** * @author <NAME> * @date 06/mai/2020 * @time 14:13 */ @ResponseStatus(value = HttpStatus.NOT_FOUND) public class ResourceNotFoundException extends Exception { public ResourceNotFoundException(String message) { super(message); } }
Every time a Kangaroo farts, a little less greenhouse gas is produced in the world. That bit of intel comes from biologist Adam Munn, of the University of Wollongong, who spent several months stalkin’ roos and catching their farts to learn of their low methane levels, which could help farmers in the fight against climate change. The hope is that Adam’s research, published in The Journal Of Experimental Biology, will lead to the introduction of a carbon pricing scheme that gives concessions to farmers with fewer ‘methane-intense’ animals on their property, or more ‘roos, thus offering an incentive to reduce their emissions. That’d be pretty friggin’ great, because a report earlier this year found the agriculture / farming industry accounts for 15% of Australia’s total greenhouse gas output. Back to the bottling of flatulence: how does one go about that, exactly? Well, you find yourself a pack of kangaroos (red and western ones were used in this study) and put them in comfy, sealed chambers; then you analyse the gases going in and out of the chamber for variance. Less gas = happy planet = healthy humans. Via news.com.au. Lead image Via Getty.
Study of US Veterans Points to a Connection Between Late-Onset Seizures and Dementia

[...] are also concerned that the practice expenses component is at risk. This may comprise as much as 40 percent of total reimbursement on some E/M visits, Dr. Cohen estimated. Some payers are already questioning the need to pay those, despite the regulations that mandate reimbursement parity during the COVID-19 crisis, he said. Dr. Cohen also fears that new limits may be placed on the number of office face-to-face visits within a specific period of time, and that we may start seeing changes from payers that could channel at least some outpatient visits to telemedicine. As Dr. Cohen contemplates a worst-case financial scenario, he believes the ramifications are as grave for academic and hospital-based departments, which rely on practice expense RVUs, as for private practices. "Our employers are likely to place restrictions on the number of staff we can hire and even reduce our salaries due to the impact on the bottom line," Dr. Cohen said. Physicians may not be immune from the economic impact, he predicted. "The only businesses that may benefit are telemedicine companies, but even this market will shrink from competition by academic centers, which have expanded and escalated their telehealth services and will now be able to fill this void," Dr. Cohen explained. And physicians who are already employed by telehealth entities may see their salaries decline due to the flood of new entrants to this business model, he added.
(Reuters) — Chinese e-commerce company Alibaba would launch a product mimicking Amazon.com Inc’s “Echo” next week, according to a source familiar with the matter. “Amazon Echo”, launched in 2014, is a speaker which one can leave on all day and give voice commands to, similar to Siri on an Apple Inc iPhone. Alibaba’s new product would be made available only in China and speak only Mandarin, the source told Reuters. Apple and Google, a unit of Alphabet Inc, have unveiled products similar to Echo with the HomePod and Google Home. The Information, a technology website, was the first to report the news on Thursday. Alibaba did not immediately respond to a request for comment, outside business hours. (Reporting by Aishwarya Venugopal in Bengaluru and Peter Henderson in San Francisco; Editing by Shounak Dasgupta)
Effect of bogie fairings on the flow behaviours and aerodynamic performance of a high-speed train ABSTRACT This work presents a numerical investigation of the unsteady aerodynamic performance of a high-speed train with different configurations of bogie fairings using an improved delayed detached-eddy simulation at Re = 1.85×10⁶. The accuracy of the numerical method has been validated against experimental results. According to the analysis of the numerical results, the influences of bogie fairings on the aerodynamic drag, underbody flow features, and velocity and pressure distributions in the wake have been elucidated. The results show that full-size bogie fairings decrease the boundary layer thickness, lower the flow velocity underneath the train in the ground clearance and weaken the flow impact on the rear plates of the bogie cavities as compared to the existing bogie fairings. Furthermore, the full-size fairings increase the pressure in the near-wake region, thereby contributing to a lower pressure drag force. As a result, these changes bring about an approximate 12.5% drag reduction for a three-car train model.
Twin Valu Twin Valu is a defunct U.S. hypermarket and was one of the first hypermarkets in the Akron, Ohio, area. The original Twin Valu store was located on Howe Avenue in Cuyahoga Falls, Ohio. The $10 million, 180,000-square-foot (17,000 m²) store was open 24 hours a day, 7 days a week. It boasted 52 checkout lanes, a full bakery, deli, meat department, smokehouse, food court, floral department, jewelry department, electronics department, photo department, vision center, and pharmacy in addition to groceries, clothing, housewares, and general merchandise. A Star Bank branch location was added later. The hypermarket opened to mass crowds and union pickets. Members of the UFCW Local 880 picketed the nonunion store. The Cuyahoga Falls store opened on February 26, 1989, and closed on March 1, 1995. The building was sold and converted into a Target and Best Buy location. A second location was opened in Euclid, Ohio, in 1990 and closed in 1995. The building became a Super Kmart around 1996, then closed in 2003. The building was briefly reopened as the Great Lakes Expo Center in 2009, which stole the Home and Garden Show from the IX Center in 2010. The concept failed and closed in 2011. Grocery-only locations were in Maple Heights, Ohio, and Belden Village (Jackson Township, Ohio); the latter, previously a Zayre location (now home to Burlington Coat Factory), closed on January 16, 1996. Twin Valu was a division of Supervalu Inc and Shopko Stores Inc. Television commercials for Twin Valu featured Phil Hartman as the spokesman.
/* Copyright © 2019 InfraQL <EMAIL> Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cmd import ( "bytes" "io" "os" "github.com/spf13/cobra" "infraql/internal/iql/driver" "infraql/internal/iql/entryutil" "infraql/internal/iql/handler" "infraql/internal/iql/iqlerror" "infraql/internal/iql/writer" ) // execCmd represents the exec command var execCmd = &cobra.Command{ Use: "exec", Short: "Run one or more InfraQL commands or queries", Long: `Run one or more InfraQL commands or queries from the command line or from an input file. For example: infraql exec \ "select id, name from compute.instances where project = 'infraql-demo' and zone = 'australia-southeast1-a'" \ --keyfilepath /mnt/c/tmp/infraql-demo.json --output csv infraql exec -i iqlscripts/listinstances.iql --keyfilepath /mnt/c/tmp/infraql-demo.json --output json infraql exec -i iqlscripts/create-disk.iql --keyfilepath /mnt/c/tmp/infraql-demo.json `, Run: func(cmd *cobra.Command, args []string) { var err error var rdr io.Reader switch runtimeCtx.InfilePath { case "stdin": if len(args) == 0 || args[0] == "" { cmd.Help() os.Exit(0) } rdr = bytes.NewReader([]byte(args[0])) default: rdr, err = os.Open(runtimeCtx.InfilePath) iqlerror.PrintErrorAndExitOneIfError(err) } sqlEngine, err := entryutil.BuildSQLEngine(runtimeCtx) iqlerror.PrintErrorAndExitOneIfError(err) handlerCtx, err := entryutil.BuildHandlerContext(runtimeCtx, rdr, queryCache, sqlEngine) iqlerror.PrintErrorAndExitOneIfError(err) iqlerror.PrintErrorAndExitOneIfNil(&handlerCtx, "Handler context error") RunCommand(&handlerCtx, nil, nil) }, } func getOutputFile(filename string) (*os.File, error) { switch filename { case "stdout": return os.Stdout, nil case "stderr": return os.Stderr, nil default: return os.Create(filename) } } func RunCommand(handlerCtx *handler.HandlerContext, outfile io.Writer, outErrFile io.Writer) { if outfile == nil { outfile, _ = getOutputFile(handlerCtx.RuntimeContext.OutfilePath) } if outErrFile == nil { outErrFile, _ = getOutputFile(writer.StdErrStr) } handlerCtx.Outfile = outfile handlerCtx.OutErrFile = outErrFile if handlerCtx.RuntimeContext.DryRunFlag { driver.ProcessDryRun(handlerCtx) return } driver.ProcessQuery(handlerCtx) }
Shane Whitfield can do just about anything on the basketball court. He can score, drive, shoot, pass, defend and rebound. The Pamlico native, who finished high school at Word of God in Raleigh, is showing he can do all of that at the college level. Whitfield, a freshman at Lehigh University, played his best college game on Sunday, netting nine points and grabbing six rebounds in the Mountain Hawks' 76-64 win over Sacred Heart. "My role is to come in the game, provide energy and utilize my versatility on the court by defending, scoring, passing and helping my team get wins," Whitfield said. The 6-foot-7 forward made three of his four field goal attempts on Sunday, but was 3-for-6 from the free throw line. With 4:31 remaining in the first half, Whitfield stole the ball and pushed it into a layup while getting fouled. In the second half, he ripped down a rebound and finished with a layup on the other end. Whitfield has played in all eight games, starting one, and averaging 3.5 points and 2.5 boards per game. He's played at least 21 minutes in four games. "My expectations are to do anything I can to help my team get a victory," he said. "I'm just going to go out there and play hard every second of every game." Lehigh's non-conference schedule has taken Whitfield to big-college opponents Minnesota, Houston and Pittsburgh. He racked up seven points against Houston and six against Pitt. Through eight games, the Mountain Hawks are 3-5 with wins over Rider, Texas Southern and Sacred Heart. Lehigh is just two years removed from its NCAA Tournament season, a team that stunned Duke in the first round. This year's squad has just two seniors mixed in with 10 freshmen and sophomores. Despite the youth, Whitfield believes the Mountain Hawks, playing their home games in Bethlehem, Pa., have talent. "We have a great group of young guys," Whitfield said. "We are good enough to win the Patriot League and the Patriot League Tournament, and hopefully get into the NCAA Tournament."
<filename>src/graphics/backend_gfx/texture.rs use image; use gfx::format::{ChannelTyped, SurfaceTyped}; use gfx_core::factory::Factory; use gfx_device_gl as gl; use super::format::{Channel, Surface}; use super::types::{RawTexture, ShaderResource, TargetView}; use crate::graphics::vector::Vector; use crate::graphics::Transformation; #[derive(Clone, Debug)] pub struct Texture { texture: RawTexture, view: ShaderResource, width: u16, height: u16, layers: u16, } impl Texture { pub(super) fn new( factory: &mut gl::Factory, image: &image::DynamicImage, ) -> Texture { let rgba = image.to_rgba(); let width = rgba.width() as u16; let height = rgba.height() as u16; let (texture, view) = create_texture_array( factory, width, height, Some(&[&rgba]), gfx::memory::Bind::SHADER_RESOURCE | gfx::memory::Bind::TRANSFER_SRC, ); Texture { texture, view, width, height, layers: 1, } } pub(super) fn new_array( factory: &mut gl::Factory, layers: &[image::DynamicImage], ) -> Texture { let first_layer = &layers[0].to_rgba(); let width = first_layer.width() as u16; let height = first_layer.height() as u16; let rgba: Vec<Vec<u8>> = layers.iter().map(|i| i.to_rgba().into_raw()).collect(); let raw_layers: Vec<&[u8]> = rgba.iter().map(|i| &i[..]).collect(); let (texture, view) = create_texture_array( factory, width, height, Some(&raw_layers[..]), gfx::memory::Bind::SHADER_RESOURCE | gfx::memory::Bind::TRANSFER_SRC, ); Texture { texture, view, width, height, layers: layers.len() as u16, } } pub(super) fn handle(&self) -> &RawTexture { &self.texture } pub(super) fn view(&self) -> &ShaderResource { &self.view } pub fn width(&self) -> u16 { self.width } pub fn height(&self) -> u16 { self.height } } #[derive(Clone)] pub struct Drawable { texture: Texture, target: TargetView, } impl Drawable { pub fn new(factory: &mut gl::Factory, width: u16, height: u16) -> Drawable { let (texture, view) = create_texture_array( factory, width, height, None, gfx::memory::Bind::SHADER_RESOURCE | gfx::memory::Bind::RENDER_TARGET, ); let texture = Texture { texture, view, width, height, layers: 1, }; let render_desc = gfx::texture::RenderDesc { channel: Channel::get_channel_type(), level: 0, layer: Some(0), }; let target = factory .view_texture_as_render_target_raw(texture.handle(), render_desc) .expect("View texture as render target"); Drawable { texture, target } } pub fn texture(&self) -> &Texture { &self.texture } pub fn target(&self) -> &TargetView { &self.target } pub fn render_transformation() -> Transformation { Transformation::nonuniform_scale(Vector::new(1.0, -1.0)) } } // Helpers fn create_texture_array( factory: &mut gl::Factory, width: u16, height: u16, layers: Option<&[&[u8]]>, bind: gfx::memory::Bind, ) -> (RawTexture, ShaderResource) { let kind = gfx::texture::Kind::D2Array( width, height, layers.map(|l| l.len()).unwrap_or(1) as u16, gfx::texture::AaMode::Single, ); let info = gfx::texture::Info { kind: kind, levels: 1, format: Surface::get_surface_type(), bind: bind, usage: gfx::memory::Usage::Data, }; let channel_type = Channel::get_channel_type(); let texture = factory .create_texture_raw( info, Some(channel_type), layers.map(|l| (l, gfx::texture::Mipmap::Provided)), ) .expect("Texture array creation"); let descriptor = gfx::texture::ResourceDesc { channel: channel_type, layer: None, min: 0, max: texture.get_info().levels - 1, swizzle: gfx::format::Swizzle::new(), }; let view = factory .view_texture_as_shader_resource_raw(&texture, descriptor) .expect("View texture as a shader resource"); let typed_view: 
gfx::handle::ShaderResourceView< _, <gfx::format::Srgba8 as gfx::format::Formatted>::View, > = gfx::memory::Typed::new(view); (texture, typed_view) }
If you've been craving a fix of the CBC original podcast, On Drugs, the wait is nearly over. Season 2 launches February 13. This is a big year for drugs in Canada. The opioid overdose crisis is still growing, according to the latest numbers, and later this year, Canada will become the first major industrialized nation to legalize marijuana for recreational use. Season 2 of On Drugs will take deep dives into both of those subjects, but there's a whole lot more in store as well. Host Geoff Turner will explore the fascinating history of the world's most widely used psychoactive drug: caffeine. We'll learn about the sometimes harrowing world of clinical drug trials. We'll take you behind the bars of a Canadian prison to learn how drugs, crime and punishment intersect. Season 2 leaps straight into the deep end in the first episode — it's an examination of ways people have used drugs, especially psychedelics like ayahuasca and LSD, in the pursuit of spiritual enlightenment. You don't want to miss a moment, so make sure you've subscribed through Apple Music, or wherever you get your favourite podcasts.
Calculation of the Time-Domain Transmission Coefficient of Layered Lossy Media In this paper, a new method for calculating the time-domain wave transmitted through layered lossy media is presented. The time-domain transmission coefficients of the layered lossy medium are obtained by a time-domain recurrence formula, and the time-domain interfacial transmission coefficients are obtained by the time-domain propagating-mode method. Numerical results demonstrate the accuracy and validity of the proposed method.
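The quantity being computed can be illustrated with a generic frequency-domain construction (not the paper's time-domain recurrence): chain the 2×2 transmission-line matrix of each lossy layer, form the overall transmission coefficient between identical free-space half-spaces, and inverse-FFT it against an incident pulse to obtain the time-domain transmitted wave. The layer permittivities and thicknesses below are invented.

import numpy as np

eps0 = 8.854e-12
mu0 = 4e-7 * np.pi
eta0 = np.sqrt(mu0 / eps0)

# (complex relative permittivity, thickness in metres) -- invented values
layers = [(2.5 - 0.10j, 5e-3), (4.0 - 0.30j, 3e-3)]

n, dt = 4096, 1e-12
w = 2 * np.pi * np.fft.rfftfreq(n, dt)

# Chain the ABCD matrix of each layer (normal incidence).
A = np.ones(w.size, dtype=complex)
B = np.zeros(w.size, dtype=complex)
C = np.zeros(w.size, dtype=complex)
D = np.ones(w.size, dtype=complex)
for eps_r, d in layers:
    gamma = 1j * w * np.sqrt(mu0 * eps0 * eps_r)   # lossy propagation constant
    eta = eta0 / np.sqrt(eps_r)                    # layer wave impedance
    ca, sb = np.cosh(gamma * d), np.sinh(gamma * d)
    A, B, C, D = (A * ca + B * sb / eta, A * eta * sb + B * ca,
                  C * ca + D * sb / eta, C * eta * sb + D * ca)

# Transmission coefficient between identical free-space half-spaces.
t_w = 2.0 / (A + B / eta0 + C * eta0 + D)

# Incident Gaussian pulse -> time-domain transmitted waveform via inverse FFT.
tt = np.arange(n) * dt
pulse = np.exp(-(((tt - 0.5e-9) / 0.1e-9) ** 2))
transmitted = np.fft.irfft(np.fft.rfft(pulse) * t_w, n)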
/**
 * Simple XML to JSON converter.
 *
 * <p>
 * This uses the default <a href="http://www.json.org/java/index.html">json.org implementation</a> to convert between JSON and XML.
 * When converting <strong>to XML</strong>, it will add a root element called {@code json} (this is configurable via
 * {@link #setJsonTag(String)}) as the required XML root element. When converting <strong>from XML</strong> then it expects the same
 * tag as the root element of the XML.
 * </p>
 * <p>
 * If your input is a relatively simple JSON object, then this is the transformation driver to use; {@link DefaultJsonTransformationDriver}
 * adds a layer of XML complexity that you may not need. The key differentiator is that the case where the output <strong>should be</strong>
 * a JSON array with a single element is not supported by this driver implementation. You can still use it, but you will
 * have to execute a {@link JsonTransformService} afterwards to change the cardinality.
 * </p>
 * <p>
 * If the input is a JSON array, then {@code json-array} (not configurable) will be added as the root element, that wraps the JSON
 * array.
 * </p>
 *
 * @config simple-transformation-driver
 *
 * @author gdries
 */
@XStreamAlias("simple-transformation-driver")
@ComponentProfile(summary = "Simple JSON/XML Transformation driver", tag = "json,xml,transformation")
public class SimpleJsonTransformationDriver implements TransformationDriver {

    private static final String ELEMENT_NAME_ARRAY = "array-item";
    private static final String ELEMENT_NAME_JSON = "json";

    private transient Logger log = LoggerFactory.getLogger(this.getClass());

    private String jsonTag = ELEMENT_NAME_JSON;

    /**
     * {@inheritDoc}.
     */
    @Override
    public String transform(final String input, final TransformationDirection direction) throws ServiceException {
        switch (direction) {
            case JSON_TO_XML:
                return jsonToXML(input);

            case XML_TO_JSON:
                return xmlToJSON(input);

            default:
                throw new IllegalArgumentException("direction");
        }
    }

    private String xmlToJSON(final String input) throws ServiceException {
        try {
            final JSONObject xmlJsonObject = XML.toJSONObject(input);
            final JSONObject tagJsonObject = xmlJsonObject.getJSONObject(jsonTag);
            return tagJsonObject.toString();
        } catch (final JSONException e) {
            throw new ServiceException("Exception while converting XML to JSON", e);
        }
    }

    private String jsonToXML(final String input) throws ServiceException {
        try {
            final JSONObject jsonObject = toJSONObject(input);
            return XML.toString(jsonObject, jsonTag);
        } catch (final JSONException e) {
            throw new ServiceException("Exception while converting JSON to XML", e);
        }
    }

    private JSONObject toJSONObject(final String input) {
        JSONObject result = null;
        try {
            result = new JSONObject(new JSONTokener(input));
        } catch (final JSONException e) {
            log.debug("Exception [{}], attempting re-process as JSON Array", e.getMessage());
            result = new JSONObject();
            result.put(ELEMENT_NAME_ARRAY, new JSONArray(new JSONTokener(input)));
        }
        return result;
    }

    /**
     * Get the JSON tag.
     *
     * @return The JSON tag.
     */
    public String getJsonTag() {
        return jsonTag;
    }

    /**
     * Set the JSON tag.
     *
     * @param jsonTag The JSON tag.
     */
    public void setJsonTag(final String jsonTag) {
        this.jsonTag = jsonTag;
    }
}
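The same root-tag convention can be sketched in Python with the third-party xmltodict package, purely to illustrate the driver's behaviour rather than its implementation. One caveat shown below: xmltodict parses scalar values back as strings, so types are not round-tripped.

import json
import xmltodict

JSON_TAG = "json"  # mirrors the driver's default root element

def json_to_xml(text, json_tag=JSON_TAG):
    # Wrap the JSON object under the root tag before rendering to XML.
    return xmltodict.unparse({json_tag: json.loads(text)})

def xml_to_json(text, json_tag=JSON_TAG):
    # Expect the same root tag and unwrap it on the way back.
    return json.dumps(xmltodict.parse(text)[json_tag])

xml = json_to_xml('{"name": "simple", "id": 7}')
print(xml)               # <?xml ...?><json><name>simple</name><id>7</id></json>
print(xml_to_json(xml))  # {"name": "simple", "id": "7"} -- note the string "7"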
Susceptibility and Inverted Hysteresis Loop of Prussian Blue Analogs with Orthorhombic Structure The magnetic susceptibility of ternary metal Prussian blue analogues with an orthorhombic structure is studied using the Ising model. Within the framework of effective-field theory with correlations, the roles of the mole fraction y, uniaxial magnetic anisotropy, and transverse and longitudinal magnetic fields are discussed in detail. The temperature dependence of the magnetic susceptibility is also investigated. The interesting phenomenon of an inverted magnetic hysteresis loop has been found. The results can help in understanding experimental work on molecule-based ferri-ferrimagnets.
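For readers who want to see how a hysteresis loop is traced numerically, here is a minimal single-sublattice mean-field Ising sketch, a much cruder model than the correlated effective-field theory used above, and one that produces an ordinary (not inverted) loop; the sublattice competition in a ferrimagnet is what can invert it. The field is swept up and then down, and m = tanh((Jzm + h)/T) is solved by fixed-point iteration seeded with the previous solution so each branch follows its metastable state.

import numpy as np

Jz, T = 1.0, 0.5   # coupling * coordination number, temperature (T < Tc = Jz)
fields = np.concatenate([np.linspace(-2, 2, 200), np.linspace(2, -2, 200)])

m, loop = -1.0, []
for h in fields:
    for _ in range(500):                 # fixed-point iteration from previous m
        m = np.tanh((Jz * m + h) / T)
    loop.append((h, m))

# Near h = 0, the up- and down-sweep branches carry opposite m: hysteresis.
print(loop[100], loop[300])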
ReportsWeb.com published “Radiography Market” from its database. The report covers the market landscape and its growth prospects over the coming years. The report also includes a discussion of the key vendors operating in this market. (EMAILWIRE.COM, July 11, 2017 ) Radiography is a technique that uses gamma rays and X-rays to produce images of objects. These images can be used to identify certain deformities or defects that are not visible to the bare eye. The object to be tested is subjected to radiations using a generator, while a film or digital detector is used to take the image of the object. The darker areas on the image indicate high levels of radiation, whereas lighter areas indicate low levels. Publisher's analysts forecast the global radiography market to grow at a CAGR of 5.61% during the period 2017-2021. For more information about this report: http://www.reportsweb.com/global-radiography-market-2017-2021 . The report covers the present scenario and the growth prospects of the global radiography market for 2017-2021. To calculate the market size, the report considers the revenue generated from the sales of radiography equipment. Publisher's report, Global Radiography Market 2017-2021, has been prepared based on an in-depth market analysis with inputs from industry experts. The report covers the market landscape and its growth prospects over the coming years. The report also includes a discussion of the key vendors operating in this market. Request Sample Copy at http://www.reportsweb.com/inquiry&RW0001684435/sample . Inquire for Report at http://www.reportsweb.com/inquiry&RW0001684435/buying .
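As a quick sanity check on the quoted figure, compounding 5.61% per year over the four-year 2017-2021 span implies roughly 24% total growth; the base value below is arbitrary.

base = 100.0                   # arbitrary 2017 market size
cagr = 0.0561
print(base * (1 + cagr) ** 4)  # ≈ 124.4 by 2021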
package infstudio.realnetwork.tileentity; import infstudio.realnetwork.core.NetWork; import infstudio.realnetwork.item.ItemAppliance; import net.minecraft.init.Items; import net.minecraft.item.Item; import net.minecraft.item.ItemStack; import net.minecraft.nbt.NBTTagCompound; import net.minecraft.tileentity.TileEntityFurnace; import net.minecraft.util.EnumFacing; import net.minecraftforge.common.capabilities.Capability; import net.minecraftforge.fluids.FluidRegistry; import net.minecraftforge.fluids.FluidStack; import net.minecraftforge.fluids.FluidTank; import net.minecraftforge.fluids.capability.CapabilityFluidHandler; import net.minecraftforge.items.CapabilityItemHandler; import net.minecraftforge.items.ItemStackHandler; import javax.annotation.Nullable; public class TileEntityGeneratorFurnace extends TileEntityGenerator { private ItemStackHandler invFuel = new ItemStackHandler() { @Override protected void onContentsChanged(int slot) { super.onContentsChanged(slot); TileEntityGeneratorFurnace.this.markDirty(); } }; private ItemStackHandler invApp = new ItemStackHandler() { @Override protected void onContentsChanged(int slot) { super.onContentsChanged(slot); TileEntityGeneratorFurnace.this.markDirty(); } }; private ItemStackHandler invFluid = new ItemStackHandler() { @Override protected void onContentsChanged(int slot) { super.onContentsChanged(slot); TileEntityGeneratorFurnace.this.markDirty(); } }; private double lastP = 0.0D; private int tick = 0; public FluidTank tank = new FluidTank(16000) { @Override public boolean canFillFluidType(FluidStack fluid) { return fluid.getFluid().equals(FluidRegistry.WATER) && super.canFillFluidType(fluid); } }; private int burnTime = 0, curItemBurnTime = 0; public TileEntityGeneratorFurnace() { super(); } public TileEntityGeneratorFurnace(double R, double Em, double phi, double capacity, String name) { super(R, Em, phi, capacity, name); } @Override public boolean hasCapability(Capability<?> capability, @Nullable EnumFacing facing) { if (CapabilityItemHandler.ITEM_HANDLER_CAPABILITY.equals(capability)) { return true; } if (CapabilityFluidHandler.FLUID_HANDLER_CAPABILITY.equals(capability)) { return true; } return super.hasCapability(capability, facing); } @Nullable @Override public <T> T getCapability(Capability<T> capability, @Nullable EnumFacing facing) { if (CapabilityItemHandler.ITEM_HANDLER_CAPABILITY.equals(capability)) { if (facing == EnumFacing.UP) return CapabilityItemHandler.ITEM_HANDLER_CAPABILITY.cast(invFuel); else if (facing == EnumFacing.DOWN) return CapabilityItemHandler.ITEM_HANDLER_CAPABILITY.cast(invApp); else return CapabilityItemHandler.ITEM_HANDLER_CAPABILITY.cast(invFluid); } if (CapabilityFluidHandler.FLUID_HANDLER_CAPABILITY.equals(capability)) { return CapabilityFluidHandler.FLUID_HANDLER_CAPABILITY.cast(tank); } return super.getCapability(capability, facing); } @Override public void readFromNBT(NBTTagCompound compound) { super.readFromNBT(compound); this.invFuel.deserializeNBT(compound.getCompoundTag("Fuel")); this.invApp.deserializeNBT(compound.getCompoundTag("App")); this.invFluid.deserializeNBT(compound.getCompoundTag("Fluid")); this.tank.readFromNBT(compound); this.burnTime = compound.getInteger("BurnTime"); this.curItemBurnTime = compound.getInteger("ItemBurnTime"); } @Override public NBTTagCompound writeToNBT(NBTTagCompound compound) { compound.setTag("Fuel", this.invFuel.serializeNBT()); compound.setTag("App", this.invApp.serializeNBT()); compound.setTag("Fluid", this.invFluid.serializeNBT()); compound = 
this.tank.writeToNBT(compound); compound.setInteger("BurnTime", this.burnTime); compound.setInteger("ItemBurnTime", this.curItemBurnTime); return super.writeToNBT(compound); } public int getBurnTime() { return this.burnTime; } public boolean isBurning() { return getBurnTime() > 0; } public int getCurItemBurnTime() { return this.curItemBurnTime; } public int getFluidAmount() { return this.tank.getFluidAmount(); } public int getFluidCapacity() { return this.tank.getCapacity(); } @Override public void update() { super.update(); boolean flag = false; if (this.isBurning()) { this.burnTime--; if (getFluidAmount() > 0) { if (damageTime > 0) damageTime--; if (this.tank.drain(2, false) != null) { this.tank.drain(2, true); this.incEnergy(220.0D); } } else { damageTime++; this.setDamage(damage+1e-4*Math.exp(1e-2*damageTime)); } flag = true; } if (!this.world.isRemote) { tick = (tick+1)%100; if (tick == 0) this.lastP = 0; if (this.getEnergy() >= lastP && Math.abs(this.getE().A.get(0)) < 1e-8){ this.setE(311.0D, 0.0D); new NetWork(this.world, this.pos); this.lastP = this.getP(); if (this.getEnergy() < this.lastP) { this.setE(0.0D, 0.0D); new NetWork(this.world, this.pos); } else flag = true; } if (this.getEnergy() < this.getP() && this.getE().A.get(0) > 0) { this.setE(0.0D, 0.0D); new NetWork(this.world, this.pos); flag = true; } if (this.isWorking()) { this.decEnergy(this.getP()); flag = true; } ItemStack stackApp = this.invApp.getStackInSlot(0); if (this.getEnergy() > 0.0D && !stackApp.isEmpty()) { ItemAppliance itemApp = (ItemAppliance)stackApp.getItem(); if (!itemApp.isFull(stackApp)) { double amount = Math.min(Math.min(itemApp.getCapacity(stackApp)-itemApp.getEnergy(stackApp), 220.0D), this.getEnergy()); itemApp.incEnergy(stackApp, amount); this.decEnergy(amount); } } if (this.isBurning()) { flag = true; } else { ItemStack stackFuel = this.invFuel.getStackInSlot(0); if (!stackFuel.isEmpty()) { if (TileEntityFurnace.isItemFuel(stackFuel)) { this.burnTime = TileEntityFurnace.getItemBurnTime(stackFuel); this.curItemBurnTime = this.burnTime; Item itemFuel = stackFuel.getItem(); stackFuel.shrink(1); flag = true; if (stackFuel.isEmpty()) { ItemStack itemFuel1 = itemFuel.getContainerItem(stackFuel); this.invFuel.setStackInSlot(0, itemFuel1); } } } } ItemStack stackFluid = this.invFluid.getStackInSlot(0); if (!stackFluid.isEmpty()) { if (stackFluid.getItem().equals(Items.WATER_BUCKET)) { if (this.tank.fill(new FluidStack(FluidRegistry.WATER, 1000), false) > 0) { this.tank.fill(new FluidStack(FluidRegistry.WATER, 1000), true); Item itemFluid = stackFluid.getItem(); stackFluid.shrink(1); flag = true; if (stackFluid.isEmpty()) { ItemStack itemFluid1 = itemFluid.getContainerItem(stackFluid); this.invFluid.setStackInSlot(0, itemFluid1); } } } } } if (flag) { this.markDirty(); } } }
#!/usr/bin/env python -W ignore::DeprecationWarning import os import ast import pathlib import pandas as pd import numpy as np import random import itertools from tqdm import tqdm from skimage import measure from scipy import stats def warn(*args, **kwargs): pass import warnings warnings.warn = warn import logging log = logging.getLogger('werkzeug') log.setLevel(logging.ERROR) logging.captureWarnings(True) import inputfuncs import ccprocess import libcc import segmfuncs import parcelfuncs import webbrowser from threading import Timer import dash import dash_table import dash_core_components as dcc import dash_html_components as html import dash_bootstrap_components as dbc import dash_daq as daq from dash.dependencies import Input, Output, State, ALL, MATCH from dash.exceptions import PreventUpdate from dash_extensions import Download from dash_extensions.snippets import send_data_frame import plotly.io as pio import plotly.figure_factory as ff import plotly.express as px import plotly.graph_objs as go from plotly.subplots import make_subplots class Error(Exception): pass theme = 'plotly' print(' ') # GENERAL DEFINITIONS ------------------------------------------------------------------------- dict_segmentation_functions = {'ROQS': segmfuncs.segm_roqs, 'Watershed': segmfuncs.segm_watershed, 'Imported Masks': segmfuncs.segm_mask} dict_parcellation_functions = {'Witelson': parcelfuncs.parc_witelson, 'Hofer': parcelfuncs.parc_hofer, 'Chao': parcelfuncs.parc_chao, 'Cover': parcelfuncs.parc_cover, 'Freesurfer': parcelfuncs.parc_freesurfer} dict_3d_segmentation_functions = {'Watershed3d': segmfuncs.segm_watershed_3d} scalar_list = ['FA', 'MD', 'RD', 'AD'] colors_list = px.colors.qualitative.Plotly dict_parcellation_methods = {'Witelson': 'witelson', 'Hofer & Frahm': 'hofer', 'Chao et al':'chao', 'Cover et al': 'cover', 'Freesurfer':'freesurfer'} dict_segmentation_methods = {'ROQS': 'roqs', 'Watershed': 'watershed'} dict_3d_segmentation_methods = {'Watershed3d':'watershed3d'} # DATA IMPORTING ----------------------------------------------------------------------------- # Arg parser opts = inputfuncs.get_parser().parse_args() if opts.staple is True: dict_segmentation_functions['STAPLE'] = segmfuncs.segm_staple dict_segmentation_methods['STAPLE'] = 'staple' df_categories = pd.DataFrame() df_numerical = pd.DataFrame() # Read external data if opts.ext_data is not None: external_data_path = opts.ext_data external_data = pd.read_excel(external_data_path, dtype={'Subjects':'object'}) external_data = external_data.set_index('Subjects') # Clear NaNs on index external_data = external_data[external_data.index.notnull()] # Remove unnamed columns external_data = external_data.loc[:,~external_data.columns.str.match("Unnamed")] df_categories = external_data.select_dtypes(include=['object']) df_numerical = external_data.select_dtypes(include=['number']) col_categories = ['Method'] + list(df_categories.columns) # Get indicated directories path_dict = {} if opts.folders is not None: for directory in opts.folders: if directory is not None: if inputfuncs.check_directory(directory, opts.basename): path_dict[os.path.basename(directory)] = os.path.join(directory, '') # Import the subjects inside the parents folders group_dict = {} if opts.parents is not None: for parent in opts.parents: if parent is not None: directory_dict, dict_folders = inputfuncs.import_parent(parent, opts.basename) path_dict.update(directory_dict) # Create dict with subjects as keys and group (parents names) as values 
group_dict.update(dict_folders) df_group = pd.DataFrame.from_dict(group_dict, orient='index', columns=["Folder"]) df_categories = pd.concat([df_categories, df_group], axis = 1) # Warning for no imported subjects if len(path_dict.values()) == 0: print('Error: No subjects were imported') print('Terminating program.\n') raise SystemExit(0) # Check if we are importing masks if opts.maskname is not None: mask_basename = opts.maskname dict_segmentation_methods['Imported Masks'] ='imported_mask' else: mask_basename = None # DATA PROCESSING ----------------------------------------------------------------------------- # Create dataframe for each segmentation method scalar_statistics_names = ['FA','FA StdDev','MD','MD StdDev','RD','RD StdDev','AD','AD StdDev'] scalar_midline_names = list(range(0,200)) loaded_subjects = [] dict_segmentation_masks = {} dict_scalar_maps = {} dict_scalar_statistics = {} dict_scalar_midlines = {} dict_error_prob = {} dict_parcellations_masks = {} dict_parcellations_statistics = {} dict_thickness = {} dict_removed_subjects = {} dict_scalar_outliers = {} # Segment and get info for subject_path in tqdm(path_dict.values()): for segmentation_method in dict_segmentation_methods.keys(): if segmentation_method not in dict_scalar_statistics.keys(): dict_segmentation_masks[segmentation_method] = {} dict_scalar_maps[segmentation_method] = {} dict_scalar_statistics[segmentation_method] = {} dict_scalar_midlines[segmentation_method] = {} dict_error_prob[segmentation_method] = {} dict_parcellations_masks[segmentation_method] = {} dict_thickness[segmentation_method] = {} dict_removed_subjects[segmentation_method] = [] dict_scalar_outliers[segmentation_method] = [] # Get data path info folderpath = subject_path + 'inCCsight/' filename = 'segm_' + dict_segmentation_methods[segmentation_method] + '_data.npy' subject_name = os.path.basename(os.path.dirname(subject_path)) # Process/Load data if segmentation_method == 'Imported Masks': if ccprocess.check_mask(subject_path, mask_basename) is False: continue data_tuple = ccprocess.segment(subject_path, segmentation_method, dict_segmentation_functions, dict_parcellation_functions, opts.basename, mask_basename) if data_tuple is None: continue #except: # print('> Warning: Segmentation failed for subject {} with method {}'.format(subject_name, segmentation_method)) # continue segmentation_mask, scalar_maps, scalar_statistics, scalar_midlines, error_prob, parcellations_masks = data_tuple # Get thickness try: thick, _, _ = libcc.thickness(segmentation_mask, 200) except: thick = np.empty(200) # Assemble dictionaries dict_segmentation_masks[segmentation_method][subject_name] = segmentation_mask dict_scalar_maps[segmentation_method][subject_name] = scalar_maps dict_scalar_statistics[segmentation_method][subject_name] = scalar_statistics dict_scalar_midlines[segmentation_method][subject_name] = scalar_midlines dict_error_prob[segmentation_method][subject_name] = error_prob dict_parcellations_masks[segmentation_method][subject_name] = parcellations_masks dict_thickness[segmentation_method][subject_name] = thick # Save array with subject keys loaded_subjects.append(subject_name) loaded_subjects = list(set(loaded_subjects)) loaded_subjects.sort() for segmentation_method in dict_segmentation_methods.keys(): # Convert to pandas dataframe dict_scalar_statistics[segmentation_method] = pd.DataFrame.from_dict(dict_scalar_statistics[segmentation_method], orient='index', columns=scalar_statistics_names) dict_scalar_midlines[segmentation_method] = 
pd.DataFrame.from_dict(dict_scalar_midlines[segmentation_method], orient='index') dict_thickness[segmentation_method] = pd.DataFrame.from_dict(dict_thickness[segmentation_method], orient='index') dict_error_prob[segmentation_method] = pd.DataFrame.from_dict(dict_error_prob[segmentation_method], columns=['error_prob'], orient='index') dict_parcellations_statistics[segmentation_method] = inputfuncs.parcellations_dfs_dicts(dict_scalar_maps[segmentation_method], dict_parcellations_masks[segmentation_method], segmentation_method) # Get FA and/or other scalars ouliers for scalar in ['FA']: df = dict_scalar_statistics[segmentation_method][scalar] outliers = df[~df.between(df.quantile(.1), df.quantile(.9))].index dict_scalar_outliers[segmentation_method] += list(outliers) # VISUALIZATION ------------------------------------------------------------------------------- app = dash.Dash(__name__, meta_tags=[{"name": "viewport", "content": "width=device-width, initial-scale=.8, maximum-scale=.8"}], external_stylesheets = [dbc.themes.BOOTSTRAP], prevent_initial_callbacks=True) server = app.server app.config["suppress_callback_exceptions"] = True app.title = 'inCCsight' # ------------------------------- BUILD FUNCS ----------------------------------------------- def build_banner(): return html.Div( id="banner", className="twelve columns", children=[ html.Div( className='twelve columns', style=dict(display='flex', justifyContent='flex-start'), children=[ html.Img(src=app.get_asset_url("unicampw.png"), style=dict(height='9rem', marginBottom='2rem', padding='1rem')), html.Img(src=app.get_asset_url("miclab.png"), style=dict(height='9rem', marginBottom='2rem', padding='1rem')), ]), html.Div( className='twelve columns', style=dict(display='flex', justifyContent='center'), children=[ html.Img(src=app.get_asset_url("inccsight.png"), style=dict(height='25rem')), ]), ], ) def build_graph_title(title): return html.P(className="graph-title", children=title) # DataViz ------------------------------------------------------------------------------------ def build_group_segm_boxplot(mode='Method', segmentation_method='ROQS', extra_dims=list(df_numerical.columns)): std_colors = pio.templates[theme]['layout']['colorway'] if mode == 'Method': scalar_names = ['FA', 'MD', 'RD', 'AD'] + extra_dims subplots = make_subplots(rows=1, cols=len(scalar_names), subplot_titles=scalar_names) for i, scalar in enumerate(scalar_names): for j, segmentation_method in enumerate(dict_segmentation_methods.keys()): if len(extra_dims) == 0: df = pd.DataFrame() else: df = df_numerical[extra_dims] df = pd.concat([df, dict_scalar_statistics[segmentation_method]], axis=1) df = df.drop(dict_removed_subjects[segmentation_method]) subplots.add_trace(go.Box(y=df[scalar], name=segmentation_method, legendgroup=segmentation_method, hovertext=df.index, marker=dict(color=std_colors[j])), row=1, col=i+1) if i == 0: subplots.data[-1].update(name=segmentation_method, legendgroup=segmentation_method) else: subplots.data[-1].update(showlegend=False) else: scalar_names = ['FA', 'MD', 'RD', 'AD'] + extra_dims subplots = make_subplots(rows=1, cols=len(scalar_names), subplot_titles=scalar_names) if len(extra_dims) == 0: df = pd.DataFrame() else: df = df_numerical[extra_dims] df = pd.concat([df, df_categories[mode], dict_scalar_statistics[segmentation_method]], axis=1) df = df.drop(dict_removed_subjects[segmentation_method]) df = df.dropna(axis=0) categories = set(df[mode]) for i, scalar in enumerate(scalar_names): for j, category in enumerate(categories): 
subplots.add_trace(go.Box(y=df[df[mode] == category][scalar], name=category, legendgroup=category, hovertext=df.index, marker=dict(color=std_colors[j])), row=1, col=i+1) if i == 0: subplots.data[-1].update(name=category, legendgroup=category) else: subplots.data[-1].update(showlegend=False) subplots.update_layout(height=400, paper_bgcolor='rgba(0,0,0,0)', legend_orientation="h") subplots.update_layout(font=dict(family="Open Sans, sans-serif", size=12), margin=dict(r=0, l=0, t=60)) return subplots def build_parcel_boxplot(scalar='FA', mode='Method', segmentation_method='ROQS', parcellation_method='Witelson'): std_colors = pio.templates[theme]['layout']['colorway'] list_regions = ['P1', 'P2', 'P3', 'P4', 'P5'] subplots = make_subplots(rows=1, cols=5, subplot_titles=list_regions) if mode == 'Method': for i, region in enumerate(list_regions): for j, segmentation_method in enumerate(dict_segmentation_methods.keys()): df = dict_parcellations_statistics[segmentation_method][parcellation_method][region][scalar] df = df.drop(dict_removed_subjects[segmentation_method]) df = df.dropna(axis=0) subplots.add_trace(go.Box(y=df, name=segmentation_method, legendgroup=segmentation_method, hovertext=df.index, marker=dict(color=std_colors[j])), row=1, col=i+1) if i == 0: subplots.data[-1].update(name=segmentation_method, legendgroup=segmentation_method) else: subplots.data[-1].update(showlegend=False) else: categories = list(set(df_categories[mode])) df = pd.DataFrame() for category in categories: df_aux = pd.DataFrame() for region in list_regions: df_aux = pd.concat([df_aux, dict_parcellations_statistics[segmentation_method][parcellation_method][region][scalar][df_categories[mode] == category]], axis=1) df_aux[mode] = category df = pd.concat([df, df_aux], axis=0) df = df.drop(dict_removed_subjects[segmentation_method]) df = df.dropna(axis=0) names = list_regions + [mode] df.columns = names categories = set(df[mode]) for i, region in enumerate(list_regions): for j, category in enumerate(categories): subplots.add_trace(go.Box(y=df[df[mode] == category][region], name=category, legendgroup=category, hovertext=df.index, marker=dict(color=std_colors[j])), row=1, col=i+1) if i == 0: subplots.data[-1].update(name=category, legendgroup=category) else: subplots.data[-1].update(showlegend=False) subplots.update_layout(height=400, paper_bgcolor='rgba(0,0,0,0)', legend_orientation="h") subplots.update_layout(font=dict(family="Open Sans, sans-serif", size=12), margin=dict(r=0, l=0, t=60)) return subplots def build_segm_scatterplot(mode='Method', segmentation_method = 'ROQS', scalar_x = 'FA', scalar_y = 'MD', trendline=None): df = pd.DataFrame() if mode == 'Method': for segmentation_method in dict_segmentation_methods.keys(): df_aux = dict_scalar_statistics[segmentation_method] df_aux['Method'] = segmentation_method df = pd.concat([df, df_aux], axis=0) else: df = pd.concat([df_categories[mode], dict_scalar_statistics[segmentation_method]], axis=1) df = df.join(df_numerical, how='outer') df = df.drop(dict_removed_subjects[segmentation_method]) df = df.dropna(axis=0) print(df) fig = px.scatter(df, x=scalar_x, y=scalar_y, color=mode, marginal_y="violin", marginal_x="histogram", hover_name=df.index, trendline=trendline) fig.update_layout(height=800, paper_bgcolor='rgba(0,0,0,0)') fig.update_layout(legend=dict(orientation="h", yanchor="bottom", y=1.02, xanchor='right', x=1)) fig.update_layout(font=dict(family="Open Sans, sans-serif", size=12)) return fig def build_segm_scattermatrix(mode='Method', segmentation_method = 'ROQS', 
def build_segm_scattermatrix(mode='Method', segmentation_method='ROQS', extra_dims=list(df_numerical.columns)):
    dimensions = ['FA', 'MD', 'RD', 'AD']
    df = pd.DataFrame()
    if mode == 'Method':
        for segmentation_method in dict_segmentation_methods.keys():
            df_aux = dict_scalar_statistics[segmentation_method]
            df_aux['Method'] = segmentation_method
            df = pd.concat([df, df_aux], axis=0)
    else:
        df = pd.concat([df_categories[mode], dict_scalar_statistics[segmentation_method]], axis=1)

    if len(extra_dims) > 0:
        dimensions = ['FA', 'MD', 'RD', 'AD'] + extra_dims
        df = pd.concat([df, df_numerical], axis=1)

    df = df.drop(dict_removed_subjects[segmentation_method])
    df = df.dropna(axis=0)
    fig = px.scatter_matrix(df, dimensions=dimensions, color=mode, hover_name=df.index)

    if mode == 'Method':
        n_cats = len(dict_segmentation_methods.keys())
    else:
        n_cats = len(set(df_categories[mode].dropna(axis=0)))

    fig.update_layout(legend=dict(orientation="h", yanchor="bottom", y=1.02, xanchor='right', x=1))
    fig.update_layout(height=500+250*n_cats, paper_bgcolor='rgba(0,0,0,0)')
    fig.update_layout(font=dict(family="Open Sans, sans-serif", size=12), margin=dict(r=0, l=0))
    return fig


def build_midline_plot(scalar='FA', mode='Method', segmentation_method='ROQS'):
    df = pd.DataFrame()
    if mode == 'Method':
        for segmentation_method in dict_segmentation_methods.keys():
            if scalar in ['FA', 'MD', 'AD', 'RD']:
                df_aux = pd.DataFrame.from_dict(dict(dict_scalar_midlines[segmentation_method][scalar]), orient='index', columns=scalar_midline_names)
            elif scalar == 'Thickness':
                df_aux = dict_thickness[segmentation_method]
            df_aux['Method'] = segmentation_method
            df = pd.concat([df, df_aux], axis=0)
        df = df.drop(dict_removed_subjects[segmentation_method])
        df = df.dropna(axis=0)
        df_grouped = df.groupby('Method').mean().transpose()
        df_melt = pd.melt(df_grouped.reset_index(), id_vars='index', value_vars=set(df[mode]))
    else:
        if scalar in ['FA', 'MD', 'AD', 'RD']:
            df_aux = pd.DataFrame.from_dict(dict(dict_scalar_midlines[segmentation_method][scalar]), orient='index', columns=scalar_midline_names)
        elif scalar == 'Thickness':
            df_aux = dict_thickness[segmentation_method]
        df = pd.concat([df_categories[mode], df_aux], axis=1)
        df = df.drop(dict_removed_subjects[segmentation_method])
        df = df.dropna(axis=0)
        df_grouped = df.groupby(mode).mean().transpose()
        df_melt = pd.melt(df_grouped.reset_index(), id_vars='index', value_vars=set(df[mode]))

    fig = px.line(df_melt, x='index', y='value', color=mode)
    fig.update_layout(legend=dict(orientation="h", yanchor="bottom", y=1.02, xanchor='right', x=1))
    fig.update_layout(height=400, paper_bgcolor='rgba(0,0,0,0)')
    fig.update_layout(font=dict(family="Open Sans, sans-serif", size=12))
    fig.update_layout(xaxis_title='Points along CC body', yaxis_title=scalar, legend_title=mode)
    return fig
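# Note: the `extra_dims` default above is evaluated once, at import time, as is
# usual for Python default arguments; pass extra_dims=[] explicitly to restrict
# the matrix to the four diffusion scalars, e.g.:
#   fig = build_segm_scattermatrix(mode='Method', extra_dims=[])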
def build_bubble_grouped(mode='Method', segmentation_method='ROQS', scalar='Thickness', size=True):

    def build_bubble_plot(scalar='FA', segmentation_method='Watershed', size=True, category_index=None):
        df_pts = pd.read_pickle('./assets/bubble_plot_xy.pkl')
        segm_contour = np.load('./assets/bubble_plot_contour.npy')
        if scalar == 'Thickness':
            df_aux = dict_thickness[segmentation_method][list(np.linspace(0, 195, 40))+[199]]
        else:
            df_aux = pd.DataFrame.from_dict(dict(dict_scalar_midlines[segmentation_method][scalar]), orient='index', columns=scalar_midline_names)[list(np.linspace(0, 195, 40))+[199]]
        df_aux = df_aux.drop(dict_removed_subjects[segmentation_method])
        if category_index is not None:
            df_aux = df_aux.loc[np.intersect1d(df_aux.index, category_index)]
        df_aux = df_aux.dropna(axis=0)
        df_pts[scalar] = df_aux.mean().reset_index()[0]

        if size:
            fig = px.scatter(df_pts, x="x", y="y", color=scalar, size=df_pts[scalar])
            fig.update_traces(marker=dict(sizeref=2. * max(df_pts[scalar]) / (45 ** 2)))
        else:
            fig = px.scatter(df_pts, x="x", y="y", color=scalar)
            fig.update_traces(marker=dict(size=25))

        fig.update_layout(paper_bgcolor='rgba(0,0,0,0)', legend_orientation="h")
        fig.update_layout(font=dict(family="Open Sans, sans-serif", size=12))
        fig.add_trace(go.Contour(z=segm_contour, contours=dict(start=0, end=70, size=70, coloring='none'), showscale=False, line_width=3, line=dict(color='rgba(0,0,0,.5)', dash='dot')))
        return fig

    if mode == 'Method':
        n_cats = len(dict_segmentation_methods.keys())
        fig = make_subplots(rows=n_cats, cols=1, vertical_spacing=0.1/n_cats)
        for i, segmentation_method in enumerate(dict_segmentation_methods):
            fig.add_trace(build_bubble_plot(scalar=scalar, segmentation_method=segmentation_method, size=size)['data'][0], row=i+1, col=1)
            fig.update_yaxes(title_text="{} for<br>{} method".format(scalar, segmentation_method), row=i+1, col=1)
    else:
        df = df_categories[mode]
        df = df.drop(dict_removed_subjects[segmentation_method])
        df = df.dropna(axis=0)
        n_cats = len(set(df))
        fig = make_subplots(rows=n_cats, cols=1, vertical_spacing=0.1/n_cats)
        for i, category in enumerate(set(df)):
            category_index = df_categories.loc[df_categories[mode] == category].index
            fig.add_trace(build_bubble_plot(scalar=scalar, segmentation_method=segmentation_method, size=size, category_index=category_index)['data'][0], row=i+1, col=1)
            fig.update_yaxes(title_text="{} for<br>{} category".format(scalar, category), row=i+1, col=1)

    fig.update_xaxes(title_text="Points along CC body", row=n_cats, col=1)
    fig.update_layout(height=250*n_cats, paper_bgcolor='rgba(0,0,0,0)')
    fig.update_layout(font=dict(family="Open Sans, sans-serif", size=12), margin=dict(r=0, l=0, t=60))
    return fig
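# The bubble plots sample the 200-point midline at 41 locations
# (np.linspace(0, 195, 40) plus point 199). A quick standalone render:
#   fig = build_bubble_grouped(mode='Method', scalar='Thickness', size=True)
#   fig.show()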
def build_bubble_grouped_pvalue(mode='Method', segmentation_method='ROQS', scalar='Thickness', threshold=0.05):

    def build_bubble_pvalue(pvalue, threshold=0.05, size=False, gray=False):
        df_pts = pd.read_pickle('./assets/bubble_plot_xy.pkl')
        segm_contour = np.load('./assets/bubble_plot_contour.npy')
        marker_color = 'rgba(100,100,100,0.5)'
        df_pts['p-value'] = pvalue
        if gray:
            fig = px.scatter(df_pts, x="x", y="y", hover_data=['p-value'])
            fig.update_traces(marker=dict(color=marker_color))
        else:
            fig = px.scatter(df_pts.loc[df_pts['p-value'] < threshold], x="x", y="y", color='p-value')
            fig.update_traces(marker=dict(size=25))
        fig.update_layout(paper_bgcolor='rgba(0,0,0,0)', legend_orientation="h")
        fig.update_layout(font=dict(family="Open Sans, sans-serif", size=12))
        return fig

    if mode == 'Method':
        categories = list(itertools.combinations(dict_segmentation_methods.keys(), 2))
        n_cats = len(categories)
        if n_cats == 0:
            return empty_figure_with_text('Not enough categories to calculate p-values.')
        fig = make_subplots(rows=n_cats, cols=1)
        for i, category in enumerate(categories):
            if scalar == 'Thickness':
                pvalue = stats.ttest_ind(dict_thickness[category[0]], dict_thickness[category[1]]).pvalue
            else:
                pvalue = stats.ttest_ind(pd.DataFrame.from_dict(dict(dict_scalar_midlines[category[0]][scalar]), orient='index', columns=scalar_midline_names), pd.DataFrame.from_dict(dict(dict_scalar_midlines[category[1]][scalar]), orient='index', columns=scalar_midline_names)).pvalue
            # sample 40 evenly spaced midline points (integer indices for np.take)
            pvalue = np.take(pvalue, np.linspace(0, 195, 40).astype(int))
            new_gray_fig = build_bubble_pvalue(pvalue, gray=True)
            new_fig = build_bubble_pvalue(pvalue, threshold)['data']
            if len(new_fig) > 0:
                fig.add_trace(new_gray_fig['data'][0], row=i+1, col=1)
                fig.add_trace(new_fig[0], row=i+1, col=1)
            else:
                fig.add_trace(new_gray_fig['data'][0], row=i+1, col=1)
            fig.update_yaxes(title_text="{} x {}".format(category[0], category[1]), row=i+1, col=1)
    else:
        df = df_categories[mode]
        df = df.dropna(axis=0)
        categories = list(itertools.combinations(set(df), 2))
        n_cats = len(categories)
        if n_cats == 0:
            return empty_figure_with_text('Not enough categories to calculate p-values.')
        fig = make_subplots(rows=n_cats, cols=1, x_title='Statistically significant differences (p < {})'.format(threshold))
        for i, category in enumerate(categories):
            if scalar == 'Thickness':
                pvalue = stats.ttest_ind(dict_thickness[segmentation_method].loc[df_categories[mode] == category[0]], dict_thickness[segmentation_method].loc[df_categories[mode] == category[1]]).pvalue
            else:
                pvalue = stats.ttest_ind(pd.DataFrame.from_dict(dict(dict_scalar_midlines[segmentation_method][scalar]), orient='index', columns=scalar_midline_names).loc[df_categories[mode] == category[0]], pd.DataFrame.from_dict(dict(dict_scalar_midlines[segmentation_method][scalar]), orient='index', columns=scalar_midline_names).loc[df_categories[mode] == category[1]]).pvalue
            # sample 40 evenly spaced midline points (integer indices for np.take)
            pvalue = np.take(pvalue, np.linspace(0, 195, 40).astype(int))
            new_gray_fig = build_bubble_pvalue(pvalue, gray=True)
            new_fig = build_bubble_pvalue(pvalue, threshold)['data']
            if len(new_fig) > 0:
                fig.add_trace(new_gray_fig['data'][0], row=i+1, col=1)
                fig.add_trace(new_fig[0], row=i+1, col=1)
            else:
                fig.add_trace(new_gray_fig['data'][0], row=i+1, col=1)
            fig.update_yaxes(title_text="{} x {}".format(category[0], category[1]), row=i+1, col=1)

    fig.update_layout(height=400*n_cats)
    fig.update_xaxes(title_text="Points along CC body", row=n_cats, col=1)
    fig.update_layout(paper_bgcolor='rgba(0,0,0,0)', legend_orientation="h")
    fig.update_layout(font=dict(family="Open Sans, sans-serif", size=12))
    return fig


# Dropdowns --------------------------------------------------------------------------------

def build_midlineplot_dropdown():
    options = [{'label': scalar, 'value': scalar} for scalar in scalar_list+['Thickness']]
    layout = html.Div([
        html.Div([
            html.H6('Scalar:', className='table-options-title', style={'padding':'0px 10px 0px 10px'}),
            dcc.Dropdown(id='dropdown-midline-scalars', options=options, multi=False, value='FA', style={'width':'120px'}),
        ], className='row', style=dict(display='flex', justifyContent='left', verticalAlign='center')),
    ])
    return layout
def build_segm_scatterplot_dropdowns():
    options = [{'label': scalar, 'value': scalar} for scalar in scalar_list + list(df_numerical.columns)]
    options_trendlines = [{'label': scalar, 'value': scalar} for scalar in ['None', 'OLS', 'Lowess']]
    layout = html.Div([
        html.Div([
            html.Div([
                html.H6('Scalar Y:', className='table-options-title', style={'padding':'0px 10px 0px 0px'}),
                dcc.Dropdown(id='dropdown-scalars-left', options=options, multi=False, value='FA', style={'width':'90px'}),
            ], className='row', style={'margin':'0px 0px 0px 10px'}),
            html.Div([
                html.H6('Scalar X:', className='table-options-title', style={'padding':'0px 10px 0px 0px'}),
                dcc.Dropdown(id='dropdown-scalars-right', options=options, multi=False, value='MD', style={'width':'90px'}),
            ], className='row', style={'margin':'0px 0px 0px 30px'}),
            html.Div([
                html.H6('Trendline:', className='table-options-title', style={'padding':'0px 10px 0px 0px'}),
                dcc.Dropdown(id='dropdown-scalars-trendline', options=options_trendlines, multi=False, value='None', style={'width':'120px'}),
            ], className='row', style={'margin':'0px 0px 0px 30px'}),
        ], className='row', style=dict(display='flex', justifyContent='left', verticalAlign='center')),
    ])
    return layout


def build_parcel_boxplot_dropdowns():
    options_scalars = [{'label': scalar, 'value': scalar} for scalar in scalar_list]
    options_parcel_method = [{'label': parc, 'value': parc} for parc in dict_parcellations_statistics[segmentation_method].keys()]
    layout = html.Div([
        html.Div([
            html.H6('Parc. Method:', className='table-options-title', style={'padding':'0px 10px 0px 10px'}),
            dcc.Dropdown(id='dropdown-parcel-boxplot-left', options=options_parcel_method, multi=False, value=list(dict_parcellation_methods.keys())[0], style={'width':'150px'}),
            html.H6('Scalar:', className='table-options-title', style={'padding':'0px 10px 0px 30px'}),
            dcc.Dropdown(id='dropdown-parcel-scalars-right', options=options_scalars, multi=False, value='FA', style={'width':'120px'})
        ], className='row', style=dict(display='flex', justifyContent='left', verticalAlign='center')),
    ])
    return layout


def build_bubbleplot_dropdowns():
    options_pvalue = [{'label': scalar, 'value': scalar} for scalar in ['Scalar', 'p-value']]
    options_scalars = [{'label': scalar, 'value': scalar} for scalar in scalar_list+['Thickness']]
    options_size = [{'label': scalar, 'value': scalar} for scalar in ['True', 'False']]
    layout = html.Div([
        html.Div([
            html.Div([
                html.H6('Mode:', className='table-options-title', style={'padding':'0px 10px 0px 0px'}),
                dcc.Dropdown(id='dropdown-bubbleplot-mode', options=options_pvalue, multi=False, value='Scalar', style={'width':'150px'}),
            ], className='row', style={'margin':'0px 0px 0px 10px'}),
            html.Div([
                html.H6('Scalar:', className='table-options-title', style={'padding':'0px 10px 0px 0px'}),
                dcc.Dropdown(id='dropdown-bubbleplot-left', options=options_scalars, multi=False, value='Thickness', style={'width':'150px'}),
            ], className='row', style={'margin':'0px 0px 0px 30px'}),
            html.Div([
                html.H6('Size:', className='table-options-title', style={'padding':'0px 10px 0px 0px'}),
                dcc.Dropdown(id='dropdown-bubbleplot-right', options=options_size, multi=False, value='True', style={'width':'120px'})
            ], className='row', style={'margin':'0px 0px 0px 30px'}, id='div-bubble-dropdown-right'),
            html.Div([
                html.H6('Threshold:', className='table-options-title', style={'padding':'0px 10px 0px 0px'}),
                dcc.Dropdown(id='dropdown-bubbleplot-threshold', options=[{'label': num/100, 'value': num/100} for num in list(np.arange(0, 10, 1)) + list(np.arange(15, 95, 5))], multi=False, value=0.05, style={'width':'120px'})
            ], className='row', style=dict(display='none', margin='0px 0px 0px 30px'), id='div-bubble-dropdown-threshold'),
        ], className='row', style=dict(display='flex', justifyContent='left', verticalAlign='center', marginBottom='50px')),
    ])
    return layout
def build_fissure_image_dropdown(subject_id, segmentation_methods_available=dict_segmentation_methods.keys()):
    options = [{'label': segmentation_method, 'value': segmentation_method} for segmentation_method in list(segmentation_methods_available)+['None']]
    options_scalars = [{'label': scalar, 'value': scalar} for scalar in ['wFA']+scalar_list]
    layout = html.Div([
        html.Div([
            html.Div(children=[
                html.H6('Segm. Method:', className='table-options-title', style={'padding':'0px 20px 0px 10px'}),
                dcc.Dropdown(id='dropdown-subj-collapse-segm-methods', options=options, multi=False, value=list(segmentation_methods_available)[0], style={'width':'120px'}),
            ], className='row'),
            html.Div(
                dbc.Button('Remove segmentation', outline=True, color='danger', id=dict(type='btn-remove-subject', index=subject_id), style=dict(padding='0 15px', margin='0px 0px 0px 20px', fontSize='1.2rem')),
                id=f"tooltip-div-wrapper-{subject_id}"),
        ], className='row', style=dict(display='flex', justifyContent='left', verticalAlign='center', marginLeft='0')),
        html.Div(children=[
            html.H6('Scalar:', className='table-options-title', style={'padding':'0px 10px 0px 10px'}),
            dcc.Dropdown(id='dropdown-subj-collapse-scalars', options=options_scalars, multi=False, value='wFA', style={'width':'150px'}),
        ], className='row', style=dict(display='flex', justifyContent='left', verticalAlign='center', marginTop='2px')),
        dbc.Tooltip("This will remove only the ROQS segmentation, indicated on the dropdown to the left",
                    target=f"tooltip-div-wrapper-{subject_id}", id='subject-collapse-tooltip',
                    style=dict(fontSize='12pt'), placement='bottom'),
    ])
    return layout


def build_individual_subject_segm_table_dropdown(segmentation_methods_available=dict_segmentation_methods.keys()):
    options = [{'label': segmentation_method, 'value': segmentation_method} for segmentation_method in segmentation_methods_available]
    options_stddev = [{'label': scalar, 'value': scalar} for scalar in ['Show', 'Hide']]
    layout = html.Div([
        html.Div([
            html.H6('Segm. method:', className='table-options-title', style={'padding':'0px 10px 0px 10px'}),
            dcc.Dropdown(id='dropdown-subj-collapse-table-segm-methods', options=options, multi=False, value=list(segmentation_methods_available)[0], style={'width':'150px'}),
            html.H6('Show Std.Dev.:', className='table-options-title', style={'padding':'0px 10px 0px 30px'}),
            dcc.Dropdown(id='dropdown-subj-collapse-table-segm-std-dev', options=options_stddev, multi=False, value='Hide', style={'width':'120px'})
        ], className='row', style=dict(display='flex', justifyContent='left', verticalAlign='center')),
    ])
    return layout
def build_individual_subject_parcel_table_dropdown(segmentation_methods_available=dict_segmentation_methods.keys()):
    options = [{'label': segmentation_method, 'value': segmentation_method} for segmentation_method in segmentation_methods_available]
    options_scalars = [{'label': scalar, 'value': scalar} for scalar in scalar_list]
    options_parcel_method = [{'label': parc, 'value': parc} for parc in dict_parcellations_statistics[segmentation_method].keys()]
    layout = html.Div([
        html.Div([
            html.H6('Segm. method:', className='table-options-title', style={'padding':'0px 10px 0px 10px'}),
            dcc.Dropdown(id='dropdown-subj-collapse-table-parcel-segm-methods', options=options, multi=False, value=list(segmentation_methods_available)[0], style={'width':'150px'}),
            html.H6('Parcel. method:', className='table-options-title', style={'padding':'0px 10px 0px 30px'}),
            dcc.Dropdown(id='dropdown-subj-collapse-table-parcel-methods', options=options_parcel_method, multi=False, value=list(dict_parcellation_methods.keys())[0], style={'width':'150px'}),
            html.H6('Scalar:', className='table-options-title', style={'padding':'0px 10px 0px 30px'}),
            dcc.Dropdown(id='dropdown-subj-collapse-table-parcel-scalars', options=options_scalars, multi=False, value='FA', style={'width':'120px'})
        ], className='row', style=dict(display='flex', justifyContent='left', verticalAlign='center')),
    ])
    return layout


# Collapses and others ---------------------------------------------------------------------

def build_subjects_list():
    ''' button_group = dbc.ButtonGroup(
            [dbc.Button(i, color='light', size='lg', style=dict(width='100%')) for i in loaded_subjects],
            vertical=True,
            style=dict(width='100%')
        ) '''
    button_group = []
    for i, subject_id in enumerate(loaded_subjects):
        if i % 2 == 0:
            background = 'rgba(50,50,70,.5)'
        else:
            background = 'rgba(60,60,70,.5)'
        button_group.append(html.Button(subject_id, style=dict(fontSize='1.8rem', width='100%', backgroundColor=background, marginBottom='2px', color='rgb(255,255,255)'), id={'type': 'subject-list-btns', 'index': subject_id}))
    return html.Div(button_group, style=dict(width='100%'))


def build_quality_collapse():
    layout = html.Div([
        html.Div([
            build_graph_title("Quality evaluation"),
            html.Button('X', style=dict(fontSize='1.5rem', margin='10px', padding='0 13px', fontFamily='Open Sans', borderRadius='20px'), id='btn-exit-quality')
        ], className='twelve columns', style=dict(display='flex', justifyContent='space-between')),
        html.Div([
            html.Div([
                html.H6("Threshold:", style=dict(fontSize='1.8rem')),
                dcc.Dropdown(id='dropdown-quality-threshold', options=[{'label': num/100, 'value': num/100} for num in np.arange(95, 5, -5)], multi=False, value=0.7, style={'width':'100px', 'marginLeft':'5px'}),
                html.H6("Scalar:", style=dict(fontSize='1.8rem', marginLeft='2rem')),
                dcc.Dropdown(id='dropdown-quality-scalar', options=[{'label': i, 'value': i} for i in ['wFA']+scalar_list], multi=False, value='wFA', style={'width':'100px', 'marginLeft':'5px'}),
            ], className='row', style=dict(margin='20px 0 0 12px')),
        ], className='twelve columns', style=dict(display="flex", justifyContent="space-between")),
        html.Div([
            dbc.Button("Restore Removed", color='info', outline=True, size='lg', id='restore_btn', style=dict(marginRight='1rem')),
            dbc.Button("Remove Selected", color='danger', outline=True, size='lg', id='remove_btn', style=dict(marginRight='2rem')),
        ], className='twelve columns', style=dict(display='flex', justifyContent='flex-end')),
        html.Div([
            dbc.Button("Unselect all", color='primary', outline=True, size='lg', id='unselect_all_btn', style=dict(marginRight='1rem')),
            dbc.Button("Select all", color='primary', outline=True, size='lg', id='select_all_btn', style=dict(marginRight='2rem')),
        ], className='twelve columns', style=dict(display='flex', verticalAlign='center', justifyContent='flex-end', padding='10px 0px 10px 0px', marginBottom='-1rem')),
        html.Div(children=build_quality_images(), className="twelve columns", id='photo-container', style=dict(margin="0rem 0rem 2rem 0rem")),
    ], style={'margin':'20px', 'height':"100vh", 'backgroundColor':'#FAFAFA', 'border-radius':'20px', 'border':'1px solid rgba(0,0,0,.125)'})
    return layout
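# Example (sketch): the quality panel is assembled from plain builders, so the
# flagged-subject children can be previewed standalone once the data is loaded:
#   children = build_quality_images(threshold=0.7, scalar='wFA')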
def build_quality_images(threshold=0.7, scalar='wFA'):

    def get_quality_tab_children(segmentation_method, scalar='wFA'):
        children = []
        # Get error probs
        df = dict_error_prob[segmentation_method]
        df = df.drop(dict_removed_subjects[segmentation_method])
        # Order by error probs
        index_error_probs = df.query('error_prob > '+str(threshold)).index.tolist()
        index_error_probs.sort()
        # Get outliers
        index_outliers = dict_scalar_outliers[segmentation_method]
        index_outliers = [x for x in index_outliers if x not in dict_removed_subjects[segmentation_method]]
        index_outliers.sort()
        index_no_quality = list(set(index_error_probs + index_outliers))
        # Retrieve images and segmentation
        for subject_id in index_no_quality:
            children.append(dcc.Loading(html.Div([
                html.Div([
                    html.H6("Subject: {}".format(subject_id), style=dict(fontSize='2rem')),
                    dcc.Checklist(id={'type': 'remove-cbx', 'index': 'cbx-{}-{}'.format(segmentation_method, subject_id)}, options=[{'label': 'Remove', 'value': 'Remove'}], value=[], style=dict(fontSize='1.8rem'), inputStyle=dict(marginRight="10px")),
                ], className='twelve columns', style=dict(width='100%', display='flex', verticalAlign='center', justifyContent='space-between')),
                build_quality_badges(subject_id, index_error_probs, index_outliers),
                html.Div([
                    dcc.Graph(figure=build_fissure_image(subject_id, segmentation_method, scalar))
                ], className='twelve columns'),
            ], className='twelve columns')))
        return children

    def get_quality_tab(segmentation_method):
        tab = dbc.Tab(label=segmentation_method, children=html.Div(get_quality_tab_children(segmentation_method, scalar), style=dict(height='80vh', overflowY="auto", padding='20px 20px 20px 20px')))
        return tab

    tabs = []
    for segmentation_method in dict_segmentation_methods.keys():
        tabs.append(get_quality_tab(segmentation_method))
    return dbc.Tabs(tabs, style=dict(height='40px', verticalAlign='center', padding='0px 10px 0px 10px'))


def build_fissure_image(subject_id, segmentation_method, scalar='FA'):
    scalar_maps = dict_scalar_maps[segmentation_method][subject_id]
    scalar_maps_list = ['wFA', 'FA', 'MD', 'RD', 'AD']
    scalar_map = scalar_maps[scalar_maps_list.index(scalar)]
    fig = px.imshow(scalar_map, color_continuous_scale='gray', aspect='auto')
    if segmentation_method != 'None':
        segmentation = dict_segmentation_masks[segmentation_method][subject_id]
        contours = measure.find_contours(segmentation, 0.1)
        contour = sorted(contours, key=lambda x: len(x))[-1]
        fig.add_trace(go.Scatter(x=contour[:, 1], y=contour[:, 0]))
    fig.update_layout(height=250, width=450, paper_bgcolor='rgba(0,0,0,0)', legend_orientation="h", coloraxis_showscale=True)
    fig.update_layout(margin=dict(l=0, r=0, t=0, b=30))
    return fig
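# Example (sketch): build_fissure_image() can be used on its own to inspect one
# segmentation overlay; 'subject_001' below is a hypothetical subject id that
# must be a key of the loaded data:
#   fig = build_fissure_image('subject_001', 'ROQS', scalar='wFA')
#   fig.show()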
def build_3d_visualization(subject_id):
    folderpath = path_dict[subject_id] + 'inCCsight/'
    filepath = folderpath + 'segm_watershed3d.npy'
    if os.path.exists(filepath):
        _, segmentation3d, wFA_v, _ = np.load(filepath, allow_pickle=True)
        verts, faces, normals, values = measure.marching_cubes_lewiner(wFA_v, 0)
        tri_FA = ff.create_trisurf(x=verts[:,0], y=verts[:,2], z=verts[:,1]*-1+70, simplices=faces, colormap=[(1,0,0), (1,0,0)], aspectratio=dict(x=1, y=1, z=.66), plot_edges=False, show_colorbar=False)
        tri_FA['data'][0].update(opacity=0.2)
        verts, faces, normals, values = measure.marching_cubes_lewiner(segmentation3d, 0)
        tri_CC = ff.create_trisurf(x=verts[:,0], y=verts[:,2], z=verts[:,1]*-1+70, simplices=faces, colormap=[(0,0,1), (0,0,1)], aspectratio=dict(x=1, y=1, z=.66), plot_edges=False, show_colorbar=False)
        tri_CC['data'][0].update(opacity=0.1)
        fig = go.Figure(tri_FA)
        fig.add_trace(tri_CC.data[0])
        fig.update_layout(title="3D Visualization")
        fig.update_layout(plot_bgcolor='rgba(0,0,0,0)', paper_bgcolor='rgba(0,0,0,0)')
        return fig
    else:
        return []


def build_subject_collapse(segmentation_method='ROQS', scalar_map='wFA', subject_id=list(path_dict.keys())[0]):
    segmentation_methods_available = []
    for segm_method in dict_segmentation_methods.keys():
        if subject_id in dict_scalar_statistics[segm_method].index:
            segmentation_methods_available.append(segm_method)

    layout = dbc.Card([
        html.Div([
            html.Div([
                build_graph_title("Subject " + subject_id),
            ], className='row'),
            html.Button('X', style=dict(fontSize='1.5rem', margin='10px', padding='0 13px', fontFamily='Open Sans', borderRadius='20px'), id=dict(type='btn-exit-subject', index=subject_id))
        ], className='twelve columns', style=dict(display='flex', justifyContent='space-between')),
        html.Div([
            # Segmentation image
            html.Div([
                build_graph_title("Scalar maps"),
                #dcc.Graph(figure=build_3d_visualization(subject_id))
                dcc.Graph(figure=build_fissure_image(subject_id, segmentation_methods_available[0], scalar=scalar_map), id='subject_collapse_fissure_img'),
                build_fissure_image_dropdown(subject_id, segmentation_methods_available),
            ], className='four columns', style=dict(display='grid', justifyContent='center')),
            html.Div(className='one column'),
            html.Div([
                build_graph_title("Segmentation data"),
                build_individual_subject_segm_table(subject_id, segmentation_methods_available[0]),
                build_individual_subject_segm_table_dropdown(segmentation_methods_available),
                build_graph_title("Parcellation data"),
                build_individual_subject_parcel_table(subject_id, segmentation_methods_available[0], parcellation_method='Witelson', scalar='FA'),
                build_individual_subject_parcel_table_dropdown(segmentation_methods_available),
            ], className='six columns'),
        ], className='row', style={'justifyContent':'center'}),
    ], style={'margin':'20px', 'backgroundColor':'#FAFAFA', 'border-radius':'20px', 'padding':'0px 0px 50px 0px'})
    return layout


def build_quality_badges(subject_id, index_error_probs, index_outliers):
    children = []
    if subject_id in index_error_probs:
        children.append(dbc.Badge("Abnormal Shape", color="secondary", pill=True, style=dict(marginRight='10px', fontSize='12pt', fontWeight='600')))
    if subject_id in index_outliers:
        children.append(dbc.Badge("FA Outlier", color="secondary", pill=True, style=dict(marginRight='10px', fontSize='12pt', fontWeight='600')))
    return html.Div(children=children, className='twelve columns', style=dict(marginLeft='27px'))


# DataTable functions ---------------------------------------------------------------------

def extend_colorscale(colormap, factor=5):
    from plotly.colors import n_colors
    new_colormap = []
    for i in range(len(colormap)-1):
        new_colormap += n_colors(eval(colormap[i][3:]), eval(colormap[i+1][3:]), factor)
    for i, color in enumerate(new_colormap):
        new_colormap[i] = 'rgb' + str(color)
    return new_colormap
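# Example: extend_colorscale() interpolates `factor` intermediate colors between
# each pair of 'rgb(...)' strings, so a 7-color diverging scale yields 24 entries
# (6 gaps x factor 4) for finer table shading:
#   fine_scale = extend_colorscale(px.colors.diverging.RdBu[2:-2], factor=4)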
def color_table(df, use_values_limits=True, mean=None, stdev=None):
    if 'id' in df:
        numeric_columns = df.select_dtypes('number').drop(['index'], axis=1)
    else:
        numeric_columns = df.select_dtypes('number')

    colormap = px.colors.diverging.RdBu[::-1][2:-2]
    colormap = extend_colorscale(colormap, 4)
    styles = []

    for col in numeric_columns:
        values = df[col]
        if use_values_limits is True:
            mean = np.mean(values)
            min_value = np.min(values)
            max_value = np.max(values)
        else:
            min_value = mean - stdev
            max_value = mean + stdev
        limits = np.linspace(min_value, max_value, len(colormap))
        for value in values:
            idx = (np.abs(limits - value)).argmin()
            styles.append({'if': {'filter_query': '{{{col}}} = {value}'.format(col=col, value=value), 'column_id': col},
                           'backgroundColor': colormap[idx]})
            styles.append({'if': {'filter_query': '{{{col}}} = {value}'.format(col=col, value=value), 'state': 'selected', 'column_id': col},
                           'backgroundColor': colormap[idx]})
            styles.append({'if': {'filter_query': '{{{col}}} = {value}'.format(col=col, value=value), 'state': 'selected', 'column_id': 'index'},
                           'backgroundColor': 'rgb(228, 228, 255)'})
    return styles


def stripped_rows():
    style_data_conditional = []
    style_data_conditional.append({'if': {'row_index': 'odd'}, 'backgroundColor': 'rgb(248, 248, 248)'})
    style_data_conditional.append({'if': {'row_index': 'odd', 'state': 'selected'}, 'backgroundColor': 'rgb(228, 228, 228)'})
    style_data_conditional.append({'if': {'row_index': 'even'}, 'backgroundColor': 'rgb(255, 255, 255)'})
    style_data_conditional.append({'if': {'row_index': 'even', 'state': 'selected'}, 'backgroundColor': 'rgb(235, 235, 235)'})
    style_data_conditional.append({'if': {'state': 'selected'}, "border": "3px solid blue"})
    return style_data_conditional


def build_segm_table(mode='Method', segmentation_method='ROQS', show_stdev=False, color=False, pvalue=False):
    list_scalars = ['FA', 'RD', 'AD', 'MD']

    if mode == 'subjects':
        df = dict_scalar_statistics[segmentation_method]
        df = df.drop(dict_removed_subjects[segmentation_method], errors='ignore')
        df = df.reset_index().rename(columns={"index": "Subject"})
        names = ['Subject'] + list_scalars
    else:
        if mode == 'Method':
            df = pd.DataFrame()
            for segmentation_method in dict_segmentation_methods.keys():
                df_aux = dict_scalar_statistics[segmentation_method]
                df_aux = df_aux.drop(dict_removed_subjects[segmentation_method], errors='ignore')
                df_aux = df_aux.reset_index()
                df_aux['Method'] = segmentation_method
                df = pd.concat([df, df_aux], axis=0)
            category_list = list(dict_segmentation_methods.keys())
        else:
            df = dict_scalar_statistics[segmentation_method]
            df = df.drop(dict_removed_subjects[segmentation_method], errors='ignore')
            df = pd.concat([df_categories[mode], df], axis=1)
            category_list = list(set(df_categories[mode]))

        if pvalue is True:
            dict_pvalues = {}
            for cat1, cat2 in itertools.combinations(category_list, 2):
                df1 = df[df[mode] == cat1]
                df2 = df[df[mode] == cat2]
                dict_pvalues['{} x {}'.format(cat1, cat2)] = get_column_pvalues(df1, df2)
            df = pd.DataFrame().from_dict(dict_pvalues, orient='index', columns=scalar_statistics_names).reset_index()
            df = df.rename(columns={'index': mode})
            names = [mode] + list_scalars
        else:
            df = df.groupby(mode).mean().reset_index()
            names = [mode] + list_scalars

    df = df.round(6).sort_index()

    if show_stdev is False:
        columns = [{"name": i, "id": i} for i in names]
        data = df[names]
    else:
        columns = [{"name": i, "id": i} for i in df.columns[:-1]]
        data = df

    layout = dash_table.DataTable(
        id='segm_table',
        columns=columns,
        data=data.to_dict('records'),
        page_action='none',
        fixed_rows={'headers': True},
        style_table={'maxHeight': '300px', 'overflowY': 'auto'},
        style_header={'fontWeight': 'bold'},
        style_cell={'font_family': 'Open Sans', 'font_size': '18px', 'text_align': 'center'},
        style_as_list_view=True,
        export_format='xlsx',
        export_headers='display',
        style_data_conditional=stripped_rows(),
    )
    return layout
def build_parcel_table(mode='Method', segmentation_method='ROQS', parcellation_method='Witelson', scalar='FA', color=False, pvalue=False):
    list_regions = ['P1', 'P2', 'P3', 'P4', 'P5']

    if mode == 'subjects':
        df = pd.DataFrame()
        for region in list_regions:
            df = pd.concat([df, dict_parcellations_statistics[segmentation_method][parcellation_method][region][scalar]], axis=1)
        df.columns = list_regions
        df = df.drop(dict_removed_subjects[segmentation_method], errors='ignore')
        df = df.reset_index()
        df = df.rename(columns={"index": "Subject"})
        names = ['Subject'] + list_regions
    else:
        if mode == 'Method':
            df = pd.DataFrame()
            for segmentation_method in dict_segmentation_methods.keys():
                df_aux = pd.DataFrame()
                for region in list_regions:
                    df_aux = pd.concat([df_aux, dict_parcellations_statistics[segmentation_method][parcellation_method][region][scalar]], axis=1)
                df_aux = df_aux.drop(dict_removed_subjects[segmentation_method], errors='ignore')
                df_aux['Method'] = segmentation_method
                df = pd.concat([df, df_aux], axis=0)
            category_list = list(set(dict_segmentation_methods.keys()))
        else:
            category_list = list(set(df_categories[mode]))
            df = pd.DataFrame()
            for category in category_list:
                df_aux = pd.DataFrame()
                for region in list_regions:
                    df_aux = pd.concat([df_aux, dict_parcellations_statistics[segmentation_method][parcellation_method][region][scalar][df_categories[mode] == category]], axis=1)
                df_aux[mode] = category
                df = pd.concat([df, df_aux], axis=0)
            df = df.drop(dict_removed_subjects[segmentation_method], errors='ignore')

        if pvalue is True:
            df.columns = list_regions + [mode]
            dict_pvalues = {}
            for cat1, cat2 in itertools.combinations(category_list, 2):
                df1 = df[df[mode] == cat1]
                df2 = df[df[mode] == cat2]
                dict_pvalues['{} x {}'.format(cat1, cat2)] = get_column_pvalues(df1, df2)
            df = pd.DataFrame().from_dict(dict_pvalues, orient='index', columns=list_regions).reset_index()
            df = df.rename(columns={'index': mode})
            names = [mode] + list_regions
        else:
            df = df.groupby(mode).mean()
            df.columns = list_regions
            names = [mode] + list_regions

    layout = dash_table.DataTable(
        id='parcel_table',
        columns=[{"name": name, "id": name} for name in names],
        data=df.round(6).reset_index().to_dict('records'),
        page_action='none',
        fixed_rows={'headers': True},
        style_table={'maxHeight': '300px', 'overflowY': 'auto'},
        style_header={'fontWeight': 'bold'},
        style_cell={'font_family': 'Open Sans', 'font_size': '18px', 'text_align': 'center'},
        style_as_list_view=True,
        export_format='xlsx',
        export_headers='display',
        style_data_conditional=stripped_rows(),
    )
    return layout
def export_parcel_table(segmentation_method='ROQS', parcellation_method='Witelson', include_segm=True, include_parc=True, include_cat=True, groupby_cat=None, include_stdev=True):
    # Get data, organize with multiindex columns
    if include_parc is True:
        df = pd.concat(dict_parcellations_statistics[segmentation_method][parcellation_method].values(), axis=1, keys=dict_parcellations_statistics[segmentation_method][parcellation_method].keys())

    # Include info for whole CC
    if include_segm is True:
        df_segm = pd.concat(dict(CC=dict_scalar_statistics[segmentation_method].drop(columns=['Method'])), axis=1)
        if include_parc is True:
            df = pd.concat([df_segm, df], axis=1)
        else:
            df = df_segm

    # Remove info from removed subjects
    if groupby_cat is None:
        df.loc[dict_removed_subjects[segmentation_method], df.columns] = ''
    else:
        df = df.drop(dict_removed_subjects[segmentation_method])

    # Add categories info
    if include_cat is True or groupby_cat is not None:
        df_aux = pd.concat(dict(Categories=df_categories), axis=1)
        df = pd.concat([df_aux, df], axis=1)

    # Group by category
    if groupby_cat is not None:
        df = df.groupby(('Categories', groupby_cat)).mean()

    # Drop or not Std Dev columns
    if include_stdev is False:
        for col_tuple in df.columns:
            if 'StdDev' in col_tuple[-1]:
                df = df.drop(columns=col_tuple)

    # Define filename
    if include_segm is True and include_parc is False:
        filename = "inCCsight_data_{}.xlsx".format(segmentation_method)
    elif include_segm is False and include_parc is True:
        filename = "inCCsight_data_{}.xlsx".format(parcellation_method)
    elif include_segm is True and include_parc is True:
        filename = "inCCsight_data_{}_{}.xlsx".format(segmentation_method, parcellation_method)
    else:
        filename = "inCCsight_data.xlsx"

    return send_data_frame(df.to_excel, filename)
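# Example (sketch): export_parcel_table() returns a dcc.Download payload via
# send_data_frame, so a spreadsheet grouped by the 'Folder' category could be
# produced with, e.g.:
#   payload = export_parcel_table('ROQS', 'Witelson', include_cat=True, groupby_cat='Folder')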
Dev.'], inputStyle=dict(marginRight='5px')) ], ), dbc.Col( width=8, children = [ dcc.Checklist( id="export_groupby_check", options=options_groupby_check, inputStyle=dict(marginRight='5px')), dcc.Dropdown( id="export_groupby_config", options=options_groupby_config, value='Folder'), ], ) ]) form = dbc.Form([segm_config, parc_config, data_config, extra_config]) modal = dbc.Modal([ dbc.ModalHeader("Download table data"), dbc.ModalBody(form, style=dict(fontSize='14pt')), dbc.ModalFooter( children=[ dbc.Label(id='disable_download_message', style=dict(marginRight='10px', opacity='0.5')), dbc.Button("Download", id="download_data_btn", size='lg', style=dict(marginRight='10px')), Download(id="parcel_download_all"), dbc.Button("Close", id="close_modal_btn", size='lg') ], style=dict(justifyContent='flex-end')) ], size='lg', centered=True, id="export_data_modal"), return modal def build_individual_subject_segm_table(subject_id, segmentation_method = 'ROQS', show_stdev=False): df = dict_scalar_statistics[segmentation_method].reset_index() df = df[df['index'] == subject_id].rename(columns={'index':'Subject'}) if show_stdev is True: names = ['Subject'] + scalar_statistics_names else: names = ['Subject', 'FA', 'RD', 'AD', 'MD'] layout = dash_table.DataTable( id = 'individual_subject_segm_table', columns = [{"name": name, "id": name} for name in names], data = df.round(6).reset_index().to_dict('records'), page_action = 'none', fixed_rows = {'headers': True}, style_table={ 'maxHeight': '300px', 'overflowY': 'auto'}, style_header = { 'fontWeight': 'bold', }, style_cell = { 'font_family': 'Open Sans', 'font_size': '18px', 'text_align': 'center' }, style_as_list_view = True, export_format='xlsx', export_headers='display', style_data_conditional=stripped_rows() ) return html.Div(layout, style=dict(margin='-30px 0px 2px 0px'), id='individual_subject_segm_table_container') def build_individual_subject_parcel_table(subject_id, segmentation_method = 'ROQS', parcellation_method='Witelson', scalar='FA'): list_regions = ['P1', 'P2', 'P3', 'P4', 'P5'] df = pd.DataFrame() for region in list_regions: df = pd.concat([df, dict_parcellations_statistics[segmentation_method][parcellation_method][region][scalar]], axis=1) df.columns = list_regions df = df.reset_index() df = df[df['index'] == subject_id].rename(columns={'index':'Subject'}) names = ['Subject'] + list_regions layout = dash_table.DataTable( id = 'individual_subject_parcel_table', columns = [{"name": name, "id": name} for name in names], data = df.round(6).reset_index().sort_index().to_dict('records'), page_action = 'none', fixed_rows = {'headers': True}, style_table={ 'maxHeight': '300px', 'overflowY': 'auto'}, style_header = { 'fontWeight': 'bold', }, style_cell = { 'font_family': 'Open Sans', 'font_size': '18px', 'text_align': 'center' }, style_as_list_view = True, export_format='xlsx', export_headers='display', style_data_conditional=stripped_rows() ) return html.Div(layout, style=dict(margin='-30px 0px 2px 0px'), id='individual_subject_parcel_table_container') # Extra functions def get_number_of_folders(): if len(list(group_dict.values())) > 0: return len(set(np.hstack(list(group_dict.values())))) else: return 1 def get_column_pvalues(df1, df2): df1 = df1.dropna()._get_numeric_data() df2 = df2.dropna()._get_numeric_data() pvalues = [] if list(df1.columns) == list(df2.columns): for r in df1.columns: for c in df2.columns: if r == c: pvalues.append(stats.ttest_ind(df1[r], df2[c], nan_policy='omit')[1]) return pvalues else: raise ValueError("DataFrame column values 
don't match") def empty_figure_with_text(text): return { "layout": { "height": 300, "paper_bgcolor": 'rgba(0,0,0,0)', "xaxis": { "visible": False }, "yaxis": { "visible": False }, "annotations": [ { "text": text, "xref": "paper", "yref": "paper", "showarrow": False, "font": { "size": 28 } } ] } } # ---------------------------------- LAYOUT ----------------------------------------------- app.layout = html.Div( children=[ html.Div( id="top-row", children=[ # Top row left html.Div( className="row", id="top-row-header", children=[ html.Div( id="header-container", children=[ build_banner(), html.Div( html.H5( children=[ html.H6("This is a data exploration and visualization tool for diffusion tensor images of the corpus callosum."), html.H6("Further information can be found in the "), html.A(href='https://github.com/thaiscaldeira/inCCsight)', children='GitHub page.', style=dict(marginTop='8px', marginLeft='7px')), ], id="instruct", className='row', ), className='twelve columns'), html.Div(className='twelve columns', style=dict(display='flex', justifyContent='center', verticalAlign='center'), children=[ html.H5("Category:", style=dict(color='white', padding='0 10px 0 54px')), dcc.Dropdown(id='dropdown_category', options=[{'label': method, 'value': method} for method in ['Method']+list(set(df_categories.columns))], multi=False, value='Method', style={'width':'150px'}) ], id="div_category", ), html.Div(className='twelve columns', style=dict(display='flex', justifyContent='center', verticalAlign='center'), children=[ html.H5("Segm. Method:", style=dict(color='white', padding='0 10px 0 0')), dcc.Dropdown(id='dropdown_segm_methods', options=[{'label': method, 'value': method} for method in dict_segmentation_methods.keys()], multi=False, value=list(dict_segmentation_methods.keys())[0], style={'width':'150px'}) ], id="div_select_segm_method", ), html.Div(className='twelve columns', id='quality-button-container', children=[ dbc.Button( "Check quality", size='lg', color="light", id='quality-button', style=dict(marginRight='20px') ), dbc.Button( 'Download tabled data', size='lg', color="light", id='parcel_download_all_btn', ), ]), ], ) ], ), # Top row right html.Div( className="row", id="top-row-graphs", children=[ html.Div([ #Subjects list html.Div([ build_graph_title("Subjects"), html.Div( build_subjects_list(), style=dict(height='550px', margin='0 0 50px 0', overflowY='auto', minWidth='300px', maxWidth='450px'), id="subject-table-container")], className = "column", id="subject-list-container", ), html.Div(className="four columns"), ], className="one-half column", id="left-panel"), html.Div([ html.Div( id='summary-card-container', className='column', children=[ dbc.Card( id = "summary-card", children = [ html.H1(str(len(set(list(path_dict.keys())))), className='numcard'), html.H5("Subjects", className='titlecard'), html.H1(str(get_number_of_folders()), className='numcard', style={'margin-top': '10px'}), html.H5("Folders", className='titlecard'), ], ), ]), ], className="one-half column", id="right-panel"), ], ), ], ), # Dashboard row html.Div( className='row', children=[ dbc.Collapse( dbc.Card( dbc.CardBody( children=build_quality_collapse(), style=dict(padding='0') ), style=dict(border='1px solid rgba(0,0,0,0)'), ), id="quality-collapse", className='quality columns', ), html.Div( id='dashboard', style=dict(height='100vh', overflowY='auto', overflowX='hidden'), className='twelve columns', children=[ dbc.Collapse( dbc.Card(dbc.CardBody()), id="subject-collapse", ), html.Div( className = 'row', id = 
# ---------------------------------- LAYOUT -----------------------------------------------

app.layout = html.Div(
    children=[
        html.Div(
            id="top-row",
            children=[
                # Top row left
                html.Div(
                    className="row",
                    id="top-row-header",
                    children=[
                        html.Div(
                            id="header-container",
                            children=[
                                build_banner(),
                                html.Div(
                                    html.H5(
                                        children=[
                                            html.H6("This is a data exploration and visualization tool for diffusion tensor images of the corpus callosum."),
                                            html.H6("Further information can be found in the "),
                                            html.A(href='https://github.com/thaiscaldeira/inCCsight', children='GitHub page.', style=dict(marginTop='8px', marginLeft='7px')),
                                        ],
                                        id="instruct",
                                        className='row',
                                    ),
                                    className='twelve columns'),
                                html.Div(className='twelve columns',
                                    style=dict(display='flex', justifyContent='center', verticalAlign='center'),
                                    children=[
                                        html.H5("Category:", style=dict(color='white', padding='0 10px 0 54px')),
                                        dcc.Dropdown(id='dropdown_category', options=[{'label': method, 'value': method} for method in ['Method']+list(set(df_categories.columns))], multi=False, value='Method', style={'width':'150px'})
                                    ],
                                    id="div_category",
                                ),
                                html.Div(className='twelve columns',
                                    style=dict(display='flex', justifyContent='center', verticalAlign='center'),
                                    children=[
                                        html.H5("Segm. Method:", style=dict(color='white', padding='0 10px 0 0')),
                                        dcc.Dropdown(id='dropdown_segm_methods', options=[{'label': method, 'value': method} for method in dict_segmentation_methods.keys()], multi=False, value=list(dict_segmentation_methods.keys())[0], style={'width':'150px'})
                                    ],
                                    id="div_select_segm_method",
                                ),
                                html.Div(className='twelve columns', id='quality-button-container',
                                    children=[
                                        dbc.Button("Check quality", size='lg', color="light", id='quality-button', style=dict(marginRight='20px')),
                                        dbc.Button('Download tabled data', size='lg', color="light", id='parcel_download_all_btn'),
                                    ]),
                            ],
                        )
                    ],
                ),
                # Top row right
                html.Div(
                    className="row",
                    id="top-row-graphs",
                    children=[
                        html.Div([
                            # Subjects list
                            html.Div([
                                build_graph_title("Subjects"),
                                html.Div(build_subjects_list(), style=dict(height='550px', margin='0 0 50px 0', overflowY='auto', minWidth='300px', maxWidth='450px'), id="subject-table-container")],
                                className="column", id="subject-list-container",
                            ),
                            html.Div(className="four columns"),
                        ], className="one-half column", id="left-panel"),
                        html.Div([
                            html.Div(
                                id='summary-card-container',
                                className='column',
                                children=[
                                    dbc.Card(
                                        id="summary-card",
                                        children=[
                                            html.H1(str(len(set(list(path_dict.keys())))), className='numcard'),
                                            html.H5("Subjects", className='titlecard'),
                                            html.H1(str(get_number_of_folders()), className='numcard', style={'margin-top': '10px'}),
                                            html.H5("Folders", className='titlecard'),
                                        ],
                                    ),
                                ]),
                        ], className="one-half column", id="right-panel"),
                    ],
                ),
            ],
        ),
        # Dashboard row
        html.Div(
            className='row',
            children=[
                dbc.Collapse(
                    dbc.Card(
                        dbc.CardBody(children=build_quality_collapse(), style=dict(padding='0')),
                        style=dict(border='1px solid rgba(0,0,0,0)'),
                    ),
                    id="quality-collapse",
                    className='quality columns',
                ),
                html.Div(
                    id='dashboard',
                    style=dict(height='100vh', overflowY='auto', overflowX='hidden'),
                    className='twelve columns',
                    children=[
                        dbc.Collapse(dbc.Card(dbc.CardBody()), id="subject-collapse"),
                        html.Div(
                            className='row',
                            id='tables-row',
                            children=[
                                html.Div(
                                    id='segm_table_super_container',
                                    className='six columns',
                                    children=[
                                        build_graph_title("Segmentation data"),
                                        html.Div(id='segm_table_container', children=[build_segm_table(show_stdev=False, color=True)]),
                                        html.Div(
                                            id='segm_table_options',
                                            className='table-options',
                                            children=[
                                                html.H6('Mode:', className='table-options-title'),
                                                dcc.Dropdown(id='segm_table_dropdown_mode', className='options-dropdown', options=[{'label': 'Overall', 'value': 'overall'}, {'label': 'Subjects', 'value': 'subjects'}, {'label': 'p-values', 'value': 'pvalue'}], multi=False, value='overall'),
                                                html.H6('Std.Dev.:', className='table-options-title'),
                                                dcc.Dropdown(id='segm_table_dropdown_stdev', className='options-dropdown', options=[{'label': 'Show', 'value': True}, {'label': 'Hide', 'value': False}], value=False),
                                            ],
                                        ),
                                    ],
                                ),
                                html.Div(
                                    id='parcel_table_super_container',
                                    className='six columns',
                                    children=[
                                        build_graph_title("Parcellation data"),
                                        html.Div(id='parcel_table_container', children=[build_parcel_table(segmentation_method='ROQS', parcellation_method='Witelson', scalar='FA')]),
                                        html.Div(
                                            id='parcel_table_options',
                                            className='table-options',
                                            children=[
                                                html.H6('Mode:', className='table-options-title'),
                                                dcc.Dropdown(id='parcel_table_dropdown_mode', className='options-dropdown', options=[{'label': 'Overall', 'value': 'overall'}, {'label': 'Subjects', 'value': 'subjects'}, {'label': 'p-values', 'value': 'pvalue'}], multi=False, value='overall'),
                                                html.H6('Parcel. method:', className='table-options-title'),
                                                dcc.Dropdown(id='parcel_table_dropdown_method', className='options-dropdown', options=[{'label': 'Witelson', 'value': 'Witelson'}, {'label': 'Hofer & Frahm', 'value': 'Hofer'}, {'label': 'Chao et al.', 'value': 'Chao'}, {'label': 'Cover et al.', 'value': 'Cover'}, {'label': 'Freesurfer', 'value': 'Freesurfer'}], value='Witelson'),
                                                html.H6('Scalar:', className='table-options-title'),
                                                dcc.Dropdown(id='parcel_table_dropdown_scalar', className='options-dropdown', options=[{'label': 'FA', 'value': 'FA'}, {'label': 'MD', 'value': 'MD'}, {'label': 'RD', 'value': 'RD'}, {'label': 'AD', 'value': 'AD'}], value='FA'),
                                            ],
                                        ),
                                    ],
                                )
                            ]
                        ),
                        html.Div(
                            className="row",
                            id="bottom-row",
                            style=dict(marginRight='20px'),
                            children=[
                                html.Div(
                                    id="bottom-row-left-column",
                                    className="seven columns",
                                    children=[
                                        html.Div(
                                            id="boxplot-container1",
                                            style=dict(marginBottom='-30px'),
                                            children=[
                                                build_graph_title("Segmentation BoxPlots"),
                                                dcc.Loading(dcc.Graph(id="segm_boxplots", figure=build_group_segm_boxplot())),
                                            ],
                                        ),
                                        html.Div(
                                            id="boxplot-container2",
                                            #style=dict(maginTop='0px'),
                                            children=[
                                                build_graph_title("Parcellation BoxPlots"),
                                                dcc.Loading(dcc.Graph(id="parc_boxplots", figure=build_parcel_boxplot())),
                                                build_parcel_boxplot_dropdowns(),
                                            ],
                                        ),
                                    ],
                                ),
                                # Scatterplot
                                html.Div(
                                    id="scatterplot-container",
                                    className="five columns",
                                    children=[
                                        build_graph_title("Scatter Plot"),
                                        dcc.Loading(dcc.Graph(id="scatter_plot", figure=build_segm_scatterplot())),
                                        build_segm_scatterplot_dropdowns(),
                                    ],
                                ),
                            ],
                        ),
                        html.Div(
                            className="row",
                            id="second-bottom-row",
                            style=dict(marginRight='20px'),
                            children=[
                                # Scattermatrix
                                html.Div(
                                    id="scattermatrix-container",
                                    className="seven columns",
                                    children=[
                                        build_graph_title("Scatter Matrix"),
                                        dcc.Loading(dcc.Graph(id="scatter_matrix", figure=build_segm_scattermatrix())),
                                    ],
                                ),
                                html.Div(
                                    className="five columns",
                                    children=[
                                        # Midline plots
                                        html.Div(
                                            id="midline-container",
                                            children=[
build_graph_title("Midline Plots"), dcc.Loading(dcc.Graph(id="midline_graph", figure=build_midline_plot())), build_midlineplot_dropdown(), ], ), # Bubble plot html.Div( id="bubbleplot-container", children=[ build_graph_title("Bubble Plots"), dcc.Loading(dcc.Graph(id="bubble_plots", figure=build_bubble_grouped())), build_bubbleplot_dropdowns(), ], ), ], ), ], ), html.Div( className="row", id="third-bottom-row", style=dict(marginRight='20px'), children=[ html.Div(build_export_parcel_table_modal(), id='example_div'), html.Div(id='bridge-div'), ], ), ] ) ] ) ], ) # --------------------------------- CALLBACKS --------------------------------------------- # Banner -------------------------------------------------------------- # Enable/Disable segm method dropdown @app.callback( Output("dropdown_segm_methods","disabled"), [Input("dropdown_category","value")]) def update_dropdown_disabled(mode): if mode == 'Method': return True else: return False # Tables -------------------------------------------------------------- # Change segm table mode @app.callback( Output("segm_table_container", "children"), [Input("segm_table_dropdown_mode", "value"), Input("dropdown_segm_methods", "value"), Input("dropdown_category","value"), Input("segm_table_dropdown_stdev", "value"), Input('photo-container', 'children')]) def change_segm_table_mode(table_mode, segmentation_method, mode, show_stdev, removed): pvalue = False if table_mode == 'subjects': mode = 'subjects' elif table_mode == 'pvalue': pvalue = True return [build_segm_table(mode = mode, segmentation_method = segmentation_method, show_stdev = show_stdev, color = False, pvalue=pvalue)] # Change parcel table mode @app.callback( Output("parcel_table_container", "children"), [Input("dropdown_category","value"), Input("dropdown_segm_methods", "value"), Input("parcel_table_dropdown_mode", "value"), Input("parcel_table_dropdown_method", "value"), Input("parcel_table_dropdown_scalar", "value"), Input('photo-container', 'children')]) def change_parcel_table_mode(mode, segmentation_method, table_mode, parcellation_method, scalar, removed): pvalue = False if table_mode == 'subjects': mode = 'subjects' elif table_mode == 'pvalue': pvalue = True return [build_parcel_table(mode = mode, segmentation_method = segmentation_method, parcellation_method = parcellation_method, scalar = scalar, color = False, pvalue = pvalue)] # Highlight table row borders upon clicking @app.callback( Output("segm_table", "style_data_conditional"), [Input("segm_table", "selected_cells")], [State("segm_table", "data"), State('segm_table', 'style_data_conditional')]) def paint_segm_table(selected_cells, table_data, style_data_conditional): n_rows = len(table_data) for row in range(n_rows): rule = {"if": {"row_index": row}, "border": "3px solid blue"} if rule in style_data_conditional: style_data_conditional.remove(rule) if selected_cells is not None: for cell in selected_cells: row = cell['row'] style_data_conditional.append({"if": {"row_index": row}, "border": "3px solid blue"}) return style_data_conditional # Highlight table row borders upon clicking @app.callback( Output("parcel_table", "style_data_conditional"), [Input("parcel_table", "selected_cells")], [State("parcel_table", "data"), State('parcel_table', 'style_data_conditional')]) def paint_parcel_table(selected_cells, table_data, style_data_conditional): n_rows = len(table_data) for row in range(n_rows): rule = {"if": {"row_index": row}, "border": "3px solid blue"} if rule in style_data_conditional: style_data_conditional.remove(rule) if 
# Highlight table row borders upon clicking
@app.callback(
    Output("parcel_table", "style_data_conditional"),
    [Input("parcel_table", "selected_cells")],
    [State("parcel_table", "data"),
     State('parcel_table', 'style_data_conditional')])
def paint_parcel_table(selected_cells, table_data, style_data_conditional):
    n_rows = len(table_data)
    for row in range(n_rows):
        rule = {"if": {"row_index": row}, "border": "3px solid blue"}
        if rule in style_data_conditional:
            style_data_conditional.remove(rule)
    if selected_cells is not None:
        for cell in selected_cells:
            row = cell['row']
            style_data_conditional.append({"if": {"row_index": row}, "border": "3px solid blue"})
    return style_data_conditional


# Open export data modal
@app.callback(
    Output("export_data_modal", "is_open"),
    [Input("parcel_download_all_btn", "n_clicks"),
     Input("close_modal_btn", "n_clicks")])
def toggle_export_modal(open_clicks, close_clicks):
    if open_clicks is not None:
        trigger = dash.callback_context.triggered[0]
        if trigger['prop_id'] == 'parcel_download_all_btn.n_clicks':
            return True
        else:
            return False


# Disable download button
@app.callback(
    [Output("download_data_btn", "disabled"),
     Output("disable_download_message", "children")],
    [Input("export_data_config", "value")])
def toggle_download_btn(value):
    if value == []:
        return True, "You must include at least one type of data."
    else:
        return False, ""


@app.callback(
    Output("parcel_download_all", "data"),
    [Input("download_data_btn", "n_clicks")],
    [State("export_segm_method", "value"),
     State("export_parc_method", "value"),
     State("export_data_config", "value"),
     State("export_groupby_check", "value"),
     State("export_groupby_config", "value"),
     State("export_show_stdev", "value")])
def download_parceldata(n_clicks, segmentation_method, parcellation_method, data_config, groupby_check, groupby_config, stdev_config):
    if n_clicks is not None:
        include_cat = False
        include_segm = False
        include_parc = False
        include_stdev = False
        groupby_cat = None
        if data_config is not None:
            if 'Categories' in data_config:
                include_cat = True
            if 'Segmentation' in data_config:
                include_segm = True
            if 'Parcellation' in data_config:
                include_parc = True
        if groupby_check == ['Group by:']:
            groupby_cat = groupby_config
        if stdev_config == ['Show Std. Dev.']:
            include_stdev = True
        return export_parcel_table(segmentation_method, parcellation_method, include_segm, include_parc, include_cat, groupby_cat, include_stdev)


# Graph updates -------------------------------------------------------

# Update segm box-plots
@app.callback(
    Output("segm_boxplots", "figure"),
    [Input("dropdown_segm_methods", "value"),
     Input("dropdown_category", "value"),
     Input('photo-container', 'children')])
def update_segm_boxplots(segm_method, mode, removed):
    return build_group_segm_boxplot(mode=mode, segmentation_method=segm_method)


# Update parcel box-plots
@app.callback(
    Output("parc_boxplots", "figure"),
    [Input("dropdown_segm_methods", "value"),
     Input("dropdown_category", "value"),
     Input("dropdown-parcel-boxplot-left", "value"),
     Input("dropdown-parcel-scalars-right", "value"),
     Input('photo-container', 'children')])
def update_parcel_boxplots(segm_method, mode, parc_method, scalar, removed):
    return build_parcel_boxplot(scalar=scalar, mode=mode, segmentation_method=segm_method, parcellation_method=parc_method)


# Update midline plot
@app.callback(
    Output("midline_graph", "figure"),
    [Input("dropdown_segm_methods", "value"),
     Input("dropdown_category", "value"),
     Input("dropdown-midline-scalars", "value"),
     Input('photo-container', 'children')])
def update_midlineplot(segm_method, mode, scalar, removed):
    return build_midline_plot(mode=mode, segmentation_method=segm_method, scalar=scalar)


# Update scatter matrix
@app.callback(
    Output("scatter_matrix", "figure"),
    [Input("dropdown_segm_methods", "value"),
     Input("dropdown_category", "value"),
     Input('photo-container', 'children')])
def update_scattermatrix(segm_method, mode, removed):
    return build_segm_scattermatrix(mode=mode, segmentation_method=segm_method)
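# The callbacks below share one idiom: dash.callback_context.triggered[0]
# identifies which Input fired, e.g.:
#   trigger = dash.callback_context.triggered[0]
#   if trigger['prop_id'] == 'remove_btn.n_clicks': ...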
Output("scatter_plot","figure"), [Input("dropdown_segm_methods","value"), Input("dropdown_category","value"), Input("dropdown-scalars-right","value"), Input("dropdown-scalars-left","value"), Input("dropdown-scalars-trendline","value"), Input('photo-container', 'children')]) def update_scatterplot(segm_method, mode, scalar_x, scalar_y, trendline, removed): if trendline == 'None': trendline = None else: trendline = trendline.lower() return build_segm_scatterplot(mode=mode, segmentation_method=segm_method, scalar_x=scalar_x, scalar_y=scalar_y, trendline=trendline) # Update bubble plot @app.callback( Output("bubble_plots","figure"), [Input("dropdown_segm_methods","value"), Input("dropdown_category","value"), Input("dropdown-bubbleplot-mode", "value"), Input("dropdown-bubbleplot-left", "value"), Input("dropdown-bubbleplot-right", "value"), Input("dropdown-bubbleplot-threshold", "value"), Input('photo-container', 'children')]) def update_bubbleplot(segm_method, mode, bubble_mode, scalar, size, threshold, removed): if size == 'True': size = True elif size == 'False': size = False if bubble_mode == 'Scalar': return build_bubble_grouped(mode=mode, segmentation_method=segm_method, scalar=scalar, size=size) else: return build_bubble_grouped_pvalue(mode=mode, segmentation_method=segm_method, scalar=scalar, threshold=threshold) # Update bubble plot dropdown options @app.callback( [Output('div-bubble-dropdown-right', 'style'), Output('div-bubble-dropdown-threshold', 'style')], [Input('dropdown-bubbleplot-mode', 'value')], [State('div-bubble-dropdown-right', 'style'), State('div-bubble-dropdown-threshold', 'style')]) def update_bubbleplot_dropdown(bubble_mode, style_right, style_threshold): if bubble_mode == 'Scalar': style_right['display'] = 'flex' style_threshold['display'] = 'none' else: style_right['display'] = 'none' style_threshold['display'] = 'flex' return [style_right, style_threshold] # Subject collapse ---------------------------------------------------- # Open subject collapse @app.callback( [Output("subject-collapse", "is_open"), Output("subject-collapse", "children")], [Input({'type': 'subject-list-btns', 'index': ALL}, 'n_clicks'), Input({'type': 'btn-exit-subject', 'index': ALL}, 'n_clicks')], [State({'type': 'subject-list-btns', 'index': ALL}, 'id')]) def open_subject_collapse(n_clicks, exit_clicks, ids): if n_clicks is not None: trigger = dash.callback_context.triggered[0] if (trigger['value'] is None): return dash.no_update btn_id = ast.literal_eval(trigger["prop_id"][:-9]) if btn_id['type'] == 'btn-exit-subject': return False, [] subject_id = btn_id['index'] global selected_subject_id selected_subject_id = subject_id return True, [build_subject_collapse(subject_id = subject_id)] else: return dash.no_update @app.callback( Output('individual_subject_segm_table_container', 'children'), [Input('dropdown-subj-collapse-table-segm-methods', 'value'), Input('dropdown-subj-collapse-table-segm-std-dev', 'value')]) def update_segm_table_subj_collapse(segmentation_method, show_stdev): if show_stdev == 'Show': show_stdev = True else: show_stdev = False return build_individual_subject_segm_table(selected_subject_id, segmentation_method, show_stdev) @app.callback( Output('individual_subject_parcel_table_container', 'children'), [Input('dropdown-subj-collapse-table-parcel-scalars', 'value'), Input('dropdown-subj-collapse-table-parcel-segm-methods', 'value'), Input('dropdown-subj-collapse-table-parcel-methods', 'value')]) def update_parc_table_subj_collapse(scalar, segmentation_method, 
parcellation_method): return build_individual_subject_parcel_table(selected_subject_id, segmentation_method, parcellation_method, scalar) @app.callback( Output('subject_collapse_fissure_img', 'figure'), [Input('dropdown-subj-collapse-segm-methods', 'value'), Input('dropdown-subj-collapse-scalars', 'value')]) def update_fissure_image(segmentation_method, scalar): return build_fissure_image(subject_id=selected_subject_id, segmentation_method=segmentation_method, scalar=scalar) # Update remove subject tooltip @app.callback( Output('subject-collapse-tooltip', 'children'), [Input('dropdown-subj-collapse-segm-methods', 'value')]) def update_remove_subject_tooltip(segmentation_method): return "This will remove only the {} segmentation, indicated on the dropdown to the left".format(segmentation_method) # Quality collapse ---------------------------------------------------- # Bridge div @app.callback( Output('bridge-div', 'className'), [Input({'type':'btn-remove-subject', 'index': ALL}, 'n_clicks')], [State('dropdown-subj-collapse-segm-methods', 'value')]) def quality_control_bridge(remove_clicks, dropdown_segmentation_method): global dict_removed_subjects trigger = dash.callback_context.triggered[0] btn_id = ast.literal_eval(trigger["prop_id"][:-9]) if remove_clicks != [None]: dict_removed_subjects[dropdown_segmentation_method].append(btn_id['index']) return 'trash_'+btn_id['index'] else: return dash.no_update # Remove selected subjects @app.callback( Output('photo-container', 'children'), [Input('remove_btn', 'n_clicks'), Input('restore_btn', 'n_clicks'), Input('dropdown-quality-threshold', 'value'), Input('dropdown-quality-scalar', 'value'), Input('bridge-div', 'className')], [State({'type': 'remove-cbx', 'index': ALL}, 'id'), State({'type': 'remove-cbx', 'index': ALL}, 'value')]) def remove_quality_images(n_clicks, restore_clicks, threshold, scalar, bridge_flag, ids, values): global dict_removed_subjects trigger = dash.callback_context.triggered[0] removed_counter = 0 if restore_clicks is not None and trigger['prop_id'] == 'restore_btn.n_clicks': for segmentation_method in dict_segmentation_methods.keys(): removed_counter += len(dict_removed_subjects[segmentation_method]) dict_removed_subjects[segmentation_method] = [] elif n_clicks is not None and trigger['prop_id'] == 'remove_btn.n_clicks': for dict_id, value in zip(ids,values): if value == ['Remove']: segmentation_method, subject_key = dict_id['index'].rsplit('-')[-2:] dict_removed_subjects[segmentation_method].append(subject_key) dict_removed_subjects[segmentation_method] = list(set(dict_removed_subjects[segmentation_method])) removed_counter += 1 elif trigger['prop_id'] == 'dropdown-quality-scalar.value' or trigger['prop_id']=='dropdown-quality-threshold.value': return build_quality_images(threshold, scalar) elif trigger['prop_id'] == 'bridge-div.className': return build_quality_images(threshold, scalar) if removed_counter > 0: return build_quality_images(threshold, scalar) else: return dash.no_update # (Un)Select all subjects (check all remove checkboxes) @app.callback( Output({'type': 'remove-cbx', 'index': ALL}, 'value'), [Input('select_all_btn', 'n_clicks'), Input('unselect_all_btn', 'n_clicks')], [State({'type': 'remove-cbx', 'index': ALL}, 'value')]) def select_all_subjects(select_clicks, unselect_clicks, list_bts): trigger = dash.callback_context.triggered[0] if select_clicks is not None and trigger['prop_id'] == 'select_all_btn.n_clicks': return [['Remove']] * len(list_bts) elif unselect_clicks is not None and trigger['prop_id'] == 
'unselect_all_btn.n_clicks': return [[]] * len(list_bts) else: return dash.no_update # Open quality collapse @app.callback( [Output("dashboard", "className"), Output("quality-collapse","is_open")], [Input("quality-button","n_clicks"), Input("btn-exit-quality","n_clicks")], [State("dashboard", "className")]) def open_quality_collapse(n_clicks, exit_clicks, className): if n_clicks is not None: trigger = dash.callback_context.triggered[0] if trigger["prop_id"].split(".")[0][-18:-2] == 'btn-exit-quality': return ["twelve columns", False] else: if className == 'notquality columns': return ["twelve columns", False] elif className == 'twelve columns': return ["notquality columns", True] else: return ["twelve columns", False] ''' @app.callback( [Output('scatter_plot_2', 'figure'), Output('scatter_matrix_2', 'figure')], [Input('scatter_plot', 'selectedData'), Input('scatter_matrix', 'selectedData')], [State('scatter_plot', 'figure'), State('scatter_matrix', 'figure')]) def get_selected_data(selection1, selection2, fig1, fig2): if fig1 is None: raise PreventUpdate else: selectedpoints = [] for selected_data in [selection1, selection2]: if selected_data and selected_data['points']: selectedpoints = selectedpoints + [p['customdata'] for p in selected_data['points']] if fig1 is not None and fig2 is not None: fig1 = go.Figure(fig1) fig1.update_traces(selectedpoints=selectedpoints, customdata=list(path_dict.keys())) fig2 = go.Figure(fig2) fig2.update_traces(selectedpoints=selectedpoints, customdata=list(path_dict.keys())) return fig1, fig2 ''' ''' @app.callback( Output('scatter_matrix', 'figure'), [Input('scatter_matrix', 'selectedData'), Input('scatter_plot', 'selectedData')], [State("dropdown-scalars-right","value"), State("dropdown-scalars-left","value")]) def update_selected_points(selection1, selection2, scalar_x, scalar_y): selected_points = [] for selected_data in [selection1, selection2]: if selected_data is not None: for point_dict in selected_data['points']: print(point_dict) selected_points.append(point_dict['pointIndex']) if selected_points == []: selected_points = None return build_segm_scattermatrix(selected_points=selected_points) ''' # SERVER CONFIG --------------------------------------------------------------------------------- port = opts.port def open_browser(): url = "http://localhost:{0}".format(port) webbrowser.open(url) if __name__ == "__main__": Timer(1.25, open_browser).start() app.run_server(debug=False, port=port, host='0.0.0.0')
A growing number of large health-care plans are asking patients to grade their doctors: How long are they kept waiting in the office? Can the doctor be reached at night and on weekends? Does the doctor listen as patients describe symptoms? How well is a treatment explained? Some health maintenance organizations use the grades as one criterion in paying the doctors. Not surprisingly, many doctors think this is a bad idea. At least 34 million Americans are enrolled in H.M.O. plans, and more than 2.9 million are in plans that use patient evaluations to help determine doctors' bonuses. The number of such plans is steadily increasing. Measuring patient satisfaction is one step in a growing effort to set national standards of health care, doctors and health-care executives say. It is also a way for the health plans to persuade clients that their opinions are valued, in the hope of keeping them as customers. The plans pay doctors a set fee of $10 a month, on average, for each family member enrolled, no matter how much or how little care is required. The system is intended to discourage doctors from ordering expensive tests and procedures that may not be needed. Critics say a patient's health may sometimes suffer because doctors are rewarded for withholding care to meet budget goals. Indeed, some people enrolled in the plans have complained of the difficulty of persuading their general-care physicians to refer them to specialists. The H.M.O.'s say the patients' evaluations - the grades are excellent, very good, good, fair, poor - count for 25 to 33 percent of a doctor's overall rating. The doctor's ability to hold down costs provides 33 to 50 percent of the rating. The balance of the rating reflects other measures of care, including an examination of patients' charts to determine whether the care seems appropriate. Doctors with poor ratings lose out on bonuses, and some plans reduce the set fees the doctors receive for each member under their care. If the scores do not improve, the plans often stop referring new members to the doctors or simply drop the doctors from their roster. U.S. Healthcare Inc., one of the first health plans to link patient evaluations to doctors' bonuses, said it drops 1 or 2 percent of its 3,600 primary-care physicians each year because of low ratings. It began the program in September 1987. But Maryann O'Sullivan, executive director of Health Access, a consumer advocacy coalition based in San Francisco, welcomed the idea of a report card. ''Anything that bolsters the doctor-patient relationship and educates doctors in establishing those relationships has got to be good for the patient's health,'' she said. Health-care executives and economists say the health plans have strong business reasons for keeping their members satisfied. The plans rely on keeping a large number of relatively healthy members so that costs are widely spread. ''People who are essentially well are the basis of the whole thing,'' said John E. Ware Jr., a senior scientist at the New England Medical Center in Boston. Dissatisfied members ''have feet - they'll walk away from you,'' said Leonard Abramson, president and chief executive of U.S. Healthcare. The company, based in Blue Bell, Pa., sent questionnaires to 909,000 adult members last year and received 201,000 responses. Several of the nation's largest health-coverage plans now offer bonuses in addition to the fixed payments. And Dr. Michael R.
Soper, national medical director of the Cigna Corporation's H.M.O.'s, said they had increased the doctors' basic pay under the new system. The bonus systems are an attempt by health maintenance plans to find a way to replace the widespread practice of withholding part of the doctors' fees and not making full payment unless the plan meets its annual budget goals. If a plan agreed to pay a doctor $1,000 a month to treat 100 patients, for example, it might pay $800 a month and distribute the other $200 a month in a lump sum at the end of the year - but only if the plan stayed within its budget. Plans that use the patient report cards as one measure of performance include U.S. Healthcare, Cigna, at least one Blue Cross H.M.O., some units of the Kaiser Foundation Health Plans in Northern California and AV-MED Inc., an H.M.O. based in Miami. The Aetna Life and Casualty Company, which has 1.5 million people enrolled in its health maintenance organizations, is planning a similar program, said Dr. David L. Potash, Aetna's vice president of medical affairs. U.S. Healthcare also reviews patient charts to determine, for example, if children receive immunizations and if doctors order appropriate periodic tests, like checking patients' blood pressure and cholesterol levels. The H.M.O.'s also look at such practices with an eye to holding down costs. When family-care physicians, internists and pediatricians are found to send patients to specialists and hospitals more often than average for those participating in a plan, they are often put on notice that they are exceeding the plan's budget goals. The Kaiser Foundation, whose plans have 6.3 million members, said its medical centers regularly surveyed patients. In some Kaiser centers, including the one in San Rafael, Calif., ''the results become a basis of decisions on merit and bonuses,'' said Dr. David Lawrence, a senior vice president. He said Kaiser patients in Hawaii were surveyed in an ''exit poll'' as they left the waiting room after an appointment. Some companies insist that the administrators running their employee health plans make sure their patients are satisfied. But evaluating the evaluations is not always easy. And one concern for doctors is that they often cannot control some problems that annoy patients, like delays in obtaining results of laboratory tests, said Dr. Donald M. Berwick, a pediatrics professor at Harvard Medical School and expert on health-care quality.
/*
Package mysqldriver is a GC optimized MySQL driver

Concurrency

DB struct manages a pool of connections to MySQL. A connection itself isn't
thread-safe, so it should be obtained per go-routine. It's important to return
a connection back to the pool when it's no longer needed for further reuse.

	db := mysqldriver.NewDB("root@tcp(127.0.0.1:3306)/test", 10)
	for i := 0; i < 10; i++ {
		go func() {
			conn, err := db.GetConn()
			if err != nil {
				// handle error
			}
			defer db.PutConn(conn) // return connection to the pool

			// perform queries
		}()
	}

Reading rows

mysqldriver reads data from the DB in sequential order, which means the whole
result set of the first query must be read before executing another one. The
number of column values read and their types must match the number of columns
in the query.

	rows, err := conn.Query("SELECT id, name, married FROM people")
	if err != nil {
		// handle error
	}
	for rows.Next() { // always read all rows
		id := rows.Int()       // order of columns must be preserved
		name := rows.String()  // type of the column must match with DB type
		married := rows.Bool() // all column's values must be read
	}
	if err = rows.LastError(); err != nil {
		// Handle error if any occurred during reading packets from DB.
		// When error occurred during reading from the stream
		// connection must be manually closed to prevent further reuse.
		conn.Close()
	}

When there is no need to read the whole result set, for instance when an error
occurred during parsing data, the connection must be closed to prevent further
reuse, as it's in an invalid state.

	conn, err := db.GetConn()
	if err != nil {
		// handle error
	}
	// It's safe to return a closed connection to the pool.
	// It will be discarded and won't be reused.
	defer db.PutConn(conn)

	rows, err := conn.Query("SELECT name FROM people")
	if err != nil {
		// handle error
	}
	for rows.Next() {
		rows.Int() // causes type error
	}
	if err = rows.LastError(); err != nil {
		// Close the connection to make sure
		// it won't be reused by the pool.
		conn.Close()
	}
*/
package mysqldriver
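Pulling those fragments together, a minimal end-to-end sketch might look like the following. The import path and the people table are assumptions for illustration; the calls themselves mirror the API documented above.

package main

import (
	"log"

	"github.com/pubnative/mysqldriver-go" // assumed import path for the mysqldriver package
)

func main() {
	// Pool of up to 10 idle connections, as in the example above.
	db := mysqldriver.NewDB("root@tcp(127.0.0.1:3306)/test", 10)

	conn, err := db.GetConn()
	if err != nil {
		log.Fatal(err)
	}
	defer db.PutConn(conn) // always return the connection to the pool

	rows, err := conn.Query("SELECT id, name, married FROM people")
	if err != nil {
		log.Fatal(err)
	}
	for rows.Next() { // read the whole result set before issuing another query
		id := rows.Int()
		name := rows.String()
		married := rows.Bool()
		log.Println(id, name, married)
	}
	if err = rows.LastError(); err != nil {
		conn.Close() // stream is in an invalid state; do not reuse
		log.Fatal(err)
	}
}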
Acridine orange-RNA fluorescence of maturing neurons in the perinatal rat brain Cytoplasmic RNA was demonstrated in neurons of the developing rat brain using acridine orange (AO) as a histochemical marker. Fetuses of 18 days and postnatal rats of 1, 7, 14, and 21 days as well as adults several months old were studied. Neuroblasts of the germinal matrix exhibited minimal or no orange-red AO-RNA fluorescence, but immature nerve cells in migration within the cerebral hemispheres of the rat showed a weak but definite orange colour. This finding contrasts with the absence of AO-RNA fluorescence in migrating human neuroblasts. Neurons of the neocortical plate showed uniformly strong fluorescence. In the hippocampus, the most pronounced increase in AO-demonstrated RNA was in pyramidal and granule cells during the first postnatal week. The cerebellum showed a paradoxically stronger fluorescence of granule cells in the 18-day fetus than at birth, and almost no AO-RNA fluorescence of granule cells at 21 days of age or in adults. Motor neurons showed the strongest fluorescence of all neurons. It is likely that the increase in cytoplasmic RNA in neurons corresponds to the onset of neurotransmitter biosynthesis, but transitory fetal neuropeptides may explain the stronger fluorescence of some neurons in young individuals. The reliable and simple AO method provides a supplementary means of studying one aspect of neuronal maturation.
An unmanned aerial vehicle (UAV) or "drone" or unmanned aircraft system (UAS) is an aerial vehicle designed to be used without a human pilot onboard and controlled remotely or flown autonomously through software flight plans. UAVs may fly everywhere, including over private and public property. UAVs have been used for various purposes including search and rescue, traffic monitoring, weather forecasting, crowd monitoring, agriculture management, commercial package deliveries, aerial photography, surveillance, and the like. However, despite these benefits, aerial surveillance by UAVs raises significant privacy issues. Due to the heights at which UAVs can fly and their sometimes small structure, they are often beyond the range of sight for most people, which can give rise to a disconcerting feeling that one may be monitored and information may be gathered by a UAV without one's knowledge. For example, such a UAV may hover at an apartment window and anonymously perform video surveillance to gather information about an individual's private life.
/* \brief Inject particles during the simulation
 * \param injection_box: domain where particles should be injected.
 */
void
PhysicalParticleContainer::ContinuousInjection (const RealBox& injection_box)
{
    const int lev = 0;
    AddPlasma(lev, injection_box);
}
Epidemiology and screening: what's new? Colorectal cancer (CRC) is the third most common cancer worldwide. In 2012, around 3.45 million new cancer cases and 1.75 million deaths due to cancer were estimated in the 28 EU countries; colorectal cancer accounted for 13% of new cases (age-standardized incidence 55.7/100 000 men and 34.6/100 000 women) and 12% of deaths (age-standardized mortality 25.2/100 000 men and 15.4/100 000 women). The male-to-female ratio remains consistently higher for rectal than for colon cancer (1.3). In Italy, with a population of 60 million, in the same year about 48 000 new cases of CRC (14% of the total) and 19 280 deaths (16% of the total) were projected. In the last decade, CRC mortality has moderately but steadily declined across Europe, particularly in western countries, but the disease-related health-care (direct and indirect) cost remains huge. In the EU it accounted for 10% of the total health-care cost of cancer; a very similar rate was calculated for Italy. Such a remarkable cost in terms of morbidity, life losses and economic expenditure is very disappointing, as CRC death is largely preventable by more widespread and equitable delivery of existing options for effective early detection and curative treatment. In 2003 the European Council recommended the prioritization of the activation of organized cancer screening programs, and currently several EU countries have implemented a screening program. Early diagnosis represents a key tool to reduce CRC mortality, and during the last three decades interest has focused on identifying cost-effective methods to screen asymptomatic people. Desirable characteristics of a good screening test are acceptability, a sensitivity that allows detection not only of early-stage CRC but also of advanced adenomas (i.e. adenomas sized ≥ 10 mm, tubulovillous or with high-grade dysplasia), and a high specificity to minimize the risks for cancer-free individuals and to maintain an acceptable cost of a population-based program. Noninvasive screening tests
Raman, Hyper-Raman, Hyper-Rayleigh and Two-Photon Excited Luminescence Microspectroscopy in an Optical Tweezers System Single-beam optical tweezers have been used as a tool to manipulate biological material at the cellular level, as well as to measure mechanical properties such as forces at the femtonewton scale and the stiffness or elasticity of membranes and of a single DNA macromolecule. We used the optical tweezers to study diseases related to the mechanical properties of individual red blood cells, and we demonstrated the importance of using spectroscopic techniques while manipulating particles and living cells. The ability to perform spectroscopy on a living microorganism optically trapped in any desired neighborhood means that we can dynamically observe chemical reactions and/or changes in mechanical properties in real time. Therefore, we decided to set up an optical tweezers plus Raman system like the one described in reference. We present a homemade confocal spectrometer set-up using a SpectraPro 300i Acton Research Corporation 30 cm triple-grating monochromator equipped with a Princeton Instruments liquid-cooled back-illuminated CCD, using a femtosecond Ti:sapphire laser, Spectra Physics Tsunami. Previous works used only cw lasers. In our system we have the possibility of using the Tsunami and/or a cw Ti:sapphire laser, Spectra Physics model 3900S. The use of a femtosecond laser with or without a cw laser opens up a great number of different applications and spectroscopies, as discussed below. The drawback of a femtosecond laser is the intrinsically broad linewidth associated with the time-frequency Fourier transform relation ∆ν∆t ≥ 1/2. If the goal is high spectral resolution, we can use the narrow cw laser line or narrow down the femtosecond laser line with an intracavity slit. It is also possible to narrow the femtosecond laser line with an external band-pass filter, but at the cost of losing the power outside the filter range. With the cw laser we used two supernotch filters to reject the laser line from the monochromator. The linewidth of a 100 femtosecond pulse, however, is larger than the 350 cm⁻¹ of the supernotch filter, which leads to a leakage of the laser line out of the supernotch filter spectral range. Nonetheless, because the leaking power was low, it was still possible to observe the Raman lines superimposed on the tail of the Gaussian laser line leakage. Only by using the band-pass filter was it possible to avoid this leakage. Figure 1 shows the obtained Raman spectra for a trapped polystyrene sphere, a red blood cell and a ZnSe sample. One possible application of the pulsed laser is the excitation of luminescence by two-photon absorption (TPA). Two-photon optical processes only happen when two photons meet at the same time and at the same spot, which happens much more frequently for a pulsed laser (photons at the same time) and at the laser focus (photons at the same spot). TPA allows the simultaneous observation of the Raman signal, in the infrared region, and the luminescence, in the visible region. Figure 2 shows the TPA-excited luminescence of ZnSe. Systems that use the absorption of two or more photons are confocal by themselves because, at the right pump power, the signal is generated only within the focal spot of the exciting laser. This makes the signal collection optics not so critical.
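As a rough order-of-magnitude check of the time-bandwidth relation quoted above (an illustration added here, not the authors' calculation), a transform-limited 100 fs pulse has a minimum bandwidth of

\Delta\nu\,\Delta t \ge \tfrac{1}{2}
\;\Rightarrow\;
\Delta\nu \ge \frac{1}{2 \times 100\ \mathrm{fs}} = 5\ \mathrm{THz},
\qquad
\Delta\tilde{\nu} = \frac{\Delta\nu}{c} \approx \frac{5\times 10^{12}\ \mathrm{s^{-1}}}{3\times 10^{10}\ \mathrm{cm\,s^{-1}}} \approx 1.7\times 10^{2}\ \mathrm{cm^{-1}},

and a real, chirped pulse can be considerably broader than this lower bound, which is consistent with the leakage past the notch filter reported above.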
package org.gramar.filestore;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Properties;

import org.gramar.IFileStore;
import org.gramar.IGramarContext;
import org.gramar.IGramarStatus;
import org.gramar.exception.GramarException;
import org.gramar.exception.GramarPlatformConfigurationException;
import org.gramar.exception.NoSuchResourceException;
import org.gramar.resource.UpdateResource;
import org.gramar.util.GramarHelper;

public abstract class FileStore implements IFileStore {

	protected ArrayList<UpdateResource> updates = new ArrayList<UpdateResource>();
	private int minLogLevel = IGramarStatus.SEVERITY_ERROR;

	public FileStore() {
	}

	@Override
	public boolean sameBytes(String relpath, byte[] after) throws IOException {
		try {
			byte before[] = new byte[0];
			before = GramarHelper.getBytes(getFileByteContent(relpath));
			if (before.length != after.length) {
				return false;
			}
			for (int i = 0; i < before.length; i++) {
				if (before[i] != after[i]) {
					return false;
				}
			}
		} catch (NoSuchResourceException e) {
			return false;
		}
		return true;
	}

	@Override
	public void commit(String comment, IGramarContext context) throws GramarException {
		executeUpdates(comment, context);
	}

	protected void executeUpdates(String comment, IGramarContext context) {
		UpdateResource update[] = new UpdateResource[updates.size()];
		updates.toArray(update);
		Arrays.sort(update);
		for (UpdateResource ru: update) {
			try {
				ru.execute(this);
				context.info(ru.report());
			} catch (Exception e) {
				context.error(e);
			}
		}
	}

	@Override
	public void addUpdate(UpdateResource update) {
		updates.add(update);
	}

	@Override
	public void reset() {
	}

	@Override
	public void free() {
		updates = new ArrayList<UpdateResource>();
	}

	@Override
	public void configure(Properties properties) throws GramarPlatformConfigurationException {
	}

	@Override
	public boolean logMessage(String message, int severity) {
		if (minLogLevel <= severity) {
			log(message, severity);
			return true;
		}
		return false;
	}

	@Override
	public int getMinLogLevel() {
		return minLogLevel;
	}

	@Override
	public void setMinLogLevel(int minLogLevel) {
		this.minLogLevel = minLogLevel;
	}
}
Josephson current in a finite-size junction interrupting a superconducting ring We study the behavior of the Josephson current I_J flowing in a finite-size Josephson junction in a superconducting ring in the presence of an externally applied magnetic field H, taking into account the effect of the shielding currents. The set of self-consistent equations for the system can be solved explicitly for I_J in the small self-inductance coefficient limit for non-negligible effective junction areas. It is found that the resulting I_J versus H curve presents a Fraunhofer-like prefactor modulating a periodic quasisinusoidal odd function.
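For reference, the "Fraunhofer-like prefactor" mentioned in the abstract echoes the textbook critical-current diffraction pattern of a uniform short junction (a standard result quoted here for context, not the paper's self-consistent solution):

I_J^{\max}(H) = I_c \left| \frac{\sin\left(\pi\Phi/\Phi_0\right)}{\pi\Phi/\Phi_0} \right|,
\qquad \Phi = \mu_0 H\, d_{\mathrm{eff}} L,
\qquad \Phi_0 = \frac{h}{2e},

where d_eff is the effective magnetic thickness of the barrier and L the junction width, both geometry-dependent quantities.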
package main import ( "database/sql" "fmt" "log" _ "github.com/lib/pq" _ "github.com/jinzhu/gorm/dialects/postgres" ) // DBConnection stores the reference to the connection to the DB type DBConnection struct { db *sql.DB } // NewDBConnection creates a new instance of a DBConnection. func NewDBConnection(addr string) *DBConnection { db := setupDB(addr) return &DBConnection{db: db} } func setupDB(addr string) *sql.DB { // Connect to CockroachDB db, err := sql.Open("postgres", addr) if err != nil { log.Fatal(fmt.Sprintf("Failed to connect to database: %v", err)) } // Create the "servers" and "domains" table. if _, err := db.Exec( `CREATE TABLE IF NOT EXISTS domains ( servers_changed BOOL NULL, ssl_grade VARCHAR(3) NULL, previous_ssl_grade VARCHAR(3) NULL, logo STRING NULL, title STRING NULL, is_down BOOL NULL, last_checked TIMESTAMPTZ NULL, name STRING NOT NULL, PRIMARY KEY (name), INDEX lastchecked (last_checked DESC)) `); err != nil { log.Fatal(err) } if _, err := db.Exec( `CREATE TABLE IF NOT EXISTS servers ( address STRING NOT NULL, ssl_grade VARCHAR(3) NULL, country STRING NULL, owner STRING NULL, domain_name STRING NOT NULL, FOREIGN KEY (domain_name) REFERENCES domains(name)) `); err != nil { log.Fatal(err) } return db } func (connection *DBConnection) createServer(server Server, domainName string) error { _, err := connection.db.Exec( `INSERT INTO servers (address, ssl_grade, country, owner, domain_name) VALUES ($1, $2, $3, $4, $5)`, server.Address, server.SSLGrade, server.Country, server.Owner, domainName, ) return err } func (connection *DBConnection) updateServer(server Server) error { _, err := connection.db.Exec( `UPDATE servers SET ssl_grade = $1, country = $2, owner = $3 WHERE address = $4`, server.SSLGrade, server.Country, server.Owner, server.Address, ) return err } func (connection *DBConnection) deleteServer(serverAddress string) error { _, err := connection.db.Exec( `DELETE FROM servers WHERE address = $1`, serverAddress, ) return err } func (connection *DBConnection) getDomains() ([]Domain, error) { var domains []Domain rows, err := connection.db.Query("SELECT * FROM domains ORDER BY last_checked DESC") if err != nil { log.Fatal(err) } defer rows.Close() for rows.Next() { var domain Domain err = rows.Scan( &domain.ServersChanged, &domain.SSLGrade, &domain.PreviousSSLGrade, &domain.Logo, &domain.Title, &domain.IsDown, &domain.LastChecked, &domain.Name, ) if err != nil { log.Fatal(err) } rows2, err := connection.db.Query( `SELECT address, ssl_grade, country, owner FROM servers WHERE domain_name = $1`, domain.Name, ) if err != nil { log.Fatal(err) } defer rows2.Close() for rows2.Next() { var server Server err := rows2.Scan(&server.Address, &server.SSLGrade, &server.Country, &server.Owner) if err != nil { log.Fatal(err) } domain.Servers = append(domain.Servers, server) } domains = append(domains, domain) } return domains, err } func (connection *DBConnection) getDomain(domainName string) (Domain, error) { var domain Domain rows, err := connection.db.Query(`SELECT * FROM domains WHERE name = $1`, domainName) if err != nil { log.Fatal(err) } defer rows.Close() if rows.Next() { err = rows.Scan( &domain.ServersChanged, &domain.SSLGrade, &domain.PreviousSSLGrade, &domain.Logo, &domain.Title, &domain.IsDown, &domain.LastChecked, &domain.Name, ) if err != nil { log.Fatal(err) } rows2, err := connection.db.Query( `SELECT address, ssl_grade, country, owner FROM servers WHERE domain_name = $1`, domain.Name, ) if err != nil { log.Fatal(err) } defer rows2.Close() for rows2.Next() { var 
server Server err := rows2.Scan(&server.Address, &server.SSLGrade, &server.Country, &server.Owner) if err != nil { log.Fatal(err) } domain.Servers = append(domain.Servers, server) } } return domain, err } func (connection *DBConnection) createDomain(domain Domain) { connection.db.Exec( `INSERT INTO domains (servers_changed, ssl_grade, previous_ssl_grade, logo, title, is_down, last_checked, name) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)`, domain.ServersChanged, domain.SSLGrade, domain.PreviousSSLGrade, domain.Logo, domain.Title, domain.IsDown, domain.LastChecked, domain.Name, ) for _, server := range domain.Servers { connection.createServer(server, domain.Name) } } func (connection *DBConnection) updateDomain(domain Domain) error { _, err := connection.db.Exec( `UPDATE domains SET servers_changed = $1, ssl_grade = $2, previous_ssl_grade = $3, logo = $4, title = $5, is_down = $6, last_checked = $7 WHERE name = $8`, domain.ServersChanged, domain.SSLGrade, domain.PreviousSSLGrade, domain.Logo, domain.Title, domain.IsDown, domain.LastChecked, domain.Name, ) return err }
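A hypothetical caller of the API above might look like this. The DSN, the Domain and Server literals, and the time.Time type of LastChecked (inferred from the TIMESTAMPTZ column) are illustrative assumptions.

// Hypothetical usage of DBConnection; assumes a `time` import alongside the
// file's existing imports, and that Domain.LastChecked is a time.Time.
func exampleUsage() {
	conn := NewDBConnection("postgresql://root@localhost:26257/defaultdb?sslmode=disable")

	domain := Domain{
		Name:        "example.com",
		SSLGrade:    "A+",
		LastChecked: time.Now(),
		Servers: []Server{
			{Address: "192.0.2.10", SSLGrade: "A+", Country: "US", Owner: "Example Inc."},
		},
	}
	conn.createDomain(domain)

	stored, err := conn.getDomain("example.com")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s graded %s with %d servers\n", stored.Name, stored.SSLGrade, len(stored.Servers))
}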
Q: Why are laws of physics always of product forms? A first observation is that all the extant laws of physics are of product forms. This phenomenon is somewhat intriguing. The question is: why do laws of physics always take product forms (products of multiple factors) rather than, say, a sum of two terms? Counterexamples exist in other disciplines of natural science. For example, biology has White's formula (http://www.maths.ed.ac.uk/~aar/papers/eggar.pdf). Since mathematics investigates every possibility, it certainly has results involving sums of exactly two terms. I am seeking a reason other than technical reasons. For example, the importance of the Lorentz transformation in physics does not lie in its mathematical necessity but in its physical implications, as revealed by A. Einstein. H. Poincare, E. Mach, and several others had realized the concept of relativity before Einstein, but, as the paper ``The structure of thoughts'' (for now I cannot recall the exact title) published in Nature points out, discerning a concept is not equivalent to discerning its meaning. It is Einstein who discerned the meaning of relativity. A: You give no link for "product forms" and don't say what you mean by "laws", so my answer will be general. There are lots of complicated formulae in physics, so much so that numerical methods need to be used to calculate any reasonable results. They are not laws, because a "law" in formal physics has to be of the same importance as a mathematical axiom, posited at the head of a theory. There used to be "laws" that we now derive from the postulates and axioms of physical theories. For example, the "law" of conservation of energy and momentum is derivable by the application of Noether's theorem.
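For concreteness (an illustration added here, not part of the original exchange): typical physical laws are products of powers of the relevant quantities, while White's formula cited in the question is an irreducible sum of exactly two terms,

F = G\,\frac{m_1 m_2}{r^2},
\qquad pV = nRT,
\qquad V = IR
\qquad \text{versus} \qquad
Lk = Tw + Wr .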
Effect Of Withania Coagulans And Liraglutide On Serum GLP-1, Postprandial And Fasting Blood Glucose In Streptozotocin-Induced Diabetic Rats Objective: To evaluate the effect of Withania coagulans and liraglutide on serum glucagon-like peptide-1, postprandial and fasting blood glucose levels in streptozotocin-induced diabetic rats. Study Design and Setting: This randomized controlled trial was conducted at Islamic International Medical College in collaboration with the National Institute of Health, Islamabad. Methodology: This randomized controlled study was performed on a total of forty male Sprague Dawley rats, which were initially divided into two groups: Group A (n=10) and an experimental group (n=30). Diabetes in the experimental group was induced by intraperitoneal administration of streptozotocin for 5 days (30 mg/kg/day). Diabetes was confirmed in the experimental group by measuring fasting blood glucose (mg/dl) on day 6. The experimental group was further divided into Group B (diabetic control), Group C (Withania coagulans-treated) and Group D (liraglutide-treated). Blood sampling was done on day 30, and serum GLP-1, postprandial and fasting blood glucose levels were measured and compared in all groups. Results: Fasting and postprandial blood glucose levels of groups C and D were significantly reduced as compared to group B. Serum GLP-1 levels were significantly increased in groups C and D as compared to group B. Conclusion: Withania coagulans reduces hyperglycemia in diabetic rats by increasing the GLP-1 hormone.
Particle counter as a tool to control pre-hydrolyzed coagulant dosing and rapid filtration efficiency in a conventional treatment system. Filtration efficiency in a conventional water treatment system was analyzed in the context of pre-hydrolyzed coagulant overdosing. Two commercial coagulants of different aluminum speciation were tested. A study was carried out at a water treatment plant supplied with raw water of variable quality. The lack of stability of water quality caused many problems with maintaining the optimal coagulant dose. The achieved results show that the type of coagulant had a very strong influence on the effectiveness of filtration resulting from the application of an improper coagulant dose. The overdosing of high basicity coagulant (PAC85) caused a significant increase of fine particles in the outflow from the sedimentation tanks, which could not be retained in the filter bed due to high surface charge and the small size of hydrolysis products. When using a coagulant of lower basicity (PAC70), it was much easier to control the dose of coagulant and to adjust it to the changing water quality.
/**
 * Print the given class on screen
 */
public void list(String name) {
    try {
        JavaClass clazz;
        if (name.startsWith("[")) {
            return;
        }
        for (int idx = 0; idx < excludeName.size(); idx++) {
            if (name.startsWith((String) excludeName.elementAt(idx)))
                return;
        }
        if ((clazz = Repository.lookupClass(name)) == null) {
            clazz = new ClassParser(name).parse();
        }
        printClassDependencies(name, clazz.getConstantPool());
    } catch (IOException exc) {
        System.err.println("Error loading class " + name + " (" + exc.getMessage() + ")");
        exc.printStackTrace();
    } catch (Exception exc) {
        System.err.println("Error processing class " + name + " (" + exc.getMessage() + ")");
        exc.printStackTrace();
    }
}
Cloning of complementary DNA for GAP-43, a neuronal growth-related protein. GAP-43 is one of a small subset of cellular proteins selectively transported by a neuron to its terminals. Its enrichment in growth cones and its increased levels in developing or regenerating neurons suggest that it has an important role in neurite growth. A complementary DNA (cDNA) that encodes rat GAP-43 has been isolated to study its structural characteristics and regulation. The predicted molecular size is 24 kilodaltons, although its migration in SDS-polyacrylamide gels is anomalously retarded. Expression of GAP-43 is limited to the nervous system, where its levels are highest during periods of neurite outgrowth. Nerve growth factor or adenosine 3',5'-monophosphate induction of neurites from PC12 cells is accompanied by increased GAP-43 expression. GAP-43 RNA is easily detectable, although at diminished levels, in the adult rat nervous system. This regulation of GAP-43 is concordant with a role in growth-related processes of the neuron, processes that may continue in the mature animal.
A comparative assessment of plant flammability through a functional approach: The case of woody species from the Argentine Chaco region Recent changes to fire regimes in many regions of the world have led to renewed interest in plant flammability experiments to understand and predict the consequences of such changes. These experiments require the development of practical and standardised flammability testing protocols. The research aims were (i) to compare plant flammability assessments carried out using two different approaches, namely functional trait analysis and testing with a shoot-level device; and (ii) to evaluate the effect of disturbances and seasonal variability on flammability. The study area was located in the Western Chaco region, Argentina, and 11 species were selected based on their representativeness in forests. We studied six functional traits related to flammability, growth habit and foliar persistence, in forests without disturbances over the last three decades as well as in disturbed forests. The seasonal variation of these functional traits was evaluated over two consecutive years. The functional trait flammability index (FI) and shoot-level measurements followed standard protocols. Sixty per cent of the species measured presented a high to very high FI. The results of both assessment methods were significantly correlated. Both methods identified the same species as having medium flammability, but differed in regards to the most flammable species. Senegalia gilliesii was identified as the most flammable species when using functional trait analysis, whereas shoot-level assessments found Larrea divaricata and Schinus johnstonii to be the most flammable. There were no disturbance effects on the FI, but there was seasonal variation. Our results validate the use of functional traits as a predictive method of flammability testing and represent the first global effort comparing flammability obtained through functional trait analysis with empirical measurements. The significant correlation between both methods allows the selection of the one that is more appropriate for the size of the area to be evaluated and for the availability of technical resources. Abstract in Spanish is available with online material.
"""Constants for the bot."""
import os
from pathlib import Path

TOKEN = os.environ.get("FRIENDO_TOKEN")

MEME_USERNAME = os.environ.get("MEME_USERNAME")
MEME_PASSWORD = os.environ.get("MEME_PASSWORD")

# event api key
EVENT_API_KEY = os.environ.get("EVENT_API_KEY")

COMMAND_PREFIX = "."

VERSION = "1.2."

NAME = "Friendo"

BASE_DIR = os.path.dirname(os.path.abspath(__file__))

IMG_CACHE = Path(BASE_DIR, "image_cache")

BASE_GITHUB_REPO = "https://github.com/fisher60/Friendo_Bot"
/// <summary>Sets applicable pia.</summary>
///
/// <remarks>This should be called after all the pia's have been calculated.
/// It also sets the indicator for type of pia.</remarks>
void PiaCal::setHighPia()
{
    piaData.setIappn(-1);
    for (vector< PiaMethod * >::iterator iter = piaMethod.begin();
         iter != piaMethod.end(); iter++) {
        if (piaData.highPia < (*iter)->piaEnt) {
            piaData.highPia = (*iter)->piaEnt;
            piaData.setIappn((*iter)->getMethod());
            highPiaMethod = (*iter);
        }
    }
    if (highPiaMethod != static_cast<PiaMethod *>(0)) {
        highPiaMethod->setApplicable(PiaMethod::HIGH_PIA);
    }
    setPifc();
}
/**
 * Displays the image picker
 * @param view The button
 */
public void onShowImage(View view) {
    PhotoPickerIntent intent = new PhotoPickerIntent(MainActivity.this);
    intent.setPhotoCount(9);
    intent.setShowCamera(false);
    intent.setShowGif(false);
    startActivityForResult(intent, REQUEST_CODE);
}
Differential expression of nerve terminal protein isoforms in VAChT-containing varicosities of the spinal cord ventral horn Of the different types of synaptic contacts with the mammalian spinal motoneuron, the synapse made by the cholinergic, so-called C-terminal of unknown origin and function has special morphological characteristics. Thus, in this synapse, there is no postsynaptic density but rather a large subsynaptic cistern in the motoneuron. To see whether this particular arrangement imposes special demands on the transmitter release machinery, we examined the presence of nerve terminal proteins in the C-terminal by using immunohistochemistry. Cholinergic nerve fibers and terminals in the spinal cord ventral horn were identified with an antiserum to the vesicular acetylcholine transporter (VAChT) protein. Immunohistochemistry in combination with confocal laser microscopy showed the presence of synaptosomal-associated protein of 25 kDa (SNAP-25), syntaxin, cysteine string protein (CSP), synuclein, synapsin I, synapsin I/II, synaptotagmin I, synaptotagmin I/II, synaptophysin, and synaptobrevin-2-like immunoreactivity (LI) in VAChT-containing C-terminals. Synaptotagmin III and synaptobrevin 1 could not be demonstrated in this type of terminal. VAChT-containing varicosities in the Renshaw cell area, with a probable origin from motoneuron axon collaterals, exhibited CSP, synapsin I/II, and synaptobrevin-1 LI, but not SNAP-25, syntaxin, synuclein, synapsin I, synaptotagmin I, synaptotagmin I/II, synaptophysin and synaptobrevin-2 LI. The results suggest a differential content of nerve terminal proteins and their isoforms in cholinergic C-terminals apposing motoneurons and in the Renshaw cell area. It is concluded that C-terminals contain synaptic proteins necessary for fast transmitter release, and their origin should not be the motoneurons themselves. J. Comp. Neurol. 411:578-590, 1999. © 1999 Wiley-Liss, Inc.
// ProposalByToken returns the single proposal identified by the provided token.
func (db *ProposalDB) ProposalByToken(proposalToken string) (*pitypes.ProposalInfo, error) {
	if db == nil || db.dbP == nil {
		return nil, errDef
	}

	db.mtx.RLock()
	defer db.mtx.RUnlock()

	return db.proposal("TokenVal", proposalToken)
}
package au.gov.qld.fire.jms.domain.ase;

import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.FetchType;
import javax.persistence.Id;
import javax.persistence.IdClass;
import javax.persistence.JoinColumn;
import javax.persistence.ManyToOne;
import javax.persistence.Table;
import javax.persistence.Transient;

import au.gov.qld.fire.domain.Auditable;
import au.gov.qld.fire.domain.location.Address;

/**
 * @author <NAME> (mailto:<EMAIL>)
 */
@Entity
@IdClass(SubPanelPK.class)
@Table(name = "SUB_PANEL")
public class SubPanel extends Auditable<SubPanelPK> {

    /** serialVersionUID */
    private static final long serialVersionUID = 489361584959452939L;

    /** identifier field */
    private Long aseFileId;

    /** identifier field */
    private Long subPanelOrderId;

    private String buildingName;

    private AseFile aseFile;

    private Address address;

    @Transient
    public SubPanelPK getId() {
        if (super.getId() == null) {
            super.setId(new SubPanelPK(getAseFileId(), getSubPanelOrderId()));
        }
        return super.getId();
    }

    @Id
    @Column(name = "ASE_FILE_ID", nullable = false)
    public Long getAseFileId() {
        return this.aseFileId;
    }

    public void setAseFileId(Long aseFileId) {
        this.aseFileId = aseFileId;
    }

    @Id
    @Column(name = "SUB_PANEL_ORDER_ID", nullable = false)
    public Long getSubPanelOrderId() {
        return this.subPanelOrderId;
    }

    public void setSubPanelOrderId(Long subPanelOrderId) {
        this.subPanelOrderId = subPanelOrderId;
    }

    /**
     * @hibernate.property
     *     column="BUILDING_NAME"
     *     length="250"
     *     not-null="true"
     */
    @Column(name = "BUILDING_NAME", nullable = false)
    public String getBuildingName() {
        return this.buildingName;
    }

    public void setBuildingName(String buildingName) {
        this.buildingName = buildingName;
    }

    /**
     * @hibernate.many-to-one
     *     update="false"
     *     insert="false"
     *
     * @hibernate.column
     *     name="ASE_FILE_ID"
     */
    @ManyToOne(fetch = FetchType.LAZY)
    @JoinColumn(name = "ASE_FILE_ID", nullable = false, insertable = false, updatable = false)
    public AseFile getAseFile() {
        return this.aseFile;
    }

    public void setAseFile(AseFile aseFile) {
        this.aseFile = aseFile;
    }

    /**
     * @hibernate.many-to-one
     *     not-null="true"
     * @hibernate.column name="ADDRESS_ID"
     */
    @ManyToOne(fetch = FetchType.LAZY)
    @JoinColumn(name = "ADDRESS_ID", nullable = false)
    public Address getAddress() {
        return this.address;
    }

    public void setAddress(Address address) {
        this.address = address;
    }
}
Inferring calendar event attendance The digital personal calendar has long been established as an effective tool for supporting workgroup coordination. For the new class of ubiquitous computing applications, however, the calendar can also be seen as a sensor, providing both location and availability information to these applications. In most cases, however, the calendar represents a sequence of events that people could (or should) attend, not their actual daily activities. To assist in the accurate determination of user whereabouts and availability, we present Ambush, a calendar system extension that uses a Bayesian model to predict the likelihood of one's attendance at the events listed on his or her schedule. We also present several techniques for the visual display of these likelihoods in a manner intended to be quickly interpreted by users examining the calendar.
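The abstract does not spell out Ambush's model, so as a purely illustrative sketch (the feature names and probabilities below are invented, not taken from the paper), a naive-Bayes attendance estimate over binary event features could be computed like this:

package main

import "fmt"

// Per-feature likelihoods: P(feature present | attended) and P(feature present | skipped).
type likelihood struct{ attended, skipped float64 }

// attendanceProbability folds binary event features into a posterior P(attend).
func attendanceProbability(prior float64, obs map[string]bool, model map[string]likelihood) float64 {
	pAttend, pSkip := prior, 1-prior
	for feature, present := range obs {
		l, ok := model[feature]
		if !ok {
			continue // unmodeled feature: ignore
		}
		if present {
			pAttend *= l.attended
			pSkip *= l.skipped
		} else {
			pAttend *= 1 - l.attended
			pSkip *= 1 - l.skipped
		}
	}
	return pAttend / (pAttend + pSkip) // normalize over the two hypotheses
}

func main() {
	model := map[string]likelihood{
		"recurring":    {attended: 0.7, skipped: 0.5},
		"has_location": {attended: 0.8, skipped: 0.4},
	}
	obs := map[string]bool{"recurring": true, "has_location": false}
	fmt.Printf("P(attend) = %.2f\n", attendanceProbability(0.6, obs, model))
}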
export default function Logo() {
  return (
    <svg
      xmlns="http://www.w3.org/2000/svg"
      viewBox="0 0 137.39293 137.39399"
      className="logo"
    >
      <defs>
        <clipPath>
          <path d="M0 841.89h595.28V0H0v841.89z" />
        </clipPath>
      </defs>
      <g>
        <path d="M68.69 116.873c-26.56 0-48.17-21.61-48.17-48.17 0-26.565 21.61-48.176 48.17-48.176 26.568 0 48.175 21.61 48.175 48.175 0 26.56-21.607 48.17-48.175 48.17" fill="#f40e1b" />
      </g>
      <g>
        <path d="M68.69 128.073c-1.713 0-3.108-1.4-3.108-3.11 0-1.717 1.394-3.114 3.107-3.114 29.3 0 53.133-23.835 53.133-53.133 0-29.294-23.834-53.124-53.133-53.124-29.294 0-53.13 23.83-53.13 53.124 0 13.62 5.17 26.587 14.555 36.498 1.18 1.246 1.125 3.215-.122 4.397-1.248 1.186-3.213 1.12-4.398-.12C15.115 98.42 9.34 83.936 9.34 68.717 9.34 35.994 35.966 9.37 68.69 9.37c32.73 0 59.354 26.625 59.354 59.347 0 32.73-26.622 59.355-59.353 59.355" fill="#fabe49" />
      </g>
      <g>
        <path d="M68.69 137.394C30.814 137.394 0 106.578 0 68.702c0-1.717 1.39-3.112 3.108-3.112 1.718 0 3.11 1.395 3.11 3.11 0 34.45 28.027 62.475 62.472 62.475 34.454 0 62.48-28.026 62.48-62.474 0-34.45-28.026-62.476-62.48-62.476-1.712 0-3.107-1.39-3.107-3.114C65.583 1.4 66.978 0 68.69 0c37.883 0 68.703 30.825 68.703 68.7 0 37.878-30.82 68.694-68.703 68.694" fill="#f40e1b" />
      </g>
      <g>
        <path d="M61.516 57.212s-3.374 5.32-4.422 13.616c3.61 3.518 5.812 5.573 11.583 8.4 6.757-3.07 10.94-7.953 11.57-8.4-1.244-7.71-4.235-13.045-4.422-13.616-7.778-1.986-14.307 0-14.307 0" fill="#f40e1b" />
      </g>
      <g>
        <path d="M58.028 50.886c2.11-5.21 4.79-6.316 7.146-8.313-2.774-2.007-5.37-2.316-6.983-2.332-4.625 1.763-8.69 4.653-11.863 8.34.03 1.057.203 2.91.874 5.186 3.58-1.602 6.05-2.232 10.83-2.88" fill="#f40e1b" />
      </g>
      <g>
        <path d="M68.676 87.515c-3.832 2.52-7.364 3.465-10.366 3.703.93 2.846 1.414 4.076 1.832 4.767 2.7.82 5.564 1.262 8.533 1.262 2.978 0 5.86-.446 8.575-1.277.4-.676.853-1.91 1.782-4.753-3.002-.237-6.536-1.18-10.357-3.702" fill="#f40e1b" />
      </g>
      <g>
        <path d="M79.343 50.886c4.785.647 7.258 1.278 10.832 2.88.66-2.253.836-4.086.876-5.146-3.23-3.764-7.38-6.703-12.11-8.464-1.61.052-4.06.475-6.736 2.417 2.355 1.997 5.03 3.103 7.14 8.313" fill="#f40e1b" />
      </g>
      <g>
        <path d="M50.353 75.36c-3.555-3.26-4.835-5.187-6.855-8.544-2.077 1.673-3.387 3.992-3.988 5.218.687 4.715 2.485 9.065 5.12 12.776.888.646 2.307 1.314 4.853 1.695-.402-2.99-.536-6.23.87-11.146" fill="#f40e1b" />
      </g>
      <g>
        <path d="M94.08 66.817c-2.02 3.356-3.3 5.282-6.857 8.543 1.407 4.916 1.27 8.154.863 11.146 2.164-.334 3.517-.853 4.413-1.403 2.823-3.87 4.716-8.454 5.375-13.43-.667-1.29-1.92-3.34-3.796-4.856" fill="#f40e1b" />
      </g>
      <g>
        <path d="M68.69 116.873c-26.56 0-48.17-21.61-48.17-48.17 0-26.565 21.61-48.176 48.17-48.176 26.568 0 48.175 21.61 48.175 48.175 0 26.56-21.607 48.17-48.175 48.17" fill="#f40e1b" />
      </g>
      <g>
        <path d="M68.69 128.073c-1.713 0-3.108-1.4-3.108-3.11 0-1.717 1.394-3.114 3.107-3.114 29.3 0 53.133-23.835 53.133-53.133 0-29.294-23.834-53.124-53.133-53.124-29.294 0-53.13 23.83-53.13 53.124 0 13.62 5.17 26.587 14.555 36.498 1.18 1.246 1.125 3.215-.122 4.397-1.248 1.186-3.213 1.12-4.398-.12C15.115 98.42 9.34 83.936 9.34 68.717 9.34 35.994 35.966 9.37 68.69 9.37c32.73 0 59.354 26.625 59.354 59.347 0 32.73-26.622 59.355-59.353 59.355" fill="#fabe49" />
      </g>
      <g>
        <path d="M68.69 137.394C30.814 137.394 0 106.578 0 68.702c0-1.717 1.39-3.112 3.108-3.112 1.718 0 3.11 1.395 3.11 3.11 0 34.45 28.027 62.475 62.472 62.475 34.454 0 62.48-28.026 62.48-62.474 0-34.45-28.026-62.476-62.48-62.476-1.712 0-3.107-1.39-3.107-3.114C65.583 1.4 66.978 0 68.69 0c37.883 0 68.703 30.825 68.703 68.7 0 37.878-30.82 68.694-68.703 68.694" fill="#f40e1b" />
      </g>
      <g>
        <path d="M102.433 68.702c0 18.633-15.104 33.735-33.743 33.735-18.63 0-33.736-15.103-33.736-33.735 0-18.633 15.106-33.74 33.737-33.74 18.64 0 33.745 15.108 33.745 33.74" className="logo__background" />
      </g>
      <g>
        <path d="M61.516 57.212s-3.374 5.32-4.422 13.616c3.61 3.518 5.812 5.573 11.583 8.4 6.757-3.07 10.94-7.953 11.57-8.4-1.244-7.71-4.235-13.045-4.422-13.616-7.778-1.986-14.307 0-14.307 0" fill="#f40e1b" />
      </g>
      <g>
        <path d="M58.028 50.886c2.11-5.21 4.79-6.316 7.146-8.313-2.774-2.007-5.37-2.316-6.983-2.332-4.625 1.763-8.69 4.653-11.863 8.34.03 1.057.203 2.91.874 5.186 3.58-1.602 6.05-2.232 10.83-2.88" fill="#f40e1b" />
      </g>
      <g>
        <path d="M68.676 87.515c-3.832 2.52-7.364 3.465-10.366 3.703.93 2.846 1.414 4.076 1.832 4.767 2.7.82 5.564 1.262 8.533 1.262 2.978 0 5.86-.446 8.575-1.277.4-.676.853-1.91 1.782-4.753-3.002-.237-6.536-1.18-10.357-3.702" fill="#f40e1b" />
      </g>
      <g>
        <path d="M79.343 50.886c4.785.647 7.258 1.278 10.832 2.88.66-2.253.836-4.086.876-5.146-3.23-3.764-7.38-6.703-12.11-8.464-1.61.052-4.06.475-6.736 2.417 2.355 1.997 5.03 3.103 7.14 8.313" fill="#f40e1b" />
      </g>
      <g>
        <path d="M50.353 75.36c-3.555-3.26-4.835-5.187-6.855-8.544-2.077 1.673-3.387 3.992-3.988 5.218.687 4.715 2.485 9.065 5.12 12.776.888.646 2.307 1.314 4.853 1.695-.402-2.99-.536-6.23.87-11.146" fill="#f40e1b" />
      </g>
      <g>
        <path d="M94.08 66.817c-2.02 3.356-3.3 5.282-6.857 8.543 1.407 4.916 1.27 8.154.863 11.146 2.164-.334 3.517-.853 4.413-1.403 2.823-3.87 4.716-8.454 5.375-13.43-.667-1.29-1.92-3.34-3.796-4.856" fill="#f40e1b" />
      </g>
    </svg>
  );
}
Sony's 2010 Game Developers Conference press conference kicks off at 4PM Pacific, and we'll be there live, bringing you up-to-date news on the big announcement, interesting tidbits, and what music they're playing before the speakers take the stage. Sony is hosting a large press event this year at the Game Developers Conference, and we expect big news from it. The company should have plenty to talk about, so you should definitely tune in. Possible news includes new game reveals, the official naming of the PlayStation motion controller, and perhaps news on how the company plans to make the faltering PSPgo more attractive to the consumer. I'm hoping they announce a new version of the PSPgo that's longer, loses the slider, and includes a UMD drive. That would be spectacular.
IPG has taken a minority stake in Seattle-based mobile attribution firm Placed, the holding company confirmed Thursday. Placed specializes in measuring the impact of mobile advertisements on in-store visits. The two firms are characterizing the deal as a strategic partnership. The IPG Media Lab will oversee the company's relationship with Placed, which includes introducing IPG clients to Placed's services. Three non-equity Placed partners were also disclosed today including Collective, dstillery and SessionM. Terms of the deal were not disclosed, although The Wall Street Journal reported today that Placed has raised a total of $13 million to date. Placed’s measurement offering includes location-driven insights, targeting and attribution. All three services are derived from what Placed says is the largest opt-in consumer location panel in North America, measuring the location of nearly a quarter of a million devices daily as consumers visit brick-and-mortar stores. The company's research enables agencies and brands to better understand customer traffic patterns influenced by mobile ads, and to tailor relevant communications accordingly. That research is critical as mobile advertising ramps up. Mobile ad expenditures will nearly double this year to $18 billion, according to eMarketer. "IPG is committed to investing in and working with companies that are changing our industry and making our clients and agencies smarter,” said IPG CEO Michael Roth. “Placed is at the forefront of leveraging data to help our agencies see where we're moving the needle on a client's business."
Development of mammalian cell-enclosing calcium-alginate hydrogel fibers in a co-flowing stream. A jetting technique in a liquid-liquid co-flowing stream was applied to the preparation of mammalian cell-enclosing calcium-alginate (Ca-alg) hydrogel fibers of several hundred micrometers in cross-sectional diameter. One percent alginate aqueous solution was extruded from needles (270, 480, 940 μm inner diameter) into a co-flowing laminar stream of 100 mM aqueous calcium chloride (CaCl2) solution. The extruded alginate solution was stretched by the CaCl2 solution, which is known as a "jetting process", and the Ca-alg hydrogel fibers were formed by gelation of the alginate solution through the uptake of calcium ions in the CaCl2 solution. The cross-sectional diameter of the hydrogel fibers could be controlled from approximately 100-800 μm by changing the velocities of the alginate and CaCl2 solutions, and the inner diameter of the needle. Approximately 95% of bovine carotid artery vascular endothelial cells remained alive after the process of preparing hydrogel fibers in a co-flowing stream, demonstrating that the cell-enclosing process scarcely influences the viability of the enclosed cells.
package ggevent

type MouseButtonCode int

const (
	MouseButton1      MouseButtonCode = 0
	MouseButton2      MouseButtonCode = 1
	MouseButton3      MouseButtonCode = 2
	MouseButton4      MouseButtonCode = 3
	MouseButton5      MouseButtonCode = 4
	MouseButton6      MouseButtonCode = 5
	MouseButton7      MouseButtonCode = 6
	MouseButton8      MouseButtonCode = 7
	MouseButtonLast   MouseButtonCode = MouseButton8
	MouseButtonLeft   MouseButtonCode = MouseButton1
	MouseButtonRight  MouseButtonCode = MouseButton2
	MouseButtonMiddle MouseButtonCode = MouseButton3
)
A model undergraduate research institute for study of emerging non-contact measurement technologies and techniques The Infrared Development and Thermal Structures Laboratory (IDTSL) is an undergraduate research laboratory in the College of Integrated Science and Technology (CISAT) at James Madison University (JMU) in Harrisonburg, Virginia. During the 1997-98 academic year, Dr. Jonathan Miles established the IDTSL at JMU with the support of a collaborative research grant from the NASA Langley Research Center and with additional support from the College of Integrated Science and Technology at JMU. The IDTSL supports research and development efforts that feature non-contact thermal and mechanical measurements and advance the state of the art. These efforts all entail undergraduate participation intended to significantly enrich their technical education. The IDTSL is funded by major government organizations and the private sector and provides a unique opportunity to undergraduates who wish to participate in projects that push the boundaries of non-contact measurement technologies, and provides a model for effective hands-on, project oriented, student-centered learning that reinforces concepts and skills introduced within the Integrated Science and Technology (ISAT) curriculum. The lab also provides access to advanced topics and emerging measurement technologies; fosters development of teaming and communication skills in an interdisciplinary environment; and avails undergraduates of professional activities including writing papers, presentation at conferences, and participation in summer internships. This paper provides an overview of the Infrared Development and Thermal Structures Laboratory, its functionality, its record of achievements, and the important contribution it has made to the field of non-contact measurement and undergraduate education.
Classification of pregnancy and labor contractions using a graph theory based analysis In this paper, we propose a new framework to characterize the electrohysterographic (EHG) signals recorded during pregnancy and labor. The approach is based on the analysis of the propagation of the uterine electrical activity. The processing pipeline includes i) the estimation of the statistical dependencies between the different recorded EHG signals, ii) the characterization of the obtained connectivity matrices using network measures and iii) the use of these measures in a clinical application: the classification between pregnancy and labor. Due to its robustness to volume conduction, we used the imaginary part of coherence in order to produce the connectivity matrix, which is then transformed into a graph. We evaluate the performance of several graph measures. We also compare the results with the parameter most used in the literature: the peak frequency combined with the propagation velocity (PV+PF). Our results show that the use of the network measures is a promising tool to classify labor and pregnancy contractions, with a small superiority of the graph strength over PV+PF.
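Step (ii) above turns each connectivity matrix into graph features. As a minimal illustration (the 4x4 matrix below merely stands in for an imaginary-coherence matrix estimated from EHG channels), node strength, the sum of each node's edge weights, can be computed like this:

package main

import "fmt"

// nodeStrengths returns, for each node of a weighted undirected graph,
// the sum of its edge weights (the "strength" network measure).
func nodeStrengths(w [][]float64) []float64 {
	s := make([]float64, len(w))
	for i := range w {
		for j := range w[i] {
			if i != j {
				s[i] += w[i][j]
			}
		}
	}
	return s
}

func main() {
	// Stand-in for an imaginary-coherence connectivity matrix (symmetric, zero diagonal).
	conn := [][]float64{
		{0, 0.2, 0.5, 0.1},
		{0.2, 0, 0.3, 0.4},
		{0.5, 0.3, 0, 0.2},
		{0.1, 0.4, 0.2, 0},
	}
	fmt.Println(nodeStrengths(conn)) // per-channel features for the classifier
}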
Published: April 13, 2019 at 11:11 a.m. Whether or not the New York Jets retain the No. 3 overall selection in the upcoming 2019 NFL Draft is up for speculation. If the Jets do pick third or move down, they're continuing to study up, most recently hosting a top defensive end and top tight end. Kentucky defensive end Josh Allen took a visit with the Jets on Saturday, NFL Network Insider Ian Rapoport reported, while New York hosted Iowa tight end Noah Fant on Friday. The Jets, who have long been in search of a premium pass rusher, played host to Allen on Saturday, a day after the pass rusher was hosted by the New York Giants. Allen is ranked third in NFL Network draft analyst Daniel Jeremiah's top 50, while Fant is ranked 15th. In 2018, linebacker Jordan Jenkins and pass rusher Henry Anderson tied for the Jets team lead of seven sacks. With new defensive coordinator Gregg Williams in town, an edge like Allen would seemingly thrive for Gang Green. With the free-agent signings of receiver Jamison Crowder and running back Le'Veon Bell, a phenomenal pass-catcher out of the backfield, the Jets are going all in on building around quarterback Sam Darnold and aiding his maturation. Fant would be an added piece in that puzzle as the tall and athletic standout can line up at the line of scrimmage, out wide or on the wing.
Q: Client or server check if insert or update in RESTful architecture Before I began developing a RESTful API I used a query similar to this: $query = "INSERT INTO availability (user_id, date, status) " . "VALUES ('".$id."', '".$date."', '".$status."') " . "ON DUPLICATE KEY " . "UPDATE status='".$status."'"; Yes, I know it's subject to SQL injection. Anyway, I'm having trouble deciding if this should be a POST or PUT request since it can insert or update. I got to thinking: maybe it's better to have both POST and PUT methods in the API and then the client determines which one to call. Is this usually how RESTful APIs handle this scenario? A: I got to thinking: maybe it's better to have both POST and PUT methods in the API and then the client determines which one to call. Is this usually how RESTful APIs handle this scenario? Not really. Think about how you would implement this in a web site. Somebody would go to the home page, and then follow a link to a form allowing them to provide the data they would want to use, and then they would submit the form, and get a message back announcing whether the submission was successful. The representation of the form would describe the identifier for the resource to submit the request to, and the method to use (in an HTML form, that would always be a POST, of course). So you could use POST, or PUT, or alternate back and forth, or do whatever you like by simply altering the representation of the form. From a REST perspective, the only important thing is that you respect the uniform interface; which in this case means that the semantics of your messages are aligned with the HTTP specification. REST really doesn't care which method you use, so long as (a) you use it correctly according to the uniform interface, and (b) the client discovers which method to use by consuming the hypermedia provided by the server. Semantically, a PUT is supposed to be a complete replacement. RFC 7231 is the relevant standard. The PUT method requests that the state of the target resource be created or replaced with the state defined by the representation enclosed in the request message payload It's a little bit weird that you insert $date but you don't update it. Not wrong -- but tough to tell from the example whether date is part of the "representation enclosed in the request message payload." The semantics of PUT allow clients and intermediaries to make certain assumptions about the state of the updated resource without sending a GET to the server -- it's the responsibility of the server to ensure that those assumptions hold. A: It might help to first look at the difference between POST and PUT. The main difference is that PUT is intended for idempotent operations. That is, operations that can be performed multiple times but will behave as if they were performed once. POST has no such assumption. The significance of idempotence is that the client or infrastructure can repeat the request if it doesn't seem to have been acknowledged. With a POST request, this may cause unintended consequences. This is why your browser warns you if you refresh a page that was produced via a POST request. The operation in your question is idempotent, so PUT can and should be used. A POST request would be appropriate for operations that are not idempotent like operations that are more like RPC calls or, more in the spirit of REST and HTTP, creating new records. 
For example, if you had an auto-increment ID field and each INSERT created a new record, you'd use POST because such an operation is not idempotent: executing it twice will create two new records.
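To make the idempotency point concrete, here is a minimal Java sketch (not from the original post) of a PUT handler performing the same upsert. Because PUT replaces the whole resource state, repeating the request leaves the row unchanged, and the parameterized query also removes the SQL injection problem; the JAX-RS resource path and the Database.getConnection() helper are assumptions.

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import javax.ws.rs.PUT;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.core.Response;

@Path("/users/{id}/availability/{date}")
public class AvailabilityResource {

    // PUT is a complete replacement of the resource state, so calling it
    // twice with the same body leaves the row in the same state (idempotent).
    @PUT
    public Response putAvailability(@PathParam("id") long userId,
                                    @PathParam("date") String date,
                                    String status) throws SQLException {
        String sql = "INSERT INTO availability (user_id, date, status) "
                   + "VALUES (?, ?, ?) "
                   + "ON DUPLICATE KEY UPDATE status = VALUES(status)";
        try (Connection conn = Database.getConnection();   // hypothetical helper
             PreparedStatement ps = conn.prepareStatement(sql)) {
            ps.setLong(1, userId);
            ps.setString(2, date);
            ps.setString(3, status);  // parameters, not string concatenation
            ps.executeUpdate();
        }
        // 204: the request succeeded and there is no body to return.
        return Response.noContent().build();
    }
}

The sketch assumes a MySQL-style table with a unique key on (user_id, date), which is what makes ON DUPLICATE KEY UPDATE behave as an upsert.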
Effect of neutron irradiation on the spectrum of deep-level defects in GaAs grown by liquid-phase epitaxy in hydrogen or argon ambient We present the results of experimental studies of the capacitance–voltage characteristics and deep-level transient spectroscopy spectra of graded high-voltage GaAs p+-p0-i-n0 diodes, fabricated by liquid-phase epitaxy at a crystallization temperature of 900 °C from a single solution–melt by autodoping with background impurities, in a hydrogen or argon ambient, before and after irradiation with neutrons. After neutron irradiation, deep-level transient spectroscopy spectra revealed wide zones of defect clusters with acceptor-like negatively charged traps in the n0-layer, which arise as a result of electron emission from states located above the middle of the band gap. It was found that the differences in the capacitance–voltage characteristics of the structures grown in hydrogen or argon ambient after irradiation are due to the different irradiation doses of the GaAs p+-p0-i-n0 structures and the different degrees of compensation of shallow donor impurities by deep traps in the layers. Keywords: GaAs, neutron irradiation, capacitance spectroscopy, p0-i-n0-junction, liquid-phase epitaxy, hydrogen, argon.
/*

   Derby - Class org.apache.derby.impl.store.raw.xact.XactId

   Licensed to the Apache Software Foundation (ASF) under one or more
   contributor license agreements.  See the NOTICE file distributed with
   this work for additional information regarding copyright ownership.
   The ASF licenses this file to you under the Apache License, Version 2.0
   (the "License"); you may not use this file except in compliance with
   the License.  You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

*/

package org.apache.derby.impl.store.raw.xact;

import org.apache.derby.iapi.services.io.FormatIdUtil;
import org.apache.derby.iapi.services.io.StoredFormatIds;
import org.apache.derby.iapi.services.sanity.SanityManager;
import org.apache.derby.iapi.error.StandardException;
import org.apache.derby.iapi.store.raw.xact.TransactionId;
import org.apache.derby.iapi.services.io.CompressedNumber;

import java.io.ObjectOutput;
import java.io.ObjectInput;
import java.io.IOException;

/**
	Use this class for a shorthand representation of the transaction.  This
	value is only guaranteed to be unique within one continuous operation of
	the raw store; in other words, every reboot may reuse the same value.

	Whereas a GlobalXactId is unique for all times across all raw stores, an
	XactId is only unique within a particular raw store and may be reused.

	XactId keeps track of the outstanding transactionId and is responsible
	for dispensing new transactionIds.
*/
public class XactId implements TransactionId
{
	/*
	** Fields
	*/
	private long id;	// immutable

	/*
	** Constructor
	*/
	public XactId(long id) {
		this.id = id;
	}

	/*
	 * Formatable methods
	 */

	// no-arg constructor, required by Formatable
	public XactId() {
		super();
	}

	/**
		Write this out.
		@exception IOException error writing to log stream
	*/
	public void writeExternal(ObjectOutput out) throws IOException {
		CompressedNumber.writeLong(out, id);
	}

	/**
		Read this in.
		@exception IOException error reading from log stream
	*/
	public void readExternal(ObjectInput in) throws IOException {
		id = CompressedNumber.readLong(in);
	}

	/**
		Return my format identifier.
	*/
	public int getTypeFormatId() {
		return StoredFormatIds.RAW_STORE_XACT_ID;
	}

	/**
		TransactionId method
	*/
	public int getMaxStoredSize() {
		return FormatIdUtil.getFormatIdByteLength(StoredFormatIds.RAW_STORE_XACT_ID)
			+ CompressedNumber.MAX_LONG_STORED_SIZE;
	}

	public boolean equals(Object other) {
		if (other == this)
			return true;

		// Assume the cast will be successful rather than waste time doing
		// an instanceof first.  Catch the exception if it fails.
		try {
			XactId oxid = (XactId)other;
			return (id == oxid.id);
		} catch (ClassCastException cce) {
			return false;
		}
	}

	public int hashCode() {
		return (int)id;
	}

	/*
	** Methods specific to this class
	*/

	/**
		Return 0 if a == b,
		a positive number if a > b,
		a negative number if a < b.
	*/
	public static long compare(TransactionId a, TransactionId b) {
		if (a == null || b == null) {
			if (a == null)
				return -1;
			else if (b == null)
				return 1;
			else
				return 0;
		}

		if (SanityManager.DEBUG) {
			SanityManager.ASSERT(a instanceof XactId);
			SanityManager.ASSERT(b instanceof XactId);
		}
		XactId A = (XactId)a;
		XactId B = (XactId)b;

		return A.id - B.id;
	}

	protected long getId() {
		return id;
	}

	public String toString() {
		// needed for virtual lock table
		return Long.toString(id);
	}
}
The UFW and the Undocumented It has become an embarrassment. The American Conservative crows, "Cesar Chávez, Minuteman," and the accompanying article delivers the news that the United Farm Workers was not only anti-immigrant but that it set up its own border patrol between Arizona and Mexico. The magazine does not stand alone. Leaders of current right-wing vigilante groups claim Chávez's retroactive endorsement for their occasional attempts to close small sections of the border. In response, Chávez's defenders contend that the UFW opposed the undocumented only when they broke strikes; and besides, that was long ago, and now the union is a strong defender of immigrant rights. Thus, UFW policy toward the undocumented, controversial in its own time, once again has become a subject of public debate, threatening to tarnish the name given to so many California streets, parks, schools, community centers, and even university departments.
/**
 * This header is generated by class-dump-z 0.2b.
 *
 * Source: /System/Library/PrivateFrameworks/UIAccessibility.framework/UIAccessibility
 */

#import <UIAccessibility/XXUnknownSuperclass.h>

@interface UIAccessibilityLoader : XXUnknownSuperclass {
}
+ (void)_accessibilityStopServer;	// 0x4ab1
+ (void)_accessibilityReenabled;	// 0x4a65
+ (void)_accessibilityStartServer;	// 0x48d9
+ (id)_loadAXBundleForBundle:(id)bundle didLoad:(BOOL *)load;	// 0x482d
+ (id)_loadAXBundleForBundle:(id)bundle didLoad:(BOOL *)load forceLoad:(BOOL)load3 loadSubbundles:(BOOL)subbundles;	// 0x4675
+ (id)loadAccessibilityBundle:(id)bundle didLoad:(BOOL *)load force:(BOOL)force;	// 0x4645
+ (id)loadAccessibilityBundle:(id)bundle didLoad:(BOOL *)load;	// 0x4621
+ (void)loadActualAccessibilityBundle:(id)bundle didLoad:(BOOL *)load loadSubbundles:(BOOL)subbundles;	// 0x44a9
+ (id)_axBundleForBundle:(id)bundle;	// 0x423d
+ (id)_accessibilityBundlesForBundle:(id)bundle;	// 0x40f5
+ (void)initialize;	// 0x3fb9
@end
President Obama's high-stakes bid to complete one of the largest free trade pacts in U.S. history, over the objections of most Democrats, moved ahead Thursday when the Senate introduced bipartisan legislation that would give his administration vast new powers to close the deal. The fast-track trade bill from Sens. Orrin G. Hatch (R-Utah) and Ron Wyden (D-Ore.) would allow the president to present a final agreement to Congress for an up-or-down vote without lawmakers being able to amend the terms. A similar bill, drafted by Rep. Paul Ryan (R-Wis.), is expected to be introduced in the House in the coming weeks. Hatch, chairman of the Senate Finance Committee, said he would hold a markup and vote on the bill April 23. [Obama’s proposal for more trade with Asia may not go over so well in his own party] Obama administration officials have called fast-track authority crucial to wrapping up negotiations on the Trans-Pacific Partnership (TPP), a 12-nation trade and regulatory deal in the Asia Pacific that Obama has touted as a cornerstone of his second-term agenda. "My top priority in any trade negotiation is expanding opportunity for hardworking Americans," Obama said in a statement Thursday. "It’s no secret that past trade deals haven’t always lived up to their promise, and that’s why I will only sign my name to an agreement that helps ordinary Americans get ahead. At the same time, at a moment when 95 percent of our potential customers live outside our borders, we must make sure that we, and not countries like China, are writing the rules for the global economy." The president has said the deal would help ensure U.S. competitiveness in the face of a rising China. But getting it across the finish line on Capitol Hill will mark a major leadership test for Obama. The deal faces fierce opposition from many Democrats, labor unions and environmental groups, who say the TPP will kill U.S. jobs and benefit large, multinational corporations. The AFL-CIO announced Thursday it would launch a six-figure ad buy to fight the fast-track bill in a series of digital ads targeting 16 senators and 36 House members. The campaign could expand to newspapers and television, the organization said. The fast-track bill, formally known as "trade promotion authority," was negotiated for months between Hatch and Wyden, with the Democrat under increasing pressure from liberal groups not to sign onto the legislation. [White House counts on Sen. Wyden to deliver on a trade deal] Presidents have over the past 40 years enjoyed fast-track powers on trade, but the powers have not been renewed since they expired in 2007 under George W. Bush. Obama and his aides have said they need the authority because other countries are unwilling to agree to final terms if they believe Congress will amend the pact afterward. "We must speak with one voice in our demands and provide assurance that we will deliver what we promise," Hatch said. Wyden fought for, and won, a number of provisions aimed at ensuring greater public transparency in the trade negotiations, as well as a key provision that would allow the Senate to turn off the fast-track authority if the Obama administration fails to live up to certain requirements during the negotiations. "One issue that has come up again and again is the excessive secrecy that seems to have accompanied so much of this debate," Wyden said Thursday. "American trade policy needs to be debated openly."
The bill also includes provisions mandating that the administration pursue protections for workers and the environment and a stipulation that U.S. negotiators make human rights a priority, though that provision lacks specifics. Politically, the legislation places Obama squarely against a majority of House Democrats and a large bloc of Senate Democrats, many of whom believe previous trade deals hurt U.S. workers. In a sign of how difficult the fight will be, Sen. Charles E. Schumer (D-N.Y.), who is expected to take over as leader of his party in the Senate in 2017, said he was skeptical of the TPP because it lacks protections against currency manipulation. "All evidence I’ve seen is that this hurts middle-class incomes," he said at a hearing before the trade bill was introduced. "I can’t be for it." Later Thursday, Schumer said that support from him and many Democrats would depend on a separate currency manipulation bill that Hatch has promised to bring up, saying he would not settle for a "milquetoast" measure. Hatch made clear that he'll give Schumer the chance in committee, but that TPP would move on its own track. "We'll do a bill on currency manipulation, but it can't be part of this, or it's dead. So we can't kill trade just because somebody wants one aspect or another," Hatch said. Sen. Richard J. Durbin (D-Ill.), the minority whip, estimated that only a quarter of Senate Democrats were in the president's camp right now. He broke down his caucus ranks this way: "A fourth, hell no; a fourth, lean yes; and a big group undecided." With most Senate Republicans likely to support the fast-track legislation, most corporate and union officials expect the Senate to clear the bill in May and set up a much bigger fight in the House, where the margins are incredibly narrow at the moment. Several dozen House Republicans either come from manufacturing districts and do not support trade deals, or are just so opposed to giving Obama any new authority that they would vote no. That means Obama would need to deliver more than 30 Democrats to win passage in the House, which insiders on both sides of the aisle say is not guaranteed. "This is wrong, and members of Congress will not stand for it," said Rep. Rosa L. DeLauro (D-Conn.), who has led the liberal opposition to trade deals.
import java.util.Scanner;

public class Main {

    static Dat[] mat;

    // One envelope: width, height, best chain length ending here,
    // predecessor in that chain, and original input index.
    static class Dat {
        int w = 0;
        int h = 0;
        int max = 1;
        int path = -1;
        int ind = -1;
    }

    public static void main(String[] args) {
        Scanner scan = new Scanner(System.in);
        int N = scan.nextInt();
        int W = scan.nextInt();
        int H = scan.nextInt();

        mat = new Dat[N];
        for (int i = 0; i < N; i++) {
            mat[i] = new Dat();
            mat[i].w = scan.nextInt();
            mat[i].h = scan.nextInt();
            mat[i].ind = i;
        }

        // Sort envelopes by width, descending.
        quickSort(0, N - 1);

        // Find any envelope that fits around the W x H card at all.
        int curmax = 1;
        int maxind = -1;
        for (int i = 0; i < N; i++) {
            if (mat[i].w > W && mat[i].h > H) {
                maxind = i;
                break;
            }
        }
        if (maxind == -1) {
            System.out.println(0);
            return;
        }

        // Longest-chain DP: envelope j can enclose envelope i only when both
        // dimensions are strictly larger (equal widths are not allowed).
        for (int i = 1; i < N; i++) {
            if (mat[i].w <= W || mat[i].h <= H) continue;
            for (int j = i - 1; j >= 0; j--) {
                if (mat[j].w <= W || mat[j].h <= H
                        || mat[j].w == mat[i].w || mat[j].h <= mat[i].h) continue;
                if (mat[i].max <= mat[j].max) {
                    mat[i].max = mat[j].max + 1;
                    mat[i].path = j;
                    if (curmax < mat[i].max) {
                        curmax = mat[i].max;
                        maxind = i;
                    }
                }
            }
        }

        // Print the chain length, then the chain from innermost to outermost
        // by walking the predecessor links.
        System.out.println(curmax);
        while (curmax > 0) {
            System.out.println(mat[maxind].ind + 1);
            maxind = mat[maxind].path;
            curmax--;
        }
    }

    // In-place quicksort by width, descending.
    static void quickSort(int begin, int end) {
        int i = begin;
        int j = end;
        int mid = mat[i + (j - i) / 2].w;
        do {
            while (mat[i].w > mid) i++;
            while (mat[j].w < mid) j--;
            if (j >= i) {
                Dat temp = mat[j];
                mat[j] = mat[i];
                mat[i] = temp;
                i++;
                j--;
            }
        } while (i <= j);
        if (j > begin) quickSort(begin, j);
        if (end > i) quickSort(i, end);
    }
}
from hubspot import HubSpot
from hubspot.conversations.visitor_identification import GenerateApi


def test_is_discoverable():
    apis = HubSpot().conversations.visitor_identification
    assert isinstance(apis.generate_api, GenerateApi)
for treating catheter-related infections, which prompted a round of spending cuts. The company expects to close 2009 with $81 million to $86 million in the bank—almost $40 million more than it had on hand a year ago. —Cypress Bioscience (NASDAQ: CYPB). The San Diego company won FDA approval of milnacipran (Savella) for fibromyalgia, so now it’s gearing up for the marketplace. This drug is important since it would be Cypress’ sole marketed product, but it can wait a while if need be. The company had $145.5 million in cash and investments heading into 2009, about $36 million lighter in the wallet than it was a year earlier. —Genoptix Medical (NASDAQ: GXDX). This Carlsbad, CA-based lab services company had about $103 million stockpiled heading into this year, but it reported a $20.7 million profit in the fourth quarter, so it’s not in a position where it needs to burn cash reserves to stay alive. The company generated $116.2 million in sales during 2008, and it forecasts sales will surge by 46 percent this year to about $170 million. —Halozyme Therapeutics (NASDAQ: HALO). This San Diego company entered 2009 with $63.7 million stockpiled, and it expects to burn through $30 million to $35 million this year. —Illumina (NASDAQ: ILMN). This San Diego-based maker of high-speed genetic analysis instruments said it had about $687 million in cash and investments heading into this year. It expects to generate $690 million to $720 million in revenue this year, and continue to operate in the black, with about $1.10 to $1.20 expected this year in the all-important earnings-per-share number watched on Wall Street. —Isis Pharmaceuticals (NASDAQ: ISIS). The Carlsbad, CA-based developer of antisense drug technology made major strides to firm up its balance sheet. Isis entered this year with $491 million in the bank, more than double the $193.7 million it had stashed away a year earlier. That cash hoard, built up through a lucrative partnership with Cambridge, MA-based Genzyme and the sale of its diagnostics subsidiary to Abbott Laboratories, should last for years, the company says. —La Jolla Pharmaceutical (NASDAQ: LJPC). This San Diego-based company hasn’t filed its fourth-quarter financial report with the SEC yet, although that’s the least of its problems. The company’s lead product in development, Riquent, failed in a pivotal clinical trial of patients with lupus. It is now “evaluating strategic options such as winding down the business” or a sale of the company.
A case study on the SCORM-based e-learning in computer-aided drafting course with users' satisfaction survey In this paper, we propose a case study to compare the learning difference in a Computer-Aided Drafting (CAD) course between traditional learning and e-learning. The learning materials for the e-learning were designed to the Sharable Content Object Reference Model (SCORM) standard. Seventy-four students from a vocational high school in Taiwan attended the experiment. These students were divided into two groups: the control group and the treatment group. The control group was taught through traditional learning in a regular classroom. The treatment group utilized an e-learning platform to conduct learning activities. The experimental results show that the learning performance of traditional learning and e-learning in the CAD course does not differ significantly. Finally, a survey was also conducted to gauge users' satisfaction with the e-learning course.
Pyrazinamide resistance and mutations in pncA among isolates of Mycobacterium tuberculosis from Khyber Pakhtunkhwa, Pakistan Background Pyrazinamide (PZA) is an important component of first-line drugs because of its distinctive capability to kill subpopulations of persistent Mycobacterium tuberculosis (MTB). The prodrug (PZA) is converted to its active form, pyrazinoic acid (POA), by the MTB pncA-encoded pyrazinamidase (PZase). Mutation in pncA is the most common and primary cause of PZA resistance. The aim of the present study was to explore the molecular characterization of PZA resistance in a Pashtun-dominated region of Khyber Pakhtunkhwa, Pakistan. Methods We performed drug susceptibility testing (DST) on 753 culture-positive isolates collected from the Provincial Tuberculosis Control Program Khyber Pakhtunkhwa using the BACTEC MGIT 960 PZA method. In addition, the pncA gene was sequenced in PZA-resistant isolates, and PZA susceptibility testing results were used to determine the sensitivity and specificity of pncA gene mutations. Results A total of 69 isolates (14.8%) were PZA resistant. Mutations were investigated by sequencing in 69 resistant isolates, 26 susceptible isolates and the H37Rv reference strain. Thirty-six different mutations were identified in PZA-resistant isolates; fifteen of them, including 194_203delCCTCGTCGTG and 317_318delTC, have not been reported in the TBDRM and GMTV databases or in previous studies. The mutations Lys96Thr and Ser179Gly were each found in the maximum number of isolates (n=4). We did not detect mutations in sensitive isolates, except for the synonymous mutation 195C>T (Ser65Ser). The sensitivity and specificity of the pncA sequencing method were 79.31% (95% CI, 69.29 to 87.25%) and 86.67% (95% CI, 69.28 to 96.24%). Conclusion Mutations in the pncA gene in circulating isolates of geographically distinct regions, especially in high-burden countries, should be investigated for better control and management of drug-resistant TB. Molecular methods for the investigation of PZA resistance perform better than phenotypic DST. Background Tuberculosis (TB) is a common life-threatening infectious disease caused by Mycobacterium tuberculosis (MTB). The standard therapy is a six-month regimen of four recommended first-line drugs, i.e., isoniazid, rifampin, pyrazinamide and ethambutol. However, the misuse of these antibiotics has led to the emergence of multidrug-resistant (MDR) strains of MTB. According to the WHO report in 2016, Pakistan is ranked among the top five countries that together account for 56% of the global TB burden, and among the countries with a high burden of drug-resistant TB. The incidence of drug resistance (MDR/RR-TB) is twenty-six thousand cases, or 14 (8.5–19) per hundred thousand population. This situation poses a serious challenge to the TB control program in the country. Khyber Pakhtunkhwa (KPK), a Pashtun-majority region, is the third largest province of Pakistan, with an area of 74,521 km2 and a population of approximately 30,523,371. Tuberculosis remains a major public health problem there and one of the neglected health areas of the past. Recently, a TB control program has been launched at Hayatabad Medical Complex Peshawar that regularly monitors the incidence of TB as well as drug susceptibility testing (DST) in the population. Pyrazinamide (PZA), a key first-line antibiotic used in the six-month short-course treatment, kills dormant tubercle bacilli at an acidic pH, whereas other drugs fail to work during the early severe stages of chemotherapy.
Due to technical and buffering issues of drug susceptibility testing (DST) for PZA, the results of phenotypic resistance testing are not always reliable. The MGIT 960 system is the most reliable method of performing PZA DST and is currently the only available phenotypic test to explore PZA susceptibility. MTB isolates are cultured in the presence of PZA under acidic conditions, as required for conversion into pyrazinoic acid (POA), the active form of pyrazinamide in vivo, activated by MTB PZase (PncA). These issues have prompted efforts towards molecular methods of detecting PZA resistance. There are no molecular studies exploring pncA mutations in the Pashtun-dominant region of Khyber Pakhtunkhwa. Here, we aimed to compare phenotypic PZA resistance with genotype and to identify mutations in pncA among PZA-resistant isolates circulating in this epidemiologically distinct area with a Pashtun-dominant population, which may be useful in tracing transmission in patients. Ethical considerations The present investigation was approved by the Institutional Ethics Committee of CUST Islamabad and the Provincial Tuberculosis Reference Laboratory (PTRL) KPK under reference number PTP/PTRL-402/16. Prior to the study, informed consent was obtained from each TB patient; however, the results were not linked back to individual patients. Study samples All samples were processed at the BSL-III facility of PTRL, Hayatabad Medical Complex (HMC). The lab receives TB cases from the entire province, which is facilitated by the MGIT 960 system of drug susceptibility testing. The data for TB patients were collected from the patients' guardians or caretakers. Sample processing, isolation and mycobacterial culture The samples were processed using the N-acetyl-L-cysteine-sodium hydroxide (NALC-NaOH) concentration method in a Falcon tube containing an equal volume of NaOH/N-acetyl-L-cysteine (NALC), which was subsequently vortexed and incubated at room temperature for 15 min for decontamination and digestion. Next, 50 ml of phosphate buffer was added to each tube, followed by centrifugation at 3000 rpm for 15 min. The supernatant was transferred to a fresh tube containing 5% phenol, while the pellet was mixed with phosphate buffer and cultured on Lowenstein-Jensen (LJ) medium and in MGIT tubes containing 7H9 medium. Drug susceptibility testing (DST) Drug susceptibility testing for PZA was performed with the automated BACTEC MGIT 960 system (BD Diagnostic Systems, NJ, USA). Mycobacterium tuberculosis H37Rv and Mycobacterium bovis were used as susceptible and resistant controls, respectively. A sample was marked as PZA resistant if growth was found at the PZA critical concentration of 100 µg/ml. DST for resistant isolates was repeated to confirm drug resistance. The PZA-resistant samples were further subjected to DST with isoniazid (INH), rifampin (RIF), ethambutol (EMB), amikacin (AMK), streptomycin (SM), capreomycin (CAP), ofloxacin (OFX) and kanamycin (KM) through the BACTEC MGIT 960 system, with critical drug concentrations as per the policy guidelines of the WHO (WHO 2014). The resistant samples were further assessed manually to confirm the growth of MTB at the critical drug concentration. DNA extraction, PCR amplification and sequencing Genomic DNA from PZA-resistant isolates was extracted by sonication.
One microliter of fresh culture was transferred from a Mycobacterium Growth Indicator Tube (MGIT) to a microcentrifuge tube and heated at 86 °C for 30 min using an EchoTherm IC22 digital chilling/heating dry bath, followed by 15 min of sonication in a sonicator (Elmasonic S30). All samples were centrifuged for 5 min at 10,000 rpm. The supernatant containing DNA was stored at −20 °C. The fragments containing pncA were amplified using previously reported primers (pncA-F = 5′-GCGTCATGGACCCTATATC-3′ and pncA-R = 5′-AACAGTTCATCCCGGTTC-3′). Each 50-µl PCR contained 0.1 µl of each dNTP, 0.8 µl of Taq polymerase (New England Biolabs, UK), 5 µl of PCR buffer, 3 µl of MgCl2, 1 µl of each of the forward and reverse primers, 34.8 µl of molecular-grade water and 4 µl of genomic DNA. The PCR conditions were 5 min at 94 °C for denaturation, followed by 30 cycles of 30 s at 94 °C, 30 s at 56 °C and 1 min at 72 °C, with an extension step at 72 °C for 5 min, as previously described. The PCR products were analyzed on an Applied Biosystems 3730xl (Macrogen, Korea). Data analysis The sequence data obtained were loaded into Mutation Surveyor V5.0.1 software. The data were analyzed and compared with the pncA (Rv2043c) gene of the NCBI RefSeq database (NC_000962), while the patient data were entered using EpiData Entry version 3.1 software and analyzed with EpiData Analysis software. Socio-demographic characteristics A total of 4518 samples were collected from TB subjects from all districts of KPK. Among these, 753 subjects were culture positive, with ages ranging from 8 to 76 years (median age = 34.34). A majority of the cases had never been treated before (new cases) (44/69). All patients were KPK residents with Pashto as their main language (Table 1). Risk factors such as age, gender, history, reason, disease type and resistance type are presented in Table 1. The presence of a high number of MDR isolates (52/69) among the PZA-resistant isolates marks the major risk factor for transmission and treatment failure. Mutation in PZA-resistant and PZA-susceptible isolates Mutations were investigated in both resistant and susceptible isolates in the coding region (561 bp) of pncA (Table 2). Among the 69 PZA-resistant isolates, 51 (74%) carried a total of thirty-six different mutations, fifteen of them novel, including 194_203delCCTCGTCGTG and 317_318delTC; these variations were not found in the GMTV and TBDRM databases. Discussion PZA is a distinctive antituberculosis drug that plays a key role in shortening TB treatment. PZA kills nonreplicating persistent MTB and is prescribed in both susceptible and MDR-TB treatment. After conversion into its active form POA by pyrazinamidase (PZase), PZA remains active at low pH during acidic stress. However, in a large number of cases, MTB patients develop resistance against PZA, which allows persistent bacteria to survive. Conventional methods of PZA DST inflate the level of false resistance, which may result from media buffering issues and large inoculum sizes, since an acidic environment is required for drug action but also inhibits the growth of MTB. Under such conditions, the most reliable method in the present scenario is the molecular detection of PZA resistance, which involves sequencing the pncA gene to assess mutations in the 561 bp coding region and the upstream regulatory region. In the present study, more than half of the tested PZA-resistant isolates, 52/69 (75.35%), were also MDR-TB, consistent with the results of previous studies.
Previous studies have also shown a correlation between mutations in the pncA gene and phenotypic PZA resistance. In line with these findings, we report mutations in 51 (74%) of the resistant isolates, harboring 36 different mutations in the coding region of pncA, with a sensitivity and specificity of pncA sequencing of 79.31% and 86.67%, respectively. The mutations were categorized into four groups based on confidence level: 1) very high confidence resistance mutations, 2) high-confidence resistance mutations, 3) mutations with an unclear role and 4) mutations not associated with phenotypic resistance. We detected 12 mutations of the very-high-confidence resistance category, while the rest of the detected mutations fall into the unclear category of Miotto et al. (Table 2). The mutations 211C>T, 212A>G, 226A>C, 286A>C and 422A>C found in the present study (Table 2) were previously shown to be very high confidence resistance mutations. Molecular biomarkers that specifically target the first two categories should be developed. Tan et al. reported that each geographical region has a distinct spectrum of pncA variations. Isolates from Southern China exhibited mutations scattered across the 561 bp region, which makes pncA a complex target for developing diagnostic biomarkers able to identify all resistance-conferring mutations. Some strains that were PZA resistant by conventional DST lack any mutations in pncA and its regulatory region, suggesting other drug targets as well as issues concerning DST. The residues Cys138, Asp8, Lys96 and Asp49, His51, His57, and His71 are present in the active and metal-binding sites of the pncA-encoded enzyme pyrazinamidase (PZase). We identified mutations dispersed throughout the pncA gene (35A>C to 538G>T), including near the metal-binding and active-site amino acids (46-76 and 133-146), and we detected mutations in residues that are important for enzyme catalysis and metal binding (Table 2). However, we did not detect any mutations in 18 of the PZA-resistant MTB isolates, suggesting the involvement of the other genes rpsA and panD (aspartate decarboxylase). A potential new target of PZA, the clpC1 (unfoldase) gene, which encodes a family of ATPases, was identified in PZA-resistant isolates in addition to the previously identified genes pncA, rpsA and panD. However, the role of these genes (rpsA, panD and clpC1) in PZA resistance is small compared to that of pncA. In a more recent study, four new efflux proteins, Rv0191, Rv1667c, Rv3756c and Rv3008, were implicated in PZA/POA resistance. These findings suggest additional mechanisms of PZA resistance in MTB. Further investigations are needed to quantify the role of all these targets and mechanisms in PZA-resistant MTB for better management of drug-resistant TB. In conclusion, considering phenotype as the reference, among the 69 PZA-resistant isolates, 51 (74%) showed mutations, with a sensitivity of 79.31% (95% CI, 69.29 to 87.25%) and a specificity of 86.67% (95% CI, 69.28 to 96.24%). The mutations 33C>A, 53C>A, 194_203delCCTCGTCGTG, 205C>A, 317_318delTC, 331G>T, 376G>A, 419G>A, 430G>A, 449G>C, 508G>C, 519G>A, 522G>A, 530delC and 535A>G were found neither in the GMTV and TBDRM databases nor in previous studies, suggesting that further studies from distinct geographical regions are needed to uncover novel mutations confined to those specific areas. The majority of mutations fell into the high-confidence and uncharacterized resistance categories.
Molecular methods to investigate PZA resistance by screening mutations in the pncA gene in distinct epidemiological regions offer a much more rapid alternative to conventional bacteriology. Mutations in the pncA gene are highly linked with resistance to PZA and are scattered across the entire coding region of pncA. Further, we found an association between PZA resistance and resistance to the other important first-line drugs INH and RIF, which is a major hurdle in the treatment management of MDR TB. Given this high frequency of pncA mutations across geographically distinct regions, the WHO guideline of using pyrazinamide empirically in drug-resistant TB should be reconsidered. Further studies with larger sample sizes may strengthen these findings and identify the mutations in PZA-resistant isolates specific to certain geographical areas, enabling better treatment and the development of geographically specific biomarkers. Acknowledgments The present study was ethically and technically supported by Dr. Sahar Fazal and Dr. Aamer Nadeem, Associate Professors of Bioinformatics and Bioscience, and by Dr. Nayyer Masood, Professor, and Muhammad Tanvir Afzal, Associate Professor, Faculty of Computing, Capital University of Science and Technology Islamabad. Funding No funding sources. Availability of data and materials The datasets in the present study will be provided upon reasonable request to the corresponding author. Authors' contributions The experiment was designed by MdTK, SIM and SA. Experimental work was conducted by MdTK, SA and ASK. Data analysis and manuscript writing were carried out by MdTK, TN, NM, MdTA and SIM. All the authors read and approved the final manuscript. Ethics approval and consent to participate The present investigation was approved by the Institutional Ethics Committee of CUST Islamabad and the Provincial Tuberculosis Reference Laboratory (PTRL) KPK under reference number PTP/PTRL-402/16. Prior to the study, all participants provided written informed consent. However, the results were not linked back to individual patients. Consent for publication Not applicable.
One of corporate journalism’s bad habits is framing international stories on the premise that news is what happens to the US. There is no better recent example of this than the story of tens of thousands of children fleeing Central America for refuge in other countries, including, but not limited to, the US. With some exceptions, this story is covered as the US’s “border crisis,” and the latest installment in our supposed immigration debate, with the children little more than nameless symbols of a troubled policy. The framing of the refugee crisis as a domestic political story can be read in the headlines: “Obama, on Texas Trip, Will Face Immigration Critics” (New York Times, 7/10/14); “Obama Hardens Tone on Border” (Washington Post, 7/8/14); “In Border Crisis, Obama Is Accused of ‘Lawlessness’ for Following Law” (Washington Post, 7/9/14). Some reporting has bucked the trend and attempted to look beyond US borders. In “Fleeing Gangs, Children Head to US Border” (7/9/14), New York Times reporter Frances Robles reported on the root causes for a refugee crisis that could see 90,000 reaching the US border by the end of this year. Violence, gangs and poverty are mentioned, and that’s good, as far as it goes, but these stories don’t ask some obvious questions. Like, why are almost all the children from three Central American countries? The largest number of child refugees are from Honduras, with El Salvador and Guatemala accounting for almost all others. Why these three countries? And why aren’t children streaming out of Nicaragua, which suffers from “staggering poverty, but not a pervasive gang culture or a record-breaking murder rate,” as the Times‘ Robles notes, but does not attempt to explain? According to the the landmark 2013 study by the UN Office on Drugs and Crime, Nicaragua, with 11.3 homicides per 100,000 population, has a homicide rate about one-eighth that of world leader Honduras (90.4), and roughly a quarter of that of El Salvador (41.2) and Guatemala (39.9). Why is Nicaragua so much safer? Here’s where it might come in handy to quote Central Americans and experts on the region. But these vital groups are nearly invisible in coverage, particularly in the large number of stories that treat it as a domestic or political story. If journalists interviewed University of California/Santa Cruz Honduras expert Dana Frank, they would learn that in the nation with the highest crime rate on Earth, the criminal gang culture extends into every level of government. This includes the US-allied federal government that came to power following a US-backed coup (Guardian, 6/29/12) that removed a popular, democratically elected president (Extra!, 9/09). As Frank wrote last Wednesday in “Who’s Responsible for the Flight of Honduran Children?” (Huffington Post, 7/9/14): Missing from the discussion about Honduras, though, is the post-coup regime governing the country that is largely responsible for the vast criminality that has overtaken it. Equally absent is the responsibility of the United States government for the regime. Yes, gangs are rampant in Honduras. But the truly dangerous gang is the Honduran government. And our own tax dollars are pouring into it while our top officials praise its virtues. Mexico City-based writer Laura Carlsen suggests other US policies have also served as important drivers in the flight of the children. 
Carlsen takes on US reports that cite the laxity of US immigration policies, the lure of social welfare programs and the irresponsibility of the children’s parents, by pointing out how US drug war and trade policies have made conditions increasingly unlivable. In “Child Migrants and Media Half-Truths” (Americas Program, 6/23/14), Carlsen writes: So why does the mainstream press seek to place the blame on the parents and a supposed softening of immigration policy? Because the alternative to blaming migrant families themselves is unpalatable to them. The alternative is to accept that the Central American and North American Free Trade Agreements have left thousands of youth with no economic opportunities. It is to accept that US security aid for drug wars has armed and aggravated violence in Mexico and Central America. It is to understand the high cost of supporting the Honduran coup and how the Honduran people and the US population continue to pay that price, as out migration has surged over 500 percent in the past two years, and human rights violations, instability and violence are skyrocketing. Veteran Latin American correspondent James North writes in the Nation (7/9/14) that the White House is “showing little concern for international law, and none at all for Washington’s own historic responsibility in Central America,” by “asking Congress to change the law so America can deport the refugee children more quickly.” North explains the US’s responsibility: The United States has a particular moral responsibility in the Central America refugee crisis that goes even deeper. Americans, especially young Americans, probably know more about the 1994 genocide in Rwanda than they do about how their own government funded murderous right-wing dictatorships in Central America back in the 1980s. The Reagan administration’s violent and immoral policy included $5 billion in aid to the military/landowner alliance in El Salvador, which prolonged an awful conflict in which some 75,000 people died–a toll proportionally equivalent to the casualty rate in the American Civil War. But once shaky peace agreements were signed in the 1990s, the United States walked away, leaving the shattered region to rebuild on its own. This history is virtually never mentioned in reports on the refugee crisis. In addition to the loss of blood and treasure caused by the US during the Reagan era, the US-supported governments of Guatemala, El Salvador and Honduras targeted popular democratic organizers and institutions, flooded those nations with guns, and interrupted political and social development. The three countries sending almost all of the refugee children abroad are the same three in which death squads flourished and where US policy became most deeply embedded in the political culture. Nicaragua, whose political development has taken a different trajectory–seldom in lock step with Washington, its agencies and military advisers–is not experiencing astronomical crime rates or a refugee problem. It’s still very poor, but far less violent. But there is virtually no media discussion of how “our border crisis” might be somewhat of our own making–“blowback” resulting from US policy going back to the 1980s. Are US journalists interested in the actual roots of Central America’s refugee crisis? Or are they ignoring them because they confer moral responsibility on the US, and that discussion would spoil a swell debate about just how quickly the kids should be returned to their homelands?
package org.swtk.commons.dict.wordnet.indexbyname.controller.j.o;

import java.util.Collection;
import java.util.Set;
import java.util.TreeSet;

import org.swtk.common.dict.dto.wordnet.IndexNoun;
import org.swtk.commons.dict.wordnet.indexbyname.instance.j.o.a.WordnetNounIndexNameInstanceJOA;
import org.swtk.commons.dict.wordnet.indexbyname.instance.j.o.b.WordnetNounIndexNameInstanceJOB;
import org.swtk.commons.dict.wordnet.indexbyname.instance.j.o.c.WordnetNounIndexNameInstanceJOC;
import org.swtk.commons.dict.wordnet.indexbyname.instance.j.o.d.WordnetNounIndexNameInstanceJOD;
import org.swtk.commons.dict.wordnet.indexbyname.instance.j.o.e.WordnetNounIndexNameInstanceJOE;
import org.swtk.commons.dict.wordnet.indexbyname.instance.j.o.f.WordnetNounIndexNameInstanceJOF;
import org.swtk.commons.dict.wordnet.indexbyname.instance.j.o.g.WordnetNounIndexNameInstanceJOG;
import org.swtk.commons.dict.wordnet.indexbyname.instance.j.o.h.WordnetNounIndexNameInstanceJOH;
import org.swtk.commons.dict.wordnet.indexbyname.instance.j.o.i.WordnetNounIndexNameInstanceJOI;
import org.swtk.commons.dict.wordnet.indexbyname.instance.j.o.k.WordnetNounIndexNameInstanceJOK;
import org.swtk.commons.dict.wordnet.indexbyname.instance.j.o.l.WordnetNounIndexNameInstanceJOL;
import org.swtk.commons.dict.wordnet.indexbyname.instance.j.o.n.WordnetNounIndexNameInstanceJON;
import org.swtk.commons.dict.wordnet.indexbyname.instance.j.o.o.WordnetNounIndexNameInstanceJOO;
import org.swtk.commons.dict.wordnet.indexbyname.instance.j.o.p.WordnetNounIndexNameInstanceJOP;
import org.swtk.commons.dict.wordnet.indexbyname.instance.j.o.r.WordnetNounIndexNameInstanceJOR;
import org.swtk.commons.dict.wordnet.indexbyname.instance.j.o.s.WordnetNounIndexNameInstanceJOS;
import org.swtk.commons.dict.wordnet.indexbyname.instance.j.o.t.WordnetNounIndexNameInstanceJOT;
import org.swtk.commons.dict.wordnet.indexbyname.instance.j.o.u.WordnetNounIndexNameInstanceJOU;
import org.swtk.commons.dict.wordnet.indexbyname.instance.j.o.v.WordnetNounIndexNameInstanceJOV;
import org.swtk.commons.dict.wordnet.indexbyname.instance.j.o.w.WordnetNounIndexNameInstanceJOW;
import org.swtk.commons.dict.wordnet.indexbyname.instance.j.o.y.WordnetNounIndexNameInstanceJOY;

import com.trimc.blogger.commons.exception.BusinessException;

public final class WordnetNounIndexNameControllerJO {

	public static Collection<IndexNoun> get(final String TERM) throws BusinessException {
		if (TERM.length() < 3) throw new BusinessException("TERM not found (term = %s)", TERM);

		String key = TERM.replaceAll(" ", "").substring(0, 3).toLowerCase();

		if ("joa".equals(key)) return WordnetNounIndexNameInstanceJOA.get(TERM);
		if ("job".equals(key)) return WordnetNounIndexNameInstanceJOB.get(TERM);
		if ("joc".equals(key)) return WordnetNounIndexNameInstanceJOC.get(TERM);
		if ("jod".equals(key)) return WordnetNounIndexNameInstanceJOD.get(TERM);
		if ("joe".equals(key)) return WordnetNounIndexNameInstanceJOE.get(TERM);
		if ("jof".equals(key)) return WordnetNounIndexNameInstanceJOF.get(TERM);
		if ("jog".equals(key)) return WordnetNounIndexNameInstanceJOG.get(TERM);
		if ("joh".equals(key)) return WordnetNounIndexNameInstanceJOH.get(TERM);
		if ("joi".equals(key)) return WordnetNounIndexNameInstanceJOI.get(TERM);
		if ("jok".equals(key)) return WordnetNounIndexNameInstanceJOK.get(TERM);
		if ("jol".equals(key)) return WordnetNounIndexNameInstanceJOL.get(TERM);
		if ("jon".equals(key)) return WordnetNounIndexNameInstanceJON.get(TERM);
		if ("joo".equals(key)) return WordnetNounIndexNameInstanceJOO.get(TERM);
		if ("jop".equals(key)) return WordnetNounIndexNameInstanceJOP.get(TERM);
		if ("jor".equals(key)) return WordnetNounIndexNameInstanceJOR.get(TERM);
		if ("jos".equals(key)) return WordnetNounIndexNameInstanceJOS.get(TERM);
		if ("jot".equals(key)) return WordnetNounIndexNameInstanceJOT.get(TERM);
		if ("jou".equals(key)) return WordnetNounIndexNameInstanceJOU.get(TERM);
		if ("jov".equals(key)) return WordnetNounIndexNameInstanceJOV.get(TERM);
		if ("jow".equals(key)) return WordnetNounIndexNameInstanceJOW.get(TERM);
		if ("joy".equals(key)) return WordnetNounIndexNameInstanceJOY.get(TERM);

		throw new BusinessException("TERM not found (term = %s)", TERM);
	}

	public static Collection<String> terms() throws BusinessException {
		Set<String> set = new TreeSet<String>();
		set.addAll(WordnetNounIndexNameInstanceJOA.terms());
		set.addAll(WordnetNounIndexNameInstanceJOB.terms());
		set.addAll(WordnetNounIndexNameInstanceJOC.terms());
		set.addAll(WordnetNounIndexNameInstanceJOD.terms());
		set.addAll(WordnetNounIndexNameInstanceJOE.terms());
		set.addAll(WordnetNounIndexNameInstanceJOF.terms());
		set.addAll(WordnetNounIndexNameInstanceJOG.terms());
		set.addAll(WordnetNounIndexNameInstanceJOH.terms());
		set.addAll(WordnetNounIndexNameInstanceJOI.terms());
		set.addAll(WordnetNounIndexNameInstanceJOK.terms());
		set.addAll(WordnetNounIndexNameInstanceJOL.terms());
		set.addAll(WordnetNounIndexNameInstanceJON.terms());
		set.addAll(WordnetNounIndexNameInstanceJOO.terms());
		set.addAll(WordnetNounIndexNameInstanceJOP.terms());
		set.addAll(WordnetNounIndexNameInstanceJOR.terms());
		set.addAll(WordnetNounIndexNameInstanceJOS.terms());
		set.addAll(WordnetNounIndexNameInstanceJOT.terms());
		set.addAll(WordnetNounIndexNameInstanceJOU.terms());
		set.addAll(WordnetNounIndexNameInstanceJOV.terms());
		set.addAll(WordnetNounIndexNameInstanceJOW.terms());
		set.addAll(WordnetNounIndexNameInstanceJOY.terms());
		return set;
	}
}
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to you under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.calcite.sql.fun;

import org.apache.calcite.sql.SqlCallBinding;
import org.apache.calcite.sql.SqlFunction;
import org.apache.calcite.sql.SqlFunctionCategory;
import org.apache.calcite.sql.SqlKind;
import org.apache.calcite.sql.SqlOperandCountRange;
import org.apache.calcite.sql.type.OperandTypes;
import org.apache.calcite.sql.type.ReturnTypes;
import org.apache.calcite.sql.type.SqlOperandCountRanges;

/**
 * The REGEXP_SUBSTR(source_string, regex_pattern [, pos, occurrence, match_type])
 * function extracts a substring from source_string that matches a regular
 * expression specified by regex_pattern.
 */
public class SqlRegexpSubstrFunction extends SqlFunction {
  public SqlRegexpSubstrFunction() {
    super(
        "REGEXP_SUBSTR",
        SqlKind.REGEXP_SUBSTR,
        ReturnTypes.VARCHAR_2000_NULLABLE,
        null,
        null,
        SqlFunctionCategory.STRING);
  }

  @Override public SqlOperandCountRange getOperandCountRange() {
    return SqlOperandCountRanges.between(2, 5);
  }

  @Override public boolean checkOperandTypes(SqlCallBinding callBinding, boolean throwOnFailure) {
    final int operandCount = callBinding.getOperandCount();
    // The first two operands (source string and pattern) must be strings.
    for (int i = 0; i < 2; i++) {
      if (!OperandTypes.STRING.checkSingleOperandType(
          callBinding, callBinding.operand(i), 0, throwOnFailure)) {
        return false;
      }
    }
    // Optional operands: pos and occurrence must be integers, match_type a string.
    for (int i = 2; i < operandCount; i++) {
      if (i == 2 && !OperandTypes.INTEGER.checkSingleOperandType(
          callBinding, callBinding.operand(i), 0, throwOnFailure)) {
        return false;
      }
      if (i == 3 && !OperandTypes.INTEGER.checkSingleOperandType(
          callBinding, callBinding.operand(i), 0, throwOnFailure)) {
        return false;
      }
      if (i == 4 && !OperandTypes.STRING.checkSingleOperandType(
          callBinding, callBinding.operand(i), 0, throwOnFailure)) {
        return false;
      }
    }
    return true;
  }
}
package ru.asemenov.List;

/**
 * Interface SimpleStack — solution for task part 005.
 * @author asemenov
 * @version 1
 * @param <E> element type.
 */
public interface SimpleStack<E> {
    /**
     * Push an element onto the stack.
     * @param e element to push.
     */
    void push(E e);

    /**
     * Pop the most recently pushed element.
     * @return the top element.
     */
    E pop();
}
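A minimal array-backed implementation sketch of this interface (ours, not part of the original task); it lives in the same package as SimpleStack and throws NoSuchElementException on an empty pop, both of which are our assumptions:

package ru.asemenov.List;

import java.util.NoSuchElementException;

public class ArraySimpleStack<E> implements SimpleStack<E> {

    private Object[] items = new Object[8];
    private int size = 0;

    @Override
    public void push(E e) {
        if (size == items.length) {
            // Double the backing array when full.
            Object[] bigger = new Object[items.length * 2];
            System.arraycopy(items, 0, bigger, 0, size);
            items = bigger;
        }
        items[size++] = e;
    }

    @Override
    @SuppressWarnings("unchecked")
    public E pop() {
        if (size == 0) {
            throw new NoSuchElementException("stack is empty");
        }
        E top = (E) items[--size];
        items[size] = null;  // let GC reclaim the slot
        return top;
    }
}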
Ginger Cats and Cute Puppies: Animals, Affect and Militarisation in the Crisis in Ukraine Exposure to affective depictions of soldiers with domesticated animals such as cats and dogs encourages civilian audiences to view soldiers, militaries and even the aims of war with sympathy and approval. This chapter argues that Russia and Ukraine are currently engaged in parallel processes of creating and disseminating such depictions in order to rehabilitate the reputations of their armed forces and garner support for their military operations in eastern Ukraine. This positioning of soldiers' bodies and animals' bodies together, most notably in photographs circulated on social media, but also in other representations such as statues, is just one example of the wider phenomenon of digital militarism. State militaries and alliances have become very sophisticated and systematic about the use of digital technologies, especially social media and the internet, to disseminate positive messages and images about soldiers, the armed forces and war. The chapter concludes that the differing degrees of success of Russia and Ukraine can be attributed to factors that are highly dependent on context, demonstrating that militarisation is above all a set of social processes.
No, this needs to make its way to the USSC. The most obvious issue, of course, is that the judge(s) are using the puffery of a campaign as though it were a statement of the elected president, and that is actually unprecedented in American jurisprudence. But the larger, over-riding issue is whether or not a power given to a president by Congress (which power is itself indisputably constitutional) can be circumscribed by the courts. The matter at hand is a distinctly political one, and that political matters are beyond the scope of the judiciary has been indisputable to this point. The states suing because of a speculative loss of immigrants is little more than a policy dispute. Whether the president is a Republican or Democrat, the very last thing our republic needs is for the judiciary to elevate itself to the role of political overseer.
U.S. Rep. Gwen Graham is supporting Hillary Clinton for president, saying she has the “empathy, experience, intelligence and desire” to move the country forward. Graham, one of 32 Democratic superdelegates in Florida, announced her endorsement after casting her ballot for Clinton during early voting at the Leon County Courthouse. “We’ve got to want to work together to make sure that we deal with the issues that our country is facing, and I know that Secretary Clinton is the one to be able to do that.” Graham, asked about the GOP primary, called it a “circus.” “I find it to be appalling and an embarrassment,” she said. “You’re campaigning to be the leader of the greatest country in the world. And we’ve turned this into a juvenile mudslinging. And I’m using that word ‘mudslinging’ as a nicer term for some of the other things that have gone on.” Graham said she bumped into Clinton’s challenger, Vermont Sen. Bernie Sanders, recently at Reagan National Airport. “He’s very sweet,” she said. “I like Sen. Sanders very much. I believe from my very core that Secretary Clinton has the combination of qualities we need at this moment in our country’s history more than ever.” Early voting began Saturday in Leon County and runs through Saturday, ahead of Florida’s March 15 presidential primary. Graham urged people to get out and vote. “Every election is important,” she said. “But I think we can all recognize this one has become critical to our nation’s future. So get out, come vote, support who you believe can best lead our country forward.” Contact Jeff Burlew at jburlew@tallahassee.com or follow @JeffBurlew on Twitter.
WASHINGTON (AP) — The Latest on the selection of a new national leader of the Democratic Party (all times local):

4:25 p.m.

Former President Barack Obama has congratulated Tom Perez on his election to lead the Democratic Party and commended his decision to ask runner-up Keith Ellison to be his deputy. Obama said in a statement that he knows "Perez will unite us under that banner of opportunity, and lay the groundwork for a new generation of Democratic leadership for this big, bold, inclusive, dynamic America we love so much." Perez, who was labor secretary under Obama, won the top party job on the second ballot Saturday at the Democratic National Committee meeting in Atlanta. He is the first Latino to hold the post.

___

3:35 p.m.

In a show of unity, newly minted Democratic National Committee Chairman Tom Perez has picked runner-up Keith Ellison to be deputy chairman. Perez won the top job on the second ballot Saturday at the DNC meeting in Atlanta. Perez, who was labor secretary under President Barack Obama, immediately asked members to make Ellison the deputy. In remarks to the gathering, Ellison stressed the need for a unified party despite the divisions between establishment Democrats who backed Perez and the liberal wing that favored Ellison. The Minnesota congressman spoke of the "earnest work we must do to confront Donald Trump" as well as creating a country where everyone can aspire to a good life. Ellison said he and all his supporters were going to help Perez as the party tries to get back to its winning ways.

___

3:20 p.m.

Democrats have a new national party chairman and it's Tom Perez, who was labor secretary under President Barack Obama. Perez won over Keith Ellison, a Minnesota congressman, in the second round of voting Saturday by Democratic National Committee members at their meeting in Atlanta. Perez gave a speech before the vote, and he said Democrats face "a crisis of confidence" and a "crisis of relevance." He pledged to "take the fight" to Trump and "right-wing Republicans." The Democrats' power deficit is stark. Republicans occupy the White House, run both chambers of Congress and control about two-thirds of U.S. statehouses. Perez and Ellison each pledged to rebuild state and local parties, including in Republican-dominated states. Both said the party must capitalize on widespread opposition to Trump but also work to reach frustrated working-class voters who felt abandoned by Democrats and embraced Trump.

___

2:35 p.m.

Several candidates for Democratic Party leader have bowed out — and that's left a showdown between Tom Perez — labor secretary under President Barack Obama — and Keith Ellison, a Minnesota congressman. It takes 214.5 votes to win — and Perez has come up just one short in the first round of voting Saturday by members of the Democratic National Committee meeting in Atlanta. Ellison captured 200 votes. That's allowed several candidates who captured a dozen or so votes to step away heading into the second round of voting. Sally Boynton Brown, who got 12 votes, exited the race without making an endorsement. Samuel Ronan praised both candidates as he departed the race. Jehmu Greene endorsed Perez, and Peter Peckarsky backed Ellison.

___

2:20 p.m.

Democrats have held their first round of voting for a new party leader, and there's no winner yet. Former Obama Labor Secretary Tom Perez has fallen just short of a majority, but not far behind is Keith Ellison, a Minnesota congressman. So members of the Democratic National Committee are set to move ahead with a second round of voting at their meeting in Atlanta. A group of long-shot candidates captured enough votes to require another round of voting. Perez and Ellison will try to win over their backers to secure a majority. Perez and Ellison each promise to oppose President Donald Trump and rebuild state and local Democratic organizations.

___

1:05 p.m.

The national Democratic chairman's race is narrowing before party activists even cast ballots. Mayor Pete Buttigieg of South Bend, Indiana, told Democratic National Committee members Saturday that he did not have the votes and was removing his name from consideration. That increases the likelihood that former Labor Secretary Tom Perez or Minnesota Rep. Keith Ellison could win the job without a marathon voting session. There still are six candidates, and the winner must win a majority. Buttigieg had campaigned as an outsider promising "a fresh start." He had hoped neither Ellison nor Perez could reach a majority, opening the door for another option. Buttigieg urged the party to "look outside Washington" to find its way back.

___

12:50 p.m.

A congressman who wants to head the Democratic National Committee says his party is in "this mess because we lost not one election, but a thousand elections" at all levels around the country. Keith Ellison of Minnesota says he'll turn around the party's fortune if he's elected DNC chairman. He made his comments before party activists meeting in Atlanta. The vote is expected later Saturday. Whatever the outcome, Ellison says it's important that the party is unified after the gathering as Democrats work to counter President Donald Trump. He says: "We've got to come out of here, hand in hand, brothers and sisters, because Trump is right outside of that door, and not just Trump, but Trumpism."

___

12:30 p.m.

A leading candidate to be Democratic Party chairman says his party is "suffering from a crisis of confidence, a crisis of relevance" after the election of Donald Trump as president. Tom Perez is pledging that if he's elected chairman, he'll help "turn this party around and get Democrats winning again." The former labor secretary under President Barack Obama says "a united Democratic Party is not only our best hope, it is Donald Trump's worst nightmare." Perez spoke before party activists in Atlanta, as the Democratic National Committee neared a vote on a new leader.

___

12 p.m.

The outgoing Democratic Party chief is addressing cybersecurity concerns after internal communications were stolen by hackers and leaked during the 2016 presidential election. U.S. intelligence officials blame Russian agents. Donna Brazile tells Democratic National Committee members gathered in Atlanta that the party has worked with high-tech experts to leave the party more secure. She chides President Donald Trump for his mockery of DNC cybersecurity and his doubts that Russians are at fault. Brazile is urging Congress to investigate whether Russians hacked the Republican National Committee. No RNC emails were leaked during the 2016 campaign. Republican officials insist their party communications were not breached. Brazile suggests that proves Russians wanted to help Trump.

___

9:55 a.m.

Democratic officials are in new territory with a competitive election for party chief. In past races, a leading candidate usually emerged well before actual voting. This time, there's a high likelihood that it will take multiple rounds of voting for former Obama Labor Secretary Tom Perez, Minnesota Rep. Keith Ellison or a dark-horse candidate to win a majority. So, party staff and 442 eligible DNC members have had to dust off complicated rules that usually don't matter. For example, party officials expect about 70 or 75 members to be absent. Nearly all have designated another member to cast paper proxy ballots on their behalf. That will add a layer of suspense. Members in the room vote electronically, with quick tallies. But the paper proxies must be counted by hand.

___

9:35 a.m.

The Democratic National Committee has resumed its meeting in Atlanta and on the agenda later in the morning is the election of a new chairman to lead the party in the era of Republican President Donald Trump. The top contenders are Tom Perez, labor secretary under President Barack Obama, and Keith Ellison, a Minnesota congressman. Other candidates are maintaining long-shot bids. The election for party chief involves 442 eligible members of the national party committee. The winner must capture a majority of votes cast. The DNC will have as many rounds of voting as it takes for a new chairman to emerge. Perez, Ellison and other Democrats agree on the need to rebuild the party at the state and local levels. They say those organizations then can capitalize on the widespread opposition to Trump by getting frustrated voters to elect more Democrats.

___

9:20 a.m.

Democrats are gathering in Atlanta to pick a new national chairman — and the vote seems to be coming down to Tom Perez, labor secretary under President Barack Obama, and Keith Ellison, a Minnesota congressman. Those voting in Saturday's race include hundreds of state party leaders, donors and activists who make up the Democratic National Committee and determine the party's direction. Perez supporters say he's on the edge of the required majority. Ellison says he's still a viable candidate. A few other candidates are holding out hope that neither Ellison nor Perez can seal the deal — and that would open up the race for an upset in later rounds of voting. The election is seen as the start of party rebuilding after Donald Trump's presidential victory.
/***************************************************************************
 * Copyright 2018 <NAME>
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 ***************************************************************************/

#ifndef __enigma_app_window_h__
#define __enigma_app_window_h__

/*! \file enigma_app_window.h
 *  \brief Header file for the application class that implements the functionality to control the Enigma simulators provided by the GUI classes.
 *         This class implements all the user interface functionality that is provided on top of the basic simulator features.
 */

#include<gtkmm.h>
#include<enigma_draw.h>
#include<rotor_dialog.h>
#include<plugboard_dialog.h>
#include<ukwd_wiring_dialog.h>
#include<machine_config.h>
#include<enigma_sim.h>
#include<display_dialog.h>
#include<app_helpers.h>

#define ENIGMA "Enigma"

/*! \brief An application class that allows the user to control the Enigma simulators implemented by the GUI classes. It contains the code
 *         that is used to configure the Enigma simulators, save and load their state, open and close the log viewer and so on.
 *
 *  The member enigma_app_window::conf references the simulator state that is managed by instances of this class. All configuration
 *  changes are primarily made to enigma_app_window::conf and are then synchronized to the real simulator object pointed to by
 *  enigma_app_window::enigma through the methods enigma_app_window::update_rotors() and enigma_app_window::update_stecker_brett().
 */
class enigma_app_window : public Gtk::Window {
public:
    /*! \brief Constructor.
     *
     *  \param c [in] The machine_config that is to be used to initialize this instance of enigma_app_window.
     *  \param l_dir [inout] When constructing an instance of enigma_app_window this variable has to contain the directory in which
     *                       the last load or save operation has been executed. If further load or save operations are executed via
     *                       this instance the ustring which is referenced by this parameter is updated accordingly.
     */
    enigma_app_window(machine_config& c, Glib::ustring& l_dir);

    /*! \brief Destructor. */
    virtual ~enigma_app_window() {
        delete enigma;
        delete rotor_dialog;
        delete plugs_dialog;
        delete display_window;
        delete ukw_d_wiring_dialog;
        delete disp;
    }

    /*! \brief Callback that is executed, when the "Rotor settings ..." menu entry is selected. */
    virtual void on_settings_activate();

    /*! \brief Callback that is executed, when the "Plugboard ..." menu entry is selected. */
    virtual void on_plugboard_activate();

    /*! \brief Callback that is executed, when the "Reset" menu entry is selected. */
    virtual void on_reset_activate();

    /*! \brief Callback that is executed, when the "Rip paper strip" menu entry is selected. */
    virtual void on_rip_schreibmax_activate();

    /*! \brief Callback that is executed, when the "Quit" menu entry is selected. */
    virtual void on_quit_activate();

    /*! \brief Callback that is executed, when the "Set rotor positions ..." menu entry is selected. */
    virtual void on_set_rotor_positions_activate();

    /*! \brief Callback that is executed, when the "Show logs ..." menu entry is selected. */
    virtual void on_output_activate();

    /*! \brief Callback that is executed, when the "UKW Dora wiring ..." menu entry is selected. */
    virtual void on_ukwd_activate();

    /*! \brief Callback that is executed, when the "Rotor set|Save rotor set data" menu entry is selected. */
    virtual void on_save_rotor_set_data_activate();

    /*! \brief Callback that is executed, when the "Rotor set|Randomize rotor sets ..." menu entry is selected. */
    virtual void on_randomize_rotor_set_data_activate();

    /*! \brief Callback that is executed, when the "Rotor set|Load a rotor set ..." menu entry is selected. */
    virtual void on_load_rotor_set_data_activate();

    /*! \brief Callback that is executed, when the "Rotor set|Show active rotors ..." menu entry is selected. */
    virtual void on_show_rotors_activate();

    /*! \brief Callback that is executed, when the "Randomize" menu entry is selected. */
    virtual void on_randomize_activate();

    /*! \brief Callback that is executed, when the log window was closed. */
    virtual void on_log_invisible();

    /*! \brief Callback that is executed, when the "Log style: Encryption" menu entry is toggled. */
    virtual void on_enc_state_activate();

    /*! \brief Callback that is executed, when the GUI simulator object changes its En/Decryption mode. */
    virtual void on_mode_changed();

    /*! \brief Method that can be used to set the least recently used directory. The new value has to be specified by the string referenced by the
     *         l_dir parameter.
     */
    virtual void set_last_dir(Glib::ustring& l_dir) { last_dir = l_dir; }

    /*! \brief Method that can be used to retrieve the least recently used directory. The current value is written to the string referenced by the
     *         l_dir parameter.
     */
    virtual void get_last_dir(Glib::ustring& l_dir) { l_dir = last_dir; }

    /*! \brief This method can be used to configure the underlying simulator to use a real lampboard that can be controlled by sending appropriate
     *         commands to the serial port named by the contents of the parameter port (See ::enigma_real_lamp_board).
     */
    virtual void use_serial_port(string port);

    /*! \brief Method that can be used to retrieve the position the main window had when the application was closed. */
    virtual void get_last_pos(int& x, int& y) { x = pos_x; y = pos_y; }

    /*! \brief Method that is used to save the state of the application. */
    virtual void save_state();

    /*! \brief Method that is used as a callback when the user presses the close button in the title bar of the main window. */
    virtual bool on_my_delete_event(GdkEventAny* event);

    /*! \brief Method that is used as a callback when saving the settings of the simulator. */
    virtual bool do_save(Glib::ustring& desired_file_name);

    /*! \brief Method that is used as a callback when loading the settings of the simulator. */
    virtual bool do_load(Glib::ustring& desired_file_name);

protected:
    /*! \brief This method configures the simulator object to which enigma_app_window::enigma points according to the values specified
     *         by enigma_app_window::conf. It is called each time the user made changes to the rotor configuration of the simulated machine
     *         via the menu entries and dialogs provided by this class.
     *
     *  This method updates the rotor selection (including use of UKW D), the ring setting and the rotor positions of the underlying simulator.
     */
    virtual void update_rotors();

    /*! \brief This method configures the simulator object to which enigma_app_window::enigma points according to the values specified
     *         by enigma_app_window::conf. It is called each time the user made changes to the plugboard configuration of the simulated machine
     *         via the menu entries and dialogs provided by this class.
     *
     *  This method updates the plugboard configuration (including the use of the Enigma Uhr) of the underlying simulator.
     */
    virtual void update_stecker_brett();

    /*! \brief This method retrieves the rotor positions from the underlying simulator object to which enigma_app_window::enigma points and
     *         uses this data to update the corresponding values in enigma_app_window::conf.
     *
     *  This method is used to synchronize the rotor positions stored in the state variable enigma_app_window::conf used by this class with
     *  the actual rotor positions of the simulator object. Not calling this method before allowing the user to make changes to the configuration
     *  of the underlying machine can result in the loss of the rotor positions that were reached before making the configuration changes, as
     *  enigma_app_window::update_rotors() uses the state recorded in enigma_app_window::conf as its source. It should also be called before storing
     *  the state of a simulator object.
     */
    virtual void sync_rotor_pos();

    /*! \brief Helper method that sets up the data structures which are used to represent the menus in gtkmm. Not virtual as intended to be called
     *         from constructor.
     */
    void setup_menus();

    /*! \brief This method queries the current state of the underlying rotor machine and sets the grouping value in the log dialog
     *         accordingly.
     */
    void sync_log_grouping();

    /*! \brief Points to the GUI simulator that is in use in this application. */
    rotor_draw *simulator_gui;

    /*! \brief Holds the name of the application. */
    Glib::ustring app_name;

    /*! \brief Holds the object that represents the menu action group of this application. */
    Glib::RefPtr<Gio::SimpleActionGroup> menu_action;

    /*! \brief Holds the layout object that is used to stack the GUI elements (menu_bar and simulator_gui) of this application on top of each other. */
    Gtk::Box *vbox1;

    /*! \brief Holds the menu item that can be used to switch between encryption and decryption style when logging machine output. */
    Glib::RefPtr<Gio::SimpleAction> log_style_menuitem;

    /*! \brief Holds the menu item that can be used to switch the log window on or off. */
    Glib::RefPtr<Gio::SimpleAction> show_log_menuitem;

    /*! \brief Holds the menu that is in use in this application. */
    Gtk::Widget *menu_bar;

    /*! \brief Holds the names of the rotor slots that are in use by the Enigma variant that is currently simulated. */
    vector<string> rotor_names;

    /*! \brief Holds the configuration (and the full state) of the Enigma variant that is currently simulated. */
    machine_config& conf;

    /*! \brief Holds the least recently used directory. */
    Glib::ustring last_dir;

    /*! \brief Holds the Gtk::Builder object that can be used to gain access to GUI objects defined in the glade file. */
    Glib::RefPtr<Gtk::Builder> ref_xml;

    /*! \brief Holds the least recently used file. */
    Glib::ustring last_file_opened;

    /*! \brief Points to the rotor_dialog_processor object that is in use in this application. */
    Gtk::Dialog *rotor_dialog;

    /*! \brief Points to the plugboard_dialog object that is in use in this application. */
    Gtk::Dialog *plugs_dialog;

    /*! \brief Points to the ukwd_wiring_dialog object that is in use in this application. */
    Gtk::Dialog *ukw_d_wiring_dialog;

    /*! \brief Points to the Gtk::Window object managed by the enigma_app_window::disp member. */
    Gtk::Window *display_window;

    /*! \brief Points to the object that manages the events of the log dialog. */
    display_dialog *disp;

    /*! \brief Points to the object that actually holds the simulated Enigma machine. */
    enigma_base *enigma;

    /*! \brief Defines the columns used in the combo boxes that allow rotor selection in the rotor dialog. */
    simple_text_cols model_cols;

    /*! \brief Defines the columns used in the list view that displays the already selected plugs in the plugboard dialog. */
    plug_cols plugboard_cols;

    /*! \brief Defines the columns used in the list view that displays the already selected plugs in the UKW D wiring dialog. */
    plug_cols plugboard_cols_ukw;

    /*! \brief Holds the x-position of this object when it was closed or hidden. This position is saved and restored when
     *         restarting the application.
     */
    int pos_x;

    /*! \brief Holds the y-position of this object when it was closed or hidden. This position is saved and restored when
     *         restarting the application.
     */
    int pos_y;

    /*! \brief Object that manages the events occurring when the user selects entries in the Help menu. */
    help_menu_helper help_menu_manager;

    /*! \brief Object that manages the events occurring when the user selects Load or Save settings entries from the menu. */
    file_operations_helper file_helper;

    /*! \brief Object that manages the events occurring when the user selects "Process clipboard" from the menu. */
    clipboard_helper clip_helper;

    /*! \brief Object that manages the events occurring when the user selects to show or hide the log window. */
    log_helper loghelp;

    /*! \brief Helper object that is used to display simple messages. */
    menu_helper messages;

    /*! \brief Helper object that is used to manage the events that occur when the user selects the "Set rotor positions"
     *         entry from the menu.
     */
    rotor_position_helper pos_helper;

    /*! \brief Helper object that is used to manage the events that occur when the user requests to randomize machine settings. */
    randomizer_param_helper rand_helper;

    /*! \brief Helper object that is used to manage the event that occurs when the user requests to randomize all rotor sets. */
    rotor_set_rand_helper rand_rotor_set_helper;
};

#endif /* __enigma_app_window_h__ */
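The header only declares the class interface, so for orientation here is a minimal sketch of how it might be driven from a gtkmm application. The application id, the default-constructed machine_config and the starting directory are assumptions for illustration; only the constructor actually declared above is used.

// Hypothetical usage sketch, not part of the header: assumes the project
// headers are on the include path and that machine_config can be
// default-constructed to a sensible Enigma configuration.
#include <gtkmm.h>
#include <enigma_app_window.h>

int main(int argc, char* argv[]) {
    auto app = Gtk::Application::create(argc, argv, "org.example.enigma");

    machine_config conf;            // state shared between dialogs and simulator
    Glib::ustring last_dir = ".";   // least recently used directory (inout)

    // The window synchronizes conf with the simulator via update_rotors()
    // and update_stecker_brett() as the user works with the menus.
    enigma_app_window window(conf, last_dir);

    return app->run(window);
}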
/**
 * Method used to iterate over the assignments in a best first order
 * @author Brammert Ottens, 28 apr 2010
 */
private void iter() {
    --nbrSolsLeft;

    if (nbrSumSolsLeft > 0) {
        // There are still sum-variable assignments left for the current
        // VRP assignment, so only the sum variables are advanced.
        iterateSumVars();
    } else {
        // Advance the VRP part of the assignment ...
        if (hasVehicleRoutingProblem) {
            if (vehicleIterBestFirst.hasNext()) {
                vrpAssignment = vehicleIterBestFirst.nextSolution();
                currentUtility = vehicleIterBestFirst.getCurrentUtility();
            }
        } else {
            // Without a routing problem the single VRP variable is binary;
            // simply flip it between zero and one.
            vrpAssignment[0] = vrpAssignment[0] == one ? zero : one;
        }

        // ... and reset the sum variables for the new VRP assignment.
        if (hasSumVariables) {
            Arrays.fill(sumAssignment, zero);
            if (vrpAssignment[ownVariableAssignmentVRPIndex].equals(zero)) {
                sumAssignment[0] = one;
                nbrSumSolsLeft = nbrSumSols - 1;
            } else {
                nbrSumSolsLeft = 0;
            }
        }
    }

    combineVRPAndSum();
}
Uber drivers have been accused of secretly logging out of the app to make prices soar and allow them to charge customers more money, new research suggests. Researchers interviewed Uber drivers in London and New York and produced a study which claims drivers are deliberately pushing prices higher. It was suggested that drivers working in the same area are logging out of the mobile taxi app, which makes the number of available cars drop. This reduces supply while demand stays the same, so a 'surge' price is introduced and fares increase. On some occasions, the price of a journey can cost several times the normal fare when there is a surge. The study, which was seen by the Times newspaper, looked at more than 1,000 posts on the online forum Uberpeople.net. One London driver posted on the site: 'Guys, stay logged off until surge.' Another worker asked why and the Londoner replied: 'Less supply high demand = surge.' Dr Mareike Möhlmann, from the Warwick Business School in Coventry, was quoted in the newspaper as saying: 'Drivers have developed practices to regain control, even gaming the system. It shows that the algorithmic management that Uber uses may not only be ethically questionable but may also hurt the company itself.' An Uber spokesperson told MailOnline: 'This behaviour is neither widespread nor permissible on the Uber app, and we have a number of technical safeguards in place to prevent it from happening.'
/**
 * Copyright 2014 Google Inc. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __MR4C_C_EXTERNAL_CONTEXT_API_H__
#define __MR4C_C_EXTERNAL_CONTEXT_API_H__

typedef struct CExternalContextStruct *CExternalContextPtr;

#ifdef __cplusplus
extern "C" {
#endif

struct CExternalContextMessageStruct {
	const char* topic;
	const char* contentType;
	const char* content;
};

typedef struct CExternalContextMessageStruct CExternalContextMessage;

/**
 * Function type for callback to receive logging
 * @param level One of INFO, ERROR, DEBUG, WARN
 */
typedef void (*CExternalLogFunctionPtr) (
	const char* level,
	const char* message
);

/**
 * Function type for callback to receive progress reporting
 */
typedef void (*CExternalProgressFunctionPtr) (
	float percentDone,
	const char* message
);

/**
 * Function type for callback to receive messages to topics
 */
typedef void (*CExternalMessageFunctionPtr) (
	const CExternalContextMessage& message
);

/**
 * Function type for callback to report failure
 */
typedef void (*CExternalFailureFunctionPtr) (
	const char* message
);

struct CExternalContextCallbacksStruct {
	CExternalLogFunctionPtr logCallback;
	CExternalProgressFunctionPtr progressCallback;
	CExternalMessageFunctionPtr messageCallback;
	CExternalFailureFunctionPtr failureCallback;
};

typedef struct CExternalContextCallbacksStruct CExternalContextCallbacks;

CExternalContextPtr CExternalContext_newContext(
	const CExternalContextCallbacks& contextCallbacks
);

#ifdef __cplusplus
}
#endif

#endif
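Because the header above only defines the callback plumbing, a short client-side sketch may clarify how it is meant to be wired up. The handler bodies, the include name and the main() harness are assumptions for illustration; only the types and the single factory function actually declared in the header are used.

// Hypothetical client wiring for the callback API declared above; the
// include name is assumed. Compiled as C++, matching the reference
// parameters used in the header.
#include "external_context_api.h"

#include <cstdio>

static void onLog(const char* level, const char* message) {
    std::printf("[%s] %s\n", level, message);
}

static void onProgress(float percentDone, const char* message) {
    std::printf("progress %.1f%%: %s\n", percentDone, message);
}

static void onMessage(const CExternalContextMessage& message) {
    std::printf("topic=%s type=%s content=%s\n",
                message.topic, message.contentType, message.content);
}

static void onFailure(const char* message) {
    std::fprintf(stderr, "FAILURE: %s\n", message);
}

int main() {
    CExternalContextCallbacks callbacks;
    callbacks.logCallback = onLog;
    callbacks.progressCallback = onProgress;
    callbacks.messageCallback = onMessage;
    callbacks.failureCallback = onFailure;

    // The returned opaque handle would normally be handed to the MR4C
    // runtime; how it is released is not part of this header.
    CExternalContextPtr context = CExternalContext_newContext(callbacks);
    (void) context;
    return 0;
}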
package com.octo.android.robospice.notification;

import android.app.Notification;
import android.app.NotificationManager;
import android.app.Service;
import android.content.Context;
import android.content.Intent;
import android.os.IBinder;

import com.octo.android.robospice.SpiceManager;
import com.octo.android.robospice.SpiceService;
import com.octo.android.robospice.persistence.exception.SpiceException;
import com.octo.android.robospice.request.listener.RequestListener;
import com.octo.android.robospice.request.listener.RequestProgress;
import com.octo.android.robospice.request.listener.RequestProgressListener;
import com.octo.android.robospice.request.listener.RequestStatus;

/**
 * Will create notifications to display the progress of a given request. This
 * class is a base class to create such a service. Implementations will only
 * focus on creating notifications to "follow" the status of a given request. All
 * information about the request to track is provided to the service via an
 * Intent. This service will be automatically stopped when the request has
 * been fully processed.
 * @author SNI
 */
public abstract class SpiceNotificationService extends Service {

    private static final int DEFAULT_ROBOSPICE_NOTIFICATION_ID = 70;
    public static final String BUNDLE_KEY_NOTIFICATION_ID = "BUNDLE_KEY_NOTIFICATION_ID";
    public static final String BUNDLE_KEY_REQUEST_CACHE_KEY = "BUNDLE_KEY_REQUEST_CACHE_KEY";
    public static final String BUNDLE_KEY_REQUEST_CLASS = "BUNDLE_KEY_REQUEST_CLASS";
    public static final String BUNDLE_KEY_SERVICE_CLASS = "BUNDLE_KEY_SERVICE_CLASS";
    public static final String BUNDLE_KEY_FOREGROUND = "BUNDLE_KEY_FOREGROUND";

    private int notificationId = DEFAULT_ROBOSPICE_NOTIFICATION_ID;
    private Class<?> requestClass;
    private String requestCacheKey;
    private boolean foreground;
    private Class<? extends SpiceService> spiceServiceClass;
    private NotificationManager notificationManager;
    private SpiceManager spiceManager;

    public static Intent createIntent(final Context context, final Class<? extends SpiceNotificationService> clazz,
        final Class<? extends SpiceService> spiceServiceClass, final int notificationId,
        final Class<?> requestResultType, final String cacheKey, final boolean foreground) {
        final Intent intent = new Intent(context, clazz);
        intent.putExtra(BUNDLE_KEY_NOTIFICATION_ID, notificationId);
        intent.putExtra(BUNDLE_KEY_SERVICE_CLASS, spiceServiceClass);
        intent.putExtra(BUNDLE_KEY_REQUEST_CLASS, requestResultType);
        intent.putExtra(BUNDLE_KEY_REQUEST_CACHE_KEY, cacheKey);
        intent.putExtra(BUNDLE_KEY_FOREGROUND, foreground);
        return intent;
    }

    @Override
    public IBinder onBind(final Intent intent) {
        return null;
    }

    @SuppressWarnings({ "unchecked", "rawtypes", "deprecation" })
    @Override
    public final void onStart(final Intent intent, final int startId) {
        super.onStart(intent, startId);

        notificationId = intent.getIntExtra(BUNDLE_KEY_NOTIFICATION_ID, DEFAULT_ROBOSPICE_NOTIFICATION_ID);
        requestClass = (Class<?>) intent.getSerializableExtra(BUNDLE_KEY_REQUEST_CLASS);
        requestCacheKey = intent.getStringExtra(BUNDLE_KEY_REQUEST_CACHE_KEY);
        spiceServiceClass = (Class<? extends SpiceService>) intent.getSerializableExtra(BUNDLE_KEY_SERVICE_CLASS);
        if (spiceServiceClass == null) {
            throw new RuntimeException("Please specify a service class to monitor. Use #createIntent as helper.");
        }
        foreground = intent.getBooleanExtra(BUNDLE_KEY_FOREGROUND, true);

        spiceManager = new SpiceManager(spiceServiceClass);
        notificationManager = (NotificationManager) getSystemService(NOTIFICATION_SERVICE);
        spiceManager.start(this);
        spiceManager.addListenerIfPending(requestClass, requestCacheKey, new NotificationRequestListener());
        if (foreground) {
            startForeground(notificationId, onCreateForegroundNotification());
        }
    }

    @Override
    public final void onDestroy() {
        spiceManager.shouldStop();
        super.onDestroy();
    }

    public Notification onCreateForegroundNotification() {
        throw new RuntimeException("If you use foreground = true, then you must override onCreateForegroundNotification().");
    }

    public abstract Notification onCreateNotificationForRequestFailure(SpiceException ex);

    public abstract Notification onCreateNotificationForRequestSuccess();

    public abstract Notification onCreateNotificationForRequestProgress(RequestProgress requestProgress);

    // ----------------------------------
    // INNER CLASS
    // ----------------------------------
    private class NotificationRequestListener<T> implements RequestListener<T>, RequestProgressListener {

        @Override
        public void onRequestFailure(final SpiceException arg0) {
            final Notification notification = onCreateNotificationForRequestFailure(arg0);
            notificationManager.notify(notificationId, notification);
            stopSelf();
        }

        @Override
        public void onRequestSuccess(final T result) {
            final Notification notification = onCreateNotificationForRequestSuccess();
            notificationManager.notify(notificationId, notification);
            stopSelf();
        }

        @Override
        public void onRequestProgressUpdate(final RequestProgress progress) {
            final Notification notification = onCreateNotificationForRequestProgress(progress);
            notificationManager.notify(notificationId, notification);

            if (progress.getStatus() == RequestStatus.COMPLETE) {
                stopSelf();
            }
        }
    }
}
Lambda oxygen sensors are used in the exhaust system of internal combustion engines to optimize pollutant emissions and the exhaust-gas aftertreatment. The lambda oxygen sensors determine the oxygen content of the exhaust gas, which is then used for the closed-loop control of the air-fuel mixture supplied to the internal combustion engine and thus the exhaust-gas lambda number upstream of a catalytic converter. A lambda control loop controls the supplying of air and fuel to the internal combustion engine in closed loop to achieve an exhaust gas composition that is optimal for the exhaust-gas aftertreatment by the catalytic converters provided in the exhaust tract of the internal combustion engine. In the case of spark ignition engines, a lambda of 1, i.e., a stoichiometric ratio of air to fuel, is typically controlled in closed loop. The pollutant emissions of the internal combustion engine can thus be minimized. Various forms of lambda oxygen sensors are in use. In the case of a two-step lambda oxygen sensor, also referred to as a discrete-level sensor or Nernst sensor, the voltage-lambda characteristic curve exhibits a step change at lambda=1. Therefore, it essentially allows a distinction to be made between rich exhaust gas (λ<1) during internal combustion engine operation characterized by excess fuel and lean exhaust gas (λ>1) during operation characterized by excess air, and permits a closed-loop control of the exhaust gas to a lambda of 1. Using a broadband lambda oxygen sensor, also referred to as a continuous or linear lambda oxygen sensor, the lambda value in the exhaust gas can be measured within a broad range around lambda=1. Thus, for example, an internal combustion engine can also be controlled in closed loop toward a lean operation characterized by excess air. By linearizing the sensor characteristic, a continuous closed-loop lambda control upstream of the catalytic converter is possible within a limited lambda range even when a less expensive, two-step lambda oxygen sensor is used. This requires that there be a unique relationship between the sensor voltage of the two-step lambda oxygen sensor and lambda. This relationship must exist for the entire service life of the two-step lambda oxygen sensor since, otherwise, the accuracy of the closed-loop control will not suffice, and unacceptably high emissions can occur. This requirement is not met due to manufacturing tolerances and the aging effects of the two-step lambda oxygen sensor. For that reason, two-step lambda oxygen sensors upstream of the catalytic converter are mostly used in the context of a two-step closed-loop control. This has the disadvantage that, in operating modes, for which a lean or rich air-fuel mixture is required, for example, for catalytic converter diagnostics or for component protection, the target lambda can only be precontrolled, but not controlled in closed loop. It is believed to be understood that there are various methods for calibrating the voltage-lambda characteristic curve of two-step lambda oxygen sensors to ensure that they can be used for a continuous control over the entire operational life thereof.
German Patent Application DE 3827978 discusses determining and compensating for a voltage offset, which is constant over the entire lambda range, of the voltage-lambda characteristic curve in question using a reference-voltage lambda characteristic curve of the two-step lambda oxygen sensor as a basis for comparison by adjusting the sensor voltage upon trailing throttle fuel cutoff of the internal combustion engine. Furthermore, German Patent Application DE 102010027984 A1 describes a method for operating an exhaust system of an internal combustion engine where at least one parameter of the exhaust gas flowing in an exhaust tract is measured by an exhaust-gas sensor. In accordance therewith, fresh air is supplied to the exhaust tract upstream of the exhaust-gas sensor via a fresh air supply assigned to the exhaust system during one operating state of the internal combustion engine in which injection and fuel combustion do not take place, and the exhaust-gas sensor is adjusted during this time and/or subsequently thereto. However, the voltage offset can only be adequately compensated when it is equally pronounced, not only in the case of trailing throttle fuel cutoff given corresponding oxygen-containing exhaust gas, but also over the entire lambda range. This can be the case when the voltage offset is due to a single cause. For the most part, however, there are several overlapping reasons why the voltage-lambda characteristic curve deviates from a reference-voltage lambda characteristic curve. These may be more or less salient in different lambda ranges, whereby the voltage offset changes as a function of the exhaust gas lambda. In particular, the causes in the lean and rich lambda ranges can vary in saliency. In the case of trailing throttle fuel cutoff, such a lambda-dependent voltage offset cannot be adequately compensated by an adjustment. A further drawback of the method resides in that present-day engine designs feature fewer and fewer trailing-throttle phases, thereby limiting the possibility of such trailing-throttle adjustments. German publication DE3837984 discusses a method for compensating for a shift of the lambda-1 step of the voltage-lambda characteristic curve by a setpoint control that includes a second lambda oxygen sensor disposed downstream. German publication DE19860463 discusses a method for determining the composition of the fuel-air mixture of a combustion engine during operation at a given setpoint value deviation from lambda=1, where the actual value deviation from lambda=1 is determined by temporarily adjusting the composition and evaluating the resulting reaction of a lambda oxygen sensor. It provides for a step-type adjustment by a defined value toward lambda=1 to be initially made, and for the lambda value to be subsequently further modified at a defined rate of change until the lambda oxygen sensor reacts, and for the actual deviation to be determined from the value of the step-type adjustment, the rate of change, and the time until the reaction of the lambda oxygen sensor is ascertained. Using the method, an offset of the voltage-lambda characteristic curve of a two-step lambda oxygen sensor can be recognized. It is disadvantageous that differences in various lambda ranges remain unconsidered in determining the actual value deviation from lambda=1. 
This can falsify the result to such an extent that the offset recognized in this way does not meet the accuracy required for a continuous closed-loop lambda control using a two-step lambda oxygen sensor disposed upstream of the catalytic converter.
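Since the passage above turns on the difference between a constant and a lambda-dependent voltage offset, a small numerical sketch may help. The tanh-shaped characteristic, the voltage levels and the offset values below are illustrative assumptions, not figures from the cited applications; the sketch only shows why a single-point adjustment during fuel cutoff cannot correct an offset that varies with lambda.

#include <cmath>
#include <cstdio>

// Toy two-step (Nernst) sensor characteristic: a steep transition around
// lambda = 1. Voltage levels and slope are illustrative assumptions.
static double referenceVoltage(double lambda) {
    return 0.45 - 0.35 * std::tanh(80.0 * (lambda - 1.0));
}

// Hypothetical aged sensor: the offset differs between the lean and rich
// ranges, which is the situation criticized in the text above.
static double agedVoltage(double lambda) {
    double offset = (lambda > 1.0) ? 0.02 : 0.06;
    return referenceVoltage(lambda) + offset;
}

int main() {
    // Adjustment during trailing-throttle fuel cutoff: the exhaust is
    // essentially air (very lean), so only the lean-range offset is seen.
    double estimatedOffset = agedVoltage(2.0) - referenceVoltage(2.0); // 0.02 V

    // Applying that single correction in the rich range leaves an error,
    // because there the true offset is 0.06 V.
    double residual = (agedVoltage(0.95) - estimatedOffset) - referenceVoltage(0.95);
    std::printf("estimated offset: %.3f V, residual error rich of lambda=1: %.3f V\n",
                estimatedOffset, residual);
    return 0;
}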
package com.ifttt.connect.ui;

public enum ConnectButtonState {

    /**
     * A button state for displaying a Connection in its initial state, the user has never authenticated this Connection
     * before.
     */
    Initial,

    /**
     * A button state for the create account authentication step. In this step, the user is going to be redirected
     * to web to create an account and continue with service connection.
     */
    CreateAccount,

    /**
     * A button state for the login authentication step. In this step, the user is going to be redirected to web
     * to login to IFTTT.
     */
    Login,

    /**
     * A button state for displaying a Connection that is enabled.
     */
    Enabled,

    /**
     * A button state for displaying a Connection that is disabled.
     */
    Disabled
}
#include <iostream>
#include "complejos.h"

int main() {
    Complejo complejo1{4, 5}, complejo2{7, -8};
    Complejo resultado;

    // Addition and subtraction via the named member functions; the Print
    // calls are kept commented out as in the original.
    resultado = resultado.Add(complejo1, complejo2);
    // resultado.Print(resultado);
    resultado = resultado.Sub(complejo1, complejo2);
    // resultado.Print(resultado);

    Complejo prueba;
    Complejo prueba1;
    Complejo prueba2;
    int prueba_entero = 1;
    double prueba_real = 0.1;

    // The overloaded operator- accepts another Complejo, an int or a double.
    prueba = complejo1 - complejo2;
    prueba1 = complejo1 - prueba_entero;
    prueba2 = complejo1 - prueba_real;

    prueba.Print(prueba);
    prueba1.Print(prueba1);
    prueba2.Print(prueba2);

    return 0;
}
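The header complejos.h is not shown in this snippet, so for context here is one plausible declaration consistent with the calls in main() above. The member names, the Print(const Complejo&) style and the real/imaginary layout are all assumptions for illustration, not the actual project header.

// Hypothetical complejos.h matching the usage in main(); everything below
// is an assumption reconstructed from the calls, not the original file.
#ifndef COMPLEJOS_H
#define COMPLEJOS_H

#include <iostream>

class Complejo {
public:
    Complejo() : re_(0.0), im_(0.0) {}
    Complejo(double re, double im) : re_(re), im_(im) {}

    // Named arithmetic helpers as called in main().
    Complejo Add(const Complejo& a, const Complejo& b) const {
        return Complejo(a.re_ + b.re_, a.im_ + b.im_);
    }
    Complejo Sub(const Complejo& a, const Complejo& b) const {
        return Complejo(a.re_ - b.re_, a.im_ - b.im_);
    }

    // operator- overloads covering Complejo - Complejo, - int and - double;
    // scalars are treated as purely real values.
    Complejo operator-(const Complejo& other) const {
        return Complejo(re_ - other.re_, im_ - other.im_);
    }
    Complejo operator-(int value) const { return Complejo(re_ - value, im_); }
    Complejo operator-(double value) const { return Complejo(re_ - value, im_); }

    void Print(const Complejo& c) const {
        std::cout << c.re_ << (c.im_ < 0 ? " " : " +") << c.im_ << "i\n";
    }

private:
    double re_;
    double im_;
};

#endif  // COMPLEJOS_H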
Variations in recommended nutrient intakes

Tables of recommended daily intakes or amounts (RDA) of nutrients have now been published by international agencies and by governments or scientific institutes in more than forty different countries, and a number have issued more than one set. Few are the same, and an indication of just how much the values can vary, even for one of the better-understood nutrients, is shown in Table I for vitamin C. It is not my intention to summarize or review all the differences, for this has already been comprehensively done by the International Union of Nutritional Sciences (IUNS). I shall concentrate more on how such differences can arise and on some of their consequences. This will require consideration of how the concept of recommended allowances, recommended intakes or recommended amounts began; of some of the ways in which they have been derived from often limited experimental evidence; and of some of the many purposes for which the concept and the numerical values have been developed.
Delayed-enhancement 320-row volume multidetector computed tomography for the assessment of reperfused acute and old myocardial infarction in a porcine model.

To evaluate the practical value of delayed-enhancement 320-row volume multidetector computed tomography (DE-320 row volume MDCT) for reperfused acute and old myocardial infarction (MI) in a porcine model, 14 pigs underwent balloon-induced occlusion of the middle segment of the left circumflex coronary artery (LCX) for 90 min. The balloons were then removed and reperfusion was performed. DE-320 row volume MDCT was performed 15 min after the injection of iodinated contrast. Delayed-enhancement magnetic resonance imaging (DE-MRI) was completed 15 min after injection of gadolinium-DTPA. The pigs were then divided into acute and old MI groups. Six pigs in the acute MI group were sacrificed immediately and their hearts were harvested for triphenyltetrazolium chloride (TTC) pathology. The pigs in the old MI group were reared for another 6 months before undergoing repeat DE-320 row volume MDCT and DE-MRI prior to being sacrificed. On DE-320 row volume MDCT, the size of the acute and old myocardial infarction was 23.95±9.8% and 16.93±7.04%, respectively, which showed high consistency and correlation with DE-MRI and TTC pathology. The CT values of the delayed enhanced area differed between acute MI and old MI: 132.5±30.5 HU and 91.2±18.3 HU, respectively (P<0.001). The CT values of the delayed enhanced areas also differed from those of viable myocardium (acute, P<0.001; old, P<0.001). The diagnosis of the infarcted segments by DE-320 row volume MDCT had good consistency with the results of TTC pathology (acute, kappa = 0.76; old, kappa = 0.64). DE-320 row volume MDCT can be an effective method for assessing MI.