// RESTful Samples/RESTfulEngine.TypeScriptClient/TypeScriptClient/src/datasource/XmlDataSource.ts
import DataSource = require("./DataSource");
import Variable = require("./TemplateVariable");

export class XmlDataSource implements DataSource.DataSource {
    Variables: Variable.TemplateVariable[];
    xmlData: Buffer = undefined;
    uri: string = undefined;
    schemaUri: string = undefined;
    schemaData: Buffer = undefined;

    // Each argument may be either a URI (string) or raw XML content (Buffer).
    constructor(data: string | Buffer, schema: string | Buffer = undefined) {
        if (typeof data === "string")
            this.uri = data;
        else
            this.xmlData = <Buffer>data;

        if (typeof schema === "string")
            this.schemaUri = schema;
        else
            this.schemaData = <Buffer>schema;
    }

    // Serialize this data source into the engine's JSON request body;
    // binary payloads are base64-encoded.
    GetJsonRequest() {
        const json = {
            Variables: JSON.stringify(this.Variables),
            Type: "xml",
            Data: (this.xmlData == undefined ? undefined : this.xmlData.toString("base64")),
            Uri: this.uri,
            SchemaUri: this.schemaUri,
            SchemaData: (this.schemaData == undefined ? undefined : this.schemaData.toString("base64")),
        };
        return json;
    }
}
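A brief usage sketch (not part of the original file; the file names and the fs import are illustrative assumptions): the constructor's union types let callers pass either a URI for the engine to fetch or raw bytes to embed as base64.

// Usage sketch -- hypothetical call site, assuming Node's fs and Buffer.
import fs = require("fs");
import Xml = require("./XmlDataSource");

// From a URI: the engine fetches the XML itself.
const fromUri = new Xml.XmlDataSource("http://example.com/data.xml");

// From local file contents: sent inline, base64-encoded by GetJsonRequest().
const fromBuffer = new Xml.XmlDataSource(fs.readFileSync("data.xml"));

const body = fromBuffer.GetJsonRequest(); // JSON payload for the REST engine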
# This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from typing import List
from enum import Enum
from GridCal.Engine.basic_structures import Logger
from GridCal.Engine.Core.multi_circuit import MultiCircuit
from GridCal.Engine.Core.snapshot_pf_data import SnapshotCircuit
from GridCal.Engine.Simulations.PowerFlow.power_flow_worker import single_island_pf, PowerFlowResults
from GridCal.Engine.Simulations.PowerFlow.power_flow_options import PowerFlowOptions
from GridCal.Engine.Simulations.PTDF.ptdf_results import PTDFVariation
class PtdfGroupMode(Enum):
    ByTechnology = 'By technology'
    ByNode = 'By node'
    ByGenLoad = 'By Generator and Load'
def group_generators_by_technology(circuit: MultiCircuit):
    """
    Compose a dictionary of generator groups
    :param circuit: MultiCircuit
    :return: dictionary [Technology] : [generator indices]
    """
    gens = circuit.get_generators()
    groups = dict()
    for i, gen in enumerate(gens):
        if gen.technology in groups.keys():
            arr = np.r_[groups[gen.technology], i]
            groups[gen.technology] = arr
        else:
            groups[gen.technology] = np.array([i])
    return groups
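For illustration only, the same grouping can be written with a defaultdict over technology labels; this standalone sketch assumes nothing from GridCal and produces the same label-to-index-array mapping as the function above.

from collections import defaultdict

import numpy as np


def group_by_label(labels):
    """Toy equivalent of group_generators_by_technology: map each label
    to the numpy array of element indices that carry it."""
    groups = defaultdict(list)
    for i, label in enumerate(labels):
        groups[label].append(i)
    return {label: np.array(idx) for label, idx in groups.items()}


# group_by_label(['hydro', 'gas', 'hydro']) -> {'hydro': array([0, 2]), 'gas': array([1])}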
def get_ptdf_variations(circuit: MultiCircuit, numerical_circuit: SnapshotCircuit,
                        group_mode: PtdfGroupMode, power_amount):
    """
    Get the PTDF variations
    :param circuit: MultiCircuit instance
    :param numerical_circuit: SnapshotCircuit instance (faster)
    :param group_mode: how to group the variations (see PtdfGroupMode)
    :param power_amount: power amount to vary. When grouping by technology the variation
                         applies per group; otherwise it applies per individual generator
    :return: list of variations (instances of PTDFVariation)
    """
    variations = list()

    # declare the default variation object and store it
    var = PTDFVariation(name='Default', n=numerical_circuit.nbus, original_power=power_amount)
    variations.append(var)

    # compute the per unit power
    power = power_amount / circuit.Sbase

    if group_mode == PtdfGroupMode.ByTechnology:
        # get generator groups by technology
        groups = group_generators_by_technology(circuit=circuit)

        for key, indices in groups.items():
            ng = len(indices)

            # power increment shared by all the generators of this technology
            dPg = np.ones(ng) * power / float(ng)

            # declare the variation object
            var = PTDFVariation(name=key, n=numerical_circuit.nbus, original_power=power_amount)

            # power increment by bus
            var.dP = numerical_circuit.C_bus_gen[:, indices] * dPg

            # store the variation
            variations.append(var)

    elif group_mode == PtdfGroupMode.ByGenLoad:
        # add the generation variations
        for i in range(numerical_circuit.ngen):
            # generate an array of zeros, and modify the generation for the particular generator
            dPg = np.zeros(numerical_circuit.ngen)
            dPg[i] = power

            # declare the variation object
            var = PTDFVariation(name=numerical_circuit.generator_names[i],
                                n=numerical_circuit.nbus,
                                original_power=power_amount)

            # power increment by bus
            var.dP = numerical_circuit.C_bus_gen * dPg

            # store the variation
            variations.append(var)

        # add the load variations
        for i in range(numerical_circuit.nload):
            # generate an array of zeros, and modify the load for the particular load
            dPg = np.zeros(numerical_circuit.nload)
            dPg[i] = -power

            # declare the variation object
            var = PTDFVariation(name=numerical_circuit.load_names[i],
                                n=numerical_circuit.nbus,
                                original_power=power_amount)

            # power increment by bus
            var.dP = numerical_circuit.C_bus_load * dPg

            # store the variation
            variations.append(var)

    elif group_mode == PtdfGroupMode.ByNode:
        # add one variation per bus
        for i in range(numerical_circuit.nbus):
            # declare the variation object
            var = PTDFVariation(name=numerical_circuit.bus_names[i],
                                n=numerical_circuit.nbus,
                                original_power=power_amount)

            # inject the power increment at this bus only
            var.dP = np.zeros(numerical_circuit.nbus)
            var.dP[i] = power

            # store the variation
            variations.append(var)

    else:
        raise Exception('PTDF grouping mode not implemented: ' + str(group_mode))

    return variations
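A hedged usage sketch follows; `grid` is assumed to be an existing MultiCircuit, and `compile_snapshot_circuit` is assumed to be the compilation helper (the exact helper name may differ between GridCal versions).

from GridCal.Engine.Core.snapshot_pf_data import compile_snapshot_circuit

numerical = compile_snapshot_circuit(grid)  # grid: a MultiCircuit
variations = get_ptdf_variations(circuit=grid,
                                 numerical_circuit=numerical,
                                 group_mode=PtdfGroupMode.ByGenLoad,
                                 power_amount=10.0)  # MW shifted per element
# variations[0] is the default (no-shift) case; every other entry carries
# a dP vector of per-bus power increments in per unit.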
def power_flow_worker(variation: int, nbus, nbr, n_tr, bus_names, branch_names, transformer_names,
                      bus_types, calculation_inputs: List[SnapshotCircuit],
                      options: PowerFlowOptions, dP, return_dict):
    """
    Run asynchronous power flow
    :param variation: variation id
    :param nbus: number of buses
    :param nbr: number of branches
    :param n_tr: number of transformers
    :param bus_names: array of bus names
    :param branch_names: array of branch names
    :param transformer_names: array of transformer names
    :param bus_types: array of bus types
    :param calculation_inputs: list of SnapshotCircuit instances (one per island)
    :param options: PowerFlowOptions instance
    :param dP: delta of active power (array of values of size nbus)
    :param return_dict: dictionary to return values
    :return: Nothing because it is a worker; the return is done via the return_dict variable
    """
    # create new results
    pf_results = PowerFlowResults(n=nbus,
                                  m=nbr,
                                  n_tr=n_tr,
                                  n_hvdc=0,
                                  bus_names=bus_names,
                                  branch_names=branch_names,
                                  transformer_names=transformer_names,
                                  hvdc_names=(),
                                  bus_types=bus_types)
    logger = Logger()

    # simulate each island and merge the results
    for i, calculation_input in enumerate(calculation_inputs):
        if len(calculation_input.vd) > 0:
            # run the island power flow with the active power shifted by dP
            res = single_island_pf(circuit=calculation_input,
                                   Vbus=calculation_input.Vbus,
                                   Sbus=calculation_input.Sbus - dP[calculation_input.original_bus_idx],
                                   Ibus=calculation_input.Ibus,
                                   branch_rates=calculation_input.branch_rates,
                                   options=options,
                                   logger=Logger())

            # merge the results from this island
            pf_results.apply_from_island(results=res,
                                         b_idx=calculation_input.original_bus_idx,
                                         br_idx=calculation_input.original_branch_idx,
                                         tr_idx=calculation_input.original_tr_idx)
        else:
            logger.append('There are no slack nodes in the island ' + str(i))

    return_dict[variation] = (pf_results, logger)
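Because the worker reports through return_dict, it is naturally dispatched with a multiprocessing manager dictionary. A minimal sketch under stated assumptions: `variations` comes from get_ptdf_variations above, `nc` is the compiled snapshot circuit, `islands` its island split, and the attribute names on `nc` are assumptions that may differ by GridCal version.

import multiprocessing as mp

manager = mp.Manager()
return_dict = manager.dict()  # shared dict: variation index -> (results, logger)
jobs = []
for v, variation in enumerate(variations):
    p = mp.Process(target=power_flow_worker,
                   args=(v, nc.nbus, nc.nbr, nc.ntr, nc.bus_names,
                         nc.branch_names, nc.tr_names, nc.bus_types,
                         islands, options, variation.dP, return_dict))
    jobs.append(p)
    p.start()
for p in jobs:
    p.join()

pf_results, logger = return_dict[0]  # results for the default variation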
Nearly 10 months after announcing a tentative deal to build a 20,000-seat soccer stadium on Buzzard Point, District officials and D.C. United executives have nearly finalized an agreement, according to sources familiar with the discussions.
Despite months of delays and missed deadlines, the basic framework of the deal remains in place, according to the sources, who spoke on the condition of anonymity because the discussions have not been made public.
District Mayor Vincent Gray has less than eight months to get a soccer stadium plan approved. (AP Photo/Cliff Owen)
As previously agreed upon, the District government would provide up to $150 million for the estimated $300 million project through land and infrastructure improvements. D.C. United would be responsible for building the stadium, which would be located a few blocks southwest of Nationals Park, but would also receive property and sales tax breaks.
A hang-up in the discussions has been how to replace an original provision in which the District would have shared in any profits the team made. After vetting the profit-sharing provision with members of the D.C. Council, City Administrator Allen Lew, the District’s chief negotiator for D.C. Mayor Vincent C. Gray (D), agreed with the team to replace it with a combination of sales tax payments and a future $2 surcharge on tickets.
In the deal, the District would pay to build utility and road infrastructure for the project. Its costs are capped at $150 million, a provision aimed at preventing the kind of overruns that made construction of Nationals Park far more expensive than originally envisioned.
D.C. United would lease the land for 30 years at no cost with an option to extend it. According to sources, the team would pay no property taxes for the first five years, 25 percent property taxes for the next five years, then 50 percent for five years, 75 percent for five years and finally full property taxes.
United would also pay no sales taxes for the first five years, then 50 percent sales tax for five years and then full sales taxes. At that point the team would begin collecting a surcharge on tickets that would begin at $2 and increase with the Consumer Price Index. Proceeds from the surcharge would go to the District.
Lew declined to comment through a spokesman. Jason Levien, the team’s managing partner, also declined to comment.
Once the team and Gray finalize the agreement, the deal still faces a number of political hurdles and a frenzied time frame if it is to be approved by the D.C. Council before Gray leaves office at the end of the year.
Lew has agreed to deals with two owners of needed Buzzard Point land, developer Akridge and Pepco Holdings, but the Akridge deal requires trading the Reeves Center municipal office building on U Street in exchange for land and cash. Akridge plans to build a tower of apartments or condominiums in its place.
D.C. Council members Muriel Bowser (D-Ward 4) and Jim Graham (D-Ward 1) have criticized the idea to trade the Reeves Center and Bowser, who chairs the economic development committee and is the Democratic nominee for mayor, is likely to play a central role in the council’s consideration of the legislation.
Lew has still not secured two more valuable properties on the northern end of the site, along Potomac Avenue, owned by the Super Salvage scrap metal yard and Washington Kastles owner Mark Ein.
According to two sources, Ein, who has been negotiating on behalf of both parties, has asked for substantially more than what Akridge and Pepco agreed to be paid, though Akridge will be left with land to build on after the stadium is completed. Lew has repeatedly said he would use eminent domain to take the properties if he cannot negotiate a deal.
“We have sought from the beginning to find a fair deal for the city and also a fair deal amongst the parties involved,” Ein said.
The idea of helping the team build a stadium has received mixed reviews from District residents. A Post poll found that six in 10 D.C. residents opposed the idea of helping “finance” the stadium, though the mayor’s office argued the questions unfairly described the deal. The team has since launched an effort to build support for the team’s plans, and says more than 5,000 residents have written to city officials supporting a new stadium.
Lew plans to send the land deals and the agreement with D.C. United to the council in one legislative package this week. The council goes on recess in mid-July.
Should the stadium get built, the earliest the team could play in it is the 2017 season.
Rodent Aβ Modulates the Solubility and Distribution of Amyloid Deposits in Transgenic Mice* The amino acid sequence of amyloid precursor protein (APP) is highly conserved, and age-related Aβ aggregates have been described in a variety of vertebrate animals, with the notable exception of mice and rats. Three amino acid substitutions distinguish mouse and human Aβ that might contribute to their differing properties in vivo. To examine the amyloidogenic potential of mouse Aβ, we studied several lines of transgenic mice overexpressing wild-type mouse amyloid precursor protein (moAPP) either alone or in conjunction with mutant PS1 (PS1dE9). Neither overexpression of moAPP alone nor co-expression with PS1dE9 caused mice to develop Alzheimer-type amyloid pathology by 24 months of age. We further tested whether mouse Aβ could accelerate the deposition of human Aβ by crossing the moAPP transgenic mice to a bigenic line expressing human APPswe with PS1dE9. The triple transgenic animals (moAPP × APPswe/PS1dE9) produced 20% more Aβ but formed amyloid deposits no faster and to no greater extent than APPswe/PS1dE9 siblings. Instead, the additional mouse Aβ increased the detergent solubility of accumulated amyloid and exacerbated amyloid deposition in the vasculature. These findings suggest that, although mouse Aβ does not influence the rate of amyloid formation, the incorporation of Aβ peptides with differing sequences alters the solubility and localization of the resulting aggregates.

Human APP is processed to generate Aβ starting predominantly at the +1 site. In contrast, murine BACE1 preferentially cleaves endogenous APP at the +11 site (12). The effect of this truncation on the aggregation of Aβ is subject to debate: published reports describe both enhanced and reduced aggregation (oligomerization or sedimentation) in vitro of N-terminally truncated Aβ. Transgenic mice overexpressing human APP form amyloid deposits composed primarily of human Aβ. In addition, these mice harbor low levels of mouse Aβ beginning at both +1 and +11. Pype et al. show that the amount of mouse Aβ extracted from the brains of transgenic mice increased dramatically with the formation of human Aβ deposits, suggesting that the mouse peptide was co-depositing in the amyloid. However, mouse peptide comprised only about 5% of the total Aβ recovered from the oldest animals tested. Moreover, Calhoun et al. have shown that removing mouse APP expression in human APP transgenic mice had no impact on the extent or location of amyloid formation, indicating that the mouse peptide plays little or no role in the rate of amyloid formation in the presence of high levels of human Aβ. To date, only one study has addressed the effects of elevating murine Aβ in vivo by overexpressing wild-type moAPP. This study examined animals only 3.5-4 months of age and, therefore, did not test whether overexpression of moAPP would induce late onset amyloid formation. A complicating factor in this initial study was the FVB background strain used to generate the transgenic lines. Transgenic mice of this strain are prone to premature death when human APP is expressed via the hamster prion protein promoter (PrP) vector. We have subsequently moved one of these original moAPP transgenic lines out of the FVB background by crossing onto a hybrid C3H/HeJ × C57BL/6J strain for more than five generations. This strategy eliminated the premature death of animals, allowing us to examine the potential for amyloid formation in aged moAPP transgenic mice.
We report here that neither overexpression of moAPP alone, nor co-expression with the human presenilin-1 exon-9 deletion variant (PS1dE9), resulted in amyloid composed only of mouse Aβ. We further demonstrate that mouse Aβ does not accelerate the deposition of human Aβ in transgenic mice overexpressing both peptides, but reveal that high levels of mouse peptide alter the solubility of the resulting Aβ aggregates and increase the prevalence of vascular deposits. We further find that Aβ11-40/42, predicted to be the predominant form of Aβ produced from mouse APP, does not appear to be a major co-depositing peptide. These findings provide insight into the potential role of specific Aβ sequences in modulating the solubility and distribution of amyloid deposits in rodent models.

EXPERIMENTAL PROCEDURES

Generation of Transgenic Mice-All transgenic mice used in this study have been described and fully characterized in earlier publications. All transgenes were expressed under control of the mouse prion protein promoter (MoPrP.Xho), which drives high protein expression in neurons and astrocytes of the central nervous system. Line S-9, expressing human PS1 harboring the FAD exon-9 deletion (PS1dE9), is described in Lee et al. Line 1874, expressing wild-type mouse APP (moAPPwt), is described in Hsiao et al. Line 85, co-expressing human PS1dE9 and mouse/human (mo/hu) chimeric APP695 (humanized Aβ domain) harboring the Swedish (K594N/M595L) mutation, is described in Jankowsky et al. Unlike lines S-9 and 1874, line 85 was created by co-injecting two transgenes, each driven by its own prion promoter element. The two transgenes co-integrated and co-segregate as a single locus. Lines 85 and S-9 have been deposited with Jackson Laboratories (Bar Harbor, ME) for distribution (stock numbers 004462 and 005866, respectively). After these experiments were completed, line 1874 was lost through accidental mistyping of breeding stock. Line 85 and line S-9 animals used in this study were maintained on a hybrid background by backcrossing to C3H/HeJ × C57BL/6J F1 animals obtained from Jackson Laboratories. Line 1874 was backcrossed to C57BL/6J for two generations after it was originally generated on the FVB background. After discovering premature lethality in the offspring, the line was crossed back to the hybrid C3H/HeJ × C57BL/6J F1 strain for two additional generations, which restored normal longevity to the line. Offspring from the second C3/B6 backcross were used as breeders to generate the cohorts described in this study. Offspring were genotyped for the presence of the transgene by PCR amplification of genomic DNA extracted from 1-cm tail clippings as described previously. Reactions contained three primers: one antisense primer matching sequence within the vector that is also present in mouse genomic PrP (5′-GTG GAT ACC CCC TCC CCC AGC CTA GAC C), one sense primer specific for the transgene cDNA (PS1: 5′-CAG GTG GTG GAG CAA GAT G; huAPP: 5′-CCG AGA TCT CTG AAG TGA AGA TGG ATG; moAPP: 5′-CCT TCA GGA TTT GAA GTC CGC), and a second sense primer specific for the genomic PrP coding region, which has been removed from the MoPrP vector (5′-CCT CTT TGT GAC TAT GTG GAC TGA TGT CGG). All reactions give a 750-bp product from the endogenous PrP gene as a control for DNA integrity and successful amplification; transgene-positive samples have an additional band at 400 bp (huAPP), 350 bp (moAPP), or 1.3 kb (PS1). Animals were housed in microisolator cages with free access to food and water.
All procedures involving animals were approved by the Johns Hopkins University Institutional Animal Care and Use Committee.

Mouse and human Aβ differ in primary sequence and N-terminal processing. Three amino acid differences at residues 5, 10, and 13 distinguish the rodent and human peptides. These substitutions influence the specificity of BACE1 cleavage. Human APP expressed in transgenic mice is preferentially cleaved at residue +1, producing peptides 38-43 amino acids in length. In contrast, endogenous mouse APP is preferentially cleaved by BACE1 at +11, generating peptides of 28-33 residues. Peptides ending at amino acid 42 are shown for comparison.

Western Blotting-Mice of each genotype (NTg and 1874, n = 3-5; lines 85 and 1874 × 85, n = 10-15) were harvested at 8 months of age for assessment of amyloid pathology and APP/Aβ biochemistry. One-half of the brain was immersed in 4% paraformaldehyde and used for histology as described below. The remaining hemisphere was frozen on dry ice and prepared as a 20% homogenate that was used for Western blotting, filter trap assay, and ELISA. Frozen hemi-brain samples were sonicated in 5 volumes of 1× PBS containing 5 mM EDTA and 1× protease inhibitor mixture (Mammalian cell mix, Sigma). Homogenates were further diluted 1:1 with additional PBS/EDTA/protease inhibitor and centrifuged at high speed for 10 min, and the supernatant was used for analysis. Approximately 50 µg of protein homogenate per sample (5 µg for 22C11) was loaded onto 4-12% BisTris Novex gels (Invitrogen) and electrophoresed at 175 V for 1.5-2 h in 1× MES buffer (Invitrogen). Proteins were transferred for 1 h at 100 V to 0.45-µm Optitran nitrocellulose (Schleicher and Schuell, Keene, NH) in 1× NuPAGE transfer buffer made with 10% methanol and 1% antioxidant solution (Invitrogen). Blots were blocked in PBS containing 5% nonfat dry milk powder for 30-60 min at room temperature. After blocking, blots were incubated with primary antibody for either 3 h at room temperature or overnight at 4°C. The following primary antibodies and dilutions were used: 6E10 mouse anti-human Aβ monoclonal (Signet Laboratories, Dedham, MA), 1:2000; rabbit anti-rodent APP purified polyclonal antibody (AB5571P, Chemicon, Temecula, CA), 1:2000; 22C11 mouse anti-APP N terminus monoclonal, kind gift of Drs. Konrad Beyreuther and Andreas Weidemann, 1:2000; m/hSOD1 rabbit anti-SOD1 polyclonal, 1:4000. After incubation with primary antibody, the blots were washed several times with PBS containing 0.1% Tween 20, and then incubated with either goat anti-rabbit IgG or goat anti-mouse IgG conjugated to horseradish peroxidase (Jackson ImmunoResearch, West Grove, PA) diluted 1:2500 to 1:5000 in blocking solution. After washing several times in PBS containing 0.1% Tween 20, blots were developed with enhanced chemiluminescence reagent (ECL Plus, Amersham Biosciences/GE Biosciences) and exposed to film. Intensity of immunostaining was quantified from digitally scanned films with ImageJ by first inverting to create a negative image and then measuring the integrated density of each band. Background values calculated from a blank portion of the gel were subtracted manually from each sample before assessing the average signal intensity for the genotype.

Aβ Immunoprecipitation-50 µl of the 20% PBS homogenate described above was diluted 10-fold in radioimmunoprecipitation assay buffer (0.2% SDS, 0.5% Nonidet P-40, 0.5% deoxycholate, 5 mM EDTA, in 1× PBS) and boiled for 10 min.
After cooling, protease inhibitors were added, and the solution was incubated overnight at 4°C with 2 µl of purified 4G8 (Signet Laboratories). The antibody was recovered with protein A-agarose beads (1 h at 4°C), and nonspecific binding was removed by several washes with additional radioimmunoprecipitation assay buffer at 4°C. The beads were heated to 95°C for 5 min in 2× Tricine-SDS sample buffer, and the entire reaction was loaded onto 10-20% Tricine gels (Bio-Rad). Gels were pre-run for 10 min prior to loading, and then run in 15-min voltage steps of 25, 50, and 100 V, before running the gel to completion at 150 V. Protein was transferred to 0.45-µm Optitran nitrocellulose (Schleicher and Schuell) in 1× Tris-glycine/20% methanol/0.1% SDS, after which the blot was boiled 5 min in 1× PBS and blocked in 2% ECL Advance blocking reagent (Amersham Biosciences/GE Biosciences)/1× TBS/0.1% Tween-20. Blots were incubated overnight at room temperature with 4G8 diluted 1:10,000 in Advance block with 0.1% sodium azide. After washing several times in blocking reagent, blots were incubated for 2-3 h at room temperature with peroxidase-conjugated anti-mouse IgG diluted 1:20,000 in block. Blots were washed thoroughly with TBS/0.1% Tween-20, developed with ECL Advance (Amersham Biosciences/GE Biosciences), and exposed to film 1 h after developing with ECL. To demonstrate that 4G8 was capable of binding Aβ11-42, several reactions were run using 15 µl of 20% homogenate spiked with 10-50 ng of synthetic human Aβ11-42 (kindly provided by Dr. David Teplow, UCLA).

Aβ ELISA: Amyloid Solubility (8 Months of Age)-An aliquot of the PBS 20% homogenate generated for Western analysis described above was subjected to a three-step sequential extraction using PBS, 2% SDS, and 70% formic acid (NTg and line 1874, n = 4; lines 85 and 1874 × 85, n = 8). At each step, the sample was sonicated in the appropriate buffer and centrifuged at 100,000 × g for 30 min at 4°C. The supernatant was removed for analysis, and the pellet was sonicated in an equal volume of the next solution in sequence. The 2% SDS extracts were diluted at least 1:40 in EC buffer (0.02 M sodium phosphate buffer, pH 7.0, 2 mM EDTA, 400 mM NaCl, 0.2% bovine serum albumin, 0.05% CHAPS, 0.4% BlockAce (Dainippon Pharmaceuticals), 0.05% NaN3) prior to testing to bring the SDS concentration below 0.05%; the FA extracts were neutralized with 1 M Tris-phosphate buffer, pH 11, and then diluted with EC buffer prior to testing. Brain extracts were measured by sandwich ELISA as described previously. Human Aβ was measured in each fraction using BAN50 for capture (epitope Aβ1-16) and BA27 and BC05 for detection (Aβ40 and Aβ42, respectively). Total Aβ (mouse plus human) was measured in each fraction using BNT77 for capture (epitope Aβ11-28) and BA27 and BC05 for detection. Although BNT77 recognizes both mouse and human Aβ1-x and 11-x, it does not bind α-secretase-processed APP, and measurements with BNT77 therefore do not include p3. All values were calculated as picomoles per gram based on the initial weight of brain tissue.

Filter Trap Assay-An aliquot of 20% PBS protein homogenate from each 8-month-old animal was partially solubilized by the addition of SDS to a final concentration of 1%. Serial 1:1 dilutions were made with 1× PBS/1% SDS, and 90 µl of each dilution was then vacuum-filtered through a pre-wet 0.22-µm cellulose acetate membrane (OE66, Schleicher and Schuell, Keene, NH).
Each well was washed several times with PBS, after which blots were blocked for an hour in 1× TBS plus 5% nonfat dry milk powder. Blots were then incubated at 4°C overnight with polyclonal anti-Aβ peptide antibody (71-5800, Zymed Laboratories) diluted 1:600 in blocking solution. After washing the blots several times in 1× TBS/0.1% Tween 20, the membrane was incubated for 1 h with an IRDye 800-conjugated goat anti-rabbit IgG secondary antibody (Rockland Immunochemicals, Gilbertsville, PA) diluted 1:5000 in blocking solution. The membranes were again washed three times with 1× TBS/0.1% Tween 20, given a final rinse in 1× TBS, and then imaged with an Odyssey fluorescence imager (LI-COR, Lincoln, NE). Staining intensity for each well was quantified using Odyssey analysis software, from which the linear range of the dilution series was determined and used for all genotype comparisons.

Histology-Brains from lines 1874, 85, and 1874 × 85 were harvested for histological analysis at 4 months (n = 3-4 per genotype) and at 8 months (n = 5-14 per genotype) of age. Mice were euthanized by ether inhalation, and the brain was removed for analysis. One half was used for the biochemical analysis described above; the remaining hemisphere was used for histology. After immersion in 4% paraformaldehyde/1× PBS for 48 h at 4°C, the fixed hemi-brains were transferred to PBS, dehydrated in an alcohol series, treated with cedar wood oil and methyl salicylate, and embedded in paraffin for sectioning.

Hirano Silver Stain-Silver impregnation histology was performed on 10-µm paraffin-embedded sections by Hirano's modification of the Bielschowsky method. Briefly, sections were deparaffinized through xylene and alcohols into tap water before being placed into fresh 20% silver nitrate solution for 20 min. After washing thoroughly with distilled water, slides were immersed in 20% silver nitrate solution titrated with fresh ammonium hydroxide. After 20 min, slides were washed with ammonia water before being individually developed with 100 µl of developer (20 ml of 37% formaldehyde, 100 ml of distilled water, 50 µl of concentrated nitric acid, and 0.5 g of citric acid) added to 50 ml of titrated silver nitrate solution. Slides were then rinsed in tap water, fixed in 5% sodium thiosulfate, and dehydrated through alcohols and xylene.

FIGURE 2. Transgenic expression increases moAPP 3-fold without lowering co-expressed human APP. Western blots compare mouse and human APP expression in brain homogenates from each of the four genotypes; four animals (2 male and 2 female) from each genotype are shown. Blots show immunodetection of total full-length APP and APP-like protein 2 (top panel, 22C11); rodent-specific APP (second panel, roAPP); human-specific APP (third panel, 6E10); and SOD1 (Cu/Zn superoxide dismutase 1) as an internal control (bottom panel). The 62- and 98-kDa markers in the upper two panels migrated more slowly than expected based on the known size of APP and were consistently positioned lower on the BisTris gels run in MES buffer than in previous studies using Tris-HCl gels in Tris-glycine-SDS buffer.

Campbell-Switzer Silver Stain-A detailed protocol for this stain was kindly provided by Dr. Bob Switzer of NeuroScience Associates.

Thioflavine-S Staining-Following deparaffinization of sections through xylene and alcohols, amyloid impregnation with thioflavine-S was performed according to the Guntern modification of the standard protocol.
Slides were washed twice in distilled water, then immersed for 5 min in a 0.25% potassium permanganate solution, followed by 5 min in a 1% potassium metabisulfite/1% oxalic acid solution. After this preparation, slides were placed into a filtered aqueous 0.02% thioflavine-S solution (Chroma-Gesellschaft, Schmid GmbH and Co., Kongen, Germany) for 8 min. Excess stain was removed by two brief rinses in 80% ethanol, then two in distilled water, after which slides were finished in aqueous mounting medium for fluorescence photomicrography.

Aβ Immunohistochemistry-Prior to immunostaining, slides were deparaffinized by oven heating followed by immersion in xylene. After rehydration through graded alcohols into tap water, amyloid was partially denatured by immersing sections in 80% formic acid for 5 min, followed by rinsing in running tap water. Nonspecific staining was blocked for 1 h with 3% normal goat serum and 0.1% Triton X-100 in TBS. Slides were then placed into primary antibody (rabbit anti-human Aβ peptide polyclonal antibody 71-5800 diluted 1:500, Zymed Laboratories; 6E10 mouse anti-human Aβ monoclonal antibody diluted 1:250, Signet Laboratories; or rabbit anti-rodent APP purified polyclonal antibody AB5571P diluted 1:250, Chemicon) in blocking solution and incubated overnight at room temperature. After washing with several changes of TBS, slides were incubated either with the Vectastain Elite anti-mouse secondary system according to the manufacturer's directions (for diaminobenzidine-developed anti-Aβ immunostaining, Vector Laboratories, Burlingame, CA) or with Alexa Fluor-conjugated goat anti-rabbit (Alexa 568) and goat anti-mouse (Alexa 488) secondary antibodies diluted 1:100 in blocking solution (for double immunofluorescence staining, Molecular Probes c/o Invitrogen). Slides were again rinsed several times in TBS and either mounted immediately in fluorescence mounting medium (6E10/roAPP) or developed with diaminobenzidine (Zymed Laboratories anti-Aβ), counterstained with hematoxylin, dehydrated, and mounted.

Plaque Load Estimation-Amyloid burden was estimated using non-biased stereology or image-based threshold analysis. Three sagittal sections spaced at 200-µm intervals were analyzed for each animal. Slides were analyzed by an investigator blind to the genotype of the samples.

Non-biased Stereology-StereoInvestigator software (MBF Biosciences, Colchester, VT) was used to estimate the surface area covered by plaques stained by Aβ immunohistochemistry (Zymed Laboratories, 71-5800 anti-Aβ polyclonal) using an area fraction fractionator grid (Cavalieri spacing: 500 × 500 µm, grid spacing: 10 µm, frame size: 85 × 110 µm, 40× magnification). Percent coverage within the cortex of each animal was averaged from three sections to obtain a final estimate of plaque burden.

ImageJ-based Threshold Analysis-Sections stained for Aβ immunohistochemistry or Campbell-Switzer silver were scanned using an Epson 4990 flatbed scanner set for film at 2400 dpi. Images were brought into ImageJ and converted to 8-bit grayscale. The cortex was outlined manually using a Wacom Graphire tablet, the surrounding area was cleared, and the automatic threshold within the remaining image was determined (but not applied to the image). The percent surface area above threshold was then determined using the Analyze Particles command.
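As an aside, the percent-area-above-threshold computation described above can be sketched in a few lines of Python; scikit-image's Otsu threshold stands in for ImageJ's automatic threshold, and the file name is hypothetical.

import numpy as np
from skimage import io, filters

# Load the scanned, manually outlined cortex as grayscale (background cleared).
img = io.imread('cortex_section.png', as_gray=True)

# DAB/silver staining is dark on a light background, so "above threshold"
# here means intensity below the automatic (Otsu) cutoff.
cutoff = filters.threshold_otsu(img)
plaque_mask = img < cutoff
percent_area = 100.0 * np.count_nonzero(plaque_mask) / plaque_mask.size
print(f'plaque burden: {percent_area:.2f}% of outlined area')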
Vascular Amyloid Quantitation-Sections from 8-month-old mice that had been stained by Aβ immunohistochemistry were viewed at 40× under standard brightfield conditions. All amyloid deposits within one microscopic field of the dorsal cortical surface running from the frontal to occipital poles were examined. Blood vessels were identified by morphology with hematoxylin counterstain; all vessels positive for Aβ were manually counted by an investigator who was blind to the genotype of the sample. Only parenchymal vessels were considered; amyloid-positive vessels in the meninges at the pial surface were not counted. Three sections, spaced at 200-µm intervals, were averaged for each animal (line 85: n = 9; line 1874 × 85: n = 8).

Statistics-All data were analyzed for statistical significance by ANOVA followed with a Tukey post-test using SigmaStat analysis software (Systat Software, Port Richmond, CA).

RESULTS

APP Expression and Steady-state Aβ Levels Are Elevated Severalfold by Transgenic Expression of moAPP-Two lines of transgenic mice were used for these studies. The first line, hereafter referred to as line 85, co-expressed a chimeric mouse/human APP695 (human Aβ sequence) harboring the Swedish K594N/M595L mutation (using 695 numbering) alongside human PS1 harboring the exon-9 deletion mutation (PS1dE9). Each transgene is controlled by an independent mouse prion promoter. The two transgenes are co-integrated and segregate as a single locus, making all mice from this line transgenic for both proteins. The second line, hereafter referred to as line 1874, expressed wild-type mouse APP695 under the control of the hamster prion promoter, which produces a pattern of transgene expression similar to that of the mouse prion promoter used for line 85. Interbreeding of the two lines generated mice of four genotypes (non-transgenic (NTg), 1874 (single transgenic), 85 (double transgenic), and 1874 × 85 (triple transgenic)) that were analyzed to determine the impact of mouse Aβ on the timing and extent of human Aβ deposition. Both lines 85 and 1874 overexpressed transgenic APP at levels severalfold over the endogenous protein. Immunoblotting for the full-length protein was used to quantify transgene expression in each line and to ensure that expression of one APP transgene was not diminished by co-expression of the second in 1874 × 85 triple transgenic (mo/huAPPswe/PS1dE9/moAPPwt) offspring. Three separate antibodies were used for this analysis: 22C11, which recognizes the N terminus of mature and unprocessed APP from both mouse and human, as well as APP-like protein 2; 6E10, which recognizes the N-terminal region of human Aβ; and a rodent-specific APP antibody (roAPP), which recognizes the N-terminal region of mouse Aβ (Fig. 2). Quantitation with 22C11 and roAPP indicated that line 1874 expressed between 2.5-fold (2.46 ± 0.05; roAPP) and 4-fold (3.98 ± 0.36; 22C11) more APP than NTg. APP overexpression in line 85 appeared somewhat lower than in line 1874; blotting with 22C11 revealed 3-fold more APP than NTg (2.89 ± 0.13). Expression levels in the triple transgenic line 1874 × 85 were roughly the sum of those in each parental line (7.28 ± 0.27; 22C11). Most importantly, expression of mo/huAPPswe was unchanged by co-expression of moAPPwt in the triple transgenic offspring.

We next assessed steady-state levels of Aβ in the 1874, 85, and 1874 × 85 mice. Brain tissue was harvested from young mice prior to the formation of amyloid deposits. These analyses also included mice derived from the mating of line 1874 × line S-9, which overexpresses human PS1 encoding the exon-9 deletion. Both Aβ40 and Aβ42 levels were measured by ELISA using two different capture antibodies to distinguish human peptide from total Aβ (mouse plus human).
The human-specific capture antibody BAN50 recognizes only full-length human Aβ1-x, whereas the BNT77 capture antibody that detects both mouse and human Aβ recognizes full-length Aβ in addition to Aβ11-x (but not α-secretase products 17-x). Therefore, when referring to ELISA results with BNT77, we designate the peptides as x-40 and x-42. Consistent with the immunoblot demonstrating equal expression of mo/huAPPswe, brain tissue from line 85 and 1874 × 85 mice contained nearly identical levels of human Aβ peptide (Table 1). Based on our previous study of APP and APP/PS1 transgenic mice, these data predict an earlier onset of amyloid deposition, with a greater plaque burden at any given age, in the triple transgenic mice compared with their double transgenic siblings.

Plaque Burden Is Not Increased or Accelerated by Overproduction of Mouse Aβ-Line 1874 produces 2- to 4-fold more APP than wild-type animals, which is in the range needed to produce amyloid pathology in transgenic lines expressing the mutant human protein. Despite high levels of transgene expression, long-term study of line 1874 found no evidence of amyloid formation in mice up to 24 months of age (data not shown). Similarly, the 1874 × S-9 mice also failed to develop amyloid deposits by 2 years of age (data not shown). These findings suggest that either mouse Aβ is incapable of initiating amyloid deposits in vivo, or that too little Aβ42 is produced in these lines to aggregate within the 24-month mouse lifespan. We next addressed whether mouse Aβ could accelerate amyloid formation from human Aβ by crossing mice from line 1874 to line 85. Previous studies in line 85 indicated that amyloid formation begins at about 5-6 months of age in this line. We therefore chose two ages for analysis that bracketed this onset to test whether the extra mouse Aβ introduced in the triple transgenic 1874 (moAPPwt) × 85 (mo/huAPPswe/huPS1dE9) offspring would alter the rate of human Aβ deposition. The first set of mice was harvested at 4 months of age, with the expectation that raising the steady-state Aβ level in the triple transgenics would accelerate its aggregation into plaques. Instead, we found no sign of amyloid formation in any of the mice examined at this age, regardless of genotype or gender (data not shown). Despite carrying significantly more total Aβ, the 1874 × 85 mice formed cored-amyloid deposits no faster than their line 85 siblings. We next examined amyloid burden in 1874 × 85 offspring at a time point after deposits were known to appear in line 85 animals. We found that, by 8 months of age, amyloid deposits were apparent in brains from both the line 85 double transgenic animals and their 1874 × 85 triple transgenic siblings. Qualitatively, there was little to distinguish the genotypes: plaques were found throughout the cortex and hippocampus of most animals, with no obvious difference in the relative appearance of the deposits or the surrounding tissue (Fig. 4). The distribution of plaques appeared similar in line 85 and 1874 × 85 sections stained with silver or Aβ immunostaining, and a fraction of the deposits in both genotypes bound thioflavine-S. Non-biased stereology supported this observation. Analysis of the percent surface area covered by plaques in the cortex of Aβ-immunostained sections revealed almost complete overlap of the two genotypes (line 85 versus 1874 × 85). Overproduction of mouse Aβ did not alter the timing or the extent of amyloid formation in mice producing human Aβ.
Although we found that amyloid burden in the brains of 1874 × 85 mice did not change as a result of overproducing mouse Aβ, we wanted to know whether the relative amount of mouse Aβ co-deposited with human peptide would increase as more mouse peptide was produced. We used double immunofluorescence to label mouse (roAPP) and human (6E10) APP/Aβ in 8-month-old mice of each genotype (Fig. 6). The predominant signal in the plaques of both 85 and 1874 × 85 mice is from the human peptide. Human immunostaining is present within all plaques observed in both genotypes. Mouse APP/Aβ immunostaining is present within most plaques but covers a much more restricted and focal area within the core of the deposits. Occasional plaques staining with the human antibody can be found that do not co-label for mouse peptide; these are predominantly either diffuse or very small deposits. The most notable finding of this experiment was the similarity of parenchymal amyloid staining in the 85 and 1874 × 85 mice. Although the triple transgenic mice produced much more mouse peptide than their double transgenic siblings, there was no change in the extent of immunostaining for mouse APP/Aβ in their plaques.

Vascular Amyloid Is Increased by Overexpression of Mouse Aβ-Careful examination of Aβ-immunostained sections revealed that vascular amyloid deposits were more common in the 1874 × 85 mice than in their double transgenic siblings. Although both genotypes displayed extensive accumulation in blood vessels at the pial surface, Aβ immunostaining was rare in vessels within the cortex of line 85 animals. Small amyloid deposits were often seen in close proximity to blood vessels, but the vessel wall was usually clean. By comparison, vascular amyloid reactive with the human-specific 6E10 antibody was much more common in the triple transgenics (Fig. 7). Here, the entire circumference of some vessels stained for Aβ. However, amyloid-positive vessels still comprised only a small fraction of the total vasculature in each section. Nonetheless, the phenotype was consistent enough to allow a blinded investigator to correctly identify the genotype of tissue samples from 15 animals.

Mouse Aβ Increases the Detergent Solubility of Human Peptide Aggregates-To extend our histological data, we used filter-trap and ELISA measurements to assess the solubility and composition of accumulated Aβ in mice of the various genotypes. The filter-trap assay uses a cellulose acetate filter to trap protein aggregates larger than the pore size within the membrane for detection by immunoblotting (Fig. 8). Serial dilutions are made in 1% SDS, which is harsh enough to partially solubilize even human AD amyloid. Unexpectedly, we found that protein extracts from the 1874 × 85 mice contained substantially less trapped Aβ than extracts from line 85.

FIGURE 8. Overexpression of mouse APP increases the solubility of human Aβ aggregates. A, representative example of a filter-trap assay for aggregated Aβ in serial dilutions of brain homogenates from 8-month-old mice. Each row represents a separate mouse. Lacking aggregated peptide, NTg and 1874 homogenates showed no immunostaining for Aβ. Both lines 85 and 1874 × 85 have significant levels of aggregated Aβ; however, less peptide was retained from the 1% SDS homogenates in mice overexpressing mouse and human APP than in those overexpressing only the human protein.
B, average intensity of immunostaining within the linear range of the filter-trap dilution series for each genotype reveals a dramatic reduction of aggregated Aβ in the SDS extracts of mice overexpressing mouse and human APP (line 1874 × 85) compared with animals overexpressing only the human protein (line 85). C, Western blot of protein homogenate (lanes 1-6) and extracted filter-trap wells (lanes 7-18) probed with human-specific antibody 6E10. Consistent with the greater intensity of staining on the serial dilution filter trap shown in A, the extracted wells from line 85 mice contained more aggregated Aβ than those from 1874 × 85. D, filter-trap quantitation used to generate the genotype averages shown in C is plotted as individual data points. E, separation of genders within each genotype reveals that males carried the lowest, and females the highest, individual amyloid burdens within each group. This is consistent with previous work describing greater amyloid loads in female mice of several other APP transgenic lines. *, p < 0.005 versus line 85.

TABLE 2. ELISA measurement of human and total (mouse plus human) Aβ in brain tissue from 8-month-old mice. Values are in picomoles/g wet tissue weight (S.E.).

This outcome appeared to contradict our histological analyses showing similar amyloid burden in the brains of the two models. However, because the homogenates were prepared in 1% SDS, we thought it possible that the amyloid deposited in the 1874 × 85 mice might be more soluble in SDS than the amyloid accumulating in the line 85 mice. By contrast to the filter-trap assay, ELISA measurements of Aβ showed that the levels of total human Aβ in 8-month-old 1874 × 85 mice were similar to those of age-matched line 85 mice (Table 2). However, these ELISA experiments used a three-step sequential extraction and sedimentation protocol. Tissue homogenates were first extracted with PBS, followed by 2% SDS, and finally by 70% formic acid, revealing interesting differences in amyloid solubility at each step. As a fraction of the total human Aβ in each group, more of the accumulated Aβ was soluble in SDS in line 1874 × 85 mice than in line 85 mice, leaving less Aβ to recover in the final formic acid extraction (Table 2; ANOVA p < 0.001, Tukey post-hoc p < 0.001; FA: 62% decrease), causing a 61% overall decrease in total Aβ40 (334.7 pmol/g (line 85) versus 129.1 pmol/g (line 1874 × 85); ANOVA: F(3,20) = 16.93, p < 0.001, Tukey post-hoc p = 0.002). By contrast, the amount of human Aβ42 did not differ significantly between the two groups. In contrast to the marked decrease in Aβ40, the amount of total Aβ42 (mouse plus human) was substantially higher in the 8-month-old 1874 × 85 mice than in their double transgenic siblings (Fig. 8).

N-terminally Truncated Mouse Aβ Is Not a Major Component of Amyloid Formed in Transgenic Mice-Prior studies of the interaction between BACE1 and APP suggested that the endogenous protease preferentially cleaves rodent APP at the +11 site within Aβ (12). This cleavage generates predominantly N-terminally truncated Aβ11-x from moAPP. To determine whether the alterations in amyloid solubility and distribution in the triple transgenic mice correlated with co-deposition of mouse Aβ11-x, we immunoprecipitated Aβ from brain homogenates of both line 85 and 1874 × 85 mice using an antibody that detects both full-length and 11-x peptides (4G8). To our surprise, we found no evidence of Aβ11-40/42 in the brains of triple transgenic mice (Fig. 10).
In contrast to the intense signal from full-length Aβ1-40/42 peptides, Aβ11-40/42 was undetectable in the immunoprecipitates. Control experiments clearly demonstrated that 4G8 was capable of immunoprecipitating and detecting synthetic Aβ11-x added to homogenates prepared from line 85 mice with high amyloid burden (Fig. 10). Moreover, these immunoprecipitations were highly sensitive: as little as 10 ng of exogenous Aβ11-42 could be recovered and detected from 1 mg of mouse brain protein by this method. Thus, we conclude that mouse Aβ11-42 is not a major component of the amyloid deposited in our triple transgenic animals and that factors other than peptide length must influence the solubility and localization of human Aβ in the line 1874 × 85 animals.

FIGURE 9. ELISA analysis of aggregated Aβ in 8-month-old transgenic mice. Brain homogenates were sequentially extracted with PBS, 2% SDS, and FA before each fraction was assayed for human-specific and total (mouse plus human) Aβ40 and Aβ42. Data from this experiment are tabulated in Table 2. A, accumulation of human Aβ40 was significantly reduced in all three fractions by the expression of exogenous mouse APP. Conversely, human Aβ42 levels increased in the SDS fraction of triple transgenic mice. B, total Aβ levels (mouse plus human) mirror the differences found in human Aβ. SDS- and FA-soluble Aβ40 was significantly lower, whereas SDS-soluble Aβ42 was significantly higher, in line 1874 × 85 than in line 85. C, overexpression of mouse APP/Aβ significantly decreased the accumulation of both human and total (mouse plus human) Aβ40 summed across all three fractions. The accumulated sum of human Aβ42 is not significantly changed; however, the amount of total (mouse plus human) Aβ42 is substantially higher in the 1874 × 85 mice, suggesting that mouse Aβ42 may account for the extra peptide. D, the two genotypes harbor statistically indistinguishable amounts of human Aβ (40 plus 42) in each fraction and accumulate nearly identical amounts of total human peptide (PBS plus SDS plus FA). E, total mouse plus human Aβ (40 plus 42) ...

DISCUSSION

We set out to test the amyloidogenic potential of mouse Aβ produced in its normal environment within the mouse brain and to understand how the presence of mouse Aβ influences the deposition of human Aβ in transgenic models of Alzheimer disease. Previous studies have shown that full-length mouse Aβ aggregates as readily as the human peptide in vitro and that the two species form co-polymers that are indistinguishable from pure human fibrils. Our findings indicate a more complicated picture when these processes occur in vivo. We show that mouse Aβ on its own does not promote the formation of mature senile plaques as aggressively as the human peptide. We further show that mouse Aβ does not enhance the rate or severity of amyloid formed from human Aβ. Instead, the presence of excess mouse peptide had more subtle effects on human Aβ aggregation. The added mouse Aβ altered the detergent solubility of the human peptides and shifted the Aβ40:42 ratio of the aggregated human peptide; human Aβ40 levels in the triple transgenic 1874 × 85 mice were 50% lower than those of their age-matched line 85 siblings. Excess mouse Aβ also increased the relative burden of amyloid deposited around cortical blood vessels. Overall, we conclude that the addition of excess mouse APP/Aβ has multiple effects on the solubility, location, and composition of the amyloid deposited in mice producing human Aβ.
We found that overexpression of mouse Aβ by itself did not lead to amyloid formation in moAPP transgenic mice. The lack of deposition could be due to inadequate production of the mouse peptide in our moAPP transgenic line. Transgenic mouse APP was expressed at levels 3- to 4-fold that of endogenous APP, but we had not included mutations that could have augmented overall Aβ production. However, increasing the relative production of Aβ42 by co-expressing PS1dE9 with the mouse APP transgene also failed to produce cored-amyloid deposits. Perhaps the most telling indicator of the minimal ability of mouse Aβ to induce deposition was its lack of effect on senile plaque formation in mice overproducing both mouse and human peptides. Despite its inability to influence the rate or extent of amyloid formation in the moAPP × APPswe/PS1dE9 triple transgenic animals, ELISA measurements of total Aβ levels suggest that mouse Aβ was indeed accumulating alongside the human peptide. Although the levels of human Aβ were nearly identical, 8-month-old triple transgenic animals harbored 30% more total Aβ (mouse plus human) than their age-matched APPswe/PS1dE9 siblings. The increase in total accumulated Aβ after the onset of amyloid deposition is roughly equivalent to the steady-state overproduction of Aβ in pre-deposit triple transgenic mice. Thus, mouse Aβ accumulation in these animals is equal to its overproduction, but the accumulation occurs without changing the rate or extent of amyloid burden. Intriguingly, the extra Aβ in the triple transgenic mice is composed almost entirely of SDS-soluble material.

The presence of exogenous mouse Aβ unexpectedly influenced the solubility of human Aβ accumulated in the brains of moAPP × APPswe/PS1dE9 mice. This effect suggests a close physical interaction between the two peptides. Previous studies comparing amyloid formed in several transgenic mouse models to that from human AD patients had shown that plaques formed in mice from the human peptide were more soluble in detergents than amyloid formed in the human brain. Our data suggest that the higher solubility of amyloid formed in transgenic mice is due in large part to the presence of mouse peptide rather than to differences in post-translational modification or local microenvironment. Three amino acid substitutions at positions 5, 10, and 13 may underlie this effect of mouse Aβ. In particular, the His to Arg substitution at position 13 disrupts a metal binding site in the mouse peptide (18-20, 47), which could influence the structure of the resulting mouse-human co-aggregates.

One of the more striking outcomes of co-expressing transgenic mouse APP with the mutant human protein was a dramatic increase in the appearance of vascular amyloid. A similar shift from parenchymal to vascular Aβ deposition has also been noted in several studies that experimentally manipulated the ratio of Aβ40:42. However, depending on the mouse model studied, both elevated and reduced Aβ40:42 ratios have been associated with increased cerebral amyloid angiopathy. Experimental manipulations such as co-expressing human apoE4 with APPswe, or introducing the E693Q Dutch mutation into APP transgenic mice, both elevated the ratio of Aβ40:42 and caused redistribution of amyloid deposits from the parenchyma to the vasculature.
Conversely, several experiments designed to specifically lower the production of Aβ40 relative to Aβ42, such as co-expressing mutant PS1 with APP or expressing an Aβ42-exclusive transgene, are also reported to increase cerebral amyloid angiopathy in transgenic mice. Our current results are most consistent with the hypothesis that lowering the Aβ40:42 ratio increases the appearance of vascular amyloid. The triple transgenic animals accumulated considerably more Aβx-42 (and considerably less Aβx-40) than their double transgenic siblings and developed greater vascular pathology. Whether these two factors were causally related is unclear; it is most likely that the Aβ40:42 ratio is one of several factors that contributed to the increased amyloid angiopathy in our line 1874 × 85 mice.

Despite the significant effect of mouse Aβ on the solubility, composition, and location of human β-amyloid, the distribution of the accumulated mouse Aβ is unclear. Immunohistochemical analyses demonstrated that some mouse Aβ co-deposits with human peptide in cored senile plaques. In addition, the triple transgenic animals have considerably more amyloid in and around cortical blood vessels. The worsening of vascular amyloid is consistent with the overall increase in total Aβ levels in the triple transgenic mice. However, vascular deposits in these animals were reactive with both human-specific and rodent Aβ antibodies, and thus we cannot conclude that mouse Aβ initiates this pathology or concentrates in this location. That the majority of extra Aβ (presumably mouse Aβ) harbored in the triple transgenics is soluble in SDS could mean that this peptide accumulates as diffuse amyloid. Mouse Aβ may also be accumulating in oligomeric structures that could be difficult to detect histologically yet release substantial amounts of peptide into detergent extracts.

FIGURE 10. N-terminally truncated mouse Aβ is not a major component of amyloid aggregates. A, immunoprecipitation of Aβ from brain homogenates of 85 and 1874 × 85 mice demonstrates the abundance of full-length Aβ but fails to detect any sign of N-terminally truncated mouse Aβ11-x. Peptide was immunoprecipitated and detected with purified 4G8, which binds a mid-region epitope common to both mouse and human Aβ. 5 ng of synthetic human Aβ11-42 was run alongside the IP samples as a positive control. B, immunoprecipitation of synthetic Aβ11-42 spiked into NTg or line 85 brain homogenates provides proof that 4G8 is capable of immunoprecipitating the N-terminally truncated peptide when present. 50 ng of added peptide is shown here; however, as little as 10 ng of exogenous Aβ11-42 could be recovered by immunoprecipitation under conditions identical to those shown in panel A.

One motivation behind this study was to resolve a question that arose from our recent study of mice that express mutant APP via an inducible promoter. Induction of mutant APP expression for 6 months resulted in robust amyloid pathology that, although arrested by turning off the transgene, was not significantly diminished following long periods of suppression. One issue we could not resolve in those studies was whether mouse Aβ, which was not regulated by the transgene, could provide a source of peptide that would have maintained an equilibrium favoring amyloid stability. The experiments we present here suggest that this was likely not the case: mouse Aβ, even when present in excess, did not appear to promote senile plaque formation on its own or enhance the deposition of human Aβ in these structures.
Instead, we find that mouse Aβ would decrease the stability of aggregates formed by co-deposition of both peptides. These data make it unlikely that the presence of small amounts of endogenous mouse Aβ would have the ability to sustain amyloid in our inducible APP mice. We therefore conclude that the persistence of amyloid in our APP-inducible mice is due to the inherent stability of the human amyloid. Taken together, our studies demonstrate that mouse Aβ produced at 3- to 4-fold wild-type levels does not drive amyloid formation in vivo, nor does it accelerate the deposition of human Aβ in mice overproducing both peptides. However, mouse Aβ does accumulate with the human peptide, where it increases the appearance of vascular deposits and alters the overall solubility of the resulting amyloid. These effects appear to be mediated by full-length mouse Aβ1-40/42; Aβ11-40/42, the predominant BACE product described in vitro, does not accumulate to significant levels in vivo. That the solubility and location of human Aβ aggregates can be influenced by the presence of mouse peptide suggests that a better understanding of the effects of peptide sequence and species-specific processing may provide new insight into disease mechanisms.
One of the Senate's most liberal members seemed open to passing a scaled-back healthcare bill on Wednesday, shrugging off many liberals' concerns.
Sen. Bernie Sanders (I-Vt.) signaled that a less comprehensive bill might be worth passing if it's the only option for passing health reform.
"I think that if what we end up passing may not be as comprehensive as some would like, so what?" Sanders said during an appearance on MSNBC.
"We all know about Massachusetts," Sanders said, in reference to the GOP's special election victory in the traditionally Democratic state last week, which gave Republicans enough votes to sustain a filibuster in the Senate.
That victory forced Democrats to scramble to evaluate their options. Some have advocated a so-called "sidecar" approach that would see the House pass the Senate bill, and then craft a separate bill with fixes, which would pass through reconciliation in the Senate.
That strategy was put in doubt, though, when a flurry of centrist Democratic senators rejected using the budget reconciliation process, which bypasses the 60-vote filibuster rule. |
package hillbillies.common.internal.ui.viewmodel;
import java.util.Set;
import hillbillies.common.internal.map.IByteMap3D;
import hillbillies.common.internal.ui.sprites.AbstractSprite;
import javafx.beans.property.IntegerProperty;
import javafx.beans.property.ReadOnlyDoubleProperty;
public interface IViewModel {
void updateAllInformation();
void update();
/** Max z level (in tile coordinates) */
int getMaxZLevel();
/** Current z level (in tile coordinates) */
IntegerProperty currentZLevelProperty();
/** Current z level (in tile coordinates) */
default int getCurrentZLevel() {
return currentZLevelProperty().get();
}
default void levelUp() {
adjustLevel(+1);
}
default void levelDown() {
adjustLevel(-1);
}
void adjustLevel(int dz);
int worldPointToWorldCube(double worldX);
IByteMap3D getMap();
ReadOnlyDoubleProperty viewWidthProperty();
ReadOnlyDoubleProperty viewHeightProperty();
int getPixelsPerTile();
IntegerProperty xTileOffsetProperty();
IntegerProperty yTileOffsetProperty();
void moveOrigin(double dx, double dy);
int getNbVisibleTilesX();
int getNbVisibleTilesY();
int getLowestVisibleZ(int visibleX, int visibleY);
Set<AbstractSprite<?, ?>> getVisibleSprites();
int screenToVisibleTileX(double x);
int screenToVisibleTileY(double y);
default double visibleTileToScreenX(int visibleX) {
return visibleX * getPixelsPerTile();
}
default double visibleTileToScreenY(int visibleY) {
return visibleY * getPixelsPerTile();
}
double screenToWorldX(double x);
double screenToWorldY(double y);
double screenToWorldZ(double x, double y);
@FunctionalInterface
interface NewSpriteListener {
void newSprite(AbstractSprite<?, ?> sprite);
}
void addNewSpriteListener(NewSpriteListener listener);
@FunctionalInterface
interface VisibleTileRefreshListener {
void refreshVisibleTile(int visibleX, int visibleY, int visibleZ);
}
void addVisibleTileRefreshListener(VisibleTileRefreshListener listener);
}
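// ---------------------------------------------------------------------------
// Illustrative sketch only: one way a renderer might consume IViewModel.
// SimpleRenderer and its wiring are assumptions for illustration, not part of
// the Hillbillies codebase.
// ---------------------------------------------------------------------------
final class SimpleRenderer {
    private final IViewModel viewModel;

    SimpleRenderer(IViewModel viewModel) {
        this.viewModel = viewModel;
        // Redraw a tile whenever the view model reports that it changed.
        viewModel.addVisibleTileRefreshListener(this::drawTile);
    }

    /** Translate a mouse click into tile coordinates on the current z level. */
    void onClick(double screenX, double screenY) {
        int tileX = viewModel.screenToVisibleTileX(screenX);
        int tileY = viewModel.screenToVisibleTileY(screenY);
        drawTile(tileX, tileY, viewModel.getCurrentZLevel());
    }

    private void drawTile(int visibleX, int visibleY, int visibleZ) {
        double px = viewModel.visibleTileToScreenX(visibleX);
        double py = viewModel.visibleTileToScreenY(visibleY);
        // ... paint a getPixelsPerTile()-sized square at (px, py) ...
    }
}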
|
/**
* Contains IPC utility methods and constants
* <p>
* The constants in this class are categorised into KEY and VALUE groups.
* <p>
* @author Chathura Sarathchandra
*/
public final class IPCUtil {
/**
* The address of the com.interdigital.force.virtdevservice.VirtDevBR
*/
public final static String VIRTDEV_SERVICE_BR_ADDR = "com.interdigital" +
".force.virtdevservice/com.interdigital.force.virtdevservice.VirtDevBR";
/**
* <p>
* These IPC types should be set for each function based on priority. The
* data structure of available IPCs (int[]) for each function is sorted
* based on priority.
*/
public final static int FORCE_ANDROID_IPC_TYPE_LBROADCAST = 1;
public final static int FORCE_ANDROID_IPC_TYPE_AIDL = 2;
public final static int FORCE_ANDROID_IPC_TYPE_MESSENGER = 3;
public final static int FORCE_ANDROID_IPC_TYPE_BINDER = 4;
public final static int FORCE_ANDROID_IPC_TYPE_LBHEAP = 5;
public static final int FORCE_ANDROID_IPC_TYPE_HEAP = 6;
/**
* Name of request method
*/
public final static String METHOD_EXTRA_NAME = "method";
/**
* Values of METHOD_EXTRA_NAME
*/
public final static String IPC_POST = "http.ipc.post";
/******************************************
* Data INTENT - Request/Response
*****************************************/
public final static String IPC_GET = "http.ipc.get";
public final static String IPC_PUT = "http.ipc.put";
public final static String IPC_DELETE = "http.ipc.delete";
public final static String IPC_PATCH = "http.ipc.patch";
public static final String IPC_HEAD = "http.ipc.head";
public static final String IPC_OPTIONS = "http.ipc.options";
public static final String IPC_TRACE = "http.ipc.trace";
public final static String IPC_GET_REPLY = "http.ipc.get_reply";
public final static String HTTP_SERVICE = "com.force.react.toolbox" +
".LocalBroadcastStack";
/**
* Name of request ID intent extra - random string created at runtime.
*/
public final static String REQ_ID_EXTRA_NAME = "reqid";
/**
* Name of the complete URL, of the request
*/
public final static String FUNCURL_EXTRA_NAME = "urlComplete";
/**
* Name of POST request body
*/
public final static String POSTBODY_EXTRA_NAME = "post_body";
/**
* Name of GET request body
*/
public final static String GET_REPLYBODY_EXTRA_NAME = "get_replybody";
/******************************************
* Data INTENT - Request/Response
*****************************************/
/******************************************
* Data INTENT - Request
*****************************************/
/**
* The random request ID assigned for each virtdev requests.
*/
public static final String REQ_ID_MSG_QUERY_SERVICE = "com.force" +
".service.msgattrib.query_reqid";
/******************************************
* Data INTENT - Response
*****************************************/
public static final String REQ_ID_MSG_REGISTER_SERVICE = "com.force" +
".service.msgattrib.register_service_reqid";
/******************************************
* Control INTENT - Request
*****************************************/
public static final String REQ_ID_MSG_UNREGISTER_SERVICE = "com.force" +
".service.msgattrib.unregister_service_reqid";
/******************************************
* Data INTENT - Request
*****************************************/
/**
* The bundle key for function address, used when querying for
* services.
*/
public static final String FUNCTION_ADDRESS_MSG_QUERY_SERVICE = "com.force" +
".service.msgattrib.func.query_function_address";
/******************************************
* Data INTENT - Response
*****************************************/
/**
* The bundle key for function address, used when registering services
*/
public static final String FUNCTION_ADDRESS_MSG_REGISTER_SERVICE = "com" +
".force" +
".service.msgattrib.func.register_service_address";
/**
* The bundle key for function address, used when unregistering services
*/
public static final String FUNCTION_ADDRESS_MSG_UNREGISTER_SERVICE = "com" +
".force" +
".service.msgattrib.func.unregister_service_address";
/**
* The key for the data structure with supported IPCs of the requested
* function.
*/
public static final String IPC_AVAILABILITY_MSG_REGISTER_SERVICE = "com" +
".force" +
".service.msgattrib.func.register_service_availableipcs";
/**
* Command for a service to register to receive callbacks from
* another service. The message's replyTo field must be a messenger of the
* client where callbacks should be sent.
*/
public static final int MSG_REGISTER_SERVICE = 1;
/**
* Command to unregister a service, to stop receiving
* callbacks from the service. The message's replyTo field must be a
* Messenger of the client as previously given with MSG_REGISTER_SERVICE.
*/
public static final int MSG_UNREGISTER_SERVICE = 2;
/**
* Command to query for services. This message can be sent by any service.
*/
public static final int MSG_QUERY_SERVICE = 3;
/**
* The package name of the FORCE local service. This is used for binding
* to the service
*/
public static final String FORCE_SERVICE_PACKAGE_NAME = "com.interdigital" +
".force" +
".virtdevservice";
/**
* The fully qualified class name of the virtdev service class for sending
* the explicit intent to.
*/
public static final String FORCE_SERVICE_FQCN =
FORCE_SERVICE_PACKAGE_NAME + ".VirtDevService";
/**
* The key for the data structure with supported IPCs of the requested
* function.
*/
public static final String IPC_AVAILABILITY_MSG_QUERY_SERVICE = "com.force" +
".service.msgattrib.func.query_availableipcs";
/**
* Received if the query does not return any services.
*/
public static final int MSG_NO_REGISTERED_SERVICE = 0;
/**
* The name of the virtdev service. Currently only needed for providing
* as input into {@link #isServiceRunning(String, Context)}
*/
public static final String FORCE_SERVICE_NAME = "VirtDevService";
private static final String TAG = IPCUtil.class.getName();
/**
* Log: number of requests
*/
static int nRequests = 0;
/******************************************
* Control INTENT - Request
*****************************************/
/******************************************
* Control INTENT - Response
*****************************************/
// This class cannot be instantiated.
private IPCUtil() {
}
/***************************************************************************
* INTENT
*
* Converted LocalBroadcast IPC packet format
*
* Request Response
* +------------------+ +---------------------------+
* | Function Address | | Function Address -> reqId |
* +------------------+ +---------------------------+
* | reqid | | method |
* +------------------+ +---------------------------+
* | method | | post_body |
* +------------------+ +---------------------------+
* | complete_url |
* +------------------+
* | post_body |
* +------------------+
*
**************************************************************************/
//testing
/**
* Checks if an activity/service is running.
*
* @param serviceClass the class to look up.
* @param queryingContext the context to search from
* @return true if running, otherwise false.
*/
public static boolean isServiceRunning(Class<?> serviceClass, Context
queryingContext) {
try {
ActivityManager manager = (ActivityManager) queryingContext.getSystemService(Context.ACTIVITY_SERVICE);
for (ActivityManager.RunningServiceInfo service : manager.getRunningServices(Integer.MAX_VALUE)) {
if (serviceClass.getName().equals(service.service.getClassName()))
return true;
}
} catch (Exception ex) {
Log.e(queryingContext.getClass().getName(), "Error checking service " +
        "status: " + ex);
}
return false;
}
//testing
/**
* Checks if an activity/service is running.
*
* @param serviceClassName name of the class to look up.
* @param queryingContext the context to search from
* @return true if running, otherwise false.
*/
public static boolean isServiceRunning(String serviceClassName, Context
queryingContext) {
try {
ActivityManager manager = (ActivityManager) queryingContext.getSystemService(Context.ACTIVITY_SERVICE);
for (ActivityManager.RunningServiceInfo service : manager.getRunningServices(Integer.MAX_VALUE)) {
// Log.i("IPCUtil", "comparing " + serviceClassName + " with"
// + service.service.getClassName());
if (serviceClassName.equals(service.service.getClassName()))
return true;
}
} catch (Exception ex) {
Log.e(queryingContext.getClass().getName(), "Error checking service " +
        "status: " + ex);
}
return false;
}
/**
* Get the instance of LocalBroadcastManager of the local application.
*
* @param TAG The debug TAG of the application class.
* @return LocalBroadcastManager - instance of the local application.
*/
public static LocalBroadcastManager getLocalBroadcastManager(String TAG) {
LocalBroadcastManager broadcastManager = null;
try {
broadcastManager = LocalBroadcastManager.getInstance((Application)
Class.forName
("android.app.AppGlobals")
.getMethod("getInitialApplication")
.invoke(null, (Object[]) null));
} catch (IllegalAccessException e) {
Log.e(TAG, e.getMessage());
e.printStackTrace();
} catch (InvocationTargetException e) {
Log.e(TAG, e.getMessage());
e.printStackTrace();
} catch (NoSuchMethodException e) {
Log.e(TAG, e.getMessage());
e.printStackTrace();
} catch (ClassNotFoundException e) {
Log.e(TAG, e.getMessage());
e.printStackTrace();
}
return broadcastManager;
}
/**
* Get the context of the application that the library imported into.
*
* @return Context - the context of the local application.
*/
public static Context getLocalContext() {
Context context = null;
try {
context = (Application) Class.forName
("android.app.AppGlobals")
.getMethod("getInitialApplication")
.invoke(null, (Object[]) null);
} catch (IllegalAccessException e) {
e.printStackTrace();
} catch (InvocationTargetException e) {
e.printStackTrace();
} catch (NoSuchMethodException e) {
e.printStackTrace();
} catch (ClassNotFoundException e) {
e.printStackTrace();
}
return context;
}
/**
* Chooses 130 bits from a cryptographically secure random
* bit generator and encodes them in base-32.
* <p>
* Complies with:
* RFC 1750: Randomness Recommendations for Security.
* FIPS 140-2, Security Requirements for Cryptographic Modules, section
* 4.9.1 tests
*/
public static String generateReqID() {
return new BigInteger(130, new SecureRandom()).toString(32);
}
/***
* Maps the domain names of web service functions to the domain names of
* IPC functions (reversed internet domain names).
*
* @param domain Domain name to be reversed
* @return Reversed domain name
*/
public static String reverseDomain(final String domain) {
final List<String> components = Arrays.asList(domain.split("\\."));
Collections.reverse(components);
return TextUtils.join(".", components);
}
/**
* Extracts the domain name out from a URL
*
* @param url Complete URL
* @return FQDN extracted from the provided URL
* @throws URISyntaxException
*/
public static String getDomainName(String url) throws URISyntaxException {
URI uri = new URI(url);
String domain = uri.getHost();
return domain.startsWith("www.") ? domain.substring(4) : domain;
}
/**
* TODO: check if this is a redundant method
* Reply to LocalBroadcastMessage at the Function
*
* @param broadcastManager the corresponding instance of
* LocalBroadcastManager
* @param reqID the reqid field of the request - copied
* @param messageBody the message body, if any
*/
public static void replyLBroadcastMessage(LocalBroadcastManager
broadcastManager, String reqID,
byte[] messageBody) {
//Uses the request ID as the action, instead of the destination address.
Intent iVidIntent = new Intent(reqID);
//iVidIntent.putExtra(IPCUtil.REQ_ID_EXTRA_NAME, reqID);
iVidIntent.putExtra(IPCUtil.METHOD_EXTRA_NAME,
IPCUtil.IPC_GET_REPLY);
iVidIntent.putExtra(IPCUtil.GET_REPLYBODY_EXTRA_NAME,
messageBody);
broadcastManager.sendBroadcast(iVidIntent);
}
/**
* Broadcast Http request as a LocalBroadcast message.
*
* @param localBroadcastManager the corresponding instance of
* LocalBroadcastManager
* @param functionName The name of the function
* @param ipcUrl The address of the function
* @param method Message method
* @param messageBody Message body
* @param reqID Randomly generated request ID.
*/
public static void broadcastRequest(LocalBroadcastManager
localBroadcastManager,
String functionName, String ipcUrl,
String method,
byte[] messageBody,
String reqID) {
Intent iVidIntent = new Intent(IPCUtil.reverseDomain(functionName));
iVidIntent.putExtra(IPCUtil.REQ_ID_EXTRA_NAME, reqID);
iVidIntent.putExtra(IPCUtil.METHOD_EXTRA_NAME, method);
iVidIntent.putExtra(IPCUtil.FUNCURL_EXTRA_NAME, ipcUrl);
iVidIntent.putExtra(IPCUtil.POSTBODY_EXTRA_NAME, messageBody);
//TODO: use OrderedBroadcast here to limit the delivery to one receiver.
localBroadcastManager.sendBroadcast(iVidIntent);
}
/***************************************************************************
* INTENT
**************************************************************************/
/**
* Convert byte lengths/counts/sizes into a human-readable format
*
* @param bytes byte number
* @param si true if SI unit, false if binary units
* @return formatted human-readable count as a {@link String}
*/
public static String humanReadableByteCount(long bytes, boolean si) {
int unit = si ? 1000 : 1024;
if (bytes < unit) return bytes + " B";
int exp = (int) (Math.log(bytes) / Math.log(unit));
String pre = (si ? "kMGTPE" : "KMGTPE").charAt(exp - 1) + (si ? "" : "i");
return String.format("%.1f %sB", bytes / Math.pow(unit, exp), pre);
}
/**
* Send virtDev message to a function
*
* @param reqID Request ID.
* @param functionName Name of the function
* @param request {@link Request} object
* @param ipcmode IPC request method, e.g., IPCUtil.IPC_GET
* @param localBroadcastManager {@link LocalBroadcastManager} of the
* application.
* @param properties {@link IPCUtil.Properties} object for
* storing the response and keeping track
* of the progress.
*/
@RequiresApi(api = Build.VERSION_CODES.HONEYCOMB)
public static void sendMessage(String reqID, String functionName, Request<?>
request, String ipcmode, LocalBroadcastManager
localBroadcastManager, IPCUtil.Properties properties) throws AuthFailureError {
HelperVirtDevService.registerBReceiver(reqID,
localBroadcastManager, properties);
IPCUtil.broadcastRequest(localBroadcastManager,
functionName, request.getUrl(),
ipcmode, request.getPostBody(), reqID);
Log.i(TAG, "Request timeout " + request.getTimeoutMs());
try {
waitForResponse(request
.getTimeoutMs(), properties);
} catch (ExecutionException e) {
Log.e(TAG, e.toString());
e.printStackTrace();
} catch (InterruptedException e) {
Log.e(TAG, e.toString());
e.printStackTrace();
}
// log the current request number and the amount of memory blocks
// created
// log(new SimpleDateFormat("dd/MM/yy-HH:mm:ss").format(new Date
// (System.currentTimeMillis())) + ", " + nRequests++ + ", " +
// HeapFactory.getInstance().getNBlocks());
}
/**
* Log
*
* @param text text to be logged
*/
public static void log(String text) {
File log = new File("sdcard/log.txt");
// Log.i(TAG, "Log: File exists == " + log.exists());
try {
if (!log.exists()) log.createNewFile();
BufferedWriter writer = new BufferedWriter(new FileWriter(log, true));
writer.append(text);
writer.newLine();
writer.close();
} catch (IOException e) {
Log.e(TAG, e.toString());
e.printStackTrace();
}
}
/**
* Log
*
* @param text text to be logged
*/
public static void log(String text, File log) {
// File log = new File("sdcard/log.txt");
// Log.i(TAG, "Log: File exists == " + log.exists());
try {
if (!log.exists()) log.createNewFile();
BufferedWriter writer = new BufferedWriter(new FileWriter(log, true));
writer.append(text);
writer.newLine();
writer.close();
} catch (IOException e) {
Log.e(TAG, e.toString());
e.printStackTrace();
}
}
/**
* Waits until sendMessage response is received
*
* @param waitForResponse timeout
*/
@RequiresApi(api = Build.VERSION_CODES.HONEYCOMB)
public static void waitForResponse(final long waitForResponse,
final IPCUtil.Properties properties)
throws
ExecutionException, InterruptedException {
new AsyncTask<Void, Void, Void>() {
@Override
protected Void doInBackground(Void... voids) {
                // Busy-wait for the response until the timeout elapses.
                // TODO: this spin-loop blocks an AsyncTask pool thread; a
                // latch or callback would be preferable.
                long cTime = System.currentTimeMillis();
                long endTime = cTime + waitForResponse;
                Log.i(TAG, "Waiting for response");
                while (!properties.isRequestResponseReceived() && System
                        .currentTimeMillis() <
                        endTime) {
                    // spin until a response arrives or the deadline passes
                }
// the following throws errors in case of null
// responses
Log.i(TAG, "Timed out Waiting for response = " +
!properties.isRequestResponseReceived());
return null;
}
}.executeOnExecutor(AsyncTask.THREAD_POOL_EXECUTOR).get();
}
/**
* Used for storing related objects, and keeping states of one
* virtDevService connectivity.
*/
public static final class Properties {
/**
* Generic
*/
public volatile boolean binderSet = false;
private IBinder serviceBinder = null;
private ServiceConnection serviceConnection = null;
private Messenger replyToMessenger = null;
/**
* Function registration
*/
private boolean registered = false;
private boolean registeredLocally = false;
private boolean registeredRemotely = true;
/**
* Requests
*/
private byte[] requestResponse = null;
private boolean requestResponseReceived = false;
/**
* Lookup
*/
private int[] lookupResponse = null;
/**
* Setting this to true if a local function exists. Defaulting to
* offloading, and setting to false when the function is available
* locally.
*/
private boolean bypassFuncLocal = true;
/**
* The class object of the local function is set, if it exists locally.
*/
private VolleyIPCFunction functionInterface = null;
/**
* Values cannot be assigned at instantiation time.
*/
public Properties() {
}
/**
* Get stored {@link ServiceConnection} object.
*/
public ServiceConnection getServiceConnection() {
return serviceConnection;
}
/**
* Store a {@link ServiceConnection} object
*/
public void setServiceConnection(ServiceConnection connection) {
this.serviceConnection = connection;
}
/**
* Get stored {@link IBinder} object
*/
public IBinder getServiceBinder() {
return serviceBinder;
}
/**
* Store an {@link IBinder} object
*/
public void setServiceBinder(IBinder binder) {
this.serviceBinder = binder;
binderSet = true;
}
/**
* Get stored replyTo {@link Messenger} object
*/
public Messenger getReplyToMessenger() {
return replyToMessenger;
}
/**
* Store replyTo {@link Messenger} object
*/
public void setReplyToMessenger(Messenger replyToMessenger) {
this.replyToMessenger = replyToMessenger;
}
/**
* Get stored IPC message response
*
* @return response as a byte[]
*/
public byte[] getRequestResponse() {
return requestResponse;
}
/**
* Store an IPC response of type byte[]
*/
public void setRequestResponse(byte[] requestResponse) {
this.requestResponse = requestResponse;
}
/**
* For checking if a response has been received from a function over IPC.
*
* @return true, if a response has been received, false otherwise.
*/
public boolean isRequestResponseReceived() {
return requestResponseReceived;
}
/**
* Flag set to indicate whether an IPC response has been received.
*
* @param responseReceived true, if response received, false otherwise.
*/
public void setRequestResponseReceived(boolean responseReceived) {
this.requestResponseReceived = responseReceived;
}
/**
* Generic method for checking whether the function is registered with
* the local catalogue, the remote catalogue, or both.
*/
public boolean isRegistered() {
return registered;
}
/**
* Check if the function is registered with the local catalogue.
*/
public boolean isRegisteredLocally() {
return registeredLocally;
}
/**
* Set if registered locally
*
* @param registeredLocally
*/
public void setRegisteredLocally(boolean registeredLocally) {
if (registeredLocally) registered = true;
else if (!registeredLocally && !registeredRemotely) registered =
false;
this.registeredLocally = registeredLocally;
}
/**
* Check if the function is registered with the remote catalogue.
*/
public boolean isRegisteredRemotely() {
return registeredRemotely;
}
/**
* Set if registered remotely
*
* @param registeredRemotely
*/
public void setRegisteredRemotely(boolean registeredRemotely) {
if (registeredRemotely) registered = true;
else if (!registeredRemotely && !registeredLocally) registered =
false;
this.registeredRemotely = registeredRemotely;
}
/**
* Get stored lookup response
*/
public int[] getLookupResponse() {
return lookupResponse;
}
/**
* Store received lookup response
*
* @param lookupResponse
*/
public void setLookupResponse(int[] lookupResponse) {
this.lookupResponse = lookupResponse;
}
/**
* Indicates whether to bypass redirecting to locally residing functions
* over IPC and offload the request to the network instead.
*
* @return true, if offloading, false otherwise.
*/
public boolean isBypassFuncLocal() {
return bypassFuncLocal;
}
/**
* Set flag to indicate whether to bypass redirecting to locally residing
* functions over IPC and offload the request to the network instead.
*
* @param funcLocal true, if offloading, false otherwise.
*/
public void setBypassFuncLocal(boolean funcLocal) {
this.bypassFuncLocal = funcLocal;
}
public void setFunctionInterface(VolleyIPCFunction functionClass) {
this.functionInterface = functionClass;
}
public VolleyIPCFunction getFunctionInterface() {
return functionInterface;
}
}
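    /**
     * Illustrative sketch only: one plausible fire-and-forget round trip
     * using the helpers above. The function name and URL are hypothetical;
     * the real response arrives asynchronously via {@link Properties}.
     */
    public static String exampleBroadcast(LocalBroadcastManager manager,
                                          byte[] body) {
        String reqID = generateReqID();
        broadcastRequest(manager, "example.function.name",
                "http://example.function.name/resource", IPC_GET, body, reqID);
        // A receiver registered for the reqID action would later answer with
        // replyLBroadcastMessage(manager, reqID, responseBytes);
        return reqID;
    }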
} |
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import org.junit.jupiter.api.Test;
class BeforeAfterPrimesTest {
@Test
void sample() {
assertArrayEquals(new long[]{89, 101}, BeforeAfterPrimes.primeBefAft(97));
assertArrayEquals(new long[]{97, 101}, BeforeAfterPrimes.primeBefAft(100));
assertArrayEquals(new long[]{97, 103}, BeforeAfterPrimes.primeBefAft(101));
assertArrayEquals(new long[]{113, 127}, BeforeAfterPrimes.primeBefAft(120));
assertArrayEquals(new long[]{127, 131}, BeforeAfterPrimes.primeBefAft(130));
}
}
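// ---------------------------------------------------------------------------
// A minimal sketch of the class under test, assuming the contract implied by
// the assertions above (nearest prime strictly below and strictly above n,
// for n >= 3). The actual kata solution may differ.
// ---------------------------------------------------------------------------
class BeforeAfterPrimes {
    static long[] primeBefAft(long n) {
        long before = n - 1;
        while (!isPrime(before)) {
            before--;
        }
        long after = n + 1;
        while (!isPrime(after)) {
            after++;
        }
        return new long[]{before, after};
    }

    private static boolean isPrime(long n) {
        if (n < 2) return false;
        for (long d = 2; d * d <= n; d++) {
            if (n % d == 0) return false;
        }
        return true;
    }
}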
|
/**
* REST controller for managing accounts.
*/
@RestController
@RequestMapping("/api/account/")
public class AccountRestController {
private final Logger log = LoggerFactory.getLogger(AccountRestController.class);
private final AccountProcessor accountProcessor;
@Autowired
public AccountRestController(AccountProcessor accountProcessor) {
this.accountProcessor = accountProcessor;
}
@GetMapping(path = "/", produces = MediaType.APPLICATION_JSON_VALUE)
public Flux<AccountDTO> getLoggedinUserAccountInfo() {
log.info("Get account info for logged in user");
Account account = accountProcessor.loadLoggedInUserAccountInfo();
return Flux.just(toAccountDTO(account));
}
@Secured("ROLE_ADMIN")
@GetMapping(path = "/{accountId}", produces = MediaType.APPLICATION_JSON_VALUE)
public Flux<AccountDTO> getAccountInfo(@PathVariable long accountId) {
log.info("Get account info for {}", accountId);
Account account = accountProcessor.loadAccountInfo(accountId);
return Flux.just(toAccountDTO(account));
}
private AccountDTO toAccountDTO(Account account) {
return AccountDTO.builder()
.accountId(account.getAccountId())
.balanceInUSD(account.getBalanceInUSD())
.build();
}
@Secured("ROLE_ADMIN")
@GetMapping(path = "/{accountId}/deposit/{amountInUSD}", produces = MediaType.APPLICATION_JSON_VALUE)
public BigDecimal doDeposit(@PathVariable long accountId, @PathVariable BigDecimal amountInUSD) {
return accountProcessor.deposit(accountId, amountInUSD);
}
@Secured("ROLE_ADMIN")
@GetMapping(path = "/{accountId}/withdrawal/{amountInUSD}", produces = MediaType.APPLICATION_JSON_VALUE)
public BigDecimal doWithdrawal(@PathVariable long accountId, @PathVariable BigDecimal amountInUSD) {
return accountProcessor.withdrawal(accountId, amountInUSD);
}
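    // Note: deposit and withdrawal mutate state but are exposed via GET here;
    // a @PostMapping would be more idiomatic REST. Hypothetical usage (host,
    // port, and token are assumptions):
    //   curl -H "Authorization: Bearer <token>" \
    //        http://localhost:8080/api/account/42/deposit/100.00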
} |
package com.yanwu.spring.cloud.gateway.config;
import lombok.Getter;
import java.util.HashSet;
import java.util.Set;
/**
* @author <a href="mailto:<EMAIL>">XuBaofeng</a>
* @date 2020/5/8 15:55.
* <p>
* description:
*/
public class AuthConfig {
/*** Endpoint whitelist */
@Getter
private static Set<String> passOperations;
static {
/* ----------------------------- endpoints callable without login -----------------------------*/
passOperations = new HashSet<>();
passOperations.add("/base/webapp/login/login");
}
}
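// ---------------------------------------------------------------------------
// Hypothetical sketch of how a gateway auth filter might consult the
// whitelist; the AuthCheck class is an assumption for illustration. Lombok's
// @Getter on the static field generates the static getPassOperations().
// ---------------------------------------------------------------------------
class AuthCheck {
    /** @return true if the path may be invoked without a login token. */
    static boolean isWhitelisted(String path) {
        return AuthConfig.getPassOperations().contains(path);
    }
}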
|
import { put, takeLatest } from 'redux-saga/effects';
import * as slice from '../slice';
import { occupancySaga, getGuests } from '../saga';
import { MockResponse } from '../mock/api';
import data from '../mock/guests.json';
import { GuestsErrorType } from '../types';
describe('getGuests Saga', () => {
const response: MockResponse = {
data,
};
let getGuestsIterator: ReturnType<typeof getGuests>;
beforeEach(() => {
getGuestsIterator = getGuests();
const selectDescriptor = getGuestsIterator.next(response).value;
expect(selectDescriptor).toMatchSnapshot();
});
it('should dispatch the guestsLoaded action if it requests the data successfully', () => {
const putDescriptor = getGuestsIterator.next(response).value;
expect(putDescriptor).toEqual(
put(slice.actions.guestsLoaded(response.data)),
);
});
it('should dispatch the response error', () => {
const requestDescriptor = getGuestsIterator.next(response).value;
expect(requestDescriptor).toMatchSnapshot();
const putDescriptor = getGuestsIterator.throw(new Error('some error'))
.value;
expect(putDescriptor).toEqual(
put(slice.actions.guestsError(GuestsErrorType.RESPONSE_ERROR)),
);
});
});
describe('occupancySaga Saga', () => {
const guestsIterator = occupancySaga();
it('should start task to watch for loadGuests action', () => {
const takeLatestDescriptor = guestsIterator.next().value;
expect(takeLatestDescriptor).toEqual(
takeLatest(slice.actions.loadGuests.type, getGuests),
);
});
});
|
// Code by Desh Iyer
class DetectStringArgException extends Exception {
public DetectStringArgException(String message) {
super(message);
}
} |
/** Process mouse-pressed events to assist mouse motion in zooming */
public void mousePressed(MouseEvent e) {
if (datasetGraphics == null) return; // 'this' can never be null here
this.requestFocus();
zoomStart.x = e.getX();
zoomStart.y = e.getY();
zoomEnd.x = e.getX();
zoomEnd.y = e.getY();
// XOR mode lets the rubber-band zoom rectangle be erased by redrawing it.
datasetGraphics.setXORMode(this.getBackground());
datasetGraphics.drawRect(zoomStart.x, zoomStart.y, 0, 0);
datasetGraphics.setPaintMode();
} |
Finite key analysis in quantum cryptography

In view of experimental realization of quantum key distribution schemes, the study of their efficiency becomes as important as the proof of their security. The latter is the subject of most of the theoretical work about quantum key distribution, and many important results such as the proof of unconditional security have been obtained. The efficiency and also the robustness of quantum key distribution protocols against noise can be measured by figures of merit such as the secret key rate (the fraction of input signals that make it into the key) and the threshold quantum bit error rate (the maximal error rate such that one can still create a secret key). It is important to determine these quantities because they tell us whether a certain quantum key distribution scheme can be used at all in a given situation and, if so, how many secret key bits it can generate in a given time. However, these figures of merit are usually derived under the "infinite key limit" assumption, that is, one assumes that an infinite number of quantum states are sent and that all sub-protocols of the scheme (in particular privacy amplification) are carried out on these infinitely large blocks. Such an assumption usually eases the analysis, but also leads to (potentially) too optimistic values for the quantities in question. In this thesis, we explicitly avoid the infinite key limit for the analysis of the privacy amplification step, which plays the most important role in a quantum key distribution scheme. We still assume that an optimal error correction code is applied, and we do not take into account any statistical errors that might occur in the parameter estimation step. Renner and coworkers derived an explicit formula for the obtainable key rate in terms of Rényi entropies of the quantum states describing Alice's, Bob's, and Eve's systems. This result serves as a starting point for our analysis, and we derive an algorithm that efficiently computes the obtainable key rate for any finite number of input signals, without making any approximations. As an application, we investigate the so-called "Tomographic Protocol", which is based on the Six-State Protocol and where Alice and Bob can obtain the additional information of which quantum state they share after the distribution step of the protocol. We calculate the obtainable secret key rate under the assumption that the eavesdropper only conducts collective attacks and give a detailed analysis of the dependence of the key rate on various parameters: the number of input signals (the block size), the error rate in the sifted key (the QBER), and the security parameter. Furthermore, we study the influence of multi-photon events which naturally occur in a realistic implementation.
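For orientation, a commonly quoted finite-key bound of the Renner type has the following shape; the exact smoothing and correction terms vary between references, so this is a sketch rather than the thesis's precise statement:

$$
\ell \;\lesssim\; H_{\min}^{\varepsilon}\!\left(X^{n} \mid E\right) \;-\; \mathrm{leak}_{\mathrm{EC}} \;-\; 2\log_{2}\!\frac{1}{\varepsilon_{\mathrm{PA}}},
$$

where $\ell$ is the extractable secret key length, $H_{\min}^{\varepsilon}$ is the smooth min-entropy of Alice's raw key $X^{n}$ conditioned on Eve's system $E$, $\mathrm{leak}_{\mathrm{EC}}$ is the information leaked during error correction, and $\varepsilon_{\mathrm{PA}}$ is the privacy-amplification failure probability; the finite-key rate is then $r = \ell/n$. |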
/**
* Helper class for Junit report.
*
* @since 3.1
*/
class Statuses {
/**
* Number of errors.
*/
private int errors;
/**
* Number of failures.
*/
private int failures;
/**
* Number of skipped tests.
*/
private int skipped;
/**
* Number of tests.
*/
private int tests;
/**
* Time.
*/
private double time;
/**
* Default constructor.
*/
public Statuses() {
super();
}
/**
* @return the errors
*/
public int getErrors() {
return errors;
}
/**
* @param errors the errors to set
*/
public void setErrors(int errors) {
this.errors = errors;
}
/**
* @return the failures
*/
public int getFailures() {
return failures;
}
/**
* @param failures the failures to set
*/
public void setFailures(int failures) {
this.failures = failures;
}
/**
* @return the skipped
*/
public int getSkipped() {
return skipped;
}
/**
* @param skipped the skipped to set
*/
public void setSkipped(int skipped) {
this.skipped = skipped;
}
/**
* @return the tests
*/
public int getTests() {
return tests;
}
/**
* @param tests the tests to set
*/
public void setTests(int tests) {
this.tests = tests;
}
/**
* @return the time
*/
public double getTime() {
return time;
}
/**
* @param time the time to set
*/
public void setTime(double time) {
this.time = time;
}
} |
from sqlalchemy import Column, Integer, Numeric

from Db import SESSION, Base
class DevUser(Base):
__tablename__ = "dev_user"
id = Column(Integer, primary_key=True, autoincrement=True)
user_id = Column(Numeric)
def __init__(self, id, user_id):
self.user_id = user_id
self.id = id
DevUser.__table__.create(checkfirst=True)
def getDevUser(from_id):
    try:
        return SESSION.query(DevUser).filter(DevUser.user_id == from_id).one()
    except Exception:
        return None
    finally:
        SESSION.close()
def getDevsUsers():
try:
return SESSION.query(DevUser).all()
    except Exception:
return None
finally:
SESSION.close()
def addDevUser(user_id):
addUser = SESSION.query(DevUser).filter(DevUser.user_id == user_id).first()
if addUser:
raise Exception("The user already added as Dev.")
else:
addUser = DevUser(None, user_id)
SESSION.add(addUser)
SESSION.commit()
def remDevUser(user_id):
reUser = SESSION.query(DevUser).filter(DevUser.user_id == user_id).first()
if reUser:
SESSION.delete(reUser)
SESSION.commit()
else:
raise Exception("No user to remove.")
|
A millionaire and art collector from New Mexico named Forrest Fenn has created quite the game to get people off the couch and out in the world all while building human connections.
And it involves gold, lots of gold.
Fenn, 84, has hidden an estimated $2 million worth of treasure in a chest in an unknown location somewhere in the Rocky Mountains.
Fenn revealed in some interviews that the hidden treasure contains “mostly American eagles and double eagles, hundreds of gold nuggets, some as large as chicken eggs, ancient Chinese carved jade figures, pre-Columbian gold animal artifacts, lots of rubies, emeralds, sapphires, and diamonds and other things.”
The former fighter pilot came up with the idea of a treasure hunt when he was diagnosed with terminal kidney cancer in 1988.
In order to find the hidden loot, buried 5 years ago in a secret location, one must read Fenn’s book The Thrill Of The Chase.
The book contains a poem with exactly nine clues to help you get started. It reads,
“As I have gone alone in there
And with my treasures bold,
I can keep my secret where,
And hint of riches new and old.

Begin it where warm waters halt
And take it in the canyon down,
Not far, but too far to walk.
Put in below the home of Brown.

From there it’s no place for the meek,
The end is ever drawing nigh;
There’ll be no paddle up your creek,
Just heavy loads and water high.

If you’ve been wise and found the blaze,
Look quickly down, your quest to cease,
But tarry scant with marvel gaze,
Just take the chest and go in peace.

So why is it that I must go
And leave my trove for all to seek?
The answers I already know,
I’ve done it tired, and now I’m weak.

So hear me all and listen good,
Your effort will be worth the cold.
If you are brave and in the wood
I give you title to the gold.”
The rest? It’s up to you.
Fenn once said that some people have come as close as 200 feet to the treasure, but no one has found it yet. Well, it’s not entirely impossible for you to go even closer, so start hunting.
|
When you first start blogging, it can be easy to get caught up in the newness of it. That’s when mistakes are likely to happen. Here are 10 things to avoid as a new blogger.
1) Your posts are too hard to understand.
Even the most sophisticated topics can be broken down and simplified. Your goal as a blogger is to work toward effortless reading.
Each sentence should flow consecutively with no confusion or interruption. Blogging is not a chance to show off your high vocabulary skills — save that for Scrabble!
If you must use unfamiliar terms, try using pictures. We’re visual creatures. Sometimes we have to see photos, charts, graphs, etc. to understand concepts.
If you’re not graphic-savvy, check out “14 Design Tools We Love for Blogging.” My personal favorite: Canva.
2) You don’t spend enough time editing.
Not taking the time to edit is the most obvious, but overlooked mistake a blogger can make. Grammar mistakes and typos often contribute to the lack of fluidity within blog posts.
Don’t let laziness ruin your content. Spell check and reread your post several times. If it helps, have someone else read it before you press that publish button. Poor grammar and editing hinder your credibility as a writer.
3) Your posts are too hard to read visually.
Sometimes you can write the most amazing content, but your poor design choices turn readers off.
4) Your images need help.
Try to always use high-resolution images. Not only are they easy on the eye, but they provide a sense of professionalism. The more credible your blog looks, the more readers will trust reading your blog. On top of image quality, make sure you are properly resizing your images. Do not distort them.
Need multimedia for your blog post? Learn how to use PR Newswire for Journalists’ multimedia gallery to find and download thousands of photos and video.
5) You’re using auto-start music players.
Nothing is worse than stumbling upon a website, only to hear annoying music once the page loads. What was once popular during the Myspace age has gone extinct.
In addition, music players can make your page lag, dissuading readers from continuing onto your site. Get rid of that auto-start music player, and if necessary embed links from sites like SoundCloud or YouTube that allow readers to play the media at their leisure.
6) Your blog is too egocentric.
Yes, your blog should show your personality and share your experiences – but only if it will benefit the reader. Your blog is not a reality TV show. The harsh reality is that unless you’re either a high-profile celebrity, unbelievably entertaining, or someone willing to publicize their whole life with no inhibition, no one is going to care about your personal life but your close friends. Use your experiences only to enhance your message.
The majority of your readers do not want to know anything about you unless it benefits them in some sort of way. Remember your blog should be more about your reader! Try acknowledging other bloggers as well. Take the time to cater to your readers here and there by hosting giveaways, conducting surveys and interviews, thanking them, commenting back, etc.
7) There’s too much quantity, not enough quality.
Don’t oversaturate your readers with pointless blog posts, especially for SEO purposes. Sure you’re on a schedule, but the worst thing you can do to a reader is waste their time reading mush. Plain and simple.
8) You’re not paying attention to your readership.
Know who you’re writing for and understand your audience may even change. Always pay attention to who is reading your blog, who’s commenting, etc. For example, you started your blog with a general audience in mind, but may realize more readers are being attracted to specific topics. You can then perhaps focus on those topics more.
9) Your headlines are boring.
Headlines are the first thing readers see across social media platforms. Make it count! Headlines equal first impressions of your content. Draw your readers in by catching their interest. Use creative titles, but at the same time make it clear what you’re offering.
Struggle with headline writing? Here are 9 things not to do.
10) Don’t expect people to visit.
So your first few blog posts were a hit – great! Now what? A common mistake new bloggers make is expecting their readers to be regular visitors.
The second half of blogging entails promoting your blog. Readers all over the internet and social media are lazy. They hate putting in the effort to visit websites – your content has to stumble upon them instead.
Publish your blog across different social media platforms – Twitter, Facebook, Pinterest, etc. Without findability, most people won’t know you’ve written a new post.
Spread the word about your blog wherever you can, but at the same time try not to over promote it in an annoying way. It’s also great to promote your blog within your targeted networks via groups on social media, forums, blogging events, etc.
Learn more about strategies and tools for promoting your blog in part 2 of our #BlogTips webinar series.
Register at http://bit.ly/1xO8JJw and live tweet with us that day using #blogtips.
|
Casual workers paying for 'junk' superannuation life insurance they don't know they have
There is a great insurance rip-off in Australians' superannuation funds that most people do not even know is happening, and it discriminates against casual workers.
Key points:
Most workers have life and disability insurance through their super fund
Casual workers have to pass a tough "activities of daily living test" to be paid out for disability
One experienced lawyer in the sector said he has never seen a casual meet the test
Around a quarter of the Australian workforce is casual, and 40 per cent of those casuals are under the age of 25. Most casual workers are found in reasonably low-skilled occupations.
According to research by Federal Parliament, 41 per cent of casual workers are either labourers or sales people. Only 13 per cent are employed as managers and professionals.
All casuals earning more than $450 a week from a single employer are in a superannuation fund.
Most are in default "MySuper" funds, which offer life insurance on an 'opt out' basis.
Many would not even realise they have insurance, which they are paying for out of their super fund, and those who do would probably think they are protected if they have an accident at work.
They could be very wrong if the people the ABC has been speaking to are any guide.
Banks profit from blanket denial of casuals' claims
Little more than a week after the bosses of the big banks told a parliamentary inquiry they were cleaning up their act, there is more evidence of ordinary Australians being ripped off in the name of higher profits.
The superannuation industry, of which the banks are a major part, is generating billions of dollars from disability insurance on which casual workers have virtually no hope of making a successful claim.
Sydney based Eva Thorley is an example.
"I worked from 7am till 3:30 in the afternoon, overtime was included if there was any. You know I worked for this company for like seven years. As a casual but working permanent hours," explained Ms Thorley.
She worked full-time as a furniture removalist until a 60-kilogram safe fell on her foot.
Ms Thorley suffered a severe injury and when she tried to claim on her total and permanent disability insurance policy with CommInsure, it was denied because she was employed as a casual.
"I can't help that I was casual. The company. That was their policy. They didn't want permanent staff. They only wanted casuals," she said.
Meanwhile, family circumstances forced Launceston truck driver Darren Woodward to switch from permanent to casual employment.
On his first day as a casual, doing the same job with the same company the following week, he fell off a truck and will never work again.
His total and permanent disability claim was also denied, by MLC.
"I thought it was a bit of a joke actually," recalled Mr Woodward.
"I thought someone was taking the mickey out of me. Because when you're insured I thought you were insured."
The cases of Darren Woodward and Eva Thorley are far from unique according to solicitor Carl Mickels from Firths Lawyers in Sydney.
He said it is discrimination against people employed as casuals who pay the same premiums and have the same risks as permanent staff.
"I find it appalling that this could occur when the person hasn't changed his occupational status. All they've done is change their hours from full-time to casual," he said in reference to Darren Woodward's case.
'Junk insurance' for casuals
What it means is you can have two people sitting side by side, doing the same job and working the same hours.
They both slip on the same banana skin and suffer the same career ending injuries.
But because one person is permanent and the other is casual, they are treated very differently by their insurer.
"There's no rhyme or reason why you would treat casuals differently from permanents," said Mr Mickels.
The way insurance companies discriminate against casuals is to apply a much tougher claims threshold.
Permanent employees get measured against their ability to perform either their own occupation, or any other job they may be qualified for.
Casuals are measured against what is known as the "activities of daily living test".
Unless casuals need help doing the most basics of life - things such as going to the toilet, washing, or getting out of bed - the claim is denied.
A worker has to be virtually a quadriplegic to pass.
"In my experience, I can't recall ever satisfying them over probably 10,000 clients I have dealt with," explained Mr Mickels, who has been working in this area for nearly two decades.
"It's an impossibly high bar. It essentially makes the insurance junk insurance. Nobody can collect it."
But the insurers are collecting, about $6 billion a year from policies offered automatically with superannuation funds.
"It's totally unethical and it denies any duty of care on the part of the insurance company to provide a person with the insurance that the person they're providing the insurance to thinks they have," said Professor Thomas Clarke, who is the head of the Centre for Corporate Governance at the University of Technology in Sydney.
There are 28 million superannuation accounts in Australia.
Around 15 million, or 53 per cent, are default funds, known as MySuper, which are chosen for workers by their employers.
My Super funds offer life insurance on an 'opt out' basis - and the vast majority of people do not opt out.
"It was done through the company I was working for. They organised it all. Me and the other workers that I worked with had nothing to do with it," said Ava Thorley about her situation.
According to the corporate regulator, ASIC, 16 per cent of total and permanent disability insurance claims are knocked back - the highest rejection rate of any form of life insurance.
One company, which ASIC will not name, rejected 37 per cent of claims - or more than one-in-three.
Danger for growing casual workforce
The legislation covering insurance in super says trustees of super funds must act in the "best interests" of the beneficiaries of that insurance.
But a 2014 letter from Queensland based LGIA Super, defending a claim from a casual worker it rejected, seems to sum up the industry's attitude.
It said, "the trustees are not obligated to provide insured benefits to all members on the same basis".
"The legislation specifically allows offering insured benefits of different kinds and levels," warned Mr Mickels.
"You need to be aware that you cannot trust your financial livelihood through your super if the trustees don't try to do the right thing by you."
That raises the issue of the genuine independence of some super fund trustees.
Eva Thorley's insurance was with CommInsure. Her super was with Colonial First State. Both are owned by Commonwealth Bank.
Darren Woodward's insurance was with the National Australia Bank-owned MLC and his super is an MLC fund.
"These conglomerate structures with separate divisions tend to pursue a corporate interest ultimately as their ultimate logic and the clients' interests are forgotten," said Professor Clarke.
Which, for Professor Clarke, is another danger signal for millions of people in a workforce which is now nearly 25 per cent casual.
"Here we have two individuals who have not had their insurance paid out for what are very unethical reasons," he said.
"But the worry would be that as casualization continues and expands to a larger and larger section of the population, not only are they losing their employment rights, they'll be losing their insurance status too."
It is cold comfort for the many casuals at the younger and most vulnerable end of the workforce and for older people too, including truck driver, Darren Woodward, who has learned the hard way that his total and permanent disability insurance was useless.
"You think you have every base covered in case the worst happens, but when it does happen you find out that the people you put your future in - all they want to do is knife you in the back and pull the rug out from underneath you."
|
Lipid rescue for bupivacaine toxicity during cardiovascular procedures

Introduction

Bupivacaine toxicity is a recognized complication of procedures done under local anesthetic infiltration. While local anesthetic toxicity is rare, it is potentially catastrophic and life-threatening.1 A 20% lipid emulsion has been used to resuscitate patients after bupivacaine overdose or inadvertent intravascular injection.2-7 While the use of lipid emulsion for local anesthetic toxicity has been reported extensively in the anesthesia literature,8 it has not yet been reported in the cardiology literature. We report a case of local anesthetic toxicity resulting in pulseless electrical activity during an electrophysiology procedure that was successfully treated by infusion of 20% lipid emulsion.

Case Report

A 28-year-old male from El Salvador (1.58 m, 55.8 kg) with no significant past medical history presented to a community hospital with a 3-month history of worsening shortness of breath, dyspnea on exertion, fatigue, nausea, and vomiting. The patient denied chest pain, fevers, chills, recent febrile illness, sick contacts, or palpitations. The patient was found to be in sinus rhythm with complete heart block (third-degree AV block) and a junctional escape rhythm of 30 bpm. He was then transferred to our institution, where he underwent emergent placement of a right internal jugular transvenous pacing wire for temporary pacing. Further evaluation showed the patient to have a nonischemic cardiomyopathy, with an echocardiogram demonstrating a left ventricular ejection fraction of 12% with severe global hypokinesis. The patient was scheduled for a biventricular implantable cardioverter defibrillator (ICD) implant. In the electrophysiology suite, the patient had standard monitors placed and oxygen was delivered at 3 L/min via nasal cannula. Intravenous cefazolin 1 g was given as prophylaxis, with fentanyl 12.5 μg and midazolam 2 mg administered intravenously. During this period the patient was comfortable, awake, and able to communicate. During the procedure, the patient received a total of 50 cc of local anesthetic injection composed of a mixture of 2% lidocaine and 0.5% bupivacaine to the left subpectoral region to facilitate left axillary venous access and construction of a subpectoral pocket for the device. The heart rate and blood pressure remained stable during injection of the local anesthetic. Soon after obtaining axillary venous access, the patient complained of dizziness but with no changes in blood pressure, heart rate, or pulse oximetry. Several minutes later he exhibited generalized seizure activity with severe tonic-clonic activity. Initially, midazolam 1 mg IV and then lorazepam 2 mg IV was used to treat the seizure while a bag valve mask was used to support ventilation.
The patient remained hemodynamically stable during this episode. Urgent anesthesiology consultation was requested and the patient was intubated without complication. Subsequently, the patient was noted to have pulseless electrical activity and cardiopulmonary resuscitation was immediately started. The advanced cardiac life support protocol was initiated, during which the patient received a total of 4 mg epinephrine, 40 U of vasopressin, 4 mg atropine, a normal saline bolus, and 200 meq sodium bicarbonate. An echocardiogram demonstrated no pericardial effusion. Bupivacaine toxicity was suspected as the cause of the cardiac arrest and the patient was given 2 units of 20% lipid emulsion. During the second dose of 20% lipid emulsion infusion, the patient regained his pulse and became hemodynamically stable. The pocket was closed after rinsing with antibiotics. A CT scan of the brain and a neurology consultation were unrevealing. Several hours after intubation and cardiac arrest the patient was successfully extubated. Approximately one week later, the patient had successful placement of a biventricular ICD without complication. No bupivacaine was used. Several days later the patient was discharged home.

Discussion

The anesthesiology community has had extensive experience with local anesthetics as well as their complications.1,8,9 As a result, the use of lipid emulsion to rescue patients with local anesthetic toxicity is well known to anesthesiologists but perhaps less so to cardiologists.2,3,5 The proposed mechanism of Intralipid's action is "lipid sink" binding: the lipids "bind" the lipophilic bupivacaine and reduce tissue content. The first reported use of lipid therapy in patients was in 2006, with the successful rescue of patients undergoing regional blocks who failed to respond to conventional cardiopulmonary resuscitation after showing signs of local anesthetic toxicity. The patients were successfully resuscitated after treatment with lipid emulsion.10,11 Weinberg et al. demonstrated that intravenous lipid emulsion therapy increases resistance to, and enhances resuscitation of, rats and dogs exposed to local anesthetic overdoses. Importantly, while lipids have been shown to be helpful in treating toxicity from lipophilic local anesthetics such as bupivacaine, levobupivacaine, ropivacaine, and mepivacaine, they do not treat toxicity from the more hydrophilic local anesthetics such as lidocaine. While no serum levels were drawn during the resuscitation in this case, we are confident the events were secondary to local anesthetic toxicity due to the timing, the clinical presentation consistent with local anesthetic toxicity (dizziness, hypotension, seizure activity, and arrhythmia),8 and the immediate resuscitation following administration of 20% lipid emulsion. It is well documented that bupivacaine toxicity first manifests as central nervous system disorders (tinnitus, a metallic taste in the mouth, dizziness, seizures). Cardiovascular signs follow the neurologic signs and include bradycardia, dysrhythmias and, in severe cases, asystole.8 This patient received 50 cc of a combination of 2% lidocaine and 0.5% bupivacaine (in a 4:3 ratio), yielding a total dose of 570 mg of lidocaine and 107 mg of bupivacaine. In this size patient, that yields a dose of 10 mg/kg of lidocaine plus 1.9 mg/kg of bupivacaine. The maximum local anesthetic dose for lidocaine is 4.5 mg/kg and the maximum dose for bupivacaine is 2.5 mg/kg.
Therefore, this patient received a toxic dose of lidocaine plus a dose of bupivacaine approaching the upper limit. In summary, this case represents the first reported case of local anesthetic cardiotoxicity successfully reversed with lipid emulsion in the electrophysiology laboratory, a site that frequently uses local anesthetics. Bupivacaine toxicity and the use of lipid emulsion as rescue therapy should be considered in all cases in which symptoms consistent with local anesthetic toxicity occur. Furthermore, all clinical sites where local anesthetics are routinely used should have 20% lipid emulsion readily available, and personnel should be educated regarding this medication. Published evidence to date suggests that lipid rescue for presumed local anesthetic toxicity should be considered during prolonged cardiopulmonary resuscitation when there is suspicion of local anesthetic toxicity. |
/*
* Copyright 2002-2022 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.security.web.context;
import java.util.function.Supplier;
import jakarta.servlet.http.HttpServletRequest;
import jakarta.servlet.http.HttpServletResponse;
import org.springframework.security.core.context.DeferredSecurityContext;
import org.springframework.security.core.context.SecurityContext;
import org.springframework.security.core.context.SecurityContextHolder;
import org.springframework.security.core.context.SecurityContextHolderStrategy;
import org.springframework.util.Assert;
/**
* Stores the {@link SecurityContext} on a
* {@link jakarta.servlet.ServletRequest#setAttribute(String, Object)} so that it can be
* restored when different dispatch types occur. It will not be available on subsequent
* requests.
*
 * Unlike {@link HttpSessionSecurityContextRepository} this repository has no need to persist
* the {@link SecurityContext} on the response being committed because the
* {@link SecurityContext} will not be available for subsequent requests for
* {@link RequestAttributeSecurityContextRepository}.
*
* @author Rob Winch
* @since 5.7
*/
public final class RequestAttributeSecurityContextRepository implements SecurityContextRepository {
/**
* The default request attribute name to use.
*/
public static final String DEFAULT_REQUEST_ATTR_NAME = RequestAttributeSecurityContextRepository.class.getName()
.concat(".SPRING_SECURITY_CONTEXT");
private final String requestAttributeName;
private SecurityContextHolderStrategy securityContextHolderStrategy = SecurityContextHolder
.getContextHolderStrategy();
/**
* Creates a new instance using {@link #DEFAULT_REQUEST_ATTR_NAME}.
*/
public RequestAttributeSecurityContextRepository() {
this(DEFAULT_REQUEST_ATTR_NAME);
}
/**
* Creates a new instance with the specified request attribute name.
* @param requestAttributeName the request attribute name to set to the
* {@link SecurityContext}.
*/
public RequestAttributeSecurityContextRepository(String requestAttributeName) {
this.requestAttributeName = requestAttributeName;
}
@Override
public boolean containsContext(HttpServletRequest request) {
return getContext(request) != null;
}
@Override
public SecurityContext loadContext(HttpRequestResponseHolder requestResponseHolder) {
return loadDeferredContext(requestResponseHolder.getRequest()).get();
}
@Override
public DeferredSecurityContext loadDeferredContext(HttpServletRequest request) {
Supplier<SecurityContext> supplier = () -> getContext(request);
return new SupplierDeferredSecurityContext(supplier, this.securityContextHolderStrategy);
}
private SecurityContext getContext(HttpServletRequest request) {
return (SecurityContext) request.getAttribute(this.requestAttributeName);
}
@Override
public void saveContext(SecurityContext context, HttpServletRequest request, HttpServletResponse response) {
request.setAttribute(this.requestAttributeName, context);
}
/**
* Sets the {@link SecurityContextHolderStrategy} to use. The default action is to use
* the {@link SecurityContextHolderStrategy} stored in {@link SecurityContextHolder}.
* @since 5.8
*/
public void setSecurityContextHolderStrategy(SecurityContextHolderStrategy securityContextHolderStrategy) {
Assert.notNull(securityContextHolderStrategy, "securityContextHolderStrategy cannot be null");
this.securityContextHolderStrategy = securityContextHolderStrategy;
}
}
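For readers wiring this into a filter chain, the following is a minimal, hypothetical configuration sketch. Combining this repository with {@code HttpSessionSecurityContextRepository} via {@code DelegatingSecurityContextRepository} is a common pattern but an illustrative assumption here, not something mandated by this class.

import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.security.config.annotation.web.builders.HttpSecurity;
import org.springframework.security.web.SecurityFilterChain;
import org.springframework.security.web.context.DelegatingSecurityContextRepository;
import org.springframework.security.web.context.HttpSessionSecurityContextRepository;
import org.springframework.security.web.context.RequestAttributeSecurityContextRepository;
import org.springframework.security.web.context.SecurityContextRepository;

@Configuration
public class SecurityConfig {

	@Bean
	SecurityFilterChain securityFilterChain(HttpSecurity http) throws Exception {
		// Keep the context across dispatch types within a single request (request
		// attribute) and across requests (HTTP session); the delegating wrapper
		// consults both repositories.
		SecurityContextRepository repository = new DelegatingSecurityContextRepository(
				new RequestAttributeSecurityContextRepository(),
				new HttpSessionSecurityContextRepository());
		http.securityContext((securityContext) -> securityContext
				.securityContextRepository(repository));
		return http.build();
	}

}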
|
// File: src/test/java/seedu/address/model/person/predicates/DepartmentContainsKeywordsPredicateTest.java
package seedu.address.model.person.predicates;
package seedu.address.model.person.predicates;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import org.junit.jupiter.api.Test;
import seedu.address.testutil.builders.PersonBuilder;
public class DepartmentContainsKeywordsPredicateTest {
@Test
public void equals() {
List<String> firstPredicateKeywordList = Collections.singletonList("first");
List<String> secondPredicateKeywordList = Arrays.asList("first", "second");
DepartmentContainsKeywordsPredicate firstPredicate =
new DepartmentContainsKeywordsPredicate(firstPredicateKeywordList);
DepartmentContainsKeywordsPredicate secondPredicate =
new DepartmentContainsKeywordsPredicate(secondPredicateKeywordList);
// same object -> returns true
assertEquals(firstPredicate, firstPredicate);
// same values -> returns true
DepartmentContainsKeywordsPredicate firstPredicateCopy =
new DepartmentContainsKeywordsPredicate(firstPredicateKeywordList);
assertEquals(firstPredicateCopy, firstPredicate);
// different types -> returns false
assertNotEquals(firstPredicate, 1);
// null -> returns false
assertNotEquals(firstPredicate, null);
// different person -> returns false
assertNotEquals(secondPredicate, firstPredicate);
}
@Test
public void test_departmentContainsKeywords_returnsTrue() {
// One keyword
DepartmentContainsKeywordsPredicate predicate =
new DepartmentContainsKeywordsPredicate(Collections.singletonList("Computing"));
assertTrue(predicate.test(new PersonBuilder().withDepartment("School of Computing").build()));
// One sub-word
predicate = new DepartmentContainsKeywordsPredicate(Collections.singletonList("Com"));
assertTrue(predicate.test(new PersonBuilder().withDepartment("Computing").build()));
// Multiple keywords
predicate = new DepartmentContainsKeywordsPredicate(Arrays.asList("Computer", "Science"));
assertTrue(predicate.test(new PersonBuilder().withDepartment("Computer Science").build()));
// Multiple sub-words
predicate = new DepartmentContainsKeywordsPredicate(Arrays.asList("Com", "Sci"));
assertTrue(predicate.test(new PersonBuilder().withDepartment("Computer Science").build()));
// Mixed-case keywords
predicate = new DepartmentContainsKeywordsPredicate(Arrays.asList("cOmPuTer", "SCiEnCe"));
assertTrue(predicate.test(new PersonBuilder().withDepartment("Computer Science").build()));
}
@Test
public void test_departmentDoesNotContainKeywords_returnsFalse() {
// Zero keywords
DepartmentContainsKeywordsPredicate predicate =
new DepartmentContainsKeywordsPredicate(Collections.emptyList());
assertFalse(predicate.test(new PersonBuilder().withDepartment("Computing").build()));
// Only one matching keyword
predicate = new DepartmentContainsKeywordsPredicate(Arrays.asList("Science", "Engineering"));
assertFalse(predicate.test(new PersonBuilder().withDepartment("Computer Engineering").build()));
// Non-matching keyword
predicate = new DepartmentContainsKeywordsPredicate(Collections.singletonList("Engineering"));
assertFalse(predicate.test(new PersonBuilder().withDepartment("Computing Science").build()));
        // Keywords match name, phone, email, and office, but do not match department
predicate = new DepartmentContainsKeywordsPredicate(
Arrays.asList("12345", "<EMAIL>", "Alice", "Science", "E4-03-01"));
assertFalse(predicate.test(new PersonBuilder().withName("Alice")
.withPhone("12345").withEmail("<EMAIL>")
.withDepartment("Engineering").withOffice("E4-03-01").build()));
}
}
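The predicate under test is not shown in this file; the following is a plausible reconstruction consistent with what the tests assert: AND semantics over keywords, case-insensitive substring matching, and an empty keyword list matching nothing. The `Person` accessor chain and the `Predicate<Person>` shape are assumptions inferred from the test usage.

import java.util.List;
import java.util.function.Predicate;

import seedu.address.model.person.Person;

public class DepartmentContainsKeywordsPredicate implements Predicate<Person> {
    private final List<String> keywords;

    public DepartmentContainsKeywordsPredicate(List<String> keywords) {
        this.keywords = keywords;
    }

    @Override
    public boolean test(Person person) {
        // Assumed accessor: the tests build persons via withDepartment(...)
        String department = person.getDepartment().toString().toLowerCase();
        // Every keyword must appear as a case-insensitive substring;
        // an empty keyword list matches nothing.
        return !keywords.isEmpty()
                && keywords.stream().allMatch(k -> department.contains(k.toLowerCase()));
    }

    @Override
    public boolean equals(Object other) {
        return other == this
                || (other instanceof DepartmentContainsKeywordsPredicate
                        && keywords.equals(((DepartmentContainsKeywordsPredicate) other).keywords));
    }
}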
|
/**
 * Unfixes the specified changelists from the specified jobs, running
 * asynchronously as a P4 runnable.
 *
 * @param lists the changelists whose fix records should be removed
 * @param jobs the jobs to unfix the changelists from
 */
public void unfix(final IP4Changelist[] lists, final IP4Job[] jobs) {
IP4Runnable runnable = new P4Runnable() {
@Override
public void run(IProgressMonitor monitor) {
for (IP4Changelist list : lists) {
createCollection(jobs).unfix(list);
}
}
};
runRunnable(runnable);
} |
package com.mcash.repository;
import org.springframework.data.repository.CrudRepository;
import com.mcash.model.Customer;
// Renamed from CustomerDaoImpl: an interface should not carry an "Impl" suffix.
public interface CustomerRepository extends CrudRepository<Customer, Long> {
}
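As a brief illustration of what this interface provides, here is a hedged usage sketch that relies only on methods inherited from Spring Data's CrudRepository; the service class and the Customer construction details are illustrative assumptions.

import org.springframework.stereotype.Service;

import com.mcash.model.Customer;
import com.mcash.repository.CustomerRepository;

@Service
public class CustomerService {

    private final CustomerRepository repository;

    public CustomerService(CustomerRepository repository) {
        this.repository = repository;
    }

    public Customer register(Customer customer) {
        // save, findById, count and deleteById are all inherited from CrudRepository
        return repository.save(customer);
    }

    public long customerCount() {
        return repository.count();
    }
}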
|
The PBL Method and the Performance of Teachers at the Pitágoras Medicine Faculty of Eunápolis-BA: A Case Study

The present study is a qualitative research of an ethnographic nature, carried out in the field of education, to raise comparative aspects through a case study. The scenario of this research unfolded alongside the challenges and transformations that the professors of the Faculdade Pitágoras de Medicina de Eunápolis are facing with the innovative methodology adopted by the institution, which started its activities in September 2018. In 2001, the Ministry of Education and Culture (MEC) established that Brazilian medical schools would adopt, as the National Curriculum Guidelines (DCN) guiding their educational matrices, the formation of a graduate with a profile suited to the needs of the country's health system. These new directions brought great changes in the objectives and in the form of education for the new faculties recently implanted in several Brazilian states. To develop new skills and competences, the courses began using active teaching-learning methodologies focused on biopsychosocial aspects, employing areas of competence in the dimensions of management, education and health care. The research does not pretend to be a static or final point; it only registers a moment of difficulty in the concretization and institutionalization of this process.

The forging of a scientific discourse took many years from the 16th century onward, and, around the third decade after the discovery of Brazil, the first medical practitioners began to arrive. The art of healing was carried out with few therapeutic resources, brought from the Iberian Peninsula to a tropical country. Local diseases were treated by shamans, who took advantage of the local flora to cure illness. In the isolation of the new lands, there was only shamanism and the work of the Jesuits to deal with tropical diseases that were still little known. Years, decades and centuries passed, and the evolution of treatment was slow even in Europe, at least in facing the pathologies that devastated the populations of the time.

The relationship of health with institutional policy began with the creation of the Liga Pró-Saneamento in 1918, a national project aimed at valuing rural people. Driven by sentiment after the Spanish flu epidemic, the health defense movement carried out expeditions in favor of an agricultural Brazil, the main economic activity at the time. The appeal to treat rural endemic diseases denounced the conditions in which agrarian populations lived, facing diseases such as yellow fever, Chagas disease, malaria and cholera, and sought to contribute to improving the economy and the health of rural workers. Politically, it was not so easy to resolve the logic of centralization of power in the State, as there was still no clear model for dividing financial resources between the most productive and the poorest states. Wealthier states, like São Paulo, differed from the poorest states of the federation in the amount of funds raised. Poor states did not have the structure and installed capacity to combat their daily problems, and in 1923 São Paulo stood out with the creation of its own department, the National Department of Public Health (DNSP). It was an autonomous body, but it allowed federal intervention in cases of epidemic outbreaks, if needed.
According to Fonseca, in 1930 the Ministry of Education and Public Health (Mesp) was born, the first centralizing and institutionalizing body created to think about policies at the national level. Only in 1937, under the dictatorial regime, was the National Health Department (DNS) created, which aimed to implement norms and standardize national activities with a view to decentralizing their execution at the state level. The health policy of the period had its basic guidelines in public health and hospital services. In the 1930s, Brazil created several ministries, such as labor, and organized legislation through numerous decrees, such as the creation of retirement and pension funds. Political development around the guarantee of health rights began with ever greater inclusion of society. The evolution of the Ministry of Health (MS) and of education occurred in response to social and political demands and transformations, and for many years execution remained at the state level. At the beginning of this century, the federal government's strategy was to open more health facilities and increase the coverage of health care for the population through primary health care.

II. METHODOLOGICAL ROUTE

Qualitative research has a characteristic that allows the author to participate in the phenomena studied, seeking to capture, through the literature survey and the interviews carried out, the interpretation of the research reality. The intention was to understand, by comparing the methods, how teachers with experience in the traditional method are adapting to the PBL in view of the results. At the end of the scientific search, the selection of the sample and the interviews, I analyzed the data that gave the study its meaning. When the course started on September 18, 2018, the group of educators was composed of professionals with different backgrounds. The selection followed the criterion of at least two years of prior experience. Of the doctors, four were selected. Of the group of biologists, only two had experienced the atmosphere of the traditional classroom, along with a psychologist and a pharmacist, the latter with experience in higher education teaching. After the sample selection, seven teachers met the criterion of more than two years of previous experience in the traditional methodology, as shown in the table. The nomenclature used in the table, indicating the teacher and a number, refers to the ordering of the semi-structured interviews, which, after being carried out, were transcribed and used throughout the analysis, preserving the teachers' anonymity. To facilitate the analysis of the collected data, I organized it in phases to ease the treatment of the results. In the first phase, I corrected the transcriptions originating from the audio recordings. Then, I synthesized parts of the reports in paper clippings (slips), which I called nuclei of meaning.

III. PRESENTATION AND DISCUSSION OF DATA

When analyzing the data of this research, I considered it important to describe each of the five themes briefly. As explained in chapter 3 (methodological route), each theme was called an axis. The five axes originated from the biggest challenges we faced in the first year of the medical course, working with the innovative pedagogy adopted by the Pitágoras Faculty of Eunápolis.
In axis 1, my comparative intention was to measure the differences between the two methodologies in terms of lesson planning, and I asked the teachers to comment on their experience. After transcribing the interviews, I found it difficult to begin sequencing the slips because of the richness and quantity of similar phrases. I found it possible to nucleate this item in three more significant points: the first dealt with the difficulties and facilities of planning the classes; the second portrayed the change in the teacher's posture; and the third consisted of the nucleation of phrases reporting the application of the method in the classroom. Regarding the criterion of difficulty and ease in preparing classes, at first glance three teachers stated that the difficulty in planning in the PBL was related to the exposure of content, as expressed by Professor 6, who reported that "in the traditional model it is easier because you are able to prepare the whole class before it happens". Previously, in the traditional method, it was enough for the teacher to master the content and review his synthesis in the classroom, "replicating the production of his class for later classes" (PROFESSOR 2). From the interviews, questions arose such as: "And now?" "What do I do when I arrive in front of the students to start class?" "What am I going to write to pass on the knowledge?" "Should I create a step-by-step guide before standing in front of the students to begin a class on the subject?" Doubts like these were raised by Professors 2, 4 and 5. The making of a class in the traditional method, usually with the Datashow (slide projection), served as the planning itself of the content to be passed on to students. Silva notes that this technology was widely disseminated in higher education, with teachers acting as holders of knowledge whose role was to transmit the content of their class schedule through slides. The insertion of technologies in the classroom did not replace the teacher; quite the opposite, it expanded the possibilities of this professional's educational practice. However, the presence of technological resources required a new attitude from the teacher, no longer the sole holder of knowledge who transmits it to the student (SILVA, 2013, p. 11). The teachers' reports, upon joining the PBL, show that this technological tool is no longer the main one. In active pedagogy, the teacher does not have complete control of what will happen in the classroom, having left the role of transferring knowledge. As Professor 3 reported, "the facilitator becomes the stimulator of the construction of knowledge, leading and directing the frontiers of knowledge established in the modular units of content". Therefore, classroom planning should allot time to encourage the student to complete a cycle in which he himself is self-taught and motivated to solve the problem situations presented. Planning in the PBL involves a more proactive movement by the teacher. According to Toledo Júnior, it is necessary to elaborate situations that are inclusive, so that students feel part of the problem through prior knowledge and curiosity, and are encouraged to build knowledge alone or with their group of colleagues.
The PBL includes the structuring of knowledge within a specific context; it allows the student to face concrete problems, which can enhance the development of clinical reasoning; it favors the development of self-directed study skills and increases motivation for study. The PBL method values, in addition to the content to be learned, the way learning occurs, reinforcing the student's active role in this process and allowing him to learn how to learn (TOLEDO, 2008, p. 126). I perceived the anguish of colleagues throughout the interviews, due to the new format in which they had to prepare their lesson plans under the new model. Professor 5 said: "in the traditional method we prepared the class as holders and authorities of knowledge and it was up to the student to absorb it; in this new method the student must build his knowledge". It was evident, in my analysis, that interviewees 1, 3, 5 and 6 effectively believe that knowledge should be built in the moment by the students, with the teacher guiding the trajectory so that students reach the proposed objectives. Regarding the second criterion (the teacher's postural change), the teachers attributed the new paradigm to the shift from transmitter of content to tutor or knowledge advisor. The difficulty in changing the logic of content transmission generated discomfort and strangeness at first. This second criterion concerns the displacement of the teacher from the center of both theoretical and practical learning activity. Four of the seven teachers reported that preparation required greater teacher discipline in studying the content and describing a step-by-step lesson program, so that the teacher would not be the protagonist of the classroom. Professor 7 also reinforced that this role places the student at the center of the learning activity, whether theoretical or practical. Another point that demanded the educator's postural change was made clear in Professor 1's account: "PBL requires a change in the teacher's place of speech" and "the method inverts the logic of transmission of knowledge toward the production of it". These reports enriched my perception: even with little time in this teaching practice, a large part of the teaching staff embraced the method's intentionality and dedicated themselves to changing their paradigms in relation to their own posture. Teaching requires understanding that education is a form of intervention in the world; teaching is not the transfer of knowledge. By creating possibilities for the student to build and produce autonomously, we induce him to study through his own doubts and concerns as part of the classes. This displacement of the listener to a more participatory, reflective and questioning role is the objective of the method in the formation of the professional of the future. Through the PBL strategies, the student comes into contact with the protected world of the institution's laboratory practice together with the real practice of the SUS experienced in health centers. The same attention should be given to the specific objectives of the content covered, to avoid students' digressions during the construction of knowledge. Before, in the traditional method, the teacher read the proposed objectives and proceeded to the construction of his class.
Based on innovative pedagogy, the teacher's discipline in following the specific objectives must be exercised at all times, during the stimulation and construction of knowledge by students along the planned trajectory. The third criterion analyzed in this first axis (planning) was reported by six of the seven interviewees, who mentioned that the active learning method requires greater teacher preparation. According to the educators' perception, reports such as those of Professors 1, 3, 4, 5, 6 and 7 mention that "in PBL it takes much more time to plan, as it requires greater creativity and greater articulation of different types of knowledge to integrate practice with theory". I noticed in these statements the challenge of elaborating classes that are dynamic and contemplate the general and specific objectives, so that the student triggers the construction of his knowledge and achieves understanding of the content in meaningful ways. This integration of knowledge truly requires greater command from the teacher to trigger, direct and integrate the frontiers of knowledge, involving practical and human skills, than simply elaborating a class in the traditional method. It was clear, in line with these six teachers, that, when executing the lesson plan elaborated in advance of the class week, the teacher should stick to the objectives of the modular contents, and not to his scientific summary, as they used to do in the traditional classroom. In the traditional method, the way of planning often already included the way of transmitting the content through the expository class, as previously mentioned, by slides. The command of the class was verticalized; that is, the teacher lectured to the student, who occupied the role of listener. With this new challenge of innovative pedagogy, in addition to the command of knowledge, the teacher must be prepared for the different situations that the student will create with his group, shifting to a more horizontal construction of knowledge. The teacher awakens the student to new paths by asking questions that lead to reflection and to the search for knowledge to answer a practical or theoretical problem included in the lesson plan. This makes the active methodology more dynamic and creative, requiring a degree of study from the teacher to be aware of the boundaries of the construction of knowledge across various domains. Still regarding the challenges of the planning requirement, four teachers reported that the PBL encourages the teacher to constantly deepen his studies around the topic he will tutor, in order to avoid the discomfort of not knowing how to handle the doubts of the more curious students, educated in a schooling that placed the teacher at the center of knowledge. As the conduct of classes is considered a distinguishing characteristic between the methods, I sought to investigate how classes had been conducted throughout this first year of the course. On this topic, only two teachers cited educational strategies used in the management of their classes. Although the answers converged on the question of axis 2, which asked about the use of educational tools throughout the year, five teachers preferred to mention the importance of knowledge of the PBL in order to apply the best strategies according to the needs of the class, being able to change tools according to the students' difficulties or the size of the group.
I noticed, in the speeches of the five teachers who did not name the tools, that the choice of strategy involved the needs of each group, in addition to criteria such as the size and difficulty of the group and the environment in which the students were. "Using the environment and the difficulties of the moment to potentiate a problematization is the best strategy to stimulate the student to reflect" (PROFESSOR 7). Professors 3 and 6 added that the profile of the students' interest, the place where they are and the specific objective intended inform the decision to alternate the learning tool. At only one moment did I notice, in a teacher's speech, a report of difficulties in relation to this axis, which does not mean that this teacher had little knowledge of the PBL tools, but rather difficulty in applying them: "I admit that I still cannot use the PBL tools properly many times, managing my classes to motivate students to do an active search and asking surprise questions to stimulate the search for knowledge" (PROFESSOR 5). I was able to verify, with this question, that all the teachers value dynamics that stimulate the student in the search for knowledge and his own learning, making use of the pedagogical tools of the active method. This demonstrates the teachers' interest in learning from their own experiences and in managing the method. All the teachers made it clear that it is important to use these tools so that the student takes a role in self-education and becomes responsible for building his own knowledge. Among their main characteristics, the innovative teaching-learning methods clearly show the migration from "teaching" to "learning", the shift of focus from the teacher to the student, who assumes co-responsibility for his learning (SOUZA; IGLESIAS; PAZIN, 2014, p. 284). It was clear, in the testimony of four participants in the case study, that choosing the most appropriate educational tool to conduct the students' search for knowledge toward the objective of the intended content was, and has been, the greatest challenge of this first year of the course. According to the teachers' reports, this "teaching to learn" movement (PROFESSOR 6) is achieved through the tools mentioned by Professors 2, 3, 6 and 7, such as problematization, meetings for the discussion of clinical cases, dynamics for analysis, criticism and reflection on practice, and case study, always starting from the student's prior knowledge. I would also point out that, in general, all teachers declared it easier to teach classes by the traditional method, in which they used slide projection in their presentations. I considered it important to ask the teachers how they conducted their classes in the method they applied previously, before joining the Pitágoras Medicine institution. The classes were unanimously prepared in PowerPoint, in which a deposit of knowledge was made, similar to a bank deposit: as the years passed, the educator could revise and add items to his complete domain of knowledge, making it much easier to teach future lessons. The act of teaching-learning must be a set of articulated activities, in which the different actors increasingly share responsibility and commitment.
For this, it is essential to overcome the banking conception, in which one deposits contents while the other is obliged to memorize them, as well as the licentious, unlimited, spontaneous practice of individuals left to themselves and their own luck, in an emptiness of what they do. In contrast, liberating education is a political practice, reflective and capable of producing a new logic in the understanding of the world: critical, creative, responsible and committed (MITRE, 2008, p. 237). The phase in which the teacher is the main protagonist of the classes, according to his wishes and convictions and his articulation of practice, is related by Tsuji to a review of responsibility for his role. Regardless of desire or need, students swallow the materials, regurgitate them in the tests and try to forget them afterwards. It is believed that in this way they reach the final years prepared to learn the practice of medicine. This is how medicine has been taught for years. Knowledge evolves at such a speed that between 40% and 50% of what is taught today is abandoned or put in doubt within four or five years (TSUJI, 2010, p. 79). Current class management requires that the teacher take on the role of tutor, be a facilitator of knowledge and program trajectories within his class time. It also requires that he be able to arouse curiosity and research motivation and be prepared for unusual questions. This demands not only knowledge of the application of the tool or method, but a postural preparation to encourage the student to go in search of new knowledge. I noticed, in the words of Professor 4, that "the active method requires one's own postural transformation, so as not to be reactive to the questions and simply resolve the students' doubts". Even mastering the content, the teacher must often answer with another question, to guide the path of learning to learn. The comparisons I made for the assessment axis were centered on two questions: the first, the cognitive strengths and weaknesses of the teaching methods; the second, the perception of the student's cognitive displacement. Between these questions and the performance measures of the simulated scenarios of the laboratory practices, the interviews followed a path that went beyond my intention, migrating to a spontaneous evaluation by the colleagues. Case studies like this extrapolate the focus of the axis question and even reveal a construction of knowledge beyond what was imagined. I detected this through speeches such as Professor 3's — "the assessment assumed a more horizontal composition due to the possibility of formative assessment, where I do not need grades and can contribute with my professional perceptions to the student's formation" — and Professor 4's: "The active method articulates scientific knowledge with less biologicist and more biopsychosocial knowledge, relating ethical and spiritual factors to effective communication and financial and market perception". Such statements are in line with Lüdke and André, who report on the unusual findings that may appear during scientific research. These statements revealed the teachers' postural and humanistic change, which I did not expect to find in such a short time of practice in the method. Case studies aim at discovery. Even if the researcher starts from some initial theoretical assumptions, he will try to remain constantly attentive to new elements that may emerge as important during the study.
The initial theoretical framework thus serves as a skeleton, a basic structure from which new aspects can be detected and new elements or dimensions presented as the study progresses (LÜDKE; ANDRÉ, 1986, p. 18). There were unanimous reports about the positive reach of the active methodology, in relation to the traditional method, in anchoring practice over the course of the program. The stimulation of human skills, associated with the perception of the student's attitudinal and behavioral changes, prevailed in all reports. According to the seven teachers, a common nucleated phrase is that "the PBL method also provides for the use of other domains of knowledge, favoring the creation of new knowledge". Another point unanimously addressed was the application of scientific cognition to practice. Professor 2 portrays this well when he mentions "the combination of practical competence with critical reflection, which teaches the student to act in new situations and in the doctor's daily life". The PBL method, as previously reported, inserts students into a real environment from the beginning of the course. It is noted that students are being prepared to act on the patient instead of on the illness. The articulation of scientific knowledge with other human dimensions empowers the student for self-learning in addition to the articulation of practical knowledge, since this integration with the community improves reflection, critical analysis and the future doctor's ability to cope with everyday problems. I emphasize that Professors 1, 2 and 4 mentioned that "both methodologies achieve the same cognitive result, but in PBL the psychomotor and attitudinal skills are more significant and very important for the physician's training". These teachers attributed to the association of laboratory practices and community activities, in parallel with theoretical knowledge, the improvement of skills related to the construction of a mature individual. According to Tsuji, the transition from immaturity to maturity (personality) is a painful process that depends on experience, insight and the desire to overcome new challenges. With regard to axis 4, I aimed to compare how the teachers proceeded with the use of active pedagogical tools in the first year of the course. On this, four of the seven teachers cited the resistance of the teacher's own posture as a barrier to resorting to these teaching strategies throughout the year. Of these four, Professor 2 added that, "in addition to the teacher's own resistance to adapting to the new teaching models, it is important to remember that the students also came from a traditional high school" and, for this reason, find the teacher's passive role strange. The task of dealing with new and different strategies is somewhat complex and requires changes in habitus and paradigms: among university professors there is a predominance of content exposure and expository classes, or lectures, a functional strategy for the transmission of information. This habitus reinforces the action of transmitting ready, finished and determined content, similar to previous experiences. Moreover, the current curricular configuration and the predominantly conceptual disciplinary organization (in a grid) have the lecture as the main form of work, and the students themselves expect from the teacher the continuous and passive exposition of the subjects to be learned (SOUZA; IGLESIAS; PAZIN, 2014, p. 288).
This comparative question between the methods brought up an important point: the shift of the teacher's role away from the center of the educational process. The need for qualification of the teaching staff in the first year of the Faculdade Pitágoras de Medicina, and in the subsequent periods, is fundamental. I realized that it would be necessary to transform the teacher's profile by inverting his role, in order to place students as promoters of the search for knowledge. Professor 5's speech brings the need to "break dogmas, paradigms, concepts and prejudices of a generation of teachers". It is natural that medical professionals trained in the traditional method and with successful careers show a certain lack of confidence when they first come across the PBL. The perception of the need for teacher qualification was clear in six of the seven teachers, almost unanimous. With the change in the medical curriculum, an adaptation to the integration of practice and social integration was required; perhaps that is why so many teachers mentioned that the PBL is more demanding, not only because of the demand for greater creativity, but because of the proactivity needed to adapt the teaching strategies. Understanding that the teaching model is migrating from disciplinarity to interdisciplinarity involves the need for permanent training for teachers. Curricular changes presuppose the transition from disciplinarity to interdisciplinarity, in addition to presenting new teaching-learning strategies, such as active methodologies, considered a new challenge for the training of the teachers of the future (ARAÚJO; SASTRE, 2009, p. 6). I considered important a quote from Professor 5, in which he reports: "I still lack concrete and well-grounded pillars to underpin and consolidate the method". I noticed in this speech the insecurity in using strategies in the classroom. In my view, these observations complement each other, since the path to success, after the institution's adoption of an innovative method, is investment in training and continuous formation. In axis 5, I was able to gather the interviewees' perspective on the future, gauging how their students would fare in the job market six years after graduation and how the innovative methodology would contribute to this graduate in the market. With the exception of Professor 5, all reports showed optimism and envisioned a professional adapted to technological changes, able to learn to learn with greater speed and with problem-solving characteristics for coping with everyday difficulties. Professor 5's reservation is linked to the sudden break from the traditional method to the PBL, and to the possibility that society may not have time to welcome these changes, especially the students. Most educators (Professors 1, 2, 3, 4, 6 and 7) added that "the active formation transforms the individual through daily contact with problem situations". Professor 3's testimony brings a vision of the future in which "graduates who acquire greater capacity to learn to learn throughout their professional life will be better prepared for changes in the market". All the teachers mentioned the cycle of demands and needs of the population in relation to health, which has been changing ever more rapidly, in both the public and private systems.
According to Professor 7, this training model will give students the opportunity to "learn from the changing needs of the population from the beginning of graduation, forming an individual with the ability to relate to political, socioeconomic and cultural dimensions" and prepare them for constant social change. Learning to manage classes correlating the biological, psychological and social domains is an advance for a continental country with great demographic variation. Health needs, in a more comprehensive sense, originate in the way human societies live, which implies interfaces between the demographic characteristics of populations, their culture and socioeconomic organization, in a given territory and ecosystem. The combination of elements of a biological, cultural, subjective, social, political and economic nature produces the set of health needs of a given society. I perceived, in the words of all the teachers (1, 2, 3, 4, 5, 6 and 7), that "the method is capable of transforming attitudes and behavior in the face of the situations that the work environment imposes on the day-to-day" of health professionals. This capacity has been noticed by the teachers since the first period of the program. In relation to this perspective, I noted in the speeches that the teachers, even at the beginning of the course, recognize that the program will form a general clinical profile, since the students are immersed in the learning context from the first day of school. I was also able to verify, from the statements of Professors 2, 3, 4, 6 and 7, that "the conduct of the active method will foster a more humanist professional, prepared to meet the needs of the market and of the population's health". I also noticed the relativization, between traditional education and the PBL, of the security of the professional who, from the beginning of his graduation, maintains direct contact with society. The protected environment of laboratories and classrooms contributes to the student's cognitive domain over the six years, but this learning environment alone is not able to stimulate the student in the biopsychosocial domains, as it reduces opportunities for contact with the health network. According to the report of Professor 7, "the apprentice who goes through graduation together with field training starts to live and relate to the real social perspective, perceiving health management problems and sociopolitical coexistence, closely watching the patient's pain and living with illness". I perceived, in the words of Professors 3, 5 and 7, that "the student who lives through the six years of graduation in an environment protected by teaching, but complemented by the association of theory with the practice of the real world, will be an individual better prepared for the market". "The coexistence with poverty, with therapeutic cultural difficulties, with communities weakened by drug trafficking, through habit, will produce a professional better adapted to the conditions of the current market" (PROFESSOR 7). The MEC, the SUS and the DCNs promote a changed medical education, validating a model focused on emancipation, in which the student comes into contact with social transformations, a dimension that was previously shielded by the protected environment of the classroom.
In the perspective of permanent education for workers in the Unified Health System (SUS), the expansion of critical capacity, aiming at the transformation of their own practices, has been the axis in the construction of educational proposals with an emancipatory approach, with the use of active teaching-learning methodologies (CADERNO DO CURSO, HOSPITAL SÍRIO-LIBANÊS, 2014, p. 6). Only one professor made an inference showing concern with the insertion of the graduate in the market. The transition between ways of teaching is being implemented in recently created faculties, without the adhesion of all existing ones. In 2014, the National Curriculum Guidelines pointed the way for methodological change, but they did not impose a schedule for all institutions to adopt or structure themselves around the active method. Large and renowned faculties in the country, mainly public ones, continue with the traditional methodology, as do the selection tests for medical residency and the public examination notices. "This may cause difficulties for the student who is in transition" (PROFESSOR 5). Based on the research carried out, I considered the teachers' contributions potent, given their short experience with the methodology. The approaches reported were useful for the presentation of the partial results acquired so far, as a reflective strategy in the 2020 pedagogical week. The analysis of the data interfaces with the difficulties studied in the literature and brings results that will be dealt with in the next chapter.

IV. FINAL CONSIDERATIONS

When starting this research, I had no perception of how the historical evolution of medical education was accompanied by so many paradigm breaks. The fact is that, when comparing implementations or innovations, whether of medical schools or of new methods, one thing is common: the participation of the government and society's engagement with the theme. The medical training of the hygienist era struggled to be recognized in society. Scientific constructions were a target to be reached for professional recognition, in an attempt to prevail over the practices of traditional healers. Today, as in the 19th century, it has not been easy to change the medical teaching methodology and evolve into a new curricular matrix. At that time, there was an effort to differentiate the medical professional within society; today there is a need to reposition the medical profession for a more generalist environment. This effort to train professionals has had the same authorship throughout history: it came from government power, to meet the health needs of the population in the respective periods. I perceive significant changes, but for them to be welcomed by the scientific community and recognized by civil society will take years, if we observe the usual cycle of change in medical education. We already have decades of innovative pedagogy in the world and, even knowing the existence of proven successful curriculum models for medical training, such as the Canadian and the Dutch, we are participating in this transformation in the same state in which the first school of surgery in Brazil was founded. Historically, it seems that we are pursuing a model that, although we call it new, was applied out of necessity to treat diseases, pests and epidemics in the imperial era.
The monarchy's proactivity supported the search for cures and encouraged scientists to actively search for remedies for the ills of the time. The proposal for an education that we call innovative and active in current medical education seems to seek this same role for the student, as a center for the construction of knowledge, aiming to form an individual capable of always learning to learn, as the scientists of the 19th century did in the discovery of tropical diseases. Changing is a challenge. The question regarding this change persists, which is why many professionals question the new format: are we modifying the teaching model to meet the market, or has society changed, and that is why we are changing the way of learning and teaching? What I was able to learn from this research is that the health needs of the population changed greatly after the implantation of the SUS in 1988, which established the population's right to health as a duty of the State. With the political and social maturing of Brazilians, the country opened many strategies for dealing with the various problems of the health system, but the training of human resources was out of step with the growth rate of the public health network. In the last two decades, the public power has promoted and encouraged governance and governmentality around the training of health professionals, in order to meet the network's singularities and the different cultures of our continental country. Nowadays, it has become increasingly clear that the student's cognitive domain alone, although fundamental in the teaching-learning process, is not enough: cultural diversity, the effects of globalization and coexistence with new technologies have expanded the frontiers of knowledge. Changes on their own bring resistance, and the change in the format of teaching would be no different. But did the PBL demand changes in education as a cause, or did it emerge as a consequence? It is perceived that society has changed and the active method has only adapted to accommodate these social changes. Another point was the advancement of technology, which changed a generation and multiplied the sources of research through the Internet. At several moments, I came across testimonies that portrayed the teacher's resistance and mistrust towards the PBL when questioned in the interviews. Challenges such as planning, managing classes and using the tools appropriate to the method raise doubts. Do these phenomena reported by teachers occur because of the new methodological paradigm or because of the innovation required in the teacher's posture? This natural discomfort may be related to the way we were educated in the traditional method, in which we received, as good listeners, the teacher's transfer of knowledge; today, as teachers, we participate in the displacement of power by changing the logic of the transmission of classes. Ceasing to be the main actor in the room is what the method proposes, so that the student assumes that place. And leaving the place of power is neither easy nor comfortable for those who are unaware of the practice proposed by the PBL. Other conclusions could be drawn when analyzing the resistance and the demands of planning, managing and applying the PBL: the co-responsibility of the teacher in these matters. In this first consideration, teachers, when getting involved in the new methodology, feel more co-responsible for teaching, as they are part of the construction of knowledge.
From another point of view, the teacher feels more committed to the more integrated and synchronous format of the weekly pedagogical modules, which map out the trajectory of the content to be worked on by the teaching group around the same theme. Thus, theoretical classes are often complemented by the synergy of approaching the same subjects concomitantly in other classes, such as microscopy, general skills, medical skills and morphofunctional classes, in addition to field practice in the SUS. Teachers know that their part must be done on time and well, so as not to compromise the construction of subsequent classes. The learning syllabus is arranged by the module coordinators, with the general objective and the specific ones to be worked on during the week. With this, each teacher knows in parallel what his colleagues are developing with their students, whether in the tutorials (classrooms), in the technical and skills laboratories, or in the field. This set of theory and praxis in the PBL is not just an addition of knowledge, but an intersection of knowledge domains in which content is worked in the real field of work, with all the singularities and deficiencies that the reality of the health system has in its gradual evolution. The teaching-service-community integration demands more from teachers, and it requires them to be attentive to the fulfillment of their own objectives and syllabus. As for the problems faced, these are problematized, providing opportunities for learning in ethics, management, communication and other areas that the traditional curriculum did not offer. From the comparative study, two more critical phenomena are perceived: applying the tools and handling the method. From these two inferences emerges an uncontroversial point: to improve these competencies, one must invest in the constant training of the teacher. Another interesting fact is the perception of the medical market regarding the new paradigms of a teaching model different from the one in which the professionals themselves were educated. Unaccustomed to the terminology, and distant from education, they compare educational models without knowing the origin of, or motives for, this change. The term biopsychosocial seems to conflict with traditional training. Some claim it is a term that softens the doctor's hard training, smoothing and even damaging the knowledge base necessary for good formation. Studies have already shown that the point of arrival at the level of cognition does not vary from one methodology to the other; both are effective. What can be noticed from the beginning is the student's critical-reflective performance and his progress in communicability. Still on the teacher's resistance to the method, one can infer the paradigmatic change: shifting the logic from the teacher as the transmitting center of knowledge to the student building his own knowledge is a displacement of power. The generation that is in the current market, like that of their parents and grandparents, was educated through the active role of the teacher and the passivity of the student. This role was indisputable until a few decades ago. So the doubt remains: why change the way of teaching? Most people who come into contact with the active method come to understand, accept and promote it after getting to know it better.
According to the teachers' reports after the first year of experience with the innovative pedagogy, both students and teachers who were able to compare the traditional and PBL methods preferred the liberating education of self-construction. Another central point is how to use the method. A doubt remains whether the challenge of conducting classes is due to the lack of adaptation to the new tools or to the lack of training in using them. Another point perceived was the concern to use tools to standardize and resignify learning in the PBL, through which class management proposes to initiate the awakening of knowledge through the student's prior knowledge, mobilizing his previous experience to motivate new discoveries: learning by doing, applying, studying and adapting teaching strategies at all times. As a quick assessment of the activity is proposed at the end of each class, the teacher encounters the opportunity for improvement almost instantaneously, based on the feedback from his group dynamics. A settled point is that, in the same way that teachers become more proficient with each period, the student approved for subsequent periods becomes more critical and reflective, in addition to already knowing the method. Since each period requires new teachers, these will arrive to classes already accustomed to the method, but the new professors will confront the PBL for the first time. This is due to the insufficiency of teachers trained and prepared for the challenges of the methodological change. At this point, several frontiers of study open up. In relation to the line of student progress and the new teachers, I realized the need for continued or permanent teacher education. How to order and schedule these three dimensions so as to guarantee education in the face of the resistance and difficulties of the new professors of the subsequent periods? The traditional supremacy of transmission through the Datashow, much used in recent years by higher education institutions, served as an instrument for organizing and planning the content to be passed on by the teacher in the classroom. Hence the restlessness about teaching, training and changing posture: how to identify the needs of teachers? The change in teaching methodology, even more so in a training as valued as the medical one, could not go unnoticed or without controversy in the eyes of its representatives. However, it is worth remembering, as mentioned in the historical account, that the implantation of scientific medicine in Brazil, after the establishment of the first medical faculties, faced great opposition from the healers and midwives who had performed, since the discovery of the country, their own ways of healing. We can conjecture that the term biopsychosocial, widely used both in the citations of the referenced authors and in the interviews, seems to conflict with traditional training because of the expansion into domains not dealt with in the traditional curriculum. The government plan, when proposing the new guidelines, perceived social interests and the change in users' health needs, concluding in favor of the formation of a generalist profile, in order to ensure a greater number of places in the country's health workforce. As for the future doctor, it seems, we must leave some questions still open.
Precisely because we treat PBL as an innovative pedagogy, it does not yet have a contingent of trained teachers and graduates sufficient for a comparative sample with which to judge the success or effectiveness of PBL against the traditional method. Is the teaching we are promoting enough to meet the population's health needs? Will this biopsychosocial training serve the market, the future doctor, and the medical profession? At the conclusion of the work, it can be seen that the professors of the Faculty of Medicine of Eunápolis are adapting to the method throughout the course, as proposed by innovative methodologies: a learning cycle similar to a constructivist spiral. Based on the perceptions gathered in the research, I realized the need to separate out the triad that makes up the education of the new teacher. A product of this work, which I named "Sparks of the Tutor" (Appendix B), is a compendium that guides, step by step, tutoring under the active method, together with the posture the facilitator must maintain in class. The instrument will serve to guide and reduce challenges, as well as the resistance of teachers to PBL, both those with experience in traditional teaching and those who have not yet conducted a tutorial. I stress that this instrument does not propose to constrain the facilitator, who must lead the learning moment through whatever unexpected situations may appear in its unfolding. Rather, it is the reflective and pedagogical product of this research, an advisor to support development from the beginning of tutoring through to its assessment. Finally, if the method is based on problem situations, or on problematizing real situations to achieve its goal of teaching and learning, then many of the current questions are capable of stirring anxiety in everyone. I realize that these controversies over the teaching method may be a sign that we are on the right path, looking for answers in future research.
package com.tqmars.requisition.infrastructure.Specifications.removedinfo;
import com.tqmars.requisition.domain.Specification.Specification;
import com.tqmars.requisition.domain.Specification.expression.IHqlExpression;
import com.tqmars.requisition.domain.Specification.expression.OperationType;
import com.tqmars.requisition.domain.model.removedInfo.RemovedInfo;
import com.tqmars.requisition.infrastructure.Specifications.Expression.HqlExpression;
/**
 * Specification that checks whether an ID number already exists
 * in the removed-household (relocated) records.
 * @author jjh
 * @time 2016-01-11 20:07
 *
 */
public class RemovedInfoIdNumExistsSpecification extends Specification<RemovedInfo> {
    private String idNumber;

    public RemovedInfoIdNumExistsSpecification(Class<RemovedInfo> _t, String idNumber) {
        super(_t);
        this.idNumber = idNumber;
    }

    @Override
    public IHqlExpression getHqlExpression() {
        IHqlExpression expression = new HqlExpression();
        // Native SQL count query: a result greater than zero means the ID number is already present.
        expression.setSql("select count(1) from tb_removed_info where id_number=?");
        expression.setParameters(idNumber);
        expression.setType(OperationType.SQL);
        return expression;
    }
}
|
In addition, the judge indicated that he was very close to finding the proofs sufficient to grant Father’s petition for a change in custody. The trial court ordered that Mother pay Father’s attorney $20,912 in fees and $3,555.86 in costs and reimburse Father’s $738 cost to the Guardian Ad Litem (GAL). The Court of Appeals affirmed.
Father responded to the motion and also filed a motion for a change of custody. There were thirteen hearings over a period of nearly two years. At the conclusion of the final hearing, the Court declined to restrict the father’s parenting time and, in fact, increased it.
The case involved Sean O’Farrell [“Father”] and Kelly O’Farrell [“Mother”]. They were divorced in December 2008. The judgment of divorce provided that Father and Mother would have joint legal custody of their minor child, who was two years old at the time. The judgment of divorce granted Mother physical custody of the child until further order of the court and ordered an extensive, detailed parenting-time schedule. The fact that the PT schedule was extensive and detailed indicates that Father’s lawyer perceived the need for this—that there was already contention. No doubt! Mother filed a motion in late March 2009—about four (4) months after the ink was dry on the judgment of divorce. She sought to suspend or to restrict Father’s parenting time.
The Michigan Court of Appeals ordered a mother to pay over $25,000 in attorney fees and costs in a contentious post-judgment child custody and parenting time case. Parents who think they can just willy-nilly waltz into court and/or who use and abuse child protective laws in an attempt to restrict access to children should think again. I've seen lots of abusive litigation before, but this one is really quite amazing.
During this lengthy and contentious litigation, there were allegations of interference with Father’s parenting time, refusal to communicate about important matters involving the child, and attempts to disrupt the father-child relationship. (In fact, during the parties’ divorce proceedings, four neglect referrals were made to the Department of Human Services (DHS) because Mother made “veiled accusations” that Father was abusing the child, citing redness of the child’s vagina and blood in her stool. Father further alleged that the child’s pediatricians determined that Mother was subjecting the child to excessive medical testing, against their advice, in order to create evidence to restrict Father’s parenting time.)
The case involved many referrals to doctors, medical facilities, and DHS (Protective Services). Eventually three psychologists testified. In addition, the trial court appointed a Guardian Ad Litem (GAL) for the child. Hearings were held in which the Court heard testimony from the parties, and also from the following:
Dr. Wayne Simmons, an expert in psychology, child development, and custody evaluations who met with the parties and conducted a custody evaluation;
Rebecca Johnston, the child’s preschool teacher; Dr. Diane Kukulis, the child’s play therapist, whom the court did not qualify as an expert;
Dr. Terrance Campbell, an expert in forensic psychology who, in response to an order of the court, was hired by Father to interview the parties and evaluate their interaction with the child;
Melanie Rand, a Child Protective Services (CPS) worker who received a referral regarding the child and observed a forensic interview of the child; and
David Breyer, a psychologist who performed a psychological evaluation of the parties and the child at CPS’s request.
During a hearing on January 24, 2011, the trial court issued an opinion on the record. Some of the comments of the judge were:
From the start of the case Defendant, Mother, has continued to attempt to limit Father’s visitation and interrupt his quality time with his daughter.
Mother also tried to limit visitations by taking the child to two different . . . pediatricians who quit because they were being used for gathering evidence. Emergency room visits were also undertaken for weighing before and after visits in an attempt to gather evidence. Then we had the allegations of Attachment Disorder in play therapy without both parents being involved in the therapy.
Now we have allegations that there have been . . . eight referrals to D.H.S.; all unsubstantiated and the last referral, as previously indicated, came the day after the Court cautioned the parents and grandparents that the conduct has to change. D.H.S. did a local investigation and used the services of David Breyer who the Court finds is one of the best evaluators I have witnessed both in private practice and on the bench.
* * *
His conclusions indicate . . . Mother is overly nurturing; in fact, I feel she is smothering the child.
* * *
The Court is mindful that Mother has a medical background and both maternal grandparents do as well. . . . The Court finds that the medical knowledge has been misdirected by attempting to limit Dad’s ability to parent and alienate the daughter’s affection toward him.
Also, the Court finds that Mother self reports to people she knows are mandatory reporters and the Court finds that they are being used as agents of the mother.
* * *
The severity of the allegations to D.H.S. have increased. There is constant doctoring. The one report indicated there were 120 events that have been charged to Blue Cross Blue Shield. [My emphasis]
Parents need to be mindful that courts are wise to their transparent attempts to create an appearance of abuse where there is none. Too often parents use false reports of abuse. In one such case, a trial court was particularly responsive and not only refused to allow the mother a change in her parenting time, but also awarded the father sole legal custody and entered an order specifically prohibiting the mother from taking the parents' autistic child to any medical or psychological treater, or to any treatment center, except in the event of a life-threatening injury.
O’Farrell v O’Farrell, Docket No 303962, Decided November 20, 2012 [Unpublished] |
DE 10 2004 030 428 A1 has already disclosed a solenoid valve, comprising an armature for actuating a valve closing element, actuated in opposition to the action of a return spring, in a tubular valve housing, which comprises a fluid passage in a tubular valve seat element, which on energizing of the armature is closed by means of the tappet-shaped valve closing element, wherein the valve closing element extends inside a through-bore in the valve housing through the helical return spring, which is supported on an end face of the valve seat element formed in the area of the through-bore. The valve seat element is fixed in the valve housing by means of a press-fit and carries a filter element on its end face remote from the return spring.
It is a disadvantage, however, that the valve closing element is subjected not only to the permanent action of the return spring but also to the action of the hydraulic pressure prevailing in the valve housing, which can lead to an inadmissible axial displacement of the valve seat element in the valve housing.
The background description provided herein is for the purpose of generally presenting the context of the disclosure. Work of the presently named inventors, to the extent it is described in this background section, as well as aspects of the description that may not otherwise qualify as prior art at the time of filing, are neither expressly nor impliedly admitted as prior art against the present disclosure. |
“I believe I’m at the biggest and best club” - the words of Graham Arnold when taking over Sydney FC in May 2014.
Arnold wasn’t the first to invoke that hyperbole at one of the A-League’s so-called glamour clubs, and he surely won’t be the last.
But if Sydney FC truly are a “big” club, then they must expect and accept the attention - good, and bad - that goes with that label.
As things stand, the Sky Blues are 7th in the Hyundai A-League table. True, it’s the first time under Arnold’s reign that they have slipped out of the finals spots, and it was only last May they came within one game of being champions.
But that was then, this is now - and a win over Guangzhou Evergrande, as thrilling as that was, won’t be enough to paper over the cracks of an increasingly brittle domestic campaign, if results don’t improve.
Arnold doesn’t like the scrutiny his team is under at the moment. He won’t like this article. That’s unfortunate - but it can’t be helped. He feels it’s unfair to magnify Sydney’s failings when - for example - his old club, Central Coast Mariners, are enduring a woeful season, with just three wins all season.
Arnold believes there is a campaign to drive him out of Sydney FC. There isn’t. I haven’t heard one call for his dismissal from any fan, or facet of the media - perhaps for the simple reason that last year (and his previous triumphs) have bought him some time. Generally, Arnold is well-liked, respected, and has enough credit in his coaching bank to have earned him the right to try and correct the current malaise.
“Sack Arnie” agendas? Laughable. The truth is, Arnold has more friends in the Australian media than most. That’s not a slight against the Sydney FC coach - it’s a smart move on his behalf to have made allies down the years. Terry Venables and Harry Redknapp cultivated similar relationships in England. Relationships which - in the end - every coach needs when the inevitable sticky patch arrives. Just ask Holger Osieck.
But not every journalist can be expected to toe the party line. The fans who pay their money every week want answers, and the media is duty bound to critique a self-styled big club, when it is underperforming.
There are mitigating factors to Sydney’s slide this season. The departures of Marc Janko and Bernie Ibini stand out - as does the mid-season flight of Alex Gersbach, plus the injury to the influential Alex Brosque. Filip Holosko hasn’t quite worked out as hoped as marquee, while Milos Dimitrijevic has been a shadow of the player who cleaned up at the club’s annual awards last season.
There have been plus points too. Especially the emergence of Brandon O’Neill, the improvement in Matt Jurman, and the gradual increase in output of Milos Ninkovic.
Still, the fact remains that Sydney are winless in seven, and have beaten only three clubs this season - Newcastle Jets (three times), Central Coast Mariners (twice), and Western Sydney Wanderers (twice). Four of those wins came by virtue of goals in the 82nd minute or later, and the team hasn’t won outside New South Wales all season.
But Sydney FC isn’t having any of it - insisting they are the victims of an insidious campaign.
The persecution complex started in earnest in late January, when they bristled at criticism of their defensive display in the Big Blue loss to Melbourne Victory. The club circulated an email the following week, containing statistics that proved (in their opinion) that the Sky Blues had actually played an attacking game - by courtesy of having made more entries into the final third, more penalty area incursions, and a similar number of total passes (to Victory) in the opposition half.
Personally, I didn’t buy it - but whatever. Defensive tactics are a perfectly legitimate part of the game, so long as it achieves the desired result - and therein lies the problem.
Sydney are not achieving results, even though they have tempered their approach of late. They will point out that, in their last seven games, they have played Victory (twice), Brisbane, City, Perth, Adelaide and Wanderers - all of the clubs now currently above them in the top six. They have lost only two of those games by more than one goal, and they should have beaten the Wanderers a third time. Fine margins and all that.
But that’s the measure of a big club - you have to win. Sydney FC hasn’t won a title in six years.
Imagine the coach of Manchester United or Real Madrid (some of the clubs that Sydney FC have been compared to - in an Australian context - down the years), being let off lightly with a similar record? No chance.
Constructing conspiracy theories simply doesn’t wash - the media doesn’t put rosters together, select the team, or pick the tactics.
That job belongs to Graham Arnold. At the Mariners, where the spotlight is less intense, a bad run doesn’t draw the same attention. At Sydney FC, they must live with that pressure.
The next two games - against Wellington and Central Coast Mariners - shape as season-defining, and with Roar, Adelaide and Perth to come after that, they must win both.
Because when the heat is on, that’s what big clubs do. |
from minpiler.std import M, L, use_object, util

unit = use_object()
# Random flag used to mark units controlled by this processor.
flag = M.math.floor(M.math.rand(100000000))
initialized = use_object()

while True:
    # Grab an idle quasar unit, or keep the one we already flagged.
    unit = util.get_uncontrolled_unit(unit, flag, M.at.UnitType.quasar)
    if unit is not None:
        M.unit.bind(unit)
        M.unit.boost(1)
        if unit != initialized:
            # Newly acquired unit: bring it close to this processor first.
            if M.unit.within(M.at.const.thisx, M.at.const.thisy, 7):
                initialized = unit
            else:
                M.unit.move(M.at.const.thisx, M.at.const.thisy)
        else:
            cap = M.at.const.unit.itemCapacity
            if M.at.const.unit.totalItems < cap:
                # Not full yet: locate and mine the ore configured in sorter1.
                found, x, y = M.unit.locate.ore(L.sorter1.config)  # type: ignore
                if found:
                    M.unit.approach(x, y, 7)
                    M.unit.mine(x, y)
            else:
                # Full: deliver the load to container1.
                M.unit.approach(L.container1.x, L.container1.y, 7)
                M.unit.item_drop(L.container1, cap)
|
Heritage Foundation security and defense expert James Jay Carafano tells Newsmax the Obama administration is so eager to declare victory in the war on terror that it is “putting its head in the sand” and ignoring the rapid growth of non-al-Qaida terrorist groups.
Carafano, the vice president of foreign and defense policy studies at the conservative think tank, praised the post-9/11 homeland security effort as “very effective,” citing some 54 instances in which attacks and bombings targeting the United States were thwarted. But he criticized the administration for downplaying the war on terror as if it already had been won.
In an exclusive Newsmax.TV interview on Friday, Carafano charged the administration has “defined their way out of the problem” by focusing only on al-Qaida.
“They’ve basically said that we are fighting al-Qaida central, and al-Qaida and its affiliates, where people are actually planning operations to attack the U.S. or its allies. Well, when you define the enemy that way, you look pretty good.”
Carafano, a leading U.S. national security expert, advised it is simply too soon to say what motivated the two bombing suspects to commit their horrific act. But he said authorities are leaving no stone unturned in an effort to determine if someone radicalized the youths and encouraged them to commit the bombing, perhaps via the internet.
“Pretty much the No. 1 tool of global radicalization is the internet,” he said. “Now, we use the internet to buy stuff on Amazon.com. But to terrorists, it’s their most important asset.”
Carafano tells Newsmax that al-Qaida actually produces an online magazine called Inspire instructing Westerners how to commit acts of terrorism against Americans.
Carafano was critical of the quality of the mainstream media’s coverage of the attacks, which at several key points conveyed inaccurate information to the public. And he praised the national-security infrastructure for stopping as many terrorists attacks as it has.
from celery import Celery
from flask import Flask
flask_app = Flask(__name__)
flask_app.config.update(
CELERY_BROKER_URL='redis://localhost:6379',
CELERY_RESULT_BACKEND='redis://localhost:6379',
SECRET_KEY='top-secret!'
)
def make_celery(app):
    # Create a Celery instance wired to the Flask app's broker/backend config.
    celery = Celery(
        app.import_name,
        backend=app.config['CELERY_RESULT_BACKEND'],
        broker=app.config['CELERY_BROKER_URL']
    )
    celery.conf.update(app.config)

    class ContextTask(celery.Task):
        # Run every task body inside the Flask application context, so app
        # configuration and extensions are available to tasks.
        def __call__(self, *args, **kwargs):
            with app.app_context():
                return self.run(*args, **kwargs)

    celery.Task = ContextTask
    return celery
celery = make_celery(flask_app)
@flask_app.route('/', methods=['GET'])
def index():
    # Lazy import so the scanner is only loaded when the route is hit.
    from rspm.third_library.whatweb.wrapper import WhatWeb
    scanner = WhatWeb()
    result = scanner.scan("https://csdn.net")
    print(result)
    return result

if __name__ == '__main__':
    flask_app.run(debug=True)
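For completeness, a minimal sketch of how a background task would be defined and dispatched with the celery object created above; the task name and arguments are hypothetical, not part of the original code:

# Hypothetical example task. Because celery.Task was replaced with
# ContextTask above, the body runs inside the Flask application context.
@celery.task
def add_together(a, b):
    return a + b

# Dispatching from a request handler or a shell (requires a running worker,
# e.g. `celery -A <module>.celery worker`, plus Redis):
#   result = add_together.delay(2, 3)
#   result.get(timeout=10)  # -> 5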
|
package com.lambdasys.unidadefederativa.api;
import java.io.Serializable;
import java.util.List;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;
import org.eclipse.microprofile.rest.client.inject.RegisterRestClient;
@RegisterRestClient
public interface UnidadeFederativaResource extends Serializable {

    /** Returns all federative units (Brazilian states). */
    @GET
    @Produces(MediaType.APPLICATION_JSON)
    @Path("/unidadesfederativas")
    public List<UnidadeFederativa> findAll();

    /** Returns a single federative unit by its id. */
    @GET
    @Produces(MediaType.APPLICATION_JSON)
    @Path("/unidadesfederativas/{id}")
    public UnidadeFederativa findById(@PathParam("id") Long id);
}
|
Dense Matter in Compact Stars: Theoretical Developments and Observational Constraints

We review theoretical developments in studies of dense matter and its phase structure of relevance to compact stars. Observational data on compact stars, which can constrain the properties of dense matter, are presented critically and interpreted.

INTRODUCTION

Since their discovery almost 40 years ago, neutron stars have been recognized as promising laboratories for studying matter under extreme conditions. All the ambient conditions that characterize these objects tend to the extreme. The typical density inside a neutron star is comparable to the density inside nuclei, $\rho_{\rm nuclear} = 2.5 \times 10^{14}~{\rm g\,cm^{-3}}$, which corresponds to a number density of baryons $n_B \simeq 0.15~{\rm fm^{-3}}$. Under these conditions the nucleons are very degenerate, with a typical Fermi momentum $k_F \sim (3\pi^2 n_B)^{1/3}$ in the range of 300-600 MeV. In the interior, the interaction energy of the nucleons is several times larger than their Fermi degeneracy energy $E_F = k_F^2/2m \sim 60$-$150$ MeV. The baryon chemical potential, which is defined as the energy needed to introduce one unit of baryon number, can be 500-1000 MeV larger, at the center of the star, than the rest mass of the nucleon. There the internucleon distance becomes comparable to their intrinsic size of 1 fm. Under such extreme conditions several new states of matter, containing new high-energy degrees of freedom, may be favored. Novel hadronic phases - which contain, in addition to nucleons, a Bose condensate of pions or kaons, and/or hyperons - have been proposed and studied in some detail. There is also the exciting possibility that the core may contain deconfined quark matter, much like the quark-gluon plasma being probed in terrestrial experiments such as the Relativistic Heavy Ion Collider (RHIC). Although model descriptions of these various phases are still in rudimentary form, in several cases qualitative differences between the exotic phases and nuclear matter have been identified. A quantitative understanding of these trends and, in particular, how they could affect observable aspects of compact star structure and evolution is an exciting area of research. A primary purpose of this review is to convey some of this excitement in a pedagogic style to make it accessible to graduate students. This review is not intended to be a comprehensive account of all recent developments. Instead, we have chosen topics that serve (in our view) as pedagogic tools that help illustrate key concepts of relevance to dense matter in compact stars. Figure 1 illustrates a theoretician's view of a neutron star. The environment just outside the star, characterized by large magnetic (and electric) fields and temperatures, is extreme from the plasma physics point of view. At the surface of the star we expect a very thin atmosphere composed of hydrogen and in some cases perhaps a mix of heavy elements, or even a condensed magnetic surface. This surface is of utmost observational importance because it allows us to determine the temperature, and possibly the radius, of the neutron star, an issue we describe in Section 2.2. An envelope exists just below the atmosphere where matter is not yet fully degenerate and, with a thickness of a few tens of meters, it acts as a thermal insulator between the hot interior and the surface. The outer 500-1000 meters of the star, its crust, contains nuclei, forming a lattice immersed in a quantum liquid of neutrons, most probably in a superfluid state (Figure 1, insert A).
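As a quick numerical check of the Fermi scale quoted at the start of the Introduction, a short calculation (a sketch: the only input from the text is the saturation density; the rest are standard constants):

import math

hbar_c = 197.327                     # MeV fm
n_B = 0.15                           # fm^-3, saturation density from the text
m_N = 939.6                          # neutron mass, MeV

k_F = hbar_c * (3 * math.pi**2 * n_B) ** (1 / 3)   # ~324 MeV (1.64 fm^-1)
E_F = k_F**2 / (2 * m_N)                           # ~56 MeV, non-relativistic
print(f"k_F = {k_F:.0f} MeV, E_F = {E_F:.0f} MeV")

At several times saturation density, as in the inner core, the same estimate pushes $k_F$ and $E_F$ toward the upper ends of the 300-600 MeV and 60-150 MeV ranges quoted above.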
Owing to the rotation of the star, the neutron superfluid forms vortices, which very likely pin on the nuclei and participate in the glitches. As we go deeper into the crust, approaching nuclear density, there is a first-order phase transition from the inhomogeneous regime of the crust to the homogeneous core. In this regime, termed the nuclear pasta (Figure 1, insert B), as density grows, nuclei are increasingly elongated (the spaghetti phase), then form two-dimensional structures (lasagnas) - with the space between them still filled by the superfluid neutrons - until the geometry inverts in the Swiss cheese phase, where bubbles of neutrons are immersed in the almost homogeneous neutron+proton liquid. The physical reasons for the peculiar aspect of this phase transition are elucidated in Section 4.4. At a density of approximately $0.6\,\rho_{\rm nuclear}$, we enter the core, where the neutron superfluid coexists with the proton fluid, which is probably a Type II superconductor that reacts to the neutron star magnetic field by confining it into fluxoids (Figure 1, insert C). A few kilometers below may be the inner core, marked in Figure 1 by "?". This question mark is the focus of this review. In Section 2 we present a selected set of observational constraints. These include mass (Section 2.1) and radii (Section 2.2) measurements that probe the equation of state (EoS), i.e., the high-energy properties of dense matter. Measurements of temperatures and thermal luminosities of isolated neutron stars and accreting ones in binary systems are presented in Section 2.2 and Section 2.3, respectively. These observations provide information about the structure of the stars, i.e., the EoS, but also about the low-energy response functions, such as specific heat, transport properties, and neutrino emission, which probe the low-energy (keV-MeV) excitation spectrum. In Section 3 we discuss the essential physics of dense nuclear matter and describe the current state of model calculations of the nuclear EoS and low-energy response. In Section 4 we describe theoretical models for phase transitions to novel states of matter such as Bose-condensed hadronic matter, hyperonic matter, and quark matter at supranuclear density. Section 5 confronts theoretical models of neutron star structure and of evolution derived from the studies described in the two previous sections with the observational baggage acquired in Section 2. Finally, we offer some conclusions in Section 6.

OBSERVATIONAL CONSTRAINTS

Among the more than 100 billion stars in the Milky Way, almost 1% of them ($\sim 10^8$-$10^9$) are expected to be compact stars. Nevertheless, the number of known compact stars is of the order of only a couple thousand, the majority being radio pulsars (more than 1500) and the rest comprising approximately 300 accreting stars in binary systems, plus a few dozen single stars that do not show any pulsed radio emission and were discovered through their high-energy emission.

Mass Measurements

The masses of almost 40 neutron stars in binary systems have been measured, and the results are summarized in Figure 2. The major source of uncertainty usually is the inclination of the orbit with respect to the line of sight, and it is only in highly relativistic systems that general relativistic effects allow for a complete characterization of the orbit. The binary systems exhibited in Figure 2 are divided into four classes. The binary pulsar systems contain a radio pulsar whose companion is either a neutron star (PSR+NS) or a white dwarf (PSR+WD).
In the PSR J0737-3039A and B system, both neutron stars are detected as radio pulsars, whereas in the three systems with the companion marked by "?" a white dwarf could be present instead of a second neutron star. The other two classes are X-ray binaries in which the compact object is accreting matter from its companion. In a high-mass X-ray binary (HMXB) the companion is a massive star, $M_c > 10\,M_\odot$, whereas in a low-mass X-ray binary (LMXB) the companion's mass is below $1\,M_\odot$. In a HMXB, once the companion ends its life in a core collapse supernova, producing a second neutron star, the system may be disrupted or remain bound, in which case it will most probably emerge as a PSR+NS binary system. A LMXB system will end its life once accretion stops, the companion's mass having been severely reduced, and the system will emerge as a PSR+WD system. Given the short lifetime of massive stars, accretion in a HMXB lasts at most a million years, whereas in a LMXB it can last for billions of years.

[Figure 2 caption: with updated measurements of PSR J0751+1807 from Reference, PSR J1713+0747 from Reference, PSR J1748-2446I & J from Reference, and PSR J1909-3744 from Reference. Values for the high-mass X-ray binaries (HMXBs) Cen X-3, SMC X-1, and LMC X-4 are from Reference, Her X-1 from Reference, Vela X-1 from Reference, and 4U 1538-52 from Reference. Values for the low-mass X-ray binaries (LMXBs): Cyg X-2 is from Reference, Cen X-4 from Reference, X1822-371 from Reference, and XTE J2123-058 from Reference. The two objects plotted as crosses instead of dots, the HMXB 4U 1700-37 and the LMXB 2S 0921-630, are black-hole candidates, but may be neutron stars. Error bars are 1σ errors.]

Because accretion is limited by the Eddington rate $\dot M_{\rm Edd} \approx 10^{-8}\,M_\odot\,{\rm yr^{-1}}$, the compact stars in HMXBs, and consequently also in PSR+NS systems, have a mass very close to their original birth mass, whereas in LMXBs, and consequently in PSR+WD systems, the neutron star's mass may have increased significantly.

[Figure 3 caption: The initial mass function of neutron stars as predicted by stellar evolution theory. The continuous line shows results from Reference and the dotted line is adapted from Reference.]

Stellar evolution theory predicts that stars with a main sequence mass below $\sim 18$-$19\,M_\odot$ will produce, when ending their life in a core collapse supernova, proto-neutron stars with masses between 1.2 and $1.5\,M_\odot$, whereas more massive ones will produce remnants with masses $\sim 1.7$-$1.8\,M_\odot$, or a black hole. Fallback during the supernova explosion may alter this. In Figure 3 we plot two expected initial mass functions of neutron stars. These theoretical initial mass functions are in good agreement with the measurements shown in Figure 2: all PSR+NS systems, as well as the HMXBs, are compatible with the 1.2-$1.5\,M_\odot$ range, with the possible exception of Vela X-1, which may be a representative of the second predicted peak at 1.7-$1.8\,M_\odot$. More massive neutron stars are expected in LMXBs and their PSR+WD offspring, which is confirmed by the very recent high-mass measurement of PSR J0751+1807.

Elementary Modeling of Neutron Star Thermal Emission

The radiation from a point at the surface of the star is specified completely by the specific intensity $I(\mathbf{m}, E)$, defined such that $I(\mathbf{m}, E)\,d\Omega\,dE\,dA\,dt$ is the energy radiated during a time $dt$ from an area $dA$ as photons with energy in the range $[E, E + dE]$, emerging in the direction $\mathbf{m}$ within a solid angle $d\Omega$.
If the radiation is polarized, e.g., in the presence of a magnetic field, one may separate $I$ into two components corresponding to the two polarization modes. By integrating over solid angle in the outward direction, one obtains the specific, or spectral, flux $F(E) = \int d\Omega\,\cos\theta\; I(\mathbf{m}, E)$, where $\theta$ is the angle between $\mathbf{m}$ and the outward normal $\mathbf{n}$ to the stellar surface. Then the total flux is $F = \int dE\, F(E)$. The star's luminosity is then obtained by integration of $F$ over the whole stellar surface, $L = \int dA\, F = 4\pi R^2 F$, where $F$ must be an average value over the stellar surface in case it is not uniform. For blackbody emission and a uniform surface temperature $T$, we have $F = \sigma_{\rm SB} T^4$ and hence $L = 4\pi R^2 \sigma_{\rm SB} T^4$. For nonblackbody and/or nonuniform emission, it is customary to express the luminosity as $L = 4\pi R^2 \sigma_{\rm SB} T_e^4$, a relation that simply defines the star's effective temperature $T_e$. Owing to gravitational redshift, a photon emitted at energy $E$ will be detected by an observer at infinity with energy $E_\infty = e^{\Phi} E$, where (for a non-rotating star in the Schwarzschild metric)

$e^{\Phi} = \sqrt{1 - 2GM/Rc^2}. \qquad (1)$

The luminosity $L_\infty = e^{2\Phi} L$ is redshifted twice because it has both energy and time content. Similarly, the spectral fluxes at infinity are $F_\infty(E_\infty) = e^{\Phi} F(E)$. A blackbody at temperature $T$, once redshifted, would still be seen as a blackbody, but one at temperature $T_\infty = e^{\Phi} T$; hence one also defines the redshifted effective temperature $T_{e\infty} \equiv e^{\Phi} T_e$, so that

$L_\infty = 4\pi R_\infty^2\, \sigma_{\rm SB}\, T_{e\infty}^4. \qquad (2)$

The radius at infinity has to be $R_\infty = e^{-\Phi} R$ for consistency, but also has the physical interpretation that an observer able to resolve the angular size of the star would see it as a disk of radius $R_\infty > R$. Observers commonly summarize the results of an observation's analysis by giving values of $T_{e\infty}$ and $R_\infty$. Note that a realistic atmosphere spectrum of temperature $T_e$ does not redshift into the spectrum at temperature $T_{e\infty}$, so that the redshift is in principle measurable from spectral fits. Unfortunately, the luminosity $L_\infty$, or the flux $f_\infty = L_\infty/4\pi D^2 = (R_\infty/D)^2 F_\infty$, with $D$ the star's distance, is not directly observable because of interstellar absorption. The probability for a photon of energy $E$ to escape absorption (by ionizing an atom) in the interstellar medium is $\exp[-N_H\,\sigma_{\rm eff}(E)]$, where $N_H$ is the hydrogen column density and $\sigma_{\rm eff}(E)$ is the effective cross section. The quantity that is actually observed, the star's spectrum, is hence the specific flux $f_{\rm obs}(E_\infty)$, given by

$f_{\rm obs}(E_\infty) = e^{-N_H\,\sigma_{\rm eff}(E_\infty)}\,\left(\frac{R_\infty}{D}\right)^2 e^{3\Phi}\,F(E), \qquad E = e^{-\Phi} E_\infty, \qquad (3)$

in terms of the specific flux $F(E)$ as emitted by the stellar surface. An example of $\sigma_{\rm eff}(E)$, exhibiting its overall $E^{-3}$ dependence, is shown in Figure 4, as well as the effect of a range of values of $N_H$ on the observable blackbody spectrum of a neutron star. Considering that most observed cooling neutron stars have temperatures of the order of $10^6$ K or lower and that a $T = 10^6$ K blackbody has its peak at 0.28 keV, one sees that most photons are absorbed unless the star is quite close, i.e., $N_H \le 10^{20}~{\rm cm^{-2}}$. A spectral fit of $f_{\rm obs}$ with a blackbody model involves only three parameters, $T_\infty$, $N_H$, and $R_\infty/D$, because using the blackbody spectrum $F_{\rm BB}$ for $F$ in Equation 3 yields $e^{3\Phi} F_{\rm BB}(T; E) = F_{\rm BB}(T_\infty; E_\infty)$, where $e^{\Phi}$ drops out. For blackbody emission, $T_\infty$ and $N_H$ essentially determine the shape of the observed spectrum and $R_\infty/D$ is a scaling factor. Having determined $T_\infty$ and $N_H$, if the distance $D$ is known, one obtains a measurement of $R_\infty$. A reliable measurement requires, however, that the real spectrum $F(E)$ does not differ too much from that of a blackbody.
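As a numerical illustration of Equations 1-3 (a sketch: the mass, radius, temperature, and cross-section normalization below are assumed round numbers, not values from the text):

import math

G, c, Msun = 6.674e-8, 2.998e10, 1.989e33      # cgs units

M, R = 1.4 * Msun, 12e5                        # assumed: 1.4 Msun, 12 km
e_phi = math.sqrt(1 - 2 * G * M / (R * c**2)) # Eq. 1: ~0.81
R_inf = R / e_phi                              # ~14.8 km seen at infinity
T_inf = e_phi * 1.0e6                          # a 10^6 K surface appears ~8.1e5 K

# Interstellar transmission factor from Eq. 3; sigma_eff(E) ~ E^-3 with an
# assumed normalization of ~2.4e-22 cm^2 at 1 keV, purely for illustration.
sigma = lambda E_keV: 2.4e-22 * E_keV ** -3
N_H = 1e20                                     # cm^-2, a "close star" column
for E in (0.1, 0.3, 1.0):
    print(f"E = {E} keV: transmitted fraction {math.exp(-N_H * sigma(E)):.2g}")
# -> soft 0.1 keV photons are almost completely absorbed, roughly 40%
#    get through at 0.3 keV, and absorption is negligible at 1 keV.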
Models of the Neutron Star Surface

The assumption that the locally emitted spectrum $F(E)$ is a blackbody is very dubious. Romani was the first to show that deviations of $F(E)$ from $F_{\rm BB}(E)$ can be very large when the neutron star surface is an atmosphere. The extent of the deviations depends strongly on the chemical composition, which, unfortunately, is unknown. For a light-element composition, H or He, the opacity decreases strongly with photon energy, $\sim E^{-3}$, and high-energy photons emerge from deeper, and hence hotter, layers: The resulting spectrum has a strong excess compared with a blackbody's Wien tail and a strong deficit in the Rayleigh-Jeans regime, as can be seen in the examples shown in the left panel of Figure 5. For an atmosphere composed of heavier elements, the opacity energy dependence is not as strong and the emerging spectrum is closer to a blackbody than in the case of light elements, but presents numerous absorption lines, as, e.g., in the Fe atmosphere spectra shown in Figure 5. In the presence of a strong magnetic field, as is the case for all isolated cooling neutron stars, the specific intensity becomes strongly anisotropic and the resulting spectra are, usually, intermediate between a blackbody and the corresponding nonmagnetized spectra. Examples of magnetized H atmosphere spectra are shown in the right panel of Figure 5. Given the strong increase in electron binding energies in the presence of a strong magnetic field, it is also possible that the neutron star surface may be in a condensed state, i.e., a liquid or a solid. Models of the spectrum emitted by such a surface have been presented only recently, and for some specific field strengths and chemical compositions, it may simulate a blackbody in the observable energy range of 0.1-1 keV. The presence of a strong magnetic field is not only felt at the surface, but is also felt much deeper in the star's crust, where heat from the interior is transported to the surface by electron conduction. Thermal transport transverse to the field is strongly suppressed, so that heat essentially flows along the magnetic field lines, resulting in a nonuniform surface temperature distribution. When observing the neutron star thermal radiation in the X-ray band, one preferentially detects photons from the warm regions of the surface. If extended cold regions are present at the surface, they may nevertheless dominate the thermal flux at low energies, i.e., in the optical range. Examples of possible surface temperature distributions owing to the anisotropic thermal conductivity are illustrated in Figure 6.

Temperature and Thermal Luminosity Measurements of Isolated Cooling Neutron Stars

The very brief description presented above gives a flavor of what is involved in interpreting observational data about cooling neutron stars and of what is needed to obtain reliable measurements of their temperatures, thermal luminosities, and, we hope, radii. We summarize in Figures 7 and 8 the best presently available results. All objects shown as PSR are active pulsars and have a very energetic magnetosphere - which is a copious X-ray, and in some cases also γ-ray, emitter - producing a power-law spectrum superposed on the thermal emission. Separating the surface thermal emission from this magnetospheric emission is not a trivial task. For the Crab pulsar (PSR B0531+21) and PSR J1124-5916, the magnetospheric emission is so strong that the thermal emission is undetectable and only upper limits on the latter are obtainable.
In the similar case of PSR J0205+6449 in 3C58, the thermal emission is barely detected as a slight correction to a pure power-law spectral fit. RX J0007.0+7302 in the supernova remnant CTA1 manifests itself as a point source with a power-law spectrum embedded in a pulsar wind nebula, and no pulsations have been detected to date: its thermal emission is also undetected, and hence only an upper limit on it is possible. The four putative objects in Figure 7 marked "?" come from a deep search for central objects in these four supernova remnants: No compact object has been found, and the quoted upper limit is very restrictive if the compact object is a neutron star, but the latter may also be a black hole. For the stars in which the thermal spectrum is clearly detected, the dilemma is in the choice of the theoretical spectrum to be used in the fits. In the results shown in Figure 8 we distinguish between spectral fits performed with blackbody models and those performed with magnetized hydrogen atmosphere models, depending on the resulting $R_\infty$ estimate. Blackbody fits give higher $T_{e\infty}$ and smaller $R_\infty$ than atmosphere model fits: an atmosphere model fit is chosen when a blackbody fit gives too small an $R_\infty$, and a blackbody fit is chosen when an atmosphere model fit gives a much too large $R_\infty$. Moreover, in several of the cases where the atmosphere model fit is preferred, the blackbody fit is statistically unacceptable. Considering, moreover, that only the hottest part of the star may be detected in the X-ray band, the deduced temperature may not be $T_{e\infty}$ as defined in Equation 2. The values of $L_\infty$ we report in Figure 8 take this ambiguity into account and are probably more reliable observational values to compare with theoretical cooling calculations than $T_{e\infty}$, a point of view we adopt in Section 5. The two stars RX J1856.5-3754 and RX J0720.4-3125 are two of the "Magnificent Seven" X-ray dim isolated neutron stars, all of which share the peculiarity that the blackbody fit of their X-ray thermal spectra results in $R_\infty$ much smaller than 10 km. However, optical detections show in several cases a clear Rayleigh-Jeans spectrum corresponding, when fitted with a blackbody, to a much lower temperature than the X-ray-detected spectrum and to a much larger emitting area. They may be candidates for neutron stars having strong internal toroidal fields, as illustrated in the right panel of Figure 6, and they reinforce the case that, to date, radii inferred from spectra of strongly magnetized neutron stars are unreliable. For example, RX J1856.5-3754 was proposed as a candidate for a quark star, but the optical data do not support an anomalously small radius.

Radius Measurements of Neutron Stars in Quiescent Low-Mass X-ray Binaries

It is clear from the previous discussion that measurements of neutron star radii through fits of their thermal spectra will give reliable results only if the following three conditions can be met: (a) the chemical composition of the atmosphere is known, (b) the magnetic field is small enough ($< 10^{10}$ G) so as to not affect the spectrum, and (c) the star's distance can be accurately measured. Quiescent LMXBs in globular clusters fulfill these conditions. Owing to past accretion, H is present and, because of the strong gravity, sedimentation assures that it will float to the surface, whereas heavier elements will sink, resulting in a pure H atmosphere.
As in all LMXBs, magnetic fields are negligibly small, and globular clusters, containing $\sim 10^5$-$10^6$ stars, have distances that can be measured with an accuracy of 5-10%. A possible drawback is that, despite the system being quiescent, it is not yet possible to exclude that accretion is still occurring at a very low rate, constantly adding to the atmosphere heavy elements that significantly alter the spectrum. Being in themselves very interesting systems, many globular clusters have been, and are still being, observed by CHANDRA and XMM-Newton, which have found more than 20 quiescent LMXBs (qLMXBs), and more candidates are constantly being detected. The radii of the two qLMXBs in the globular clusters ω Centauri and M13, which show purely thermal spectra, have recently been estimated as $R_\infty = 13.6 \pm 0.3$ km and $R_\infty = 12.8 \pm 0.44$ km, respectively. However, the fits were realized with a library of H atmosphere spectra from Reference, which were calculated at fixed surface gravity $g_s = 2.43 \times 10^{14}~{\rm cm\,s^{-2}}$. Because $R_\infty$ and $e^{\Phi}$ are parameters of the spectral fit (Equation 3), and independent variations of them are equivalent to independent variations of $M$ and $R$, a self-consistent spectral fit requires a set of model atmosphere spectra $F$ for the whole range of fitted $g_s$. Heinke et al. recently showed that such a self-consistent analysis leads to significant changes in the deduced $R_\infty$, and we show in Figure 15 their results for the qLMXB X7 in the globular cluster 47 Tucanae.

Transiently Accreting Compact Stars

The soft X-ray transients (SXRTs), also known as X-ray novae, are a class of LMXBs in which accretion is not continuous. They present repetitive phases of high accretion separated by periods of quiescence. During outbursts they reach very high luminosities, $L_o \sim 10^{37}$-$10^{38}~{\rm erg\,s^{-1}}$, whereas in quiescence the luminosity drops by many orders of magnitude, $L_q \le 10^{33}~{\rm erg\,s^{-1}}$. The typical duration of an outburst, $t_o$, is much shorter than the recurrence time between outbursts, $t_r$. A common interpretation of this bimodal behavior is the disk instability model, which was developed to explain dwarf novae - similar systems in which the accreting star is a white dwarf - and extended to SXRTs: The companion star is losing mass at a very low rate and feeding a disk that becomes periodically unstable and empties rapidly onto the central star, producing a spectacular outburst. Many SXRTs contain black hole candidates, but in some cases Type I X-ray bursts occur and we can be certain that the accreting object is a compact star. Below we consider only the systems that are known to contain a compact star. During the quiescent phases between outbursts, many of the SXRTs show a thermal X-ray spectrum corresponding to surface temperatures of the order of $10^6$ K: We see the surface of the neutron star that has been heated during the accretion phase(s). Given the high internal temperature of the star, the heat released in the upper layers by accretion and thermonuclear burning flows back to the surface, because of the large temperature gradient in the envelope, and is radiated away. Non-equilibrium processes inside the star, induced by the compression of matter because of accretion, release heat that is stored in the stellar interior and that leaks out slowly when accretion stops.
Such processes certainly occur within the crust: Iron-peak nuclei produced by thermonuclear burning at the surface, when pushed to higher densities, undergo a series of reactions - electron capture, neutron emission, and pycnonuclear fusion - until they dissolve into the star's core. Overall, an amount of heat $Q_{\rm nuc} \approx 1.5$ MeV is released in the crust for each accreted baryon. Brown et al. showed that this energy is more than sufficient to explain the observed quiescent (thermal) luminosities $L_q$ in terms of the time-averaged accretion rate, taken over many accretion cycles, $\langle \dot M \rangle$:

$L_q \simeq f\,\frac{Q_{\rm nuc}}{m_u}\,\langle \dot M \rangle, \qquad (4)$

where $m_u$ is the baryon mass and the coefficient $f$ represents the fraction of $Q_{\rm nuc}$ that is actually stored in the stellar interior, i.e., not lost by neutrino emission. The luminosity $L_o$ during an accretion outburst can be estimated as $L_o \simeq (\Delta M/t_o)(GM/R)$, where $\Delta M$ is the mass accreted during the outburst, and similarly, $\langle \dot M \rangle$ can be estimated as $\Delta M/t_r$. From Eq. 4 one obtains

$\frac{L_o}{L_q} \simeq \frac{GM\,m_u/R}{f\,Q_{\rm nuc}}\,\frac{t_r}{t_o} \approx \frac{100\ {\rm to}\ 200}{f}\,\frac{t_r}{t_o}, \qquad (5)$

where the 100 to 200 range corresponds to the possible range of $2GM/Rc^2$ from $\sim 0.33$ to $\sim 0.66$, or

$f \approx (100\ {\rm to}\ 200)\,\frac{L_q}{L_o}\,\frac{t_r}{t_o}. \qquad (6)$

Note that $L_o/L_q$ is independent of the source's distance, which is often poorly constrained, whereas $t_o$ and $t_r$ can in principle be obtained directly by monitoring the source for a long enough time. We present in Table 1 a list of SXRTs for which enough information is available to deduce, from Equation 6, an estimate of the storage efficiency $f$. The six systems in the upper part of the table have been detected in outburst many times, so estimates of $t_o$ and $t_r$ are reliable, whereas the four cases in the middle of the table have exhibited only two or three outbursts, for which the estimates of $f$ have to be taken with caution. For the five systems in the lower part of the table, only one outburst has been detected, $t_o$ is poorly constrained, and $t_r$ is at best a guess. Thus we give for $f$ an entry of $t_r$/number-of-years, i.e., a best estimate of the right side of Equation 6. However, for four of these five systems, an $f$ of order one would require very long recurrence times $t_r$, which are well beyond what the disk instability model can accommodate. However, the validity of the storage efficiency $f$ rests on the assumption that the neutron star has reached a quasi-stationary state, which requires fairly regular recurrent accretion for at least $\sim 10^4$ yr, a time scale far beyond present coverage of the sources! Notwithstanding these limitations, it is striking that in most cases the storage efficiencies seem to be quite low and seem to indicate the occurrence of strong neutrino losses.

Other Possible Observational Constraints

The above selection of observational constraints is of course incomplete and reflects our bias in judging relevant and promising probes of the physics of the inner core. We very briefly mention here some other possible constraints. Quasi-periodic oscillations at millisecond periods observed in several LMXBs may constrain the mass and radius of the neutron star, but interpretation of the origin and nature of these phenomena is still controversial and inconclusive. Detection of atomic absorption lines in the thermal spectrum of a neutron star can directly yield the gravitational redshift due to the star when the line is identifiable. The identification can be secure only for stars with a magnetic field sufficiently weak that it does not affect the atomic structure.
Candidates for these measurements are the neutron stars in LMXBs, and there is, to date, only one case of such a detection: Fe XXVI and XXV for the $n = 2$-$3$ and O VIII for the $n = 1$-$2$ transitions in the burst spectrum of EXO 0748-676, which result in a redshift of $z = 0.35$, i.e., $e^{-\Phi} = 1.35$. Also, in Type I X-ray bursts from LMXBs, fits of model spectra to the observed burst spectra can potentially measure both $M$ and $R$, but model spectra do not seem to be accurate enough to produce reliable and reproducible results. The recently discovered superbursts are interpreted as the step following the normal short Type I X-ray bursts, which are due to explosive burning of $^4$He into $^{12}$C at the surface of an accreting neutron star in a LMXB: a superburst consists of explosive thermonuclear burning of the accumulated layer of C into iron-peak nuclei. However, reproducing the observed properties of these superbursts imposes very tight constraints on the temperature in the crust of these stars and on the neutrino emission from the core. In isolated neutron stars, two more constraints emerge. From the observations of glitches in several young radio pulsars, Link et al. have deduced, under the commonly accepted scenario that these involve differential rotation of the neutron superfluid in the inner crust, that the relative moment of inertia of the crust is at least 1.4% of the total stellar moment of inertia. The second constraint comes from the recently discovered pulsar PSR J1748-2446ad, the fastest known pulsar to date with a period of 1.39 ms. For a given assumed mass, it imposes the stringent limit that its equatorial radius must be smaller than the mass-shedding radius, at which point the centrifugal force becomes larger than gravity. Both results translate into $M$ versus $R$ constraints, which we use in Section 5.1 and Figure 15.

DENSE NUCLEON MATTER

Over the past decade there have been numerous attempts to compute the bulk properties of nuclear and neutron-rich matter. These include microscopic many-body calculations using realistic nucleon-nucleon potentials and phenomenological relativistic and non-relativistic mean-field theories. The former approach employs a potential that provides an accurate description of the measured nucleon-nucleon scattering data and uses variational or quantum Monte Carlo techniques to obtain the EoS (for a review of these methods, see Reference, and for recent progress in using variational techniques to obtain the EoS of relevance to neutron stars, see Reference). Variational techniques rely on the parameterization of the nuclear wave-function. Correlations induced by interactions between pairs of nucleons are incorporated through suitably parameterized pair-correlation operators, which act on the symmetrized product of the Fermi gas wave-function. In contrast, quantum Monte Carlo treatments such as the Green's function Monte Carlo are not limited by the form of the variational wave-function and can potentially include all possible correlations in the many-body system. This approach is, however, computationally intensive owing to the spin and isospin dependence of the nuclear interaction; the cost grows roughly as $2^A\,A!/(N!\,Z!)$, where $A$, $N$, and $Z$ are the baryon number, neutron number, and proton number, respectively. Although this method suffers, in principle, from the fermion sign problem, Carlson et al. have developed algorithms to minimize its influence and have calculated the ground-state energy of 14 neutrons in a periodic box.
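To make the $2^A\,A!/(N!\,Z!)$ growth quoted above concrete, a tiny illustration (the specific systems below are chosen arbitrarily):

# Spin-isospin state count ~ 2^A * A!/(N! Z!) for small systems.
from math import comb

def n_states(N, Z):
    A = N + Z
    return 2**A * comb(A, N)   # A!/(N! Z!) = binomial(A, N)

print(n_states(14, 0))         # 14 neutrons: 16,384
print(n_states(4, 4))          # 8 nucleons (4n, 4p): 17,920
print(n_states(8, 8))          # 16 nucleons: ~8.4e8

This makes clear why 14 neutrons is already a demanding calculation, and why adding protons (and hence isospin structure) is far more costly still.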
The other notable method that employs realistic nucleon-nucleon potentials is the Brueckner-Hartree-Fock (BHF) approach. Here the bare nucleon-nucleon interaction is used to determine the interaction energy for pairs of nucleons. The pairs are treated as independent particles, and correlations that arise because of Fermi statistics are incorporated. This method, developed by Brueckner, Bethe, and Goldstone, is nonperturbative in the coupling but utilizes a perturbative expansion in the number of independent hole lines to make it tractable. Recent studies have shown that the convergence of this expansion can be improved if the single-particle dispersion relation is obtained using the Hartree-Fock approximation. In the lowest-order Brueckner-Hartree-Fock (LOBHF) method, only the lowest-order terms in the hole-line expansion are retained. The LOBHF results are in fair agreement with those obtained using variational methods. The realistic nucleon-nucleon potentials are well constrained by a collection of scattering data compiled by the Nijmegen group. However, these potentials are not unique. The data probe the scattering for energies below the pion threshold of 350 MeV. Thus, various models for the nucleon-nucleon interaction with different short-distance behavior can be constructed to reproduce the elastic scattering phase shifts up to the measured energy. These different potentials are equivalent from the point of view of the low-energy data. Stated differently, low-energy observables cannot be sensitive to the details of short-distance physics. Nonetheless, these potentials can differ in their predictions for the many-body system because additional effective three-, four-, five-, etc. body forces can be relevant. The strength of these many-body forces is not unique and depends on the assumed form of the two-body force at short distances. Fortunately, for the realistic two-body potentials employed to date, the addition of three-body forces is sufficient to describe light nuclei. Several groups have developed an alternate description of nucleon-nucleon interactions based on effective field theory (for a review, see Reference). This approach allows for a systematic treatment of the short-distance physics by organizing the calculation in powers of momenta and identifies a small expansion parameter at small momenta. It obviates the need to make specific assumptions about the short-distance potential. Bogner et al. have shown that a momentum-space potential with a rather small cutoff of $\sim 2~{\rm fm^{-1}}$ can describe low-energy data. Furthermore, they have shown that all realistic models of the nucleon-nucleon potential evolve (in the renormalization group sense) to this form. Although it is not clear if these low-momentum interactions will make many-body calculations more tractable (or less non-perturbative), preliminary work is promising. In the traditional nuclear mean-field models, the relation to nucleon-nucleon scattering is abandoned in favor of a phenomenological interaction whose parameters are determined by fitting the model predictions (in the mean-field approximation) to empirical properties of bulk nuclear matter at nuclear saturation density. These mean-field models, such as the non-relativistic Skyrme model and the relativistic Walecka model and its variants, can be viewed as approximate implementations of the Kohn-Sham density functional theory, which has been widely studied and used in quantum chemistry and condensed matter physics.
Empirical properties of bulk nuclear matter are extracted from the analysis of nuclear masses, radii, and excitations of large nuclei. These analyses reliably disentangle the nuclear bulk properties from surface and electromagnetic contributions. Without delving into the details and caveats of these analyses, we simply state the empirical properties for which there is broad consensus: (a) nuclear saturation density (the density at which symmetric nuclear matter is bound at pressure $P = 0$) is $n_0 = 0.15$-$0.16$ nucleons fm$^{-3}$; (b) the binding energy per nucleon at saturation is 16 MeV; (c) the nuclear compression modulus, defined through the relation $K_0 = 9\,(dP/dn)\big|_{n = n_0}$, is determined to be in the range of 200-300 MeV; (d) the nucleon effective mass at saturation density is $0.7$-$0.8\,m_N$; and (e) the nuclear symmetry energy, defined through the quadratic dependence of the energy per nucleon on $(1 - 2x_p)$, where $x_p$ is the proton fraction, is in the range of roughly 25-35 MeV. This empirical knowledge provides valuable constraints and guidance for models of dense nuclear and neutron-rich matter.

A Simple Model for Nuclear Matter

To provide a description of nuclei and nuclear matter, Walecka proposed a field-theoretical model in which the nucleons interact via exchange of scalar and vector mesons. The model has been refined and used extensively to study nuclear properties in the mean-field approximation (for a review, see Reference). With the intent of providing a pedagogic overview of the various forces at play in dense nuclear matter, we describe briefly one such model introduced by Boguta & Bodmer (a more elaborate discussion of this model and its application to the structure of neutron stars can be found in Reference). The model proposes that in dense matter, nucleons interact with effective short-range forces. The Lagrangian, in the standard form for this class of models, is

$\mathcal{L} = \bar\psi_N\left[\gamma^\mu\left(i\partial_\mu - g_\omega\,\omega_\mu - \frac{g_\rho}{2}\,\vec\tau_N\cdot\vec\rho_\mu\right) - m_N^*\right]\psi_N + \frac{1}{2}\left(\partial_\mu\sigma\,\partial^\mu\sigma - m_\sigma^2\,\sigma^2\right) - U(\sigma) + \mathcal{L}_{\omega,\rho}, \qquad (7)$

where $m_N^* = m_N - g_\sigma\,\sigma$ is the nucleon effective mass, which is reduced in comparison to the free nucleon mass $m_N$ owing to the scalar field $\sigma$, taken to have $m_\sigma = 600$ MeV, and $\mathcal{L}_{\omega,\rho}$ contains the kinetic and mass terms of the vector fields. The field tensors of the isoscalar omega and isovector rho mesons are given by

$\omega_{\mu\nu} = \partial_\mu\omega_\nu - \partial_\nu\omega_\mu, \qquad \vec\rho_{\mu\nu} = \partial_\mu\vec\rho_\nu - \partial_\nu\vec\rho_\mu. \qquad (8)$

The exchange of these mesons mimics the short-range forces between nucleons. In addition to the coupling between nucleons and mesons, a self-interaction between scalar mesons,

$U(\sigma) = \frac{b}{3}\,m_N\,(g_\sigma\sigma)^3 + \frac{c}{4}\,(g_\sigma\sigma)^4, \qquad (9)$

where $b$ and $c$ are dimensionless couplings, is introduced to obtain good agreement with the empirical value of the nuclear compressibility. $\psi_N$ is the nucleon field operator and $\vec\tau_N$ is the nucleon isospin operator. The five coupling constants, $g_\sigma$, $g_\omega$, $g_\rho$, $b$, and $c$, are chosen, as in Reference, to reproduce the five empirical properties of nuclear matter at saturation density listed above. The model is solved in the mean-field approximation. Here, only the time components of the meson fields have nonzero expectation values. The symbols $\bar\sigma$, $\bar\omega$, and $\bar\rho$ denote the sigma-, omega-, and rho-meson expectation values that minimize the free energy $\varepsilon_{\rm nuclear}(\mu_n, \mu_e)$ - the sum of the meson mass and self-interaction terms and of integrals over the filled neutron and proton Fermi seas - in which $E_n(k)$ and $E_p(k)$ are the neutron and proton single-particle energies. The single-particle energy at the Fermi surface defines the respective chemical potential. In stellar matter these chemical potentials are related by the condition of weak interaction equilibrium ($n + \nu_e \leftrightarrow e^- + p$), given by

$\mu_n = \mu_p + \mu_e \qquad (11)$

(note that we have set $\mu_\nu = 0$ since neutrinos are not typically trapped). Consequently, only $\mu_n$ and $\mu_e$ are independent and correspond to the two conserved charges, namely baryon number and electric charge. Furthermore, we require that bulk matter be electrically neutral.
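For orientation, the stationarity conditions obtained by minimizing the free energy with respect to the mean fields take the standard form for this class of models (a sketch; coupling normalizations, e.g., the factor $1/2$ in the $\rho$ source term, vary between references):

$m_\sigma^2\,\bar\sigma + \frac{dU}{d\bar\sigma} = g_\sigma\,\left(n_s^{(n)} + n_s^{(p)}\right), \qquad m_\omega^2\,\bar\omega = g_\omega\,(n_n + n_p), \qquad m_\rho^2\,\bar\rho = \frac{g_\rho}{2}\,(n_p - n_n),$

where $n_i$ are the number densities and $n_s^{(i)}$ the scalar densities, i.e., integrals of $m_N^*/\sqrt{k^2 + m_N^{*2}}$ over the filled Fermi seas. The first equation must be solved self-consistently, since $m_N^* = m_N - g_\sigma\bar\sigma$ itself appears inside the scalar densities.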
To enforce local charge neutrality, we require that the charge density n_Q = ∂ε_nuclear/∂μ_e = 0, which uniquely determines μ_e. Therefore, in effect only μ_n is independent, and the free energy or pressure as a function of μ_n completely specifies the EoS of dense matter. Figure 9 shows the thermodynamic properties of charge-neutral stellar matter. The left panel shows the baryon chemical potential μ_n, the electron chemical potential μ_e, and the energy per particle as a function of the baryon density. The right panel shows the energy density as a function of the pressure and is usually referred to as the equation of state (EoS). This is the relation required to solve for the structure of the neutron star, and it determines the neutron star mass and radius. An EoS that has on average a larger (lower) pressure for a given range of energy density is termed stiff (soft). Different nuclear EoSs constructed to satisfy the empirical constraints at nuclear density, and hence similar in this regime, can differ significantly at lower and higher densities. In addition, the difference between the symmetric nuclear matter EoS and the neutron-rich stellar matter EoS can be significant. This difference arises mainly because of the difference in the density dependence of the nuclear symmetry energy. The magnitude and the density dependence of the proton fraction in particular are sensitive to it. In the mean-field model considered here, the nuclear symmetry energy arises owing to the isovector force from the exchange of ρ mesons, and its density dependence is linear. In contrast, in more sophisticated treatments that employ realistic nucleon-nucleon interactions and correlations beyond mean-field theory, the symmetry energy has a nontrivial density dependence and its magnitude is typically smaller. These differences lead to two generic trends that distinguish variational treatments, such as those reported in the work of Akmal, Pandharipande, and Ravenhall (APR), from the mean-field EoSs. First, the APR EoS for beta-stable matter is considerably softer than the mean-field EoS at low density and stiffer at high density. Second, the typical proton fraction at high density in the APR EoS is smaller than that predicted by the mean-field EoS. Both of these trends lead to important consequences for neutron stars. The differences in the low- and high-density behavior of the EoS, as we discuss in more detail in Section 5, lead to more compact (smaller radii) and more massive (larger maximum mass) neutron stars in the APR EoS. Because these differences can in part be attributed to differences in the density dependence of the symmetry energy, ascertaining this directly from experiment would be desirable. Recent work has emphasized that this can be extracted from precise measurements of the neutron skin of heavy nuclei and from heavy-ion collisions. The lower proton fraction seen in APR suppresses neutrino cooling of the core. This occurs because the single-nucleon neutrino-producing reactions such as n → p + e⁻ + ν̄_e and e⁻ + p → n + ν_e, which are termed direct URCA reactions, are kinematically forbidden when the proton fraction is small. These reactions, which occur on the Fermi surface, can conserve momentum only if |k_Fn| ≤ |k_Fp| + |k_Fe| (termed the triangle inequality). Because electric neutrality requires that |k_Fe| ≃ |k_Fp|, it follows that the triangle inequality is difficult to satisfy when the proton fraction is small.
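To make the interplay of beta equilibrium and charge neutrality concrete, the following minimal sketch (a toy model: free Fermi gases of neutrons, protons, and electrons with no nuclear interactions; the particle masses and the root-finding bracket are the only inputs) solves μ_n = μ_p + μ_e together with n_e = n_p for the proton fraction at a few baryon densities:

```python
import numpy as np
from scipy.optimize import brentq

hbar_c = 197.327                        # MeV fm
m_n, m_p, m_e = 939.565, 938.272, 0.511  # MeV

def kf(n):
    """Fermi momentum (fm^-1) for number density n (fm^-3), degeneracy 2."""
    return (3.0 * np.pi**2 * n) ** (1.0 / 3.0)

def mu(n, m):
    """Chemical potential (MeV) of a free Fermi gas of mass m."""
    return np.sqrt((hbar_c * kf(n))**2 + m**2)

def beta_eq_residual(xp, nb):
    """Residual of mu_n = mu_p + mu_e with charge neutrality n_e = n_p."""
    n_p = xp * nb
    n_n = (1.0 - xp) * nb
    return mu(n_n, m_n) - mu(n_p, m_p) - mu(n_p, m_e)

for nb in [0.16, 0.32, 0.48]:            # baryon density, fm^-3
    xp = brentq(beta_eq_residual, 1e-6, 0.5, args=(nb,))
    print(f"n_b = {nb:.2f} fm^-3 : x_p = {xp:.4f}  (direct URCA needs x_p >= 1/9)")
```

Because interactions and hence the symmetry energy are omitted, the proton fractions obtained this way are much smaller than in realistic EoSs; the sketch only illustrates that neutrality and beta equilibrium fix the composition at each density.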
A detailed calculation shows that the direct URCA reaction occurs when the proton fraction is x_p = n_p/(n_n + n_p) ≥ 1/9. For smaller values of x_p, the dominant neutrino-producing reactions involve two nucleons in the initial and final states to ensure momentum conservation. They include the charged-current processes n n → n p e⁻ ν̄_e and n p → p p e⁻ ν̄_e (and their inverses), which are termed modified URCA reactions, and neutral-current processes such as n n → n n ν ν̄, which are termed neutrino bremsstrahlung. The rates of these two-nucleon reactions are suppressed by a factor of (T/T_F)² in degenerate matter relative to the direct URCA reaction. They are roughly five orders of magnitude slower for the typical temperatures of interest in neutron star cooling (see Section 5).

Nucleon Superfluidity and Superconductivity

The low-energy excitation spectrum and the response properties of dense nuclear matter play key roles in the dynamics and thermal evolution of compact stars. As we discuss in Section 5, young neutron stars cool primarily owing to neutrino emission from the core. Thus, phenomena such as superconductivity and superfluidity, which drastically alter the low-lying spectrum, become relevant even if their influence on the EoS is not significant. Much like electron pairs via the Bardeen-Cooper-Schrieffer (BCS) mechanism in low-temperature solids, nucleons can pair because of their intrinsic attractive interactions in nuclei and in dense matter. The typical nuclear pairing energy, or gap, is of the order of an MeV. Because the ambient temperature inside neutron stars is ≪ 1 MeV at all times subsequent to a few tens of seconds after their birth, nucleon pairing can dominate the low-energy response properties of the neutron star interior. In the neutron star crust, as nuclei become increasingly neutron rich, neutrons drip out of nuclei at a density of ρ_drip ≃ 4.3 × 10¹¹ g cm⁻³. At these low densities the neutron-neutron interaction is attractive in the ¹S₀ channel, leading to a large scattering length of |a| ≃ 20 fm. This attractive interaction destabilizes the Fermi surface and results in pairing and superfluidity. The pairing gap calculated using BCS theory is given by

Δ ≃ (k_F²/M) exp[−1/(N |V(k_F)|)],

where M is the neutron mass, N = M k_F/(2π²) the density of states at the Fermi surface, and V(k_F) the interaction potential evaluated at the scale of the Fermi momentum. Clearly, the BCS formula provides only an estimate and cannot reliably determine the magnitude of the gap in the strongly interacting system. Nonetheless, Equation 13 illustrates several key trends seen in more sophisticated treatments (for a recent review of superfluidity in dense nucleon matter, see Reference). First, at low density, where the range of the interaction is unimportant and only the scattering length is relevant, we can write V(k_F) ≃ 4πa/M. Inserting this into Equation 13, we see that at low density the gap increases with increasing density. At higher density the range of the interaction becomes relevant, and the nucleon-nucleon interaction changes character and eventually becomes repulsive. Consequently, the gap will eventually decrease in magnitude and vanish. This trend results in a bell-shaped curve for the gap as a function of density. Detailed calculations discussed in Reference indicate that the neutron ¹S₀ gap reaches its maximum value Δ ≃ 1 MeV when the neutron Fermi momentum k_Fn ≃ 0.8 fm⁻¹, and vanishes when k_Fn ≳ 1.5 fm⁻¹.
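As a rough numerical illustration, the sketch below evaluates the weak-coupling form of Equation 13, Δ ≃ (8/e²) E_F exp[−π/(2 k_F |a|)], which follows from N = M k_F/(2π²) and the contact interaction V = 4πa/M. It captures only the low-density rise of the gap; the finite range and eventual repulsion that close the gap are omitted, and for k_F|a| ≫ 1 the formula badly overestimates the gap (detailed calculations give a maximum of only about 1 MeV):

```python
import numpy as np

hbar_c = 197.327   # MeV fm
M = 939.565        # neutron mass, MeV
a = 18.5           # magnitude of the 1S0 nn scattering length, fm

def bcs_gap(kf):
    """Weak-coupling BCS gap: Delta = (8/e^2) * E_F * exp(-pi/(2 kf |a|)).

    Equivalent to Delta ~ (k_F^2/M) exp(-1/(N |V|)) with N = M*kf/(2 pi^2)
    and the low-density contact interaction V = 4 pi a / M.
    """
    E_F = (hbar_c * kf) ** 2 / (2.0 * M)
    return (8.0 / np.e**2) * E_F * np.exp(-np.pi / (2.0 * kf * a))

for kf in [0.05, 0.1, 0.2, 0.4, 0.8]:     # fm^-1
    print(f"k_F = {kf:4.2f} fm^-1 : Delta ~ {bcs_gap(kf):6.3f} MeV")
```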
Deeper inside the core, ¹S₀ proton superconductivity and neutron pairing in the ³P₂-³F₂ channel become possible. The typical behavior of the proton superfluid gap and its dependence on the proton Fermi momentum are similar to those of ¹S₀ neutron pairing because the interaction and the momenta probed are nearly identical. However, because the protons coexist and interact strongly with the dense neutron liquid, their dispersion relation, and consequently the density of states at the Fermi surface, is changed. In the simple mean-field models, this is encoded in the proton effective mass, which is significantly reduced. Typically, this leads to a factor of 2-3 reduction in the maximum value of the ¹S₀ proton gap relative to the ¹S₀ neutron gap. The situation with ³P₂-³F₂ neutron pairing in the core is much less understood. Even simple BCS estimates that ignore any form of medium-polarization effects, such as induced interactions and particle-hole renormalization of the bare potential, lead to vastly different predictions for this gap at high density because modern models of the nucleon-nucleon potential are not constrained by data at these larger relative momenta. This is further exacerbated when medium-polarization effects are taken into account. Earlier work indicated that medium polarization could strongly enhance the gap, whereas a recent calculation indicates that a medium-induced spin-orbit interaction could lead to a large suppression. BCS pairing between nucleons leads to an energy gap Δ in the excitation spectrum. Consequently, neutrino processes are exponentially suppressed when T ≪ Δ owing to the paucity of thermally excited quasi-particles. This would lead us to naively conclude that nucleon superfluidity leads to an exponential suppression of the neutrino cooling rates in the core, where neutron and proton pairing is likely. However, in the vicinity of the critical temperature, the system is characterized by strong fluctuations as Cooper pairs form and break. These fluctuations give rise to a very efficient neutrino-emission process termed the pair breaking and formation, or PBF, process. In the BCS approximation for the response function, the PBF emissivity turns on just below T_c, grows as T decreases, and eventually becomes exponentially suppressed when T ≪ T_c. For typical values of the ³P₂ gap, one finds that at T ∼ 10⁹ K the PBF rate can be up to one order of magnitude higher than the modified URCA rate. Many-body correlations beyond those induced by the pairing play a role in determining the response properties, especially the weak interaction rates. The effects of these correlations on the neutrino emissivity are poorly known. For example, until recently the modified URCA and neutrino bremsstrahlung reactions were calculated using a simplified one-pion-exchange interaction in the Born approximation. This calculation has been revisited using soft radiation theorems and a more realistic and nonperturbative treatment of the nucleon-nucleon interactions. These results indicate a suppression by a factor of 2-3 in the neutrino rates relative to the calculation in Reference. The effects due to medium polarization have also been investigated, but the findings have been inconclusive. Researchers suspect that medium-polarization effects through particle-hole correlations lead to a softening of the pion dispersion relation and are relevant for the neutrino emissivity.
In a model calculation, this softening enhanced the modified URCA rate at nuclear density by an order of magnitude. However, there is still no consensus about the role of these correlations. For example, in Ref. the authors find that particle-hole screening of the nuclear interaction leads to a suppression of the neutrino bremsstrahlung rate. From the preceding discussion it should be clear that there are many open issues relating to nucleon superfluidity in the core. The importance of the PBF process, which operates at relatively small values of the gap and dominates neutrino cooling, emphasizes the need to know the precise behavior of the ³P₂ neutron and ¹S₀ proton pairing deep inside the core. It is also important to re-evaluate the role of correlations beyond BCS mean-field theory, both on the low-temperature specific heat and on the neutrino emissivities.

NOVEL PHASES

With increasing density, the chemical potentials for baryon number and negative electric charge increase rapidly owing to the repulsive nature of strong interactions between nucleons at short distances. This furnishes energy for the production of strange baryons and the condensation of mesons. At higher densities, our knowledge of quantum chromodynamics (QCD) and its asymptotic behavior leads to the expectation that quarks inside nucleons will delocalize and form a uniform Fermi sea of quarks through a deconfinement transition. These expectations arise from model descriptions. Unfortunately, these model predictions are difficult to quantify, and the precise density at which these transitions may occur is poorly understood. Given the current state of the art, it appears that theoretical advances alone are unlikely to provide conclusive evidence for a phase transition at supranuclear density. Ultimately, only through their possible presence in compact stars can we hope to learn of their existence. This would be possible if we could (a) elucidate in a model-independent manner generic properties of these novel phases and (b) identify and calculate some properties that would affect observable aspects of the compact star structure and evolution so that they are distinguishable from those predicted for dense neutron-rich matter. Below we consider a few of these scenarios and discuss key bulk and response properties that distinguish these phases. Figure 10 shows the variation of the chemical potentials associated with neutral and charged baryons. The thicker curves are predictions of the mean-field model described in the previous section, and the thin lines correspond to the case wherein strong interactions are ignored. The electrons in dense nuclear matter provide a source for the charge chemical potential. Thus the chemical potential for a charged baryon B^∓ is μ(B^∓) = μ_B ± μ_e, where μ_B ≡ μ_n is the baryonic chemical potential and μ_n is the neutron chemical potential. Consequently, the source for baryons with different charges differs, and negatively (positively) charged baryons are favored the most (least). The horizontal dashed lines indicate the vacuum masses of the Λ and Σ^± hyperons. The points at which the corresponding chemical potentials cross the vacuum masses are also indicated, both for the interacting and noninteracting nuclear cases. These points correspond to second-order phase transitions at which specific hyperon species begin to be populated. If the strong interactions between hyperons and nucleons were attractive (repulsive), the second-order transition would occur at lower (higher) density.
It is also clear from the figure that strong interactions between nucleons play an important role, as it shows that for noninteracting nucleons the transitions occur at significantly larger density. Hyperon-hyperon interactions can also become relevant, especially if they are strongly attractive, as this could lead to a first-order transition at lower density. To infer whether hyperons exist inside neutron stars, it is crucial to either measure or compute directly from QCD the hyperon-nucleon potential. Experimental data on hyperon-nucleon interactions are very scarce. However, recent developments in studying baryon-baryon interactions using lattice QCD are promising and could potentially provide useful information for constructing a low-energy effective theory for hyperon-nucleon and hyperon-hyperon interactions. Ref. analyzed various experimental constraints on the hyperon-hyperon interaction, and Ref. studied constraints arising from the binding energy of the Λ in hypernuclei. This information was first employed in the mean-field models by Glendenning. Subsequently, a more detailed analysis using models for the hyperon-nucleon potentials that are consistent with the binding energy and the hyperon scattering lengths has been employed in relativistic BHF calculations. These studies indicate that the lightest hyperons could appear at relatively low density (2-3 times nuclear density).

Hyperons

The presence of hyperons typically softens the equation of state and enhances the response of dense matter. This occurs because, relative to the nucleons, the hyperons contribute more to the energy density than to the pressure, as they have larger masses and smaller Fermi momenta. They furnish new degrees of freedom (that are less degenerate than the nucleons) and can readily participate in excitation processes, thereby enhancing the response. In particular, their presence enhances the neutrino cooling in the core because (a) they can participate in rapid direct URCA reactions such as Λ → p + e⁻ + ν̄_e (which are not kinematically suppressed), and (b) they reduce the neutron-to-proton ratio and facilitate the momentum conservation needed to initiate the direct URCA reaction involving nucleons.

Kaon Condensation

A large number of electrons are required to ensure charge neutrality in dense nuclear matter. The typical electron chemical potential is μ_e ∼ 100 MeV at nuclear density. As density increases, μ_e increases to keep pace with the increasing proton number density. The magnitude of this increase is sensitive to the density dependence of the isovector-interaction contribution to the nuclear symmetry energy. In Figure 11 the density dependence of the electron chemical potential in the mean-field model is shown as the thicker curve. For reference, the electron chemical potential for the case of noninteracting nucleons is also shown (thin curve). If the energy of a zero-momentum negatively charged boson in the medium is less than the electron chemical potential, it will condense. The amplitude of the condensation will in general be regulated by the repulsive interactions between bosons. In the hadronic phase the likely candidates for condensation are π⁻ and K⁻. In vacuum, pions are significantly lighter than kaons, but this situation may be reversed in the dense medium owing to strong interactions between mesons and nucleons.
The physical basis for this expectation is that the effective theory of meson-nucleon interactions must incorporate the repulsive s-wave interactions that arise owing to the Pauli principle between nucleons and mesons composed of only up and down quarks, whereas the interactions between nucleons and mesons containing strange quarks (such as the K⁻) must be attractive. Experiments with kaonic atoms lend strong support to the aforementioned theoretical expectation. In Figure 11 the vacuum pion and kaon masses are shown as the dark and light dashed lines, respectively. If the masses do not change in the medium, the figure indicates that π⁻ condensation occurs in the vicinity of nuclear density, whereas K⁻ condensation does not occur for the densities considered. When interactions with the medium are included, a uniform charged pion condensate is disfavored owing to a weak and repulsive s-wave interaction. Instead, a spatially varying condensate can be favored because of attractive p-wave interactions (for a review see Ref.). The kaon-nucleon interaction, however, is strongly attractive. For this simple reason we discuss kaon condensation below. Kaplan and Nelson proposed the idea that kaons could condense in dense nuclear matter. Using a simplified SU(3)_L × SU(3)_R chiral Lagrangian, they showed that K⁻ could condense at a density approximately three times nuclear density. Subsequently, several authors have studied in detail the nature and the role of kaon condensation in neutron star matter (for a recent review see Ref.). Here, we employ a simple, schematic potential model for kaon-nucleon interactions, considered in Ref., to illustrate the salient features. At low density, the kaon-nucleon interaction can be characterized by its scattering length. In this approximation, the attractive potential energy experienced by a kaon at rest is related to the scattering length and is given by

V_K ≃ −(2π/μ_red) a_{K⁻n} n_n,

where μ_red is the reduced mass of the neutron-kaon system and n_n is the neutron density. The effective mass of the kaon computed using Equation 14 is shown in Figure 11. In this case the kaon effective mass equals the electron chemical potential when the density is ∼3-4 times nuclear density. This corresponds to the critical density for kaon condensation. At higher densities, the Hartree or mean-field approximation should be valid. In this case the attractive potential energy of the kaon cannot be related directly to on-shell low-energy kaon-nucleon scattering data. The Hartree potential is given by the volume integral of the kaon-nucleon potential, V_Hartree ≃ n_n ∫ d³r V(r), where V₀ and R, the depth and range of the nK⁻ potential, respectively, set its magnitude. From Figure 11 we see that in the Hartree approximation kaons would condense when the density is ∼5-6 ρ₀. The presence of kaons inside neutron stars influences their structure and evolution in a manner similar to that of hyperons. The softening of the EoS owing to a kaon condensate is typically larger than the softening by hyperons because the zero-momentum kaon condensate contributes to the energy density but not to the pressure. By furnishing negative charge, it favors a more isospin-symmetric nuclear phase containing nearly equal numbers of neutrons and protons, which results in additional softening. As we discuss in Section 5, a strong softening in the high-density EoS will result in a lower maximum mass and a significantly smaller canonical radius. The presence of either pion or kaon condensates results in enhanced neutrino cooling. In the case of kaon condensation, enhanced cooling occurs owing both to reactions involving kaon decays in the presence of a bystander nucleon and to the nucleon direct URCA reaction made possible by the larger proton fractions present in the kaon-condensed phase.
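The crossing of the dropping kaon effective mass and the rising electron chemical potential can be illustrated with a short sketch. The scattering length a_Kn and the parametrization of μ_e below are illustrative assumptions, not fitted values; the crossing point simply marks the critical density for condensation in this crude scattering-length approximation:

```python
import numpy as np

hbar_c = 197.327            # MeV fm
m_K, m_n = 493.7, 939.6     # K- and neutron masses, MeV
a_Kn = 0.4                  # assumed attractive K-n scattering length, fm
n0 = 0.16                   # nuclear saturation density, fm^-3

mu_red = m_K * m_n / (m_K + m_n)   # reduced mass, MeV

def m_K_eff(n_n):
    """Kaon energy at rest, shifted by the scattering-length optical
    potential V = -2*pi*(hbar_c)^2 * a_Kn / mu_red * n_n (attractive)."""
    V = -2.0 * np.pi * hbar_c**2 * a_Kn / mu_red * n_n
    return m_K + V

def mu_e(n_b):
    """Toy electron chemical potential: 100 MeV at n0, growing as n^(2/3)."""
    return 100.0 * (n_b / n0) ** (2.0 / 3.0)

for x in np.arange(1.0, 7.0):
    n = x * n0                       # condensation sets in where m*_K < mu_e
    print(f"rho = {x:.0f} rho0 : m*_K = {m_K_eff(n):6.1f} MeV, mu_e = {mu_e(n):5.1f} MeV")
```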
Normal and Superconducting Quark Matter

The occurrence of novel hadronic phases depends on the nature of hadronic interactions and their many-body descriptions. In contrast, the asymptotic behavior of QCD, which requires that interactions between quarks become weak with increasing momenta, strongly supports the notion that at sufficiently high densities nucleonic degrees of freedom must dissolve to form a gas of weakly interacting quarks. The precise location of this phase transition will depend on model descriptions of both the nuclear and quark phases. This is because all model studies indicate that the phase transition occurs at rather low densities, where perturbative methods do not apply. The bag model provides a simple description of quark matter and of confinement. The model was designed to provide a description of the hadron mass spectrum. The basic tenets of the model are a nontrivial vacuum and nearly free quark propagation in regions (bags) wherein the perturbative vacuum has been restored. This restoration costs energy because it requires the expulsion of the vacuum condensates. The restoration energy per unit volume is termed the bag constant and is denoted as B. The model also provides a very simple and intuitive description of bulk quark matter. The pressure in the bulk quark phase containing up (u), down (d), and strange (s) quarks is due to the kinetic energy density of the quarks and a negative bag pressure. At zero temperature this is given by

P = Σ_{i=u,d,s} (γ/6π²) ∫₀^{k_fi} dk k⁴/√(k² + m_i²) − B,

where γ = 2_spin × 3_color = 6 is the degeneracy factor for each flavor, and μ_i and k_fi are the chemical potential and Fermi momentum of each flavor, respectively. In the limit of massless quarks and a common chemical potential μ for all the quarks, the pressure has the simple form

P = (3/4π²) μ⁴ − B.

The effects of perturbative interactions between quarks can also be incorporated in the bag model at high density. This has the effect of renormalizing the kinetic term. At densities of relevance to neutron stars, a perturbative expansion in α_s = g²/4π, where g is the QCD coupling constant, is not valid. Note that the order α_s² calculation of the free energy predicts a behavior similar to that of the bag model. Recently, Fraga et al. recomputed the equation of state of massless quark matter to O(α_s²). They found that the perturbative result is well reproduced by the following bag-model-inspired form for the pressure over a wide range of densities relevant to neutron stars:

P = a_eff (3/4π²) μ⁴ − B_eff,

where a_eff = 0.628 and B_eff^{1/4} = 199 MeV with a specific choice for the renormalization subtraction point, μ̄ = 2μ. The pressure obtained using Equation 18 is shown in Figure 12. For reference, the nuclear APR EoS is shown, and the crossing points denote the locations of the first-order transitions between nuclear matter and quark matter. In the extreme case of the bag model with B^{1/4} = 150 MeV, we see that quark matter is preferred over nuclear matter everywhere. In this scenario, quark matter is the true ground state of matter even at zero pressure; this is the basis for the strange quark matter (SQM) and strange quark star hypotheses, which we discuss below.
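A minimal numerical sketch of the massless bag-model pressure P = 3μ⁴/(4π²) − B is given below (the value B^{1/4} = 165 MeV is an illustrative choice; for massless quarks the energy density follows from ε = 3P + 4B):

```python
import numpy as np

hbar_c = 197.327  # MeV fm

def quark_pressure(mu, B_quarter, a_eff=1.0):
    """Bag-model pressure for 3 massless flavors at common chemical
    potential mu (MeV): P = a_eff * 3 mu^4/(4 pi^2) - B, in MeV/fm^3."""
    B = B_quarter**4 / hbar_c**3
    kinetic = a_eff * 3.0 * mu**4 / (4.0 * np.pi**2) / hbar_c**3
    return kinetic - B

def energy_density(mu, B_quarter, a_eff=1.0):
    """For massless quarks, epsilon = 3 P + 4 B (MeV/fm^3)."""
    B = B_quarter**4 / hbar_c**3
    return 3.0 * quark_pressure(mu, B_quarter, a_eff) + 4.0 * B

for mu in [300.0, 350.0, 400.0, 450.0]:
    P = quark_pressure(mu, B_quarter=165.0)
    eps = energy_density(mu, B_quarter=165.0)
    print(f"mu = {mu:.0f} MeV : P = {P:7.1f} MeV/fm^3, eps = {eps:7.1f} MeV/fm^3")
```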
Color Superconductivity

Since the early work of Bardeen et al., it has been well known that degenerate Fermi systems are unstable in the presence of arbitrarily weak attractive interactions at the Fermi surface. The instability is resolved by the formation of a Bose condensate of Cooper pairs. For the case of charged fermions, such as electrons, this leads to superconductivity. By analogy, the presence of attractive interactions between quarks will lead to pairing and color superconductivity. Barrois and Frautschi realized this several decades ago. The recent realization that the typical superconducting gaps in quark matter are larger than those predicted in these earlier works has generated renewed interest. Model estimates of the gap at densities of relevance to neutron stars suggest that Δ ∼ 100 MeV when μ = 400 MeV. Here, we provide a brief introduction to the subject and emphasize aspects that will impact neutron star phenomenology (see Reference for a more comprehensive recent review). One-gluon exchange between quarks is attractive in the color antisymmetric (antitriplet) channel and repulsive in the color symmetric (sextet) channel. The attraction in the antitriplet channel can result in s-wave pairing between quarks in the spin-zero and spin-one channels. Explicit calculations show that the pairing energy is significantly larger in the spin-zero case. This type of pairing occurs only between dissimilar flavors of quarks, to ensure that the pair wave function is antisymmetric under exchange, as required for two fermions. For three massless flavors the condensation pattern that minimizes the free energy is known as the color-flavor-locked (CFL) scheme. The nonzero condensates in this phase are given by

⟨ψ^α_{ia} C γ₅ ψ^β_{jb}⟩ ∼ Δ ε^{αβA} ε_{abA},

where α and β are color indices, a and b are flavor indices, and i and j are spinor indices. Because ε^{αβA} ε_{abA} = δ^α_a δ^β_b − δ^α_b δ^β_a, it follows that the condensation locks color and flavor indices. In the color-flavor-locked phase all nine quarks participate in pairing. Consequently, there is an energy gap ∼2Δ in the fermion excitation spectrum. The gluons acquire a mass via the Higgs mechanism by coupling to the colored condensates. The lowest excitations in this phase correspond to the Goldstone bosons. The number and spectrum of Goldstone bosons can be understood by noting that the condensate breaks baryon number and chiral symmetries. Chiral symmetry is broken by a novel mechanism because independent left-handed (L) and right-handed (R) condensates are coupled by the vector interaction. The octet of flavor Goldstone bosons that results from the breaking of chiral symmetry acquires a mass because quark masses explicitly break chiral symmetry. The quantum numbers of these pseudo-Goldstone modes map onto the meson octet in vacuum. For this reason they are commonly referred to as the pions and kaons (or mesons) of the high-density phase. Their masses have been computed in earlier work by matching the effective theory to perturbative QCD at high density. Unlike in vacuum, the square of the meson masses is proportional to the product of quark masses. This results in an inverted hierarchy, in which the pions are heavier than the kaons. Explicitly, at high density the masses are given by

m²_{π^±} = a m_s (m_u + m_d),  m²_{K^±} = a m_d (m_u + m_s),  m²_{K⁰,K̄⁰} = a m_u (m_d + m_s),

where a = 3Δ²/(π² f_π²). Only the baryon number Goldstone boson remains massless. This mode is responsible for the superfluid nature of this phase. Typically, the pairing contribution to the pressure is small, being ∼Δ²μ², because only particles in a shell of width Δ at the Fermi surface pair. We can supplement the bag model pressure with the pairing contribution due to superconductivity to obtain the pressure of the CFL phase:

P_CFL = a_eff (3/4π²) μ⁴ + (3/π²) Δ² μ² − B.

From this equation we can infer that, although the pairing contribution is parametrically small by the factor Δ²/μ² compared to the kinetic pressure, it could become the dominant contribution in the region where the kinetic pressure and the bag pressure nearly cancel each other.
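The following sketch evaluates the CFL pressure above for a few gap values near the region where the kinetic and bag terms nearly cancel (μ, Δ, and B^{1/4} are illustrative numbers only), showing how the parametrically small pairing term can dominate the net pressure there:

```python
import numpy as np

hbar_c = 197.327  # MeV fm

def cfl_pressure(mu, Delta, B_quarter, a_eff=1.0):
    """P_CFL = a_eff*3 mu^4/(4 pi^2) + 3 Delta^2 mu^2/pi^2 - B.
    Inputs in MeV; output in MeV/fm^3."""
    kinetic = a_eff * 3.0 * mu**4 / (4.0 * np.pi**2)
    pairing = 3.0 * Delta**2 * mu**2 / np.pi**2
    B = B_quarter**4
    return (kinetic + pairing - B) / hbar_c**3

# Near the quark-hadron transition the kinetic and bag terms nearly cancel,
# so the ~Delta^2/mu^2-suppressed pairing term can dominate the net pressure.
for Delta in [0.0, 50.0, 100.0]:
    P = cfl_pressure(mu=365.0, Delta=Delta, B_quarter=180.0)
    print(f"Delta = {Delta:5.1f} MeV : P = {P:7.1f} MeV/fm^3")
```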
Furthermore, because the quark-hadron phase transition is expected to occur in this regime, pairing will likely lower the quark-hadron phase transition density. Similar trends are seen in other model calculations, such as those based on the Nambu-Jona-Lasinio model of the quark EoS. Using Equation 21 with a_eff ≃ 0.6 to describe the quark EoS and the APR model to describe the nuclear EoS, Alford et al. find that the quark EoS and the nuclear EoS yield nearly the same pressure for a range of energy densities in the vicinity of nuclear density. This surprising finding, albeit within a specific model, indicates that a phase transition at these densities is unlikely to result in significant softening. Nonetheless, it would have dramatic consequences for the transport and cooling properties because the excitation spectrum of the CFL phase is very distinct from that of nuclear matter.

Role of the Strange Quark Mass

It is a reasonable approximation to neglect the up and down quark masses at densities of relevance to neutron stars, where μ ∼ 400 MeV. The strange quark mass, m_s ∼ 200 MeV, however, cannot be neglected. When the strange quark mass is large, comparable to the baryon chemical potential, the Fermi surfaces of the strange quarks and the light quarks will be different. If this difference is similar to Δ, pairing involving strange quarks is suppressed. In the limit of infinite strange quark mass, i.e., in the absence of strange quarks, only the light quarks pair. This two-flavor superconducting phase is termed 2SC and is also characterized by pairs that are antisymmetric in flavor. Antisymmetry in color space excludes one of the three colors from participating in the condensation. Thus SU(3)_color is broken down to SU(2)_color, and quarks of a particular color and three gluons remain massless. Furthermore, unlike in the CFL phase, no global symmetries are broken. Early attempts to bridge the gap between the 2SC and the CFL phases can be found in References. The authors found that the CFL pairing scheme was preserved when m_s ≲ √(2μΔ), whereas for larger m_s a first-order transition to the 2SC phase occurs. Bedaque & Schafer showed that the strange quark mass appears in the effective theory for Goldstone bosons in the form of a chemical potential μ_s ∼ m_s²/2μ for antistrangeness. When this chemical potential exceeds the mass of the K⁰, which is the lightest meson with antistrangeness, these bosons condense in the ground state. In the CFL-K⁰ phase, hypercharge or strangeness symmetry is spontaneously broken, resulting in the appearance of a massless Goldstone boson. For a detailed discussion of these novel meson-condensed phases and their role in the birth and evolution of neutron stars, see Reference. A finite m_s that is larger than the up and down quark masses leads to a suppression in the number of strange quarks and requires electrons to ensure neutrality. The electron chemical potential needed to accomplish this is μ_e ≃ m_s²/4μ. This in turn results in a splitting between the up and down Fermi levels because in beta equilibrium μ_u = μ − 2μ_e/3 and μ_d = μ + μ_e/3. In two-flavor quark matter, charge neutrality requires a relatively large μ_e because strange quarks are either absent or too few in number to furnish the necessary negative charge. Consequently, pairing will be favored only in strong coupling (Δ comparable to the splitting between the up and down Fermi energies), and in general this disfavors the 2SC phase in the neutron star context.
However, in Nambu-Jona-Lasinio model calculations of quark matter one typically finds that the effective strange quark mass is large and the quark pairing strength is strong. Both of these effects favor the possibility of realizing the 2SC phase at intermediate densities of relevance to compact stars. If, however, Δ is small compared to the Fermi-level splitting and pairing between up and down quarks is not possible, then quarks of the same flavor can pair in the spin-one channel. The spin-one phase is characterized by a locking between spin and color, and model predictions indicate a small gap of the order of one MeV. BCS pairing between two species of fermions results in a locking of the Fermi surfaces in momentum space and hence enforces equality in their numbers. This rigidity arises owing to the finite gap, which requires a finite energy to produce a quasi-particle excitation. For example, in the CFL phase this locking ensures equal number densities for all three quark flavors even though m_s > m_light. In the 2SC phase, the number densities of the up and down quarks are equal even when μ_e ≠ 0. This persists until μ_e ≃ 2Δ, when it becomes possible to generate a number asymmetry at the expense of pairing. However, in simple systems without long-range interactions, as μ_e increases one encounters a first-order phase transition from the BCS phase to the normal phase when μ_e ≥ √2 Δ. Surprisingly, even in weak coupling the phase structure of asymmetric fermion systems is not well known. Several candidate ground states have been suggested. They include (a) the mixed phase, where normal and superconducting phases coexist; (b) the LOFF phase (named after the authors Larkin, Ovchinnikov, Fulde, and Ferrell, who first suggested this possibility), where the gap acquires a spatial variation on a length scale ∼1/Δ; and (c) the gapless superfluid phase (also known as the breached-pair phase), which is a homogeneous superfluid phase with gapless fermionic excitations due to a finite occupation of quasiparticle levels. Recent work has shown that in systems with short-range forces the gapless superfluid phase is unstable with respect to phase separation. However, as we discuss below, all of these possibilities could be relevant in dense quark matter. The presence of long-range forces and the resulting constraint of charge neutrality in the bulk influence the competition between the various proposed phases of asymmetric Fermi systems. For example, although the mixed phase is favored over the gapless phase in systems where neutrality is not enforced, the neutrality constraint can stabilize the gapless phase with respect to phase separation in two-flavor and three-flavor quark matter. These phases, which are termed the gapless 2SC and gapless CFL phases, are remarkable because they are superconducting and yet have a large number of gapless excitations near the Fermi surface. In strong coupling, quantum Monte Carlo results show that gapless phases are stable with respect to phase separation. However, in weak coupling there remain several questions that need to be addressed to ascertain whether these phases, or minor variants of them, are indeed stable. In particular, in QCD the magnetic gluon masses are imaginary in the weak-coupling gapless phases. This signals an instability, known as the chromomagnetic instability, that occurs both in the gapless 2SC and gapless CFL phases.
Because the Meissner masses can be related to the gradient energy associated with spatial deformation of the pairing field, some type of heterogeneity is likely, and it appears that either the mixed phase or the LOFF phase is favored over gapless 2SC. This may also be the case for three-flavor quark matter.

Absolutely Stable Strange Quark Matter

The conjecture that matter containing strange quarks could be absolutely stable is several decades old. This could occur if SQM has a lower energy per baryon at zero pressure relative to heavy nuclei such as ⁵⁶Fe, for which the energy per baryon is ≃930 MeV. If this were true, nuclear matter and nuclei would only be metastable, albeit with a very large lifetime, because it would take several simultaneous weak interactions to generate the strangeness needed to lower the energy. However, this situation may not be difficult to achieve in the hot and dense interior of a neutron star. Once strangeness is seeded somewhere in the star, it should be able to catalyze the conversion of the entire star on a short time scale, producing a "strange star," i.e., a compact star made entirely of SQM. For a recent review of SQM and its role in compact stars, see Reference. We briefly mention a few recent findings of interest. For SQM to be preferred over nuclei or nuclear matter, its pressure must be larger than the pressure of nuclear matter when the quark chemical potential μ ≃ 930/3 MeV. From Equation 21 we see that in the bag model this would require a relatively small bag constant. However, the bag constant cannot be too small; otherwise even two-flavor quark matter would have a lower energy than nuclei, and these would instantly convert to quark matter, as this would not require the simultaneous weak interactions needed to produce strange quarks. In a simple model where a_eff ≃ 1, Δ = 0, and m_s = 150 MeV, a bag constant given by B^{1/4} ≃ 145 MeV predicts SQM to be absolutely stable while ensuring that two-flavor quark matter is unstable. A large gap of the order of 100 MeV has the effect of increasing the effective bag constant at which quark matter can become stable. Recent work on the effects of pairing in SQM can be found in Ref. Early work indicated that SQM would be a homogeneous liquid at arbitrarily low pressure. This is unlike low-density nucleon matter made of nuclei and electrons, which is a heterogeneous solid with nuclei embedded in a background electron gas. Reference investigated the possibility that SQM could also be a heterogeneous solid at low pressure. Using a model-independent analysis, this work shows that at relatively low density SQM could also become heterogeneous and phase-separate into positively charged nuggets of quark matter embedded in a background electron gas, much like matter with nuclei at low pressure. With increasing pressure the quark phase would be populated with voids of electrons. If stable, the heterogeneous crust could have important consequences for the surfaces of strange quark stars. Unlike the conventional scenario for strange stars, which is characterized by an enormous density gradient and a large electric field, the heterogeneous solid crust would occupy a large radial extent (ΔR ∼ 50-100 m) and obviate the need for an electric field.
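The stability window quoted above can be checked with a short sketch in the massless bag model. For uds matter at a common μ, the energy per baryon at zero pressure is 3μ₀ with μ₀ = (4π²B/3)^{1/4}; for electrically neutral ud matter (n_d = 2n_u, no electrons) a similar closed form follows. Quark masses and pairing are ignored here, so the numbers are only indicative:

```python
import numpy as np

def eA_uds(Bq):
    """E/A at P=0 for 3-flavor massless quark matter at common mu:
    mu0 = (4 pi^2 B / 3)^(1/4), E/A = 3 mu0. Bq = B^(1/4) in MeV."""
    return 3.0 * (4.0 * np.pi**2 / 3.0) ** 0.25 * Bq

def eA_ud(Bq):
    """E/A at P=0 for neutral 2-flavor matter (n_d = 2 n_u, no electrons):
    mu_d = 2^(1/3) mu_u, P = (mu_u^4 + mu_d^4)/(4 pi^2) - B = 0,
    and E/A = mu_u + 2 mu_d."""
    mu_u = (4.0 * np.pi**2 / (1.0 + 2.0 ** (4.0 / 3.0))) ** 0.25 * Bq
    return (1.0 + 2.0 * 2.0 ** (1.0 / 3.0)) * mu_u

for Bq in [140.0, 145.0, 150.0]:
    print(f"B^1/4 = {Bq:.0f} MeV : E/A(uds) = {eA_uds(Bq):6.1f} MeV, "
          f"E/A(ud) = {eA_ud(Bq):6.1f} MeV  (window: uds < 930 < ud)")
```

With these assumptions, B^{1/4} ≃ 145 MeV indeed places uds matter below 930 MeV per baryon while keeping two-flavor matter above it; a finite m_s raises E/A(uds) and narrows the window.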
Excitation Spectrum and Response Properties

Response properties such as neutrino emissivities, viscosity, and the low-temperature specific heat have been calculated in both the normal and superconducting phases of quark matter. In the normal phase, a large number of particle-hole excitations exist at the Fermi surfaces of the quarks and result in an enhanced response and a large specific heat. Consequently, neutrino cooling due to processes analogous to the direct URCA reaction, but involving quarks, is rapid. The bulk viscosity is also enhanced owing to the weak interaction rate that converts d ↔ s quarks. In contrast, in the CFL phase, where all quarks are gapped, the low-lying excitation spectrum is sparse, and only the baryon number Goldstone mode contributes at temperatures of relevance to neutron star cooling. Consequently, the neutrino emissivity, specific heat, and bulk viscosity are all relatively negligible. This suppression of the neutrino processes also persists in the CFL-K⁰ phase. In less-symmetric superconducting phases such as 2SC, where ungapped quark excitations exist, the situation is similar to that in normal quark matter. The existence of either gapless color superconductivity or heterogeneous phases such as the LOFF phase or the mixed phase can lead to potentially observable consequences. In the gapless phases the number of gapless excitations is anomalously large. The resulting specific heat is a factor of Δ/T larger than even that of the normal phase and could potentially impact neutron star cooling at late times. The coexistence of heterogeneity and superfluidity is key to understanding glitches. In this sense the heterogeneous superconducting phases could play a role. These connections between the properties of the phases and the observables are still in their infancy and warrant further work.

Nature of the Phase Transition

The novel phases discussed above could occur either via a first- or a second-order phase transition. In first-order transitions phase coexistence is possible. A heterogeneous phase with coexisting phases is termed the mixed phase. Here charge neutrality is enforced globally: The two coexisting phases carry opposite electric charges, and their volume fractions adjust to ensure overall neutrality. Heterogeneous phases are commonplace in the terrestrial context. For example, a solid can be viewed as a mixed phase composed of positively charged nuclear matter (residing inside nuclei) coexisting with a negatively charged electron gas phase. In the relatively low-density region of the neutron star crust we encounter a similar mixed phase that, upon increasing density, changes to accommodate neutrons in the electron gas phase. The physics of the neutron-rich mixed phase in the crust is reviewed in detail in Ref. Glendenning noted that similar considerations apply to first-order transitions in the context of high-density phase transitions. This applies in general to all first-order phase transitions with two conserved charges. In the neutron star context the conserved quantities are baryon number and electric charge. Unlike simple first-order transitions, such as the water-vapor transition, which is characterized by one conserved charge (the number of water molecules) and where phase coexistence occurs at a specific value of the pressure, phase coexistence in dense matter occurs over a finite interval in pressure owing to the presence of the extra degree of freedom, namely electric charge. To illustrate the physics of first-order phase transitions and the role of surface and Coulomb energies in the mixed phase, we consider an explicit example. The phase transition from nuclear matter to CFL quark matter is a first-order phase transition.
The nuclear phase has no strangeness, and the bulk quark phase has no electrons. The possibility of phase coexistence between these phases was investigated in Ref. We highlight some of the main findings here. In Figure 13, the pressures of the bulk nuclear, bulk CFL, and mixed phases are shown as a function of μ, the quark chemical potential. At intermediate values of μ, the mixed phase has larger pressure and is therefore favored over both the nuclear and CFL bulk phases. The electron chemical potential, μ_e, required to ensure charge neutrality in the bulk nuclear phase, which grows with μ, is also shown. In the mixed phase, neutrality requires a positively charged nuclear phase and a negatively charged CFL phase. This is easily accomplished by lowering μ_e. The decreasing μ_e in the mixed phase shown in the figure is obtained by requiring equal pressures in both phases at a given μ. In the mixed phase the Coulomb and surface energy costs must be met. The results shown in Figure 13 ignore these corrections: Only the bulk free energy is included, and the surface and Coulomb energy are neglected. The mixed phase occurs between A and D. The vertical line connecting B and C denotes the μ at which the pressures of neutral CFL and nuclear matter are equal. This is where a sharp interface may occur. The pressure of the mixed phase exceeds that of neutral CFL or neutral nuclear matter between A and D. Were this the whole story, the mixed phase would evidently be favored over the sharp interface. In a simple description of the mixed phase one considers a thin boundary between the coexisting phases. A unit cell of the mixed phase is defined as the minimum-size region that is electrically neutral. Three different simple geometries are considered: spheres (d = 3), rods (d = 2), and slabs (d = 1). In each of these cases, the surface and Coulomb energy costs per unit volume are given by

E_S = d x σ/r₀  and  E_C = 2π f_d(x) (ΔQ)² x r₀²,

where d is the dimensionality of the structure (d = 1, 2, and 3 correspond to Wigner-Seitz cells describing slab, rod, and droplet configurations, respectively), σ is the surface tension, and ΔQ = Q_nuclear − Q_CFL is the charge-density contrast between the two phases. The other factors appearing in these equations are x, the fraction of the rarer phase, equal to the volume fraction χ of the CFL phase when χ ≤ 0.5 and to 1 − χ when 0.5 ≤ χ ≤ 1; r₀, the radius of the rarer phase (radius of drops or rods and half-thickness of slabs); and f_d(x), the geometrical factor of order one that arises in the calculation of the Coulomb energy. The first step in the calculation is to evaluate r₀ by minimizing the sum of E_C and E_S. The result is

r₀ = [d σ / (4π f_d(x) (ΔQ)²)]^{1/3}.

Using this value of r₀ in the expressions above, the surface and Coulomb energy cost per unit volume is obtained:

(E_S + E_C)_min = (3/2) d x σ/r₀.

The lowest curve in Figure 14 shows Δε, the difference between the free-energy density of the mixed phase (calculated without including the surface and Coulomb energy cost) and that of the homogeneous electrically neutral nuclear and CFL phases separated by a single sharp interface, whichever of the two is lower. The mixed phase has lower bulk free energy, so Δε, plotted in Figure 14, is negative. The remaining curves in Figure 14 show the sum of the bulk free-energy difference Δε and the surface and Coulomb energy cost (E_S + E_C) of the mixed phase calculated using Equation 25 for droplets, rods, and slabs for three different values of the surface tension σ_QCD.
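The minimization described above is simple enough to carry out numerically. The sketch below uses the generic geometrical factor f_d(x) = [(2 − d x^{1−2/d})/(d − 2) + x]/(d + 2), with the logarithmic d → 2 limit; the surface tension, charge contrast, and volume fraction are illustrative inputs only:

```python
import numpy as np

alpha = 1.0 / 137.036
hbar_c = 197.327                       # MeV fm
e2 = 4.0 * np.pi * alpha * hbar_c      # e^2 in MeV fm (Heaviside-Lorentz)

def f_d(x, d):
    """Geometrical factor of order one entering the Coulomb energy."""
    if d == 2:
        return (x - 1.0 - np.log(x)) / 4.0        # d -> 2 limit
    return ((2.0 - d * x**(1.0 - 2.0 / d)) / (d - 2.0) + x) / (d + 2.0)

def r0_and_cost(sigma, dn, x, d):
    """Minimize E_S + E_C = d*x*sigma/r0 + 2*pi*f_d(x)*e^2*dn^2*x*r0^2.
    sigma: surface tension (MeV/fm^2); dn: charge-density contrast (fm^-3)."""
    A = d * x * sigma
    C = 2.0 * np.pi * f_d(x, d) * e2 * dn**2 * x
    r0 = (A / (2.0 * C)) ** (1.0 / 3.0)           # from d(E_S+E_C)/dr0 = 0
    return r0, 1.5 * A / r0                        # at the minimum, E_C = E_S/2

for d, name in [(3, "drops"), (2, "rods"), (1, "slabs")]:
    r0, cost = r0_and_cost(sigma=10.0, dn=0.1, x=0.3, d=d)
    print(f"{name:5s}: r0 = {r0:5.2f} fm, (E_S+E_C)_min = {cost:6.2f} MeV/fm^3")
```

With these inputs the preferred structure sizes come out at a few fm, consistent with the statement below that r₀ stays under 5-6 fm.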
Careful inspection of the figure reveals that, for any value of σ_QCD, the mixed phase is described as a function of increasing density by a progression from drops to rods to slabs of CFL matter within nuclear matter, and then from slabs to rods to drops of nuclear matter within CFL matter. This is the same progression of geometries seen in the inner crust of a neutron star or in the mixed phase at a first-order phase transition between nuclear matter and unpaired quark matter or a hadronic kaon condensate.

Figure 14: The free-energy difference between the mixed phase and the homogeneous neutral nuclear and CFL phases. In the lowest curve, the surface and Coulomb energy costs of the mixed phase are neglected, and the mixed phase therefore has the lower free energy. The other curves include surface and Coulomb energy for different values of σ_QCD and different mixed-phase geometries.

We have also checked that for σ_QCD = 10 and 40 MeV/fm², with the mixed-phase geometry at any μ taken to be the favored one, the sizes of the regions of both the rarer and more common phases (r₀ and its suitably defined counterpart) are always less than 5-6 fm. In general, uniform regions of charge can exist only on length scales small compared to the Debye screening length. The Debye screening lengths in the quark and hadronic phases are typically 5-10 fm. When the size of the charged regions becomes comparable to the Debye screening length, it becomes important to account for spatial variations of the charge density. This will influence the surface and Coulomb energy estimates presented in Equation 25. A detailed discussion of the importance of these finite-size effects is presented in References. For any given σ_QCD, the mixed phase has lower free energy than homogeneous neutral CFL or nuclear matter whenever one of the curves in Figure 14 for that σ_QCD is negative. We see that much of the mixed phase will survive if σ_QCD ≃ 10 MeV/fm², whereas for σ_QCD ≳ 40 MeV/fm² the mixed phase is not favored for any μ. This means that if the QCD-scale surface tension σ_QCD ≳ 40 MeV/fm², a single sharp interface will be favored. The interface is characterized by a bipolar charge distribution, resulting in an intense electric field that ensures that the electric charge chemical potential is continuous across it (see Ref. for details).

CONFRONTING OBSERVABLES

Observations relating to the structure and the evolution of compact stars furnish complementary information about the interior state. The former provide information about the EoS of dense matter and probe the high-energy or short-distance aspects, whereas the latter provide information about the low-lying excitation spectrum. This complementarity potentially allows us to probe the phase structure of matter at extreme density, as phase transitions can drastically alter the low-lying spectrum without strongly influencing the EoS, or vice versa. For example, nucleon superfluidity and superconductivity dramatically alter the excitation spectrum (exponentially suppressing neutrino emission rates) but play no role in the EoS. In contrast, hyperons strongly soften the EoS but may not affect the linear response properties to the same degree.

Bulk Properties and Structure

Mass and radius are certainly the most natural observables that probe the EoS of the dense interior.
Given the relationship between pressure and energy density, the general relativistic equation of hydrostatic equilibrium (termed the Tolman-Oppenheimer-Volkoff, or TOV, equation) uniquely determines the structure of the star, in particular its mass and radius, for a given central density. For nonrotating stars, each high-density EoS uniquely specifies a mass-radius curve. In the context of neutron star structure, EoSs are characterized as soft or stiff relative to each other on the basis of the ratio of pressure to energy density. This, however, depends on the density at which the comparison is made. The EoS with the larger (smaller) ratio at a specific density is termed stiff (soft). An EoS that is soft on average will lead to more compact stars, compared with an EoS that is stiff, and will reach a smaller maximum mass M_max. This is easily understood by noting that the energy density is the source of gravity, whereas the pressure provides resistance to the gravitational squeeze. From model studies it is empirically known that the maximum allowed mass is most sensitive to the EoS at the highest (supranuclear) densities, whereas the radius is sensitive to the EoS in the vicinity of nuclear density. In particular, for stars composed of nucleon matter, the radius appears to be fairly sensitive to the density dependence of the nuclear symmetry energy. The symmetry energy can potentially be inferred from terrestrial measurements of the neutron-skin thickness and probably from heavy-ion experiments. For a recent review of the role of the nuclear symmetry energy in neutron star structure and terrestrial experiments, see Reference. These experiments will likely constrain models of the nuclear EoS that currently differ significantly at both low and high density. For example, the mean-field models tend to be stiff at low density and soft at high density compared with variational calculations such as those of Akmal et al. Phase transitions to novel phases at supranuclear density typically result in a softening of the EoS. This arises because these transitions furnish new degrees of freedom that contribute more to the energy than to the pressure. For example, a hyperon that replaces a neutron at the Fermi surface has a much lower momentum and a larger rest mass. In general, this is true for all hadronic transitions studied to date. Clearly, the extent of the softening depends on the details of both the nuclear and exotic EoSs. To successfully employ a mass-radius constraint to infer the phase structure of the dense interior, the softening should be significant. This quantitative expectation is, however, difficult to assert and may not be true in general. For example, early studies that employed the naive bag model indicated that the quark-hadron transition would lead to significant softening, but recent work, which takes into account corrections due to quark-quark correlations and superconductivity, suggests that the quark EoS might mimic nuclear behavior. Figure 15 depicts the M-R relationships of three generic classes of stars: (a) nuclear stars, (b) hybrid stars, and (c) strange (SQM) stars. Here we show bands rather than individual M-R curves for specific models to indicate the inherent model dependence of these theoretical predictions. As discussed above, the broad range of predicted radii for nucleon EoSs will be narrowed in the near future owing to neutron-skin thickness measurements and probably also to heavy-ion experiments.
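For concreteness, the sketch below integrates the TOV equations for a toy polytropic EoS (ε = (P/K)^{1/Γ} with illustrative K and Γ, in geometrized units). It is meant only to show how an EoS maps central pressures onto an M-R curve, not to represent any realistic nuclear EoS:

```python
import numpy as np
from scipy.integrate import solve_ivp

# Geometrized units: G = c = 1, lengths in km, energy density and
# pressure in km^-2; masses converted to M_sun via GM_sun/c^2 = 1.4766 km.
Msun_km = 1.4766

def tov_rhs(r, y, eos_eps_of_p):
    P, m = y
    if P <= 0.0:
        return [0.0, 0.0]
    eps = eos_eps_of_p(P)
    dPdr = -(eps + P) * (m + 4.0 * np.pi * r**3 * P) / (r * (r - 2.0 * m))
    dmdr = 4.0 * np.pi * r**2 * eps
    return [dPdr, dmdr]

def mass_radius(P_c, eos_eps_of_p):
    """Integrate outward from the center until the pressure vanishes."""
    stop = lambda r, y, *a: y[0] - 1e-12
    stop.terminal, stop.direction = True, -1
    sol = solve_ivp(tov_rhs, [1e-6, 50.0], [P_c, 0.0], args=(eos_eps_of_p,),
                    events=stop, rtol=1e-8, atol=1e-12, max_step=0.05)
    R = sol.t_events[0][0]                 # km
    M = sol.y_events[0][0][1] / Msun_km    # M_sun
    return M, R

K, Gamma = 100.0, 2.0                      # illustrative polytrope parameters
eos = lambda P: (P / K) ** (1.0 / Gamma)   # eps(P), km^-2

for P_c in [5e-5, 1e-4, 5e-4]:             # central pressure, km^-2
    M, R = mass_radius(P_c, eos)
    print(f"P_c = {P_c:.0e} km^-2 : M = {M:4.2f} M_sun, R = {R:5.1f} km")
```

A stiffer toy EoS (larger K or Γ) shifts the resulting band toward larger radii and maximum masses, which is the sense in which the nuclear, hybrid, and strange star bands of Figure 15 differ.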
As anticipated, in hybrid stars, where there is a phase transition from a nuclear to an exotic phase at supranuclear density, softening leads to smaller radii and lower maximum masses. In contrast, the most exotic scenario, involving absolutely stable SQM, overlaps the regions accessed by nuclear and hybrid stars for masses in the observed range (1-2 M_⊙). Numerous constraints on the EoS are depicted in the figure. General relativity requires that the radius be larger than the Schwarzschild radius R_S, and causality of the EoS requires that the speed of sound c_s = (dP/dε)^{1/2} be smaller than the speed of light. This latter constraint is approximately equivalent to R_max > 1.5 R_S^max, where R_max and R_S^max are the radius and the Schwarzschild radius, respectively, of the maximum-mass star. (Note that some nuclear EoSs resulting from nonrelativistic models do violate causality at densities close to the maximum density reached at M_max.) The other curves in Figure 15 refer to various constraints arising from observations: (a) lower limits on the mass of PSR J0751+1807, at the 68% and 90% C.L., and lower limits on the estimate (68% C.L.) of the mass of 4U 1700-37, which may be a neutron star or a black hole; (b) the mass-shedding limit on the maximum radius of the fastest known pulsar, PSR J1748-2446ad; (c) the 90% C.L. range of the radius at infinity R_∞ of the quiescent LMXB X7 discussed in Section 2.2.4; and finally (d) the minimal radius of glitching pulsars deduced from the constraint I_s/I_c > 1.4% (see Section 2.4), assuming the glitch reservoir is the neutron superfluid in the crust. Converting this moment-of-inertia constraint into a mass-radius constraint simply requires integrating the TOV equation from the crust-core boundary to the surface. The pressure P_t at this transition point is, however, uncertain, with values ranging from 0.25 up to 0.65 MeV fm⁻³, and the two I_s/I_c curves in Figure 15 labelled 0.25 and 0.65 correspond to these two extremes. For each of the two classes of EoSs, nucleon or hybrid, low-P_t EoSs give small radii at low masses, whereas high-P_t EoSs give large radii: The glitch constraint is hence much more restrictive for EoSs that are soft at densities around nuclear matter density. These various constraints shown in Figure 15 are already sufficient to draw some conclusions about the high-density EoS:

1. The softest range of the hybrid EoSs shown in the figure has a maximum mass above the accurately measured mass of the PSR B1913+16 companion, 1.4408 ± 0.0003 M_⊙, but these EoSs are now ruled out at the 90% C.L. by the new measurement of PSR J0751+1807. Moreover, all hybrid EoSs are ruled out at the 68% C.L. by the same measurement.

Figure 16: Slow, "standard" cooling via the modified URCA process versus fast neutrino cooling from three neutrino emission processes with emissivities Q_ν = 10ⁿ T₉⁶ erg cm⁻³ s⁻¹. Trajectories marked N assume that neutrons in the core are normal, whereas cases marked SF assume that neutrons are paired in the ³P₂ channel in the whole core with a T_c of the order of 2-3 × 10⁹ K. The fast-cooling cases with n = 24, 25, and 26 correspond approximately to the emissivities of a kaon condensate, a pion condensate, and the nucleon direct URCA process, respectively. Note that at early times, t ≲ 30 yrs, as long as the age of the star is smaller than the thermal diffusion time scale through the crust, all models result in the same surface temperature and luminosity.
Neutrino emission from the Cooper pair formation process is not taken into account in these simple models.

5. Given the broad range of predicted radii for strange stars, none of the present constraints can exclude them or make a strong case for their existence.

Low-Lying Spectrum and Thermal Behavior

In contrast to bulk properties, the response properties influence observable aspects such as the thermal evolution, typically probe energy scales of the order of tens to hundreds of keV, and are very sensitive to the low-lying excitation spectrum. In some respects, they also tell us about the subtle properties that determine the phase structure of dense matter.

The Cooling of Isolated Neutron Stars

The long-term cooling of neutron stars is the best-studied example in this class. This topic was recently reviewed in detail, and we present only a short description here. This cooling is driven by neutrino emission from the core of the star when its temperature is above 10⁷-10⁸ K, or 1-10 keV. The time scale for cooling can be estimated in terms of two key microscopic ingredients, namely the neutrino luminosity, L_ν, and the total specific heat, C_v, supplemented by the surface photon luminosity, L_γ, from a simple energy-balance consideration as follows:

dE_th/dt = C_v dT/dt = −L_ν − L_γ,

where E_th is the thermal energy content of the star. For a degenerate Fermi system, the specific heat C_v is linear in T, i.e., C_v ≃ C T, where the constant C depends on the details of the star's structure. The neutrino luminosities can be split, in a first approximation, into fast or slow processes and can be written as L_ν^fast ≃ N_f T⁶ or L_ν^slow ≃ N_s T⁸, respectively. Finally, L_γ ∝ T_e⁴, and because, very roughly, T_e ∝ T^{1/2}, one can also write L_γ ≃ S T². Owing to their very different T dependences, at early times L_ν ≫ L_γ, whereas when T is sufficiently low the converse is true. From Equation 26, the two neutrino cooling time scales for fast and slow neutrino emission are

τ_fast ≃ C/(4 N_f T⁴)  and  τ_slow ≃ C/(6 N_s T⁶),

respectively. In obtaining the approximate numerical estimates (4 minutes and 6 months), we have used typical values of the constants C, N_f, and N_s that are characteristic of degenerate dense hadronic matter. For fast cooling, L_ν dominates L_γ until T ∼ 10⁶ K, whereas for slow cooling this happens when T ∼ 10⁸ K. Coincidentally, in both cases this turns out to occur at ages of the order of 10⁵ years. In general, exotic phases almost invariably result in fast neutrino emission (the CFL phase is the only known exception), and the detection of a fast-cooling neutron star would be an argument in favor of the presence of one of these phases. However, because nuclear matter with a high enough proton fraction also results in fast neutrino emission through the nucleon direct URCA process, such a detection would only be an indication of, and not proof of, one of these phases. Moreover, because pairing results in a strong suppression of the neutrino emissivity of all processes in which the paired component participates, an exotic phase may be present and yet not manifest itself in the thermal evolution of the star. We illustrate these considerations in Figure 16, in which the time evolution of the thermal luminosity L_∞ of a model neutron star for the case of slow "standard" cooling from the modified URCA process is compared with three different fast neutrino emission processes. Each scenario is presented in two forms, one in which neutrons are normal and one in which they are assumed to be superfluid in the whole core.
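The energy-balance estimate above is easy to integrate directly. In the sketch below the constants C, N_s, N_f, and S are assumed magnitudes, chosen only so that the fast and slow cooling time scales at T = 10⁹ K come out near the 4 minutes and 6 months quoted in the text, and so that the photon-dominated era begins near T ∼ 10⁶ K and 10⁸ K, respectively:

```python
import numpy as np
from scipy.integrate import solve_ivp

# Toy cooling: C_v dT/dt = -L_nu - L_gamma with C_v = C1*T,
# L_slow = Ns*T^8, L_fast = Nf*T^6, L_gamma = S*T^2.
C1 = 1e30     # erg K^-2        (assumed magnitude)
Ns = 1e-32    # erg s^-1 K^-8   (slow, modified-URCA-like)
Nf = 1e-9     # erg s^-1 K^-6   (fast, direct-URCA-like)
S = 1e15      # erg s^-1 K^-2   (photon luminosity coefficient)
yr = 3.156e7  # seconds per year

def rhs(t, T, N, p):
    L_nu = N * T[0] ** p
    L_gamma = S * T[0] ** 2
    return [-(L_nu + L_gamma) / (C1 * T[0])]

for N, p, label in [(Ns, 8, "slow"), (Nf, 6, "fast")]:
    sol = solve_ivp(rhs, [0.0, 1e6 * yr], [1e9], args=(N, p),
                    t_eval=[1e2 * yr, 1e4 * yr, 1e6 * yr], rtol=1e-6)
    temps = ", ".join(f"{T:.1e}" for T in sol.y[0])
    print(f"{label} cooling: T(1e2, 1e4, 1e6 yr) = {temps} K")
```

The fast track sits orders of magnitude below the slow one at intermediate ages, which is the separation between the "standard" and fast-cooling trajectories of Figure 16 before pairing suppression is applied.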
The occurrence of fast cooling leads to thermal luminosities, at ages of ∼10^2-10^5 yr, that are about three orders of magnitude lower than in "standard" cooling. However, when the suppression of the neutrino emissivity by pairing is taken into account, the differences can be much smaller. Higher values of T_c can even render fast and slow cooling indistinguishable. Moreover, once the fast neutrino emission is controlled by pairing gaps, the difference between the cooling efficiencies of the various fast processes becomes much smaller than when they proceed unhindered, as the three fast-cooling superfluid (SF) curves of Figure 16 show when compared with the corresponding curves with normal (N) neutrons. Considering the plethora of channels for fast neutrino emission when the number of degrees of freedom increases, (a) pion or kaon condensates, (b) hyperons, and (c) deconfined quark matter, it appears unlikely that these kinds of studies by themselves will be able to distinguish between types of exotica. Reference explicitly showed that model stars with only nucleons, nucleons with hyperons, nucleons with deconfined quark matter, or nucleons with hyperons and deconfined quark matter can lead to essentially indistinguishable cooling trajectories once uncertainties in the size of the various pairing gaps are taken into account. With respect to strange stars, if the SQM surface is covered by a thin envelope of normal nuclear matter, their thermal evolution would be similar to that of more mundane neutron stars; only in the case of a bare quark surface would their thermal evolution be drastically different. Given these intricacies, Page et al. introduced the minimal cooling scenario as a paradigm in which all exotic phases are assumed to be absent and the proton fraction is also assumed to be below the direct URCA threshold. This paradigm is a benchmark against which data are compared, so that observations that cannot be accommodated within the predictions of this scenario are serious evidence for the presence of some fast-cooling agent. Minimal cooling is not naive cooling: it considers all other processes that can affect the thermal evolution of the star and are based on very standard physics. Two standard ingredients can influence the star's cooling significantly. The first is the occurrence of neutron superfluidity and/or proton superconductivity, which suppresses the specific heat and the neutrino emission from the modified URCA and bremsstrahlung processes, but also opens new neutrino emission channels owing to the constant formation and breaking of Cooper pairs. The net effect of pairing can hence be either a reduction or an enhancement of L_slow. The second important ingredient is the chemical composition of the uppermost layers of the star, the envelope, which controls the photon luminosity L_γ: a light-element envelope (H, He, C or O) is less insulating than a heavy-element, iron-like envelope and results in a larger L_γ and T_e for a given internal temperature. A light-element envelope has the effect of making the star brighter during the neutrino cooling era, because at that time L_γ is negligible compared to L_ν and the surface temperature T_e simply follows the evolution of the internal temperature, while accelerating the cooling, owing to its higher L_γ, during the photon cooling era. In Figure 17, the predictions of the minimal cooling paradigm are displayed and compared with the observational data presented in Section 2.2.3.
Models with a light-element and a heavy-element envelope are separated, and for each class the spread in predictions is due to the uncertainty in the size of the neutron and proton pairing gaps. Note that the luminosities at ages 10^2-10^5 yr can be considerably lower than those of the "standard" cooling of Figure 16 because of the enhanced neutrino emission from the Cooper pair formation process. Overall, the agreement between the theory and the observational data is quite good. Two objects, PSR J0205+6449 (in SNR 3C58) and RX J0007.0+7302, nevertheless have upper limits below these predictions and are good candidates for the occurrence of fast neutrino emission, controlled by pairing, but they may also be interpreted as evidence for a medium-enhanced modified URCA process. The four upper limits, marked a, b, c and d in Figure 17, from Kaplan et al.'s search, would, however, be definitive evidence for fast cooling if they can be proven to refer to neutron stars and not black holes. Similar conclusions have been reached by the St. Petersburg group, which has developed a complementary version of minimal cooling.

The Thermal Behavior of Neutron Stars in Low-Mass X-Ray Binaries

We presented in Section 2.3 observations of neutron stars in LMXBs that are undergoing transient phases of accretion separated by periods of quiescence. Brown et al.'s mechanism of heating of the star provides a simple explanation for the hot thermal spectrum observed in quiescence in the cases of Aql X-1, 4U 1608-522 and CX 1, but the results of Table 1 clearly show that many systems have quiescent luminosities much below the prediction of this model. A natural explanation for this discrepancy is the presence of fast neutrino cooling in the core of these too-cold neutron stars. This result contrasts sharply with the fact that most isolated cooling neutron stars have thermal luminosities in agreement with the results of the minimal cooling scenario, i.e., they show no conclusive evidence for the occurrence of fast neutrino emission. However, remembering that LMXBs are very long-lived systems in which the neutron star may accrete a significant fraction of a solar mass, these cold neutron stars may simply be massive enough to be above the threshold for fast neutrino emission, whereas most of the isolated cooling neutron stars are below this threshold. These results would be in good agreement with the expected mass range of neutron stars emerging from core-collapse supernovae, as illustrated in Figure 3, and are corroborated by the measured masses of PSRs in PSR+NS systems exhibited in Figure 2, whereas the measured masses of PSR+WD systems, offspring of LMXBs, confirm the existence of more massive neutron stars.

Limiting Spin Frequency

The first, absolute, limiting frequency for a fast-spinning pulsar is the mass-shedding limit, which we used as a constraint in Figure 15, from the fastest known pulsar PSR J1748-2446ad. However, gravitational radiation reaction-driven instabilities, such as the r-mode instability, can result in a more stringent limit on the spin frequency. These modes generate gravity waves that carry away angular momentum and, instead of being damped, grow in amplitude. Viscosity is then the main source of damping and, depending on the bulk and shear viscosity of dense matter, different limiting spin frequencies are possible. At very low temperature, damping is due to the shear viscosity, whereas at very high temperature it is due to the bulk viscosity.
For these extreme temperatures, r-modes are effectively damped. At intermediate temperatures, depending on the bulk viscosity and its temperature dependence, a limiting frequency as low as 50% of the mass-shedding frequency is possible. In LMXBs, where the neutron stars are spun up to millisecond periods, a population study shows a sharp drop in the number of pulsars with spin frequency ≳ 730 Hz. If this limiting behavior is indeed due to gravity waves from the r-mode instability, it provides useful information about the viscous damping rate in the star, which can be calculated for different EoSs for the neutron star interior. For example, a strange quark star made entirely of CFL quark matter would be incompatible with these observations, because all low-energy weak interaction processes that can contribute to the bulk viscosity are exponentially suppressed by exp(-Δ/k_B T), where Δ is the pairing gap and T is the temperature.

Galactic Supernova Neutrinos

The enormous gravitational binding energy, E_G ≃ GM²/R ≃ 3×10^53 erg, gained during a core-collapse supernova is stored inside the newly born proto-neutron star as thermal energy of the matter components and thermal and degeneracy energy of neutrinos. This energy then leaks out on a time scale determined by the rate of diffusion of neutrinos in the dense core. The detection of ∼20 neutrinos from SN1987A is testimony to this theoretical expectation. Current estimates indicate that we should see ∼10,000 events in SuperKamiokande and ∼1,000 events in the Sudbury Neutrino Observatory (SNO) from a supernova at the center of our galaxy (distance = 8.5 kpc). Detection would provide detailed information about the temporal structure of the neutrino signal and thereby probe the properties of the dense inner core. Preliminary studies of the role of phase transitions in the evolution of proto-neutron stars and in the supernova neutrino signal exist and show that the late-time signal is sensitive to the high-density physics. In particular, a dramatic consequence of a transition to a very soft high-density EoS would be a delayed collapse to a black hole on the neutrino diffusion time scale. Less dramatic changes to the neutrino luminosity could be detected for a galactic supernova and could potentially probe significant changes to the neutrino mean free path that occur as a result of a phase transition. Like the neutrino emissivities, the neutrino opacity of dense matter is sensitive to the phase structure and low-lying excitation spectrum of dense matter.

Gravity Waves

The detection of gravity waves from a binary system in Laser Interferometer Gravitational-Wave Observatory-like detectors, which can measure frequencies up to 1 kHz, can determine the chirp mass M_ch = μ^(3/5) M_T^(2/5), where μ is the reduced mass and M_T is the total mass of the binary. Perhaps more interestingly, during the inspiral the compactness M/R is also accessible and will thereby provide an estimate of the star's radius. In addition, a first-order phase transition occurring in the inner core of a neutron star could lead to a density discontinuity if the surface tension is large. In Reference, the authors show that this would affect the frequency spectrum of the non-radial oscillation modes in two ways. First, it would produce a softening of the EoS, leading to more compact equilibrium configurations and changing the frequency of the fundamental and pressure modes of the neutron star. Second, a new nonzero-frequency g-mode would appear in association with each discontinuity. If these modes are excited in either a phase transition-induced mini-collapse or a glitch (binary neutron star mergers are unlikely to excite these high-frequency modes), they may be observable in the next-generation gravity wave detectors.
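As a quick numerical illustration of the chirp-mass combination defined above, the snippet below evaluates M_ch = μ^(3/5) M_T^(2/5); the equal 1.4 M_⊙ component masses are a conventional choice for a double neutron star binary, not values taken from the text, and the function name is ours.

```python
# Quick check of the chirp-mass relation M_ch = mu**(3/5) * M_T**(2/5).
# Masses in solar-mass units; illustrative only.

def chirp_mass(m1: float, m2: float) -> float:
    """Chirp mass from component masses (same units in, same units out)."""
    mu = m1 * m2 / (m1 + m2)   # reduced mass
    mt = m1 + m2               # total mass
    return mu ** 0.6 * mt ** 0.4

if __name__ == "__main__":
    # A canonical 1.4 + 1.4 M_sun double neutron star binary:
    print(f"M_ch = {chirp_mass(1.4, 1.4):.3f} M_sun")  # ~1.22 M_sun
```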
The role of the EoS in binary neutron star mergers is also potentially interesting and largely unexplored. Reference finds that if there is time for stable mass transfer to occur during the merger, it could provide information about the mass-radius diagram and potentially distinguish between normal stars and strange quark stars.

CONCLUSIONS

To infer the phase structure and properties of matter at extreme density through observations of compact stars is a compelling but difficult task. Despite the rather rudimentary state of quantitative theoretical calculations of dense matter, the observational data are already providing guidance and hinting at possible tensions between different models. In particular, the prospects for a firm mass measurement of a heavy neutron star (or a low-mass black hole) and for accurate radius measurements are excellent. Such measurements will directly confirm or rule out the possibility of a strong phase transition to a soft EoS in the vicinity of nuclear density, and presently available values already seem to disfavor a soft EoS. Observations of cooling neutron stars, and the realization that superfluidity and superconductivity play a dominant role in neutrino cooling of the core, have sparked renewed interest in theoretical studies of pairing correlations in dense matter. Furthermore, the connections between seemingly different observables, such as long-term neutron star cooling, the quiescent luminosity observed in LMXBs, and superbursts, are indicating new trends that point to a diversity in the thermal evolution of neutron stars, with growing evidence in favor of the occurrence of some fast neutrino-emitting process(es), at least in heavy, > 1.4 M_⊙, neutron stars. These developments call for a more global approach in theory and simulations, confronting different observables within a unified framework so that the sensitivity to the underlying dense-matter physics can be properly assessed. Theoretical progress in describing dense nuclear matter, and new insights into using terrestrial experiments on nuclei to better constrain EoS models, provide compelling reasons to expect a quantitative description of neutron-rich matter at the densities of interest. Throughout this review we have highlighted important qualitative differences and possible similarities between nucleon matter and other exotic phases. Minimizing the theoretical uncertainty in the predictions for nucleon matter is crucial for confronting observables. For example, a precise determination of the density dependence of the nuclear symmetry energy can simultaneously reduce the uncertainty in the radii and cooling rates predicted for the different nuclear EoSs. The situation with exotic states of matter is less clear. Although softening and enhanced cooling appear to be common trends, there are important exceptions, and quantitative estimates are often very model dependent. Nonetheless, as mentioned above, a radius measurement smaller than 10 km or the discovery of a low-mass black hole candidate with mass ≲ 2 M_⊙ would be very suggestive of a phase transition. Better experimental or theoretical constraints on the hyperon-nucleon and kaon-nucleon interactions are necessary to ascertain whether novel hadronic phases are relevant.
Quark matter remains an especially interesting possibility. Color superconductivity is relevant, especially for the calculation of neutrino processes and cooling. However, an understanding of how a finite strange quark mass affects the phase structure is needed, because this can result in vastly different low-energy properties.
Canadian French and English newspaper portrayals of physicians' role and medical assistance in dying (MAiD) from 1972 to 2016: a qualitative textual analysis

ABSTRACT

Objective To examine how Canadian newspapers portrayed physicians' role and medical assistance in dying (MAiD).

Design Qualitative textual analysis.

Setting Online and print articles from Canadian French and English newspapers.

Participants 813 newspaper articles published from 1972 to 2016.

Results Key Canadian events defined five eras. From 1972 to 1990, newspapers portrayed physicians' MAiD role as a social issue by reporting supportive public opinion polls and revealing that MAiD was already occurring in secret. From 1991 to 1995, newspapers discussed legal aspects of physicians' MAiD role, including Rodriguez' Supreme Court of Canada appeal and Federal government Bills. From 1996 to 2004, journalists discussed professional aspects of physicians' MAiD role and the growing split between palliative care and physicians who supported MAiD. They also reported on court cases against Canadian physicians, Dr Kevorkian and suffering patients who could not receive MAiD. From 2005 to 2013, newspapers described political aspects, including the tabling of MAiD legislation to change physicians' role. Lastly, from 2014 to 2016, newspapers again portrayed legal aspects of physicians' role as the Supreme Court of Canada was anticipated to legalise MAiD and the Québec government passed its own legislation. Remarkably, newspapers kept attention on MAiD over the 44 years before it became legal. Articles generally reflected Canadians' acceptance of MAiD; physicians were typically portrayed as opposing it, but not all did.
Conclusions Newspaper portrayals of physicians' MAiD role discussed public opinion, politicians' activities and professional and legal aspects. Portrayals followed the issue-attention cycle through three of five stages: 1) preproblem, 2) alarmed discovery and euphoric enthusiasm and 3) realising the cost of significant progress.

Strengths and limitations of this study
► This study examines a large random sample of print and online newspaper articles published in French and English.
► Some important themes and codes may not have appeared in the random sample due to sampling bias, culture bias and the halo effect.
► Certain publications may be under-represented depending on whether their publication is available online or in databases.
► This study looks at online and print news media only and does not consider media from television, social media, external websites, RSS feeds, apps, etc, or the general decline of print newspapers.

INTRODUCTION

Medical Assistance in Dying (MAiD) is currently permitted on four continents (Europe, Asia, North America and South America) and is being debated internationally. 1 2 Depending on the jurisdiction, MAiD may include passive euthanasia, active euthanasia, assisted suicide and euthanasia. (Other relevant definitions include: active euthanasia, accelerating or intentionally causing death by means of a single act from a third party; passive euthanasia, withdrawing or withholding life-sustaining care in order to allow a patient to succumb to their medical condition; and assisted suicide, providing the means or circumstances by which someone is able to take their own life, which sometimes may involve a physician. 3) In Canada, the context of our study, the Federal Parliament passed Bill C-14 Medical Assistance in Dying in June 2016. 4 MAiD is a legal option for eligible Canadian patients suffering from an incurable medical condition to end their life with help from a physician or nurse practitioner (NP). (In the province of Québec, physicians can only perform euthanasia, not assisted suicide, and NPs cannot perform euthanasia.) Except in the province of Québec, physicians and NPs can legally provide knowledge to patients about self-administration of death and the means for a patient to take their own life (ie, write a prescription). They can also legally administer a lethal cocktail of pharmaceuticals to hasten a patient's death. (While Canadian patients can self-administer MAiD, 99.8% of the 2,468 Canadian patients who received MAiD from June 2016 to December 2017 chose to receive it from a physician/NP; only 5 patients underwent self-administered deaths. 5 This may be because the preferred pharmaceuticals for MAiD self-administration were only approved in Canada in late 2017; it is unknown if more patients will choose self-administration. 6) 3 7 8 Although some consider passive euthanasia MAiD, it was already permitted in Canada and is not included in the legislation. Polls from 1968 onwards 9 10 showed that Canadians supported MAiD, yet it was not legalised for over four decades. Healthcare budgets in Canada are tight and continually increasing. 11 While implementing MAiD could reduce healthcare spending, 12 Canadian newspapers consistently wrote about MAiD as a way to help decrease a patient's prolonged suffering and give them control over the dying process (ie, autonomy). 13 Despite growing public support for MAiD, physicians and the Canadian Medical Association (CMA) have long resisted changing their professional role to include it.
When choosing subjects, journalists and editors have a duty to report current events and facts, and to decide which issues are appealing and newsworthy. Since sensational stories capture the public's attention, newspapers generally report on emerging issues, which may or may not be supported by much research. 16 Newspapers help inform the public and may shape their opinion, but may also prolong public misconceptions about an issue. Whether newspapers actually influence behaviour change or legislation has been debated in many disciplines. Research has found that the public relies on newspapers for health information; however, newspapers may inaccurately report medical issues 27 28, and a bias towards positive health coverage has been found. 29 30 For example, the Australian media promoted popular opinion and specific interest groups, and ignored or misinterpreted scientific findings that did not fit into its portrayal of MAiD. 31 32 Newspaper studies do not often follow long-term, decades-long issues but rather focus on shorter time periods of a few days to several years, 17 18 33 34 limiting researchers to a snapshot of an entire issue. It is important that long-term issues discussed over decades are studied in their entirety, that is, from the beginning of newspaper stories being published about an issue to the end. 35 A long-term analysis of Canadian newspaper stories regarding physicians' MAiD role allows us to capture shifting portrayals as the discussion progressed. It also provides insight into events and opinions as they happened, were reflected on and resurfaced over time. As most studies focus on a few major newspapers, they may additionally overlook local and contextual factors. Thus, it is essential to collect broad data from both local and national Canadian newspapers that discussed MAiD. To our knowledge, there is limited research about MAiD in Canada (at the 2018 MAiD conference, Dr Wiebe, a Canadian researcher and physician, presented the handful of Canadian MAiD studies that have been published to date) and only a single study of MAiD and physicians' role in Canadian newspapers. 34 Existing studies of MAiD in newspapers have several shortcomings. First, they focus on a few years of print English articles, usually centred around a significant event, such as a court case. Second, they do not examine French-language or online newspapers. Third, because researchers only analysed a few years of articles, studies overlook the gradual accumulation of key MAiD events (eg, court cases, bills and polls) that occurred over the 44 years. 36 37 In this study, we attempt to address some of these shortcomings by investigating MAiD in French and English Canadian newspaper articles from 1972 to 2016. Our article does 'not focus on the merits of the assisted suicide debate nor do we comment on the intent of the news accounts. Rather, our purpose is to discuss the portrayals the news stories help to create'. 38 As MAiD in Canada is a new practice with a small amount of published research, 2 39 40 we must examine the information that is available. Given the scarcity of published medical research, Canadian newspapers are a credible data source, as journalists have written many stories about MAiD and have been discussing it since 1972. Thus, newspaper articles provide important information about MAiD, such as portrayals of events, legislation, public opinion polls and physicians' views. To our knowledge, no one has systematically studied portrayals of MAiD in Canadian newspapers since 1972.
Hence, our objective is to analyse Canadian French and English online and print newspaper portrayals of physicians' MAiD role from when suicide was decriminalised in 1972 to MAiD's legalisation in 2016. Our research question is: how do Canadian French and English online and print newspapers portray physicians' role and MAiD? Given Canadians' intense interest in physicians' MAiD role and its recent legalisation, our study gives unique insight into how newspapers portrayed it over time.

METHODS

Theoretical perspective

Theories of professional role change 41 42 and the issue-attention cycle 35 are used in this study. Professional role change theory includes practices and how professionals behave at work. 41 43 Some studies of professional role change show that physicians may resist changing their role, which can result in a lengthy and difficult change process. 44 45 The passing of new legislation may greatly change physicians' role. 42 In Canada, physicians' role was modified to incorporate MAiD in mid-2016 (with the exception of Québec, where euthanasia became legal in December 2015), which meant their role also expanded to include consulting and supporting patients with MAiD requests, assessing patients' eligibility for MAiD and providing 'aftercare to bereaved relatives'. 46 The issue-attention cycle 35 has five stages describing how newspaper attention to issues differs over time: preproblem; alarmed discovery and euphoric enthusiasm; realising the cost of significant progress; gradual decline of intense public interest; and postproblem. We use professional role change theory and the issue-attention cycle together because newspaper portrayals of MAiD discuss physician role change over decades of sustained public interest in the issue. Using these theories together helps shed light on the path and context within which physicians' role changed when MAiD became legal in Canada. They additionally help us understand the context within which communication through newspaper stories occurs.

We conducted a qualitative textual analysis examining Canadian online and print newspapers in English and French from 1972 to 2016 (figure 1). 47 48 This method was chosen as the ideal way to understand and thoroughly analyse each portrayal of MAiD and physicians' role at the sentence level. All included articles were read to gain in-depth insight into the data. A quantitative approach, such as content analysis, was not used, as it could have missed nuances in the qualitative data. No assumptions were made about newspaper articles or how MAiD and physicians' role were portrayed; rather, we let the data tell the story by using grounded theory for coding. Articles were included from when suicide was decriminalised (1 January 1972) to the day after Bill C-14 passed (18 June 2016), when MAiD became legal. Because two other qualitative textual analyses, of tobacco and gun control, found that newspaper portrayals of an issue changed after legislation passed, we ended our study at the legalisation of MAiD to avoid this potential bias. 20 49 Additionally, most newspaper studies of attention to issues analyse them from beginning to end, often over a short time period of a few days to a few weeks. 22 As discussed in the 'Introduction' section, our topic was portrayed in French and English newspapers for 44 years; thus we examined the topic over the entire time period.
The Canadian Business and Current Affairs, Canadian Newsstream, the Canadian Broadcasting Corporation's website and Eureka.cc were searched using the terms: 'assisted death', 'assisted suicide', 'aide médicale à mourir', 'right to die', 'end of life', 'dying with dignity', 'euthanasia', 'euthanasie', 'Loi concernant les soins de fin de vie', 'mort dans la dignité', 'suicide assisté' and 'suicide avec assistance médicale'. Newspaper articles were downloaded into Microsoft Word and assessed by two authors using the following inclusion criteria: published in a Canadian newspaper, discussed MAiD in Canada and full text. Articles were excluded if they exclusively discussed MAiD in other countries. Articles were also removed if they addressed withdrawal of life-sustaining treatment or passive euthanasia. Duplicate articles were removed, including articles that appeared in both English and French, by keeping the article that was chronologically published first. Disagreements about whether an article should be included were resolved by discussing it with a third author and reaching consensus. A database of included MAiD newspaper articles was created in NVivo 11. The dataset was available to the research team via a private shared folder in Dropbox. The databases searched are available through most libraries, and many articles are freely available on public websites. As newspaper articles are vetted by the journalist, editor(s), newspaper staff and the public, the integrity of the data was not further verified. In health and other disciplines, the traditional approach to newspaper analysis focuses on stories that discuss a single event over a short span of time, sometimes a few days. 22 27 50 51 Furthermore, there is no standard for random sampling of large newspaper article databases. 52 Given this study's scope and aim, along with a lack of similar health articles to model from, we followed the methods of qualitative textual analyses of newspapers in fields such as nanotechnology, electromagnetic fields and hybrid electric vehicles. In non-health studies with 1000+ included newspaper articles, the median random sample was 18.7% (13.5%-26.3%). Thus, we used a random sample of 19% by era, generated by Stattrek's Random Number Generator. 59 The context of this study was newspapers in Canada, which included the French and English languages as well as national and local events. Five eras were developed based on key Canadian MAiD events connected to physician role change. 36

Patient and public involvement

Patients and the public were not involved in this study.

Analysis

The analyses were conducted by ETC and CS. To ensure that coding was consistent, each author independently coded a sample of sentences and compared the results; then one author completed the coding. Next, 10% of this coding was randomly checked, and disagreements were resolved by discussion. For example, a disagreement about whether the sentence 'In Holland, euthanasia is widely practised' should be coded as Holland or practice was resolved by using both codes. All downloaded articles were imported into NVivo 11 and separated into folders by era, then language (eg, 2005-2013 English). The top 10 most frequent words (keywords) were derived in NVivo 11. English and French articles were initially analysed separately to evaluate whether they contained similar or different codes and themes. Since 2014-2016 was a very significant period leading up to the legalisation of MAiD and contained 50% of the total articles, these articles were initially analysed by year in case there were differences in codes and themes.
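As a concrete illustration of the sampling step described above, and of the kind of keyword-count comparison reported in the next paragraph, the sketch below draws a 19% random sample within each era and applies χ² and Fisher's exact tests to a 2×2 table of keyword counts. It is a hypothetical reconstruction, not the authors' procedure: the per-era article counts, article IDs and keyword table are invented placeholders (only the 813-article total and the 19% rate come from the paper), and the original sampling used Stattrek's Random Number Generator rather than Python.

```python
import random
from scipy.stats import chi2_contingency, fisher_exact

# Era boundaries follow the paper; the article IDs are placeholders standing
# in for the 813 included articles, split across eras in an assumed way.
eras = {
    "1972-1990": [f"a{i}" for i in range(120)],
    "1991-1995": [f"b{i}" for i in range(150)],
    "1996-2004": [f"c{i}" for i in range(140)],
    "2005-2013": [f"d{i}" for i in range(190)],
    "2014-2016": [f"e{i}" for i in range(213)],
}

random.seed(59)  # arbitrary fixed seed, for reproducibility only

# 19% stratified random sample, drawn independently within each era.
sample = {
    era: random.sample(articles, round(0.19 * len(articles)))
    for era, articles in eras.items()
}
for era, picked in sample.items():
    print(era, len(picked), "of", len(eras[era]))

# Hypothetical 2x2 table: articles containing the keyword group
# 'euthanasia/euthanasie/suicide' vs. not, in French and English subsets.
table = [[40, 12],   # French:  [contains keyword, does not]
         [55, 10]]   # English: [contains keyword, does not]
chi2, p, dof, _ = chi2_contingency(table)
print(f"chi-square p = {p:.3f}")
_, p_fisher = fisher_exact(table)   # preferred when expected counts are small
print(f"Fisher exact p = {p_fisher:.3f}")
```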
A statistical analysis was performed using χ² and Fisher's exact tests to identify differences in the number of articles containing a top-10 keyword across eras and languages. A p value <0.05 was considered statistically significant. Sentences containing one of the top 10 keywords, 'euthanasia/euthanasie/suicide', were copied into Microsoft Word and analysed using grounded theory by open coding within each era. This keyword group was chosen as it best represented the vocabulary used to describe MAiD in all eras. For example, the previously mentioned Holland code was combined with similar codes with country names (eg, the USA, Belgium, Switzerland) under the theme Countries (other). The same process was used to coalesce all other codes into themes. The definitions of each theme are provided in the 'Results' section. Using the above process, similar codes were grouped together and preliminary themes were identified for each era. 61 Constant comparison was then used to compare emerging themes in each era. 62

RESULTS

Because few of the top 10 keywords differed based on language, all articles were analysed together. There was a statistically significant decline in the number of French articles in 2005-2013 and 2014-2016 that contained the keyword 'euthanasia/euthanasie/suicide', but this may be due to a sampling error in previous eras or to 'mort dans la dignité' being used instead. The articles from 2014, 2015 and 2016 also had similar top 10 keywords and were therefore analysed together (table 2B). Given the topic being studied, MAiD, we focused on the top-10 keyword group 'euthanasia, euthanasie or suicide'. Sentences (n=1315) (figure 2) containing 'euthanasia, euthanasie or suicide' were analysed for themes in both languages (table 3). With the exception of the theme Countries (other), which was not present in the era 1972-1990, the themes were similar across the eras.

Euthanasia/euthanasie/suicide keyword analysis

French and English newspaper articles were analysed within the five eras, which, as outlined in the 'Methods' section, were defined by key Canadian events. 36 37 Sample quotes from each theme and era are provided in table 4.

[Table residue: lists of sampled newspapers by province (eg, Alberta: Airdrie City News, The Calgary Herald; New Brunswick: Daily Gleaner, Fredericton; Nova Scotia: Chronicle Herald, Halifax; Québec: Le Devoir, Le Nouvelliste (Trois-Rivières), Le Soleil, La Tribune (Sherbrooke)) with article-type counts ('Type of article n (%)') that are not recoverable.]

1972-1990-newspapers' portrayal of public opinion about physicians' MAiD role (preproblem stage; heading reconstructed)

At the beginning of this era, journalists portrayed physicians' MAiD role as controversial, with limited public debate. By 1990, they were portraying MAiD as a social issue. The press drew attention to the taboo topic of suicide by featuring stories about ill patients who overdosed in desperation to die, often with stockpiled drugs, because their physician could not help them die (table 4). Journalists tended to feature favourable public opinion polls and supportive stories about MAiD. However, Christian and Catholic religious organisations and other opponents voiced their discontent with newspapers for not portraying their view. To opponents' dismay, journalists mistakenly indicated that the Anglican Church approved of MAiD, which was not true. However, stories such as this fed into growing public discontent about physicians not being able to provide MAiD. Because newspapers published MAiD stories as early as 1968, this topic was considered newsworthy long before it became legal in Canada.
In articles, journalists discussed MAiD as a choice, and even a right, that Canadians lacked. For example, newspapers featured stories about Canadians suffering and unable to die with dignity because they could not access MAiD. Journalists also examined Canadians' growing fears that advancing medical technology meant that physicians could keep them alive against their wishes. Journalists revealed to the public that some Canadian physicians secretly performed euthanasia (table 4). Articles also provided information about how citizens could lobby the government to change Canada's Criminal Code (CCC) and legalise MAiD. By 1990, news stories were discussing the nuances of active and passive MAiD. From this era forward, physicians were typically portrayed as disagreeing about whether they should perform MAiD or not, but most stories quoted physicians who opposed it. The CMA did not take a stand for or against MAiD, instead promoting palliative care. Journalists' long-term attention to MAiD, and to whether physicians should provide it, began in this era.

1991-1995-newspapers' portrayal of legal aspects of physicians' MAiD role (alarmed discovery and euphoric enthusiasm stage)

In this era, newspapers mainly discussed court cases, the first tabling of legislation and the establishment of parliamentary committees to study MAiD. Sue Rodriguez' numerous provincial and federal court cases and appeals were well covered by the press. Rodriguez poignantly asked Parliament in a highly publicised 1991 video statement: "If I cannot give consent to my own death, whose body is this? Who owns my life?" 63 The frequent articles about Rodriguez reflected positive Canadian polls about the right to decide when and how to die. In 1994, a physician illegally provided her with MAiD. The press also described confusion about who had jurisdiction over MAiD: the provinces, the courts or the Federal government. Parliament and the Supreme Court of Canada (SCC) disagreed about which body could legalise MAiD. Starting in 1991, newspapers reported that three federal Members of Parliament (MPs), including Robert Wenman and Chris Axworthy, tabled private members' MAiD Bills. In 1992 and 1994, Svend Robinson, who was present at Rodriguez' death, tabled two MAiD Bills. Although newspapers kept the public well informed about MAiD developments in the federal legislature that would change physicians' role, they continued to point out the lack of opposition to it. However, journalists did discuss MAiD's illegality, indicating it could be a slippery slope for Canadian society. Although not MAiD, newspapers discussed Robert Latimer's widely publicised case and conviction for murdering his disabled daughter, Tracy, within the wider Canadian debate about having a choice in dying. Newspapers additionally portrayed MAiD as a choice by featuring […].

[Table 2 residue: top-10 keyword frequencies by era (column headers 1972-1990, 1991-1995, 1996-2004; eg 'Assist(ed, ing, s)', '37%'); the full table is not recoverable.]

Journalists kept Canadians informed about MAiD developments in other countries. For instance, Washington State voters defeated a 1991 ballot question on MAiD. As well, articles discussed international criticism of the Netherlands, as physicians there were euthanising patients with dementia and mental illness, possibly against the patients' wishes. Journalists continued to link MAiD with patient suffering and intolerable pain by featuring people who had agonising and prolonged deaths because they were denied MAiD by their physician and the government. MP Robinson, who was calling for MAiD to become legal, publicly discussed his friend who suffered terribly before dying (table 4).
Newspapers additionally reported on two new contentious books about how to commit suicide, Final Exit and How to Die with Dignity, which empowered patients to take matters into their own hands if they could not change physicians' role. Numerous stories discussed how MAiD should be defined and what it should include (eg, assisted suicide, euthanasia, etc). Although not MAiD, newspapers published numerous stories about patients who won court cases to disconnect medical equipment and/or refuse treatment or food, like Nancy B in Québec (ie, passive euthanasia). Journalists reported that palliative care continued to distance itself because MAiD conflicted with its philosophy. In the midst of court cases, bills and growing support from the Canadian public, newspapers reported that the CMA was considering developing a position against MAiD, since it did not have one. While there were reports of illegal MAiD occurring, Canadian physicians continued to be portrayed as opponents who advocated instead for palliative care. In 1993, newspapers reported on a landmark survey which found that two-thirds of Alberta physicians did not want to make MAiD decisions and were evenly split over its morality. Some physicians who took the survey said they feared being incriminated for abuse, while others said they did not receive MAiD requests from patients. MAiD opponents were portrayed as criticising journalists for their one-sided, favourable portrayal. In letters and stories, opponents voiced their concerns about the dangers of MAiD (ie, allowing the euthanasia of humans). Opponents also warned Canadians that safeguards might not protect vulnerable people, who might be discarded rather than allowed to take up valuable bed space.

1996-2004-newspapers' portrayal of professional aspects of physicians' MAiD role (realising the cost of significant progress stage)

Some Canadian medical professionals began to be featured in newspapers as advocates for physicians to change their role and accommodate MAiD. In 1996, Dr Maurice Généreux made headlines when he was arrested, and eventually jailed, for writing fatal barbiturate prescriptions for HIV patients. Shortly after, Dr Nancy Morrison made the national news for hastening the death of Paul Mills, a patient with terminal cancer. Although physicians advocated for change from within the profession, and a national survey found that 20% of them would participate in MAiD, the CMA continued to oppose it. Journalists observed medicine's growing split: palliative care providers strongly opposed MAiD and said that patients did not request it, while other physicians were either undecided or supportive. Newspapers portrayed MAiD as an open, yet polarised, discussion that had captured Canadians' interest. They described MAiD as an 'act of love', a 'good death' and 'based on mercy', and also as 'cruel' and 'unjust'. In light of physicians' role, journalists continued to explore the differences between passive euthanasia, active euthanasia and assisted suicide. Stories about the first national survey of Canadian patients with cancer revealed that they wanted MAiD. Newspapers persisted in featuring stories about incapacitated and suffering patients with untreatable pain being denied MAiD, which reflected many Canadians' desire for it to be part of physicians' role. In contrast, physicians continued to be portrayed as advocates for improving palliative care and pain control.
Reporters continued to publish stories about MAiD in other countries, especially the Netherlands, where physicians were not prosecuted for euthanising patients. Stories about Dr Kevorkian reported that he had assisted over 100 patients and continued to be arrested and to appear in US courts. Journalists also reported on Australia's first MAiD death. All MAiD deaths in other countries occurred with the aid of physicians. As newspapers reported that national polls showed growing public support, they publicised that the Prime Minister, Jean Chrétien, promised to hold a free vote about MAiD, which did not occur. Politicians also defeated MP Robinson's motion to review and revise the CCC to accommodate MAiD. Public calls to track and study the occurrence of MAiD, even though it was illegal, went unanswered due to physicians' fear of being prosecuted. Physicians disputed stories about illegal MAiD deaths in Canada, estimates of which ranged from none to hundreds annually.

Table 3: Theme definitions (reconstructed from flattened text)
- Countries (other): information about MAiD in countries other than Canada.
- Debating: controversy, critiques of MAiD, public discussions, ethics, morals and questions.
- Defining: the practices associated with MAiD and the types of MAiD (eg, active, passive).
- Legalising: the legality of MAiD, court cases and decisions, government, legislation and the law.
- Opposing: resistance to or disagreement with euthanasia and assisted suicide.
- Physician's role: physician responsibilities, practices and tasks of physicians and hospitals.
- Supporting: advocating for MAiD and patients' rights; economics of healthcare. Polls were included, as most favoured MAiD.

Table 4: Sample quotes by era and theme (reconstructed fragment)

1996-2004
- Physician's role: 'Il faut non seulement soulager les souffrances du malade, mais aussi lui fournir une présence et de l'affection.' Le Soleil 1997. English: It is necessary not only to relieve the patient's suffering, but also to provide him with a presence and affection.
- Supporting: "I am definitely in favour of euthanasia and hope when my time comes I will be able to die with dignity and not rot away in a nursing home." The Province 2001.

2005-2013
- Committing: 'Smith says the risk of incarceration denies the right of freedom to relatives who assist by taking their loved ones to jurisdictions where physician-assisted suicide is legal.'
- Legalising: 'Circumstances are pushing the nation in that direction: In Quebec, the Dying with Dignity Commission (an all-party group drawn from the National Assembly) recently issued a comprehensive report suggesting, in part, that doctors who help a terminally ill patient die by suicide not be charged criminally.' Guelph Mercury 2012. 'Assisted suicide is already quietly taking place in hospitals and hospices across Canada without any rules or guidelines.' The Gazette 2005.
- Opposing: 'She said those who are particularly vulnerable are the elderly, disabled, and people who may worry about being a "burden to society," and safeguards are not effectively protecting vulnerable people in jurisdictions where assisted suicide is already allowed.' CBC News 2012.
- Physician's role: 'Should physicians remain steadfastly committed to one of the most fundamental tenets of ethical practice, namely, to respect the value of human life, and not actively participate in (physician-assisted death) and euthanasia, or does the physician have the moral responsibility to relieve suffering even if by doing so death is hastened?' Dr Pierre Harvey, of Rivière-du-Loup, Que., said the CMA's mandate is to be leaders in healthcare. Ottawa Citizen 2013.
- Physician's role: 'En 23 ans, je n'ai jamais eu de véritable demande d'euthanasie' (Daoust-Boisvert A. Assemblée de l'Association médicale canadienne: les médecins québécois ont dû s'expliquer sur l'aide médicale à mourir). Le Devoir 2013. English: "In 23 years, I have never had a real request for euthanasia." 'To offer assisted suicide to patients is a betrayal of their trust. A clear medical line between caring and killing is essential.' Telegraph Journal 2016.
- Supporting: 'The evidence from years of experience and research where euthanasia and/or assisted suicide are permitted does not support claims that decriminalisation will result in vulnerable persons being subjected to abuse or a slippery slope from voluntary to non-voluntary euthanasia,' reads a summary of the report's findings. The Vancouver Sun 2011.

2014-2016
- Committing: 'While family members were supportive of not including life-prolonging treatment, they asked that the euthanasia advance directive not be followed because of uncertainty about the person's current wishes, not being ready for the person to die or not sensing that the person is suffering.' The Macleod Gazette 2016.

2005-2013-newspapers' portrayal of political aspects of physicians' MAiD role (realising the cost of significant progress stage; heading reconstructed)

[…] to the SCC in the next era. A 2009 poll of Québec physicians found 74% support for MAiD, and 4 years later the province introduced Bill 52, An Act Respecting End-of-Life Care/Loi concernant les soins de fin de vie, which passed in the fifth era. Newspapers again portrayed the growing awareness of patient rights and autonomy regarding the option and choice of MAiD. As patients participated more actively in their health and access to the internet grew, these developments began to affect the patient-physician relationship. This was evident in the number of stories about, and letters from, Canadians reporting that they wanted physicians to accommodate MAiD. Additionally, a 2011 Royal Society of Canada report recommended that MAiD be legalised and monitored by a national body. Newspapers reported that the Euthanasia Prevention Coalition of Canada attempted to discredit the Royal Society of Canada report because the panel members were publicly known to be in favour of MAiD. Physicians expressed their disappointment that newspaper articles continued to discuss Canadian patients' fears about palliative care and suffering during the dying process. Journalists revealed that Canadians were secretly accessing MAiD in other countries, such as Switzerland, if they could afford it and were well enough to travel. But they also reminded readers that participating in this process could result in jail time. Newspapers reported that many physicians and the conservative Federal government were opposed to MAiD. For instance, a 2011 CMA poll found that only 34% of physicians supported MAiD. Some physicians said they had never had a request for MAiD. While some physicians requested clarification about what was legally permissible, many articles discussed their continued call for changes to palliative care. Stories also indicated that Canada's conservative government opposed changes to the CCC and voted against MAiD Bills. Nevertheless, MP Lalonde continued to maintain publicly that there should be guidelines for the illegal MAiD deaths that were occurring. The courts were also conflicted: stories about rulings revealed that some judges agreed with the 1993 SCC decision, while others had a different interpretation. Newspaper reports about the growing number of unwanted hastened deaths, and fears about the slippery slope in European countries, did little to quell Canadians' support.
Opponents publicly voiced their fear that, because hospitals made thousands of errors, the same could occur with MAiD. In letters and stories, they also questioned MAiD's impact on vulnerable people who wanted to live, such as the elderly and disabled.

Table 4, continued: 2014-2016 (reconstructed fragment)
- The CMA defines euthanasia as 'knowingly and intentionally performing an act, with or without consent, that is explicitly intended to end another person's life and that includes the following elements: the subject has an incurable illness; the agent knows about the person's condition; commits the act with the primary intention of ending the life of that person; and the act is undertaken with empathy and compassion and without personal gain.' The Gazette 2014.
- Legalising: 'Dans une décision rendue la semaine dernière, la Cour supérieure avait donné raison à la Coalition des médecins pour la justice sociale (CMJS) et à Lisa D'Amico, une patiente atteinte de paralysie cérébrale, qui réclamaient que les articles de la Loi québécoise concernant les soins de fin de vie (LCSFV) soient suspendus jusqu'au 6 février prochain, soit jusqu'à la fin du délai de douze mois accordé par la Cour suprême au gouvernement fédéral pour se conformer à son jugement favorable au suicide assisté.' English: In a decision rendered last week, the Superior Court had sided with the Coalition des médecins pour la justice sociale (CMJS) and Lisa D'Amico, a patient with cerebral palsy, who demanded that the articles of Québec's end-of-life care act (LCSFV) be suspended until the coming February 6, that is, until the end of the twelve-month period granted by the Supreme Court to the federal government to comply with its judgment in favour of assisted suicide.
- Physician's role: As reported by Postmedia colleague Sharon Kirkey, the Canadian Medical Association recently reversed its opposition to doctor-assisted suicide, saying 'there are rare occasions where patients have such a degree of suffering, even with access to palliative and end-of-life-care, that they request medical aid in dying. In such a case, and within legal constraints, medical aid in dying may be appropriate.' Calgary Herald.
- Supporting: "I am definitely an advocate," says Taylor, a physician assistant whose late husband, Dr Donald Low of SARS-crisis fame, made an impassioned video plea for physician-assisted suicide for the uncoerced terminally ill in his final days of fighting brain cancer 2 years ago. The Star 2015.
Although newspaper columnists and letter-writers debated whether to allow minors to access MAiD, it is only available to adults over 18 years. Journalists reported that safeguards would protect vulnerable people, but reminded Canadians that physicians felt strongly about providing additional resources to palliative care. Until mid-2016, newspapers reported that Canadians continued to travel to other countries to end their life. When legislation was delayed for 4 months in 2016, stories revealed that some grievously ill Canadians obtained a court order to access MAiD. Some hospitals indicated that it was going to be difficult to provide both palliative care and MAiD, and palliative care staff did not want to administer MAiD. Stories about publicly funded Catholic hospitals and nursing homes indicated they would not participate, causing problems for patients in smaller centres. Journalists interviewed hospital executives and physicians about the changes needed including: staffing, compensation, location, standards, timing, assessment and criteria. As MAiD is administered provincially and some hospitals and physicians were not participating, articles discussed Canadians' concerns about uneven or no access. Whether to change physicians' role became moot and newspapers focused on which changes to make. Newspapers quoted physicians who voiced discomfort about how their role would change and that they would betray patient trust. Some physicians believed that being required to refer patients for MAiD made them indirectly responsible for a patient's death. These physicians formed groups such as Physicians Alliance for Total Refusal of Euthanasia and Collectif des mdecins contre l'euthanasie. The CMA softened its opposition to MAiD, indicating physicians should follow their conscience. The SCC ruling changed physicians' role in every province except Qubec where a long debate between politicians, physicians and the public resulted in legislation being passed before the ruling was known. Newspapers reported that the paradox of teaching medical students about MAiD and healing bothered physicians, but they updated the curriculum to incorporate MAiD. Physicians expanded their role to include: referring patients, determining a patient's eligibility and participating in MAiD. DIsCussIOn AnD COnClusIOn Our qualitative content analysis of Canadian French and English newspaper articles over four decades found that different aspects of physicians' MAiD role were portrayed in each era. Although table 3 may be interpreted as: 1) the discourse has not changed significantly or 2) the discourse within the themes has changed, our qualitative analysis of themes indicates that newspapers' attention focused on different aspects of MAiD over time. The long debate about MAiD over time was crucial: newspapers kept Canadians current about developments by regularly publishing articles about polls, legislation, court cases and public perceptions about suffering and dying. Additionally, the press continuously reported the disconnect between physician opposition, Canadians' growing support and politicians' attempts to legalise MAiD. Although religious groups and many physicians opposed MAiD, newspapers primarily focused on portraying MAiD developments positively. Our findings were consistent with other Canadian MAiD studies 7 33 34 40 and our study enabled us to examine Open access the issue from beginning to end in both languages. Like other studies of MAiD, 33 34 we found that newspapers reflected popular opinions about it. 
Similar to newspaper studies about health issues, 18 65 and encompasses responsibilities, practices and tasks. The roles that could change with the legalisation of MAiD include collaborator (ie, shared decision-making), leader (ie, patient safety), health advocate (ie, partnership) and professional (ie, commitment to saving or prolonging life for patient, ethical judgement, personal well-being). 66 Other themes we found, such as supporting and committing, were intertwined with physician role change. Supporting patients' choice is connected with the partnership, educator, and shared decision-making role of physicians, while committing could be connected to physicians' role to uphold ethical and legal decision-making. We reveal a rich story about how MAiD portrayals shifted as the issue unfolded, going through the issue-attention cycle's stages 1, 2 and 3, then back to stage 2 when MAiD was going to become legal. 35 For physicians' MAiD role portrayals, the first three of the five stages were observed before legalisation: 1) public opinion ('preproblem', 1972-1990), 2) legal aspects ('alarmed discovery and euphoric enthusiasm stage', 1991-1995 and 2014-2016) and 3) professional and political aspects ('realising the cost of significant progress stage ', 1996-2004 and 2005-2013). Interestingly, even though Qubec legalised MAiD in 2015, newspaper articles in the 2014-2016 era did not move to 'gradual decline of intense public interest' (stage 4). This may be because MAiD was still not legal across the rest of Canada. To summarise, the portrayal of physicians' role in MAiD evolved from: stage 1) a descriptive look at physician actions (ie, number of physicians secretly performing MAiD) and public opinion of whether they believe MAiD should be legalised; to stage 2) a passionate ethical debate of defining what a physician's role is in end-of-life care and the legal ramifications of changing their role; to stage 3) addressing the physician's role in policy change and professional obligation to relieve patient suffering and respect patient choice and dignity and then back to stage 2) the portrayal of the physicians as conservative/resistant to adopting MAiD, but legally required to define and educate role change in order to provide service. Understanding newspapers' portrayal of physicians' role in MAiD may provide important insights for professionals, policymakers and legislators. Four continents have legalised MAiD and more are currently examining it. 1 For over four decades, Canadian newspapers played a part in keeping attention on MAiD, but physicians did not change their professional role until they were legislated to do so. Based on stories about physicians favouring palliative care and opposing MAiD, Canadian patients and politicians may have perceived that physicians would not support their end-of-life wishes, and thus were reluctant to change. 14 15 67 Health policymakers could examine the issues newspapers are paying more attention to and devote more time to understanding what might need to be changed to resolve an issue. Although not appropriate for every issue or in every context, our study reveals that legislation might be a path to achieve change in professional roles. This study's strengths are that it thematically captures MAiD's portrayal of the public, politicians, physicians and key events in Canadian newspapers for the issue's entire duration. 
Our article sample may be biased because it is based on newspapers that allow their publications to be entered into searchable online databases, which helps explain why some provinces and territories are not well represented in the random sample. The random sample may not represent the full sample, signifying sampling bias 68 and meaning that some themes may not be present. Articles written before MAiD was legal may be predisposed to culture bias and the halo effect (ie, the tendency for an impression created in one area to influence opinion in another area). 68 To surmount confirmation and sampling bias, 34 53-58 each era was randomly sampled and the last era was oversampled. Analysing the full database instead of a random sample could yield additional insights. We observed that women who fought MAiD court cases and tabled Bills were prominently featured in newspapers, but more male physicians and politicians were quoted. Additionally, Health Canada reports that 2% more women than men received MAiD up to June 2017. 5 These suggest that gender bias may be present. Exploring gender and MAiD may yield additional insights into gender differences in healthcare use and access. 69 As well, future researchers will want to pay careful attention to patient empowerment and its connection to MAiD, as it is under-represented in our sample. This may be because newspapers did not explicitly connect these two topics or because the random sample is biased. Although two-thirds of Canadians identify as Roman Catholic and/or Christian, journalists paid less attention to religion in the random sample. This also warrants further investigation, because religious physicians, politicians, hospitals and citizens helped defeat Bills and stall MAiD legislation after the SCC ruling, and voiced their discontent in letters and news stories. Little is also known about the relationship between religion, opposition to MAiD and religion's decline in Canada. The lack of attention to palliative care, advocated by physicians and MAiD opponents, should be investigated, as it appears to be an important topic for physicians. Future research could also examine newspaper article tone to understand whether Canadian newspapers used a balanced approach when portraying MAiD. 70 Based on our reading of all the articles in the random sample, it appears that many had a positive tone towards MAiD. However, this may be a reflection of Canadians' general support for MAiD. It also anecdotally seems that newspapers negatively portrayed physicians and their resistance to MAiD. Other qualitative research, such as interviewing physicians involved in MAiD legalisation, may reveal whether newspaper portrayals of MAiD helped them change their perspectives. Investigating articles published in both Canadian medical journals and newspapers after legalisation could also expand our understanding of MAiD portrayals and changes in physicians' role. We note the growing importance of social media, websites, RSS feeds, apps, etc, and the general decline of newspapers, which may affect the results of future newspaper studies. It would be prudent for future researchers to consider how different generations access news about MAiD (eg, seniors are more likely to watch television and millennials are more likely to use an app or social media). Further research about the portrayal of MAiD and physicians' role in Canadian French-language newspapers is also needed. 
Because MAiD legalisation occurred differently in Québec (ie, public debate, then legislation) than in the rest of Canada (ie, SCC ruling, then legislation), the different pathways to legalisation should be explored. Because Canada is officially bilingual, future researchers should be attentive to this issue. |
/**
 * Class to produce a single mega-report from all available
* report documents.
*
*/
public class ReportMerger {
private final static String TEMPLATE_FILE = "ca/phon/app/opgraph/report/AllReportsTemplate.xml";
private Project project;
public ReportMerger() {
super();
}
public ReportMerger(Project project) {
super();
this.project = project;
}
public void setProject(Project project) {
this.project = project;
}
public Project getProject() {
return this.project;
}
// recursive method to update all node ids in a graph
private void updateIds(OpGraph graph) {
for(OpNode node:graph.getVertices()) {
final UUID uuid = UUID.randomUUID();
node.setId(Long.toHexString(uuid.getLeastSignificantBits()));
if(node instanceof MacroNode) {
updateIds(((MacroNode)node).getGraph());
}
}
}
private boolean checkName(String name) {
if(name.contains("legacy") || name.contains("deprecated")) return false;
if(name.startsWith(".") || name.startsWith("~") || name.startsWith("__")) return false;
return true;
}
private OpGraph loadTemplate() throws IOException {
final InputStream is = getClass().getClassLoader().getResourceAsStream(TEMPLATE_FILE);
if(is != null) {
OpGraph retVal = OpgraphIO.read(is);
updateIds(retVal);
return retVal;
} else {
throw new FileNotFoundException(TEMPLATE_FILE);
}
}
/**
* Used to create a macro node for a category of reports (e.g., Stock, User, Project)
*
 * @return a new {@link MacroNode} wrapping a fresh copy of the report template
 * @throws IOException if the report template cannot be loaded
*/
private MacroNode createReportCategoryMacroNode() throws IOException {
final OpGraph graph = loadTemplate();
final MacroNode node = new MacroNode(graph);
// publish inputs
final OpNode queryHistoryNode =
graph.getNodesByName("Query History").stream().findFirst().orElse(null);
// should not happen
if(queryHistoryNode == null)
throw new IllegalArgumentException("Report template does not contain 'Query History' node");
return node;
}
private MacroNode addReportCategoryMacroNode(OpGraph document, String title) throws IOException, ItemMissingException, VertexNotFoundException, CycleDetectedException, InvalidEdgeException {
final MacroNode retVal = createReportCategoryMacroNode();
retVal.setName(title);
// add macro node to document
document.add(retVal);
connectMacroNode(document, retVal);
// make macro node optional
final WizardExtension ext = document.getExtension(WizardExtension.class);
if(ext != null) {
ext.addOptionalNode(retVal);
ext.setOptionalNodeDefault(retVal, true);
}
return retVal;
}
private void connectMacroNode(OpGraph parent, MacroNode macroNode) throws ItemMissingException, VertexNotFoundException, CycleDetectedException, InvalidEdgeException {
final OpGraph macroGraph = macroNode.getGraph();
		// find 'Query History' nodes in the destination (parent) and source (macro) graphs
final OpNode queryHistoryNodeA =
parent.getNodesByName("Query History")
.stream().findFirst().orElse(null);
if(queryHistoryNodeA == null)
throw new IllegalArgumentException("No 'Query History' node found in destination graph");
final OpNode queryHistoryNodeB =
macroGraph.getNodesByName("Query History")
.stream().findFirst().orElse(null);
if(queryHistoryNodeB == null)
throw new IllegalArgumentException("No 'Query History' node found in source graph");
		// find 'UUID#toString()' node in destination graph
		final OpNode uuidNode = parent.getNodesByName("UUID#toString()")
				.stream().findFirst().orElse(null);
		if(uuidNode == null)
			throw new IllegalArgumentException("'UUID#toString()' node not found in destination graph");
// collect fields
final InputField queryHistoryProjectInputField = queryHistoryNodeB.getInputFieldWithKey("project");
final InputField queryHistoryUUIDInputField = queryHistoryNodeB.getInputFieldWithKey("queryId");
final InputField projectInputField = macroNode.publish("project", queryHistoryNodeB, queryHistoryProjectInputField);
final InputField queryIdInputField = macroNode.publish("queryId", queryHistoryNodeB, queryHistoryUUIDInputField);
final OutputField projectOutputField = queryHistoryNodeA.getOutputFieldWithKey("project");
final OutputField queryUUIDField = uuidNode.getOutputFieldWithKey("value");
// create and add links
final OpLink projectLink = new OpLink(queryHistoryNodeA, projectOutputField, macroNode, projectInputField);
final OpLink uuidLink = new OpLink(uuidNode, queryUUIDField, macroNode, queryIdInputField);
parent.add(projectLink);
parent.add(uuidLink);
}
public OpGraph createAllReportsGraph() throws IOException, IllegalArgumentException, ItemMissingException, VertexNotFoundException, CycleDetectedException, InvalidEdgeException {
final OpGraph retVal = loadTemplate();
NodeEditorSettings nes = retVal.getExtension(NodeEditorSettings.class);
if(nes == null) {
nes = new NodeEditorSettings();
retVal.putExtension(NodeEditorSettings.class, nes);
}
nes.setGenerated(true);
final WizardExtension ext = retVal.getExtension(WizardExtension.class);
if(ext != null)
ext.setWizardTitle("All Reports");
retVal.setId("root");
final ReportLibrary library = new ReportLibrary();
final MacroNode stockReportsNode = addReportCategoryMacroNode(retVal, "Stock Reports");
for(URL reportURL:library.getStockGraphs()) {
final String name = reportURL.getFile();
if(!checkName(name)) continue;
final OpGraph graph = OpgraphIO.read(reportURL.openStream());
if(graphGenerated(graph)) continue;
updateIds(graph);
addReport(retVal, stockReportsNode.getGraph(), graph);
}
if(library.getUserGraphs().iterator().hasNext()) {
final MacroNode userReportsNode = addReportCategoryMacroNode(retVal, "User Reports");
for(URL reportURL:library.getUserGraphs()) {
final String name = reportURL.getFile();
if(!checkName(name)) continue;
final OpGraph graph = OpgraphIO.read(reportURL.openStream());
if(graphGenerated(graph)) continue;
updateIds(graph);
addReport(retVal, userReportsNode.getGraph(), graph);
}
}
final Project proj = getProject();
if(proj != null && library.getProjectGraphs(proj).iterator().hasNext()) {
final MacroNode projectReportsNode = addReportCategoryMacroNode(retVal, "Project Reports");
for(URL reportURL:library.getProjectGraphs(proj)) {
final String name = reportURL.getFile();
if(!checkName(name)) continue;
final OpGraph graph = OpgraphIO.read(reportURL.openStream());
if(graphGenerated(graph)) continue;
updateIds(graph);
addReport(retVal, projectReportsNode.getGraph(), graph);
}
}
return retVal;
}
private boolean graphGenerated(OpGraph graph) {
final NodeEditorSettings settings = graph.getExtension(NodeEditorSettings.class);
return (settings != null && settings.isGenerated());
}
/**
 * Wrap {@code report} in a {@link MacroNode} and add it to the given macro
 * graph within {@code document} as a new optional node. Any nodes marked as
 * optional or available in {@code report} will also be marked as such in
 * {@code document}'s wizard extension. Report templates are not
 * merged; instead, the default report template is available.
 *
 * @param document destination report document
 * @param macroGraph graph within {@code document} that receives the macro node
 * @param report the report graph to merge
 * @return reference to {@code document}
* @throws IllegalArgumentException
* @throws ItemMissingException
* @throws CycleDetectedException
* @throws VertexNotFoundException
* @throws InvalidEdgeException
*/
private OpGraph addReport(OpGraph document, OpGraph macroGraph, OpGraph report)
throws IllegalArgumentException, ItemMissingException, VertexNotFoundException, CycleDetectedException, InvalidEdgeException {
final WizardExtension extA = document.getExtension(WizardExtension.class);
if(extA == null || !(extA instanceof ReportWizardExtension))
throw new IllegalArgumentException("Destination graph is not a report document");
final WizardExtension extB = report.getExtension(WizardExtension.class);
if(extB == null || !(extB instanceof ReportWizardExtension))
throw new IllegalArgumentException("Source graph is not a report document");
// publish input fields of Query History node in macro
final MacroNode macroNode = new MacroNode(report);
macroNode.setName(extB.getWizardTitle());
// add macroNode to reportA
macroGraph.add(macroNode);
extA.addOptionalNode(macroNode);
extA.setOptionalNodeDefault(macroNode, true);
connectMacroNode(macroGraph, macroNode);
// setup optionals for reportA
for(OpNode optionalNode:extB.getOptionalNodes()) {
extA.addOptionalNode(optionalNode);
extA.setOptionalNodeDefault(optionalNode, extB.getOptionalNodeDefault(optionalNode));
}
// setup settings nodes
for(OpNode settingsNode:extB) {
extA.addNode(settingsNode);
extA.setNodeTitle(settingsNode, macroNode.getName() + ": " + extB.getNodeTitle(settingsNode));
extA.setNodeMessage(settingsNode, extB.getNodeMessage(settingsNode));
extA.setNodeForced(settingsNode, extB.isNodeForced(settingsNode));
}
return document;
}
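	// Usage sketch (illustrative, not from the original source; assumes an open
	// Phon Project instance named `project`):
	//
	//   final ReportMerger merger = new ReportMerger(project);
	//   final OpGraph allReports = merger.createAllReportsGraph();
	//   // `allReports` may then be serialized or opened in the report wizard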
} |
Immunization with cytoplasmic repetitive antigen and flagellar repetitive antigen of Trypanosoma cruzi stimulates a cellular immune response in mice
In previous studies, we demonstrated that CRA and FRA recombinant proteins, used for diagnosis of Chagas' disease, elicited a humoral immune response in susceptible and resistant mice. To understand better the immune response to these proteins, we have evaluated the cellular immune response in CRA- and in FRA-immunized BALB/c and C57BL/6 mice. A specific cellular lymphoproliferative response was observed in both strains of mice. Spleen cell cultures mainly from CRA-immunized C57BL/6 and FRA-immunized BALB/c mice produced high levels of IFN-γ, indicating the induction of a Type 1 immune response. Regarding the T cell subsets, CD4+ T cells were the major source of IFN-γ in CRA- and FRA-immunized mice. These results suggest that CRA and FRA are important immunogens in inducing a Type 1 immune response and that they may be considered as potential vaccine antigens. |
//Include macros from the `log` crate (needed for the `error!` macro below).
#[macro_use]
extern crate log;
extern crate env_logger;
extern crate rustful;
use std::error::Error;
use rustful::{Server, Context, Response, DefaultRouter};
fn say_hello(context: Context, response: Response) {
//Get the value of the path variable `:person`, from below.
let person = match context.variables.get("person") {
Some(name) => name,
None => "stranger".into()
};
//Use the name from the path variable to say hello.
response.send(format!("Hello, {}!", person));
}
fn main() {
env_logger::init();
println!("Visit http://localhost:8080 or http://localhost:8080/Olivia (if your name is Olivia) to try this example.");
//Create a DefaultRouter and fill it with handlers.
let mut router = DefaultRouter::<fn(Context, Response)>::new();
router.build().many(|node| {
//Handle requests for root...
node.then().on_get(say_hello);
//...and one level below.
//`:person` is a path variable and it will be accessible in the handler.
node.path(":person").then().on_get(say_hello);
});
//Build and run the server.
let server_result = Server {
handlers: router,
//Turn a port number into an IPV4 host address (0.0.0.0:8080 in this case).
host: 8080.into(),
//Use default values for everything else.
..Server::default()
}.run();
match server_result {
Ok(_server) => {},
Err(e) => error!("could not start server: {}", e.description())
}
}
|
from setuptools import setup
def readme():
    # Avoid shadowing the function name with the file handle
    with open('README.rst') as readme_file:
        return readme_file.read()
setup(name='flask-now',
version='0.2.1',
description='Flask App Generator',
long_description=readme(),
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Code Generators',
'Development Status :: 3 - Alpha',
'Framework :: Flask',
'Programming Language :: Python :: 3 :: Only',
],
keywords='extension flask flask-extension build-tool flask-build-tool flask_now flask-now app-generator flask-app-generator app generator',
url='http://github.com/ozanonurtek/flask-now',
author='<NAME>',
author_email='<EMAIL>',
packages=['flask_now'],
entry_points={'console_scripts': ['flask-now = flask_now.now:main']},
include_package_data=True,
zip_safe=False)
|
K-PdM: KPI-Oriented Machinery Deterioration Estimation Framework for Predictive Maintenance Using Cluster-Based Hidden Markov Model
The explosive increase of industrial data collected from sensors has brought increasing attention to data-driven predictive maintenance for industrial machines in cyber-physical systems (CPSs). Since machinery faults are always caused by performance deterioration of components, learning the deteriorating mode from observed sensor data facilitates the prognostics of impending faults and the prediction of the remaining useful life (RUL). In modern CPSs, several key performance indicators (KPIs) are monitored to detect the corresponding fine-grained deteriorating modes of industrial machines. However, overall deterioration estimation and RUL prediction based on these KPIs with various patterns has been a great challenge, especially when the deterioration indices are unlabeled or the root causes are uninterpretable. In this paper, we propose K-PdM, a cluster-based hidden Markov model for machinery deterioration estimation and RUL prediction based on multiple KPIs. The method uncovers the fine-grained deteriorating modes of machines from each unlabeled KPI data stream and learns a mapping between each deteriorating KPI index and the RULs. Accordingly, an overall deterioration estimation and RUL prediction for a machine can be achieved by combining the deterioration estimates of the individual KPIs. Moreover, a set of interpretable semantic rules is set up to analyze the root cause of performance deterioration among KPIs. An experimental application based on the PHM08 data sets demonstrates the framework's applicability. The obtained results show its effectiveness in predicting the RULs of machines. |
Dendritic spine instability and insensitivity to modulation by sensory experience in a mouse model of fragile X syndrome
Fragile X syndrome (FXS) is the most common inherited form of mental retardation and is caused by transcriptional inactivation of the X-linked fragile X mental retardation 1 (FMR1) gene. FXS is associated with increased density and abnormal morphology of dendritic spines, the postsynaptic sites of the majority of excitatory synapses. To better understand how lack of the FMR1 gene function affects spine development and plasticity, we examined spine formation and elimination of layer 5 pyramidal neurons in the whisker barrel cortex of Fmr1 KO mice with a transcranial two-photon imaging technique. We found that the rates of spine formation and elimination over days to weeks were significantly higher in both young and adult KO mice compared with littermate controls. The heightened spine turnover in KO mice was due to the existence of a larger pool of short-lived new spines in KO mice than in controls. Furthermore, we found that the formation of new spines and the elimination of existing ones were less sensitive to modulation by sensory experience in KO mice. These results indicate that the loss of Fmr1 gene function leads to ongoing overproduction of transient spines in the primary somatosensory cortex. The insensitivity of spine formation and elimination to sensory alterations in Fmr1 KO mice suggest that the developing synaptic circuits may not be properly tuned by sensory stimuli in FXS. |
At least 20 people were killed when a bus collided with a fuel tanker in southwestern Nigeria, a newspaper said Tuesday.
The Nigerian Tribune said the accident occurred on a busy highway linking the economic capital Lagos to the third largest city of Ibadan on Monday and also left several people injured.
The driver of the bus apparently lost control before ramming into an approaching truck transporting petrol.
Accidents occur regularly on Nigeria's poorly-maintained roads due to speeding and a disregard for basic traffic rules. |
package com.xjbg.rocketmq.autoconfigure;
import org.apache.rocketmq.client.producer.LocalTransactionState;
import org.apache.rocketmq.client.producer.TransactionListener;
import org.apache.rocketmq.common.message.Message;
import org.apache.rocketmq.common.message.MessageExt;
import org.springframework.transaction.support.TransactionSynchronization;
import org.springframework.transaction.support.TransactionSynchronizationManager;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
/**
 * must be thread-safe: executeLocalTransaction and checkLocalTransaction can be invoked concurrently
*
* @author kesc
* @since 2019/4/3
*/
public class DefaultSpringMqTransactionListener implements TransactionListener {
    private final Set<String> transactionSet = ConcurrentHashMap.newKeySet(); // thread-safe set shared across producer callback threads
@Override
public LocalTransactionState executeLocalTransaction(Message msg, Object arg) {
TransactionSynchronizationManager.registerSynchronization(new TransactionSynchronization() {
@Override
public void afterCommit() {
transactionSet.add(msg.getTransactionId());
}
});
        // Defer the decision: the broker will call checkLocalTransaction(...) once
        // the surrounding Spring transaction has had a chance to commit.
        return LocalTransactionState.UNKNOW;
}
@Override
public LocalTransactionState checkLocalTransaction(MessageExt msg) {
        // remove() is atomic on the concurrent set, avoiding a check-then-act race
        boolean committed = transactionSet.remove(msg.getTransactionId());
        if (committed) {
            return LocalTransactionState.COMMIT_MESSAGE;
        }
        return LocalTransactionState.ROLLBACK_MESSAGE;
}
}
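// Usage sketch (illustrative; the group and topic names are hypothetical and a
// reachable broker is assumed):
//
//   TransactionMQProducer producer = new TransactionMQProducer("tx-producer-group");
//   producer.setTransactionListener(new DefaultSpringMqTransactionListener());
//   producer.start();
//   producer.sendMessageInTransaction(new Message("tx-topic", "hello".getBytes()), null);
//   // The broker calls checkLocalTransaction(...) later if the outcome is still unknown.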
|
SAFETY CASE-ORIENTED ASSESSMENT OF HUMAN-MACHINE INTERFACE FOR NPP I&C SYSTEMS
A safety assessment approach for human-machine interfaces (HMI) of Nuclear Power Plant (NPP) instrumentation and control systems (I&Cs), based on the Safety Case methodology, is proposed. An I&C assessment model is described that takes into account the impact of human factors. A normative profile, based on harmonization and the selection of standard requirements, is developed for choosing HMI safety assessment methods. A ranking of the major design principles of safe HMI is provided. A set of methods for comprehensive human-machine interface safety assessment at life cycle stages is analyzed and adapted, taking into consideration the features of the HMI safety attribute. |
Event-Driven Sensor Deployment in an Underwater Environment Using a Distributed Hybrid Fish Swarm Optimization Algorithm
In open and complex underwater environments, targets to be monitored are highly dynamic and exhibit great uncertainty. To optimize monitoring target coverage, the development of a method for adjusting sensor positions based on environments and targets is of crucial importance. In this paper, we propose a distributed hybrid fish swarm optimization algorithm (DHFSOA), based on the influence of water flow and the operation of an artificial fish swarm system, to improve the coverage efficacy of the event set and to avoid blind movements of sensor nodes. First, by simulating the foraging behavior of fish, sensor nodes autonomously tend to cover events, with congestion control being used to match node distribution density to event distribution density. Second, an information pool is constructed to achieve information-sharing between nodes within the network connection range, to increase the nodes' field of vision, and to enhance their global search abilities. Finally, we conduct extensive simulation experiments to evaluate network performance in different deployment environments. The results show that the proposed DHFSOA performs well in terms of coverage efficacy, energy efficiency, and convergence rate of the event set. |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-02-08 10:29
from __future__ import unicode_literals
from decimal import Decimal
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('confs', '0046_auto_20170207_1603'),
]
operations = [
migrations.AddField(
model_name='conference',
name='no_fees',
field=models.BooleanField(default=False, verbose_name='Pas de frais'),
),
migrations.AlterField(
model_name='conference',
name='deleted',
field=models.BooleanField(default=False, verbose_name='Supprimée'),
),
migrations.AlterField(
model_name='conference',
name='price',
field=models.DecimalField(decimal_places=2, default=Decimal('0.5'), help_text='', max_digits=6, verbose_name='Prix de vente'),
),
]
|
/*
* Copyright (C) 2012-2014 DataStax Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.datastax.driver.mapping;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import com.google.common.base.Function;
import com.google.common.base.Functions;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.datastax.driver.core.*;
/**
* An object handling the mapping of a particular class.
* <p>
* A {@code Mapper} object is obtained from a {@code MappingManager} using the
* {@link MappingManager#mapper} method.
*/
public class Mapper<T> {
    private static final Logger logger = LoggerFactory.getLogger(Mapper.class);
final MappingManager manager;
final ProtocolVersion protocolVersion;
final Class<T> klass;
final EntityMapper<T> mapper;
final TableMetadata tableMetadata;
// Cache prepared statements for each type of query we use.
private volatile Map<QueryType, PreparedStatement> preparedQueries = Collections.<QueryType, PreparedStatement>emptyMap();
private static final Function<Object, Void> NOOP = Functions.<Void>constant(null);
final Function<ResultSet, T> mapOneFunction;
final Function<ResultSet, Result<T>> mapAllFunction;
Mapper(MappingManager manager, Class<T> klass, EntityMapper<T> mapper) {
this.manager = manager;
this.klass = klass;
this.mapper = mapper;
KeyspaceMetadata keyspace = session().getCluster().getMetadata().getKeyspace(mapper.getKeyspace());
this.tableMetadata = keyspace == null ? null : keyspace.getTable(Metadata.quote(mapper.getTable()));
this.protocolVersion = manager.getSession().getCluster().getConfiguration().getProtocolOptions().getProtocolVersionEnum();
this.mapOneFunction = new Function<ResultSet, T>() {
public T apply(ResultSet rs) {
return Mapper.this.map(rs).one();
}
};
this.mapAllFunction = new Function<ResultSet, Result<T>>() {
public Result<T> apply(ResultSet rs) {
return Mapper.this.map(rs);
}
};
}
Session session() {
return manager.getSession();
}
PreparedStatement getPreparedQuery(QueryType type) {
PreparedStatement stmt = preparedQueries.get(type);
if (stmt == null) {
synchronized (preparedQueries) {
stmt = preparedQueries.get(type);
if (stmt == null) {
String query = type.makePreparedQueryString(tableMetadata, mapper);
logger.debug("Preparing query {}", query);
stmt = session().prepare(query);
Map<QueryType, PreparedStatement> newQueries = new HashMap<QueryType, PreparedStatement>(preparedQueries);
newQueries.put(type, stmt);
preparedQueries = newQueries;
}
}
}
return stmt;
}
/**
* The {@code MappingManager} managing this mapper.
*
* @return the {@code MappingManager} managing this mapper.
*/
public MappingManager getManager() {
return manager;
}
/**
* Creates a query that can be used to save the provided entity.
* <p>
     * This method is useful if you want to set up a number of options (tracing,
     * consistency level, ...) of the returned statement before executing it manually
* or need access to the {@code ResultSet} object after execution (to get the
* trace, the execution info, ...), but in other cases, calling {@link #save}
* or {@link #saveAsync} is shorter.
*
* @param entity the entity to save.
     * @return a query that saves {@code entity} (based on its defined mapping).
*/
public Statement saveQuery(T entity) {
PreparedStatement ps = getPreparedQuery(QueryType.SAVE);
BoundStatement bs = ps.bind();
int i = 0;
for (ColumnMapper<T> cm : mapper.allColumns()) {
Object value = cm.getValue(entity);
bs.setBytesUnsafe(i++, value == null ? null : cm.getDataType().serialize(value, protocolVersion));
}
if (mapper.writeConsistency != null)
bs.setConsistencyLevel(mapper.writeConsistency);
return bs;
}
/**
* Save an entity mapped by this mapper.
* <p>
* This method is basically equivalent to: {@code getManager().getSession().execute(saveQuery(entity))}.
*
* @param entity the entity to save.
*/
public void save(T entity) {
session().execute(saveQuery(entity));
}
/**
     * Save an entity mapped by this mapper asynchronously.
* <p>
* This method is basically equivalent to: {@code getManager().getSession().executeAsync(saveQuery(entity))}.
*
* @param entity the entity to save.
* @return a future on the completion of the save operation.
*/
public ListenableFuture<Void> saveAsync(T entity) {
return Futures.transform(session().executeAsync(saveQuery(entity)), NOOP);
}
/**
* Creates a query that can be used to delete the provided entity.
* <p>
     * This method is a shortcut that extracts the PRIMARY KEY from the
     * provided entity and calls {@link #deleteQuery(Object...)} with it.
* <p>
     * This method is useful if you want to set up a number of options (tracing,
     * consistency level, ...) of the returned statement before executing it manually
* or need access to the {@code ResultSet} object after execution (to get the
* trace, the execution info, ...), but in other cases, calling {@link #delete}
* or {@link #deleteAsync} is shorter.
*
* @param entity the entity to delete.
     * @return a query that deletes {@code entity} (based on its defined mapping).
*/
public Statement deleteQuery(T entity) {
Object[] pks = new Object[mapper.primaryKeySize()];
for (int i = 0; i < pks.length; i++)
pks[i] = mapper.getPrimaryKeyColumn(i).getValue(entity);
return deleteQuery(pks);
}
/**
* Creates a query that can be used to delete an entity given its PRIMARY KEY.
* <p>
* The values provided must correspond to the columns composing the PRIMARY
* KEY (in the order of said primary key).
* <p>
     * This method is useful if you want to set up a number of options (tracing,
     * consistency level, ...) of the returned statement before executing it manually
* or need access to the {@code ResultSet} object after execution (to get the
* trace, the execution info, ...), but in other cases, calling {@link #delete}
* or {@link #deleteAsync} is shorter.
*
* @param primaryKey the primary key of the entity to delete, or more precisely
* the values for the columns of said primary key in the order of the primary key.
     * @return a query that deletes the entity of PRIMARY KEY {@code primaryKey}.
*
     * @throws IllegalArgumentException if the number of values provided differs from
* the number of columns composing the PRIMARY KEY of the mapped class, or if
* at least one of those values is {@code null}.
*/
public Statement deleteQuery(Object...primaryKey) {
if (primaryKey.length != mapper.primaryKeySize())
throw new IllegalArgumentException(String.format("Invalid number of PRIMARY KEY columns provided, %d expected but got %d", mapper.primaryKeySize(), primaryKey.length));
PreparedStatement ps = getPreparedQuery(QueryType.DEL);
BoundStatement bs = ps.bind();
for (int i = 0; i < primaryKey.length; i++) {
ColumnMapper<T> column = mapper.getPrimaryKeyColumn(i);
Object value = primaryKey[i];
if (value == null)
throw new IllegalArgumentException(String.format("Invalid null value for PRIMARY KEY column %s (argument %d)", column.getColumnName(), i));
bs.setBytesUnsafe(i, column.getDataType().serialize(value, protocolVersion));
}
if (mapper.writeConsistency != null)
bs.setConsistencyLevel(mapper.writeConsistency);
return bs;
}
/**
* Deletes an entity mapped by this mapper.
* <p>
* This method is basically equivalent to: {@code getManager().getSession().execute(deleteQuery(entity))}.
*
* @param entity the entity to delete.
*/
public void delete(T entity) {
session().execute(deleteQuery(entity));
}
/**
* Deletes an entity mapped by this mapper asynchronously.
* <p>
* This method is basically equivalent to: {@code getManager().getSession().executeAsync(deleteQuery(entity))}.
*
* @param entity the entity to delete.
* @return a future on the completion of the deletion.
*/
public ListenableFuture<Void> deleteAsync(T entity) {
return Futures.transform(session().executeAsync(deleteQuery(entity)), NOOP);
}
/**
* Deletes an entity based on its primary key.
* <p>
* This method is basically equivalent to: {@code getManager().getSession().execute(deleteQuery(primaryKey))}.
*
* @param primaryKey the primary key of the entity to delete, or more precisely
* the values for the columns of said primary key in the order of the primary key.
*
     * @throws IllegalArgumentException if the number of values provided differs from
* the number of columns composing the PRIMARY KEY of the mapped class, or if
* at least one of those values is {@code null}.
*/
public void delete(Object... primaryKey) {
session().execute(deleteQuery(primaryKey));
}
/**
* Deletes an entity based on its primary key asynchronously.
* <p>
* This method is basically equivalent to: {@code getManager().getSession().executeAsync(deleteQuery(primaryKey))}.
*
* @param primaryKey the primary key of the entity to delete, or more precisely
* the values for the columns of said primary key in the order of the primary key.
* @return a future on the completion of the deletion.
*
     * @throws IllegalArgumentException if the number of values provided differs from
* the number of columns composing the PRIMARY KEY of the mapped class, or if
* at least one of those values is {@code null}.
*/
public ListenableFuture<Void> deleteAsync(Object... primaryKey) {
return Futures.transform(session().executeAsync(deleteQuery(primaryKey)), NOOP);
}
/**
* Map the rows from a {@code ResultSet} into the class this is mapper of.
*
* @param resultSet the {@code ResultSet} to map.
* @return the mapped result set. Note that the returned mapped result set
* will encapsulate {@code resultSet} and so consuming results from this
* returned mapped result set will consume results from {@code resultSet}
* and vice-versa.
*/
public Result<T> map(ResultSet resultSet) {
return new Result<T>(resultSet, mapper, protocolVersion);
}
/**
     * Creates a query to fetch an entity given its PRIMARY KEY.
* <p>
* The values provided must correspond to the columns composing the PRIMARY
* KEY (in the order of said primary key).
* <p>
     * This method is useful if you want to set up a number of options (tracing,
     * consistency level, ...) of the returned statement before executing it manually,
* but in other cases, calling {@link #get} or {@link #getAsync} is shorter.
*
* @param primaryKey the primary key of the entity to fetch, or more precisely
* the values for the columns of said primary key in the order of the primary key.
     * @return a query that fetches the entity of PRIMARY KEY {@code primaryKey}.
*
     * @throws IllegalArgumentException if the number of values provided differs from
* the number of columns composing the PRIMARY KEY of the mapped class, or if
* at least one of those values is {@code null}.
*/
public Statement getQuery(Object... primaryKey) {
if (primaryKey.length != mapper.primaryKeySize())
throw new IllegalArgumentException(String.format("Invalid number of PRIMARY KEY columns provided, %d expected but got %d", mapper.primaryKeySize(), primaryKey.length));
PreparedStatement ps = getPreparedQuery(QueryType.GET);
BoundStatement bs = ps.bind();
for (int i = 0; i < primaryKey.length; i++) {
ColumnMapper<T> column = mapper.getPrimaryKeyColumn(i);
Object value = primaryKey[i];
if (value == null)
throw new IllegalArgumentException(String.format("Invalid null value for PRIMARY KEY column %s (argument %d)", column.getColumnName(), i));
bs.setBytesUnsafe(i, column.getDataType().serialize(value, protocolVersion));
}
if (mapper.readConsistency != null)
bs.setConsistencyLevel(mapper.readConsistency);
return bs;
}
/**
* Fetch an entity based on its primary key.
* <p>
* This method is basically equivalent to: {@code map(getManager().getSession().execute(getQuery(primaryKey))).one()}.
*
* @param primaryKey the primary key of the entity to fetch, or more precisely
* the values for the columns of said primary key in the order of the primary key.
* @return the entity fetched or {@code null} if it doesn't exist.
*
     * @throws IllegalArgumentException if the number of values provided differs from
* the number of columns composing the PRIMARY KEY of the mapped class, or if
* at least one of those values is {@code null}.
*/
public T get(Object... primaryKey) {
return map(session().execute(getQuery(primaryKey))).one();
}
/**
* Fetch an entity based on its primary key asynchronously.
* <p>
* This method is basically equivalent to mapping the result of: {@code getManager().getSession().executeAsync(getQuery(primaryKey))}.
*
* @param primaryKey the primary key of the entity to fetch, or more precisely
* the values for the columns of said primary key in the order of the primary key.
     * @return a future on the fetched entity. The returned future will yield
* {@code null} if said entity doesn't exist.
*
     * @throws IllegalArgumentException if the number of values provided differs from
* the number of columns composing the PRIMARY KEY of the mapped class, or if
* at least one of those values is {@code null}.
*/
public ListenableFuture<T> getAsync(Object... primaryKey) {
return Futures.transform(session().executeAsync(getQuery(primaryKey)), mapOneFunction);
}
}
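// Usage sketch (illustrative; `User` is a hypothetical @Table-annotated entity
// and `session` an open Session):
//
//   MappingManager manager = new MappingManager(session);
//   Mapper<User> mapper = manager.mapper(User.class);
//   mapper.save(user);             // INSERT
//   User u = mapper.get(userId);   // SELECT by primary key
//   mapper.delete(u);              // DELETE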
|
import requests
import pandas as pd
from email.message import EmailMessage
import smtplib
import sqlite3
from sqlite3 import Error
from datetime import datetime
def create_connection(url):
"""
Create a database connection to the SQLite database specified by url.
Parameters
----------
url : str
Sqlite3 database file's url.
Returns
-------
conn : sqlite3.Connection or None
Sqlite3 connection object.
"""
conn = None
try:
conn = sqlite3.connect(url)
return conn
except Error as e:
print(e)
return conn
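# Schema sketch for the daily_statistics table assumed by
# insert_data_to_daily_statistics_table below (column types inferred from the
# inserted values; not part of the original script):
#
# CREATE TABLE IF NOT EXISTS daily_statistics (
#     district_name      TEXT,
#     min_age_limit      INTEGER,
#     vaccine            TEXT,
#     available_capacity INTEGER,
#     timestamp          TEXT
# );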
def get_vaccine_availability_data(date, districts, api_url):
"""
Returns a dataframe containing data received from the API. Data cleaning has
been done to prepare the data for our use case.
Parameters
----------
date : str
The date parameter for the cowin API.
districts : dict
Dictionary containing distict data for the district parameter for the
cowin API .
api_url : str
URL for the cowin API.
Returns
-------
df_final : pandas.core.frame.DataFrame
Pandas Dataframe containing data received from the API.
"""
df = pd.DataFrame() # Empty DataFrame
for district_number in districts.values():
api_parameters = {"district_id": district_number, "date": date}
requested_data = requests.get(
api_url, params=api_parameters, headers={"Cache-Control": "no-cache"}
).json()
df_temp = pd.DataFrame.from_dict(requested_data["centers"])
df = df.append(df_temp, ignore_index=True) # append to the df
df_final = pd.DataFrame()
for i in range(len(df)):
df_test_temp = df.iloc[[i]]
center_id = df_test_temp["center_id"].iloc[0]
df_session_temp = pd.DataFrame(df_test_temp["sessions"].explode().tolist())
df_session_temp["center_id"] = center_id
df_test_temp = df_test_temp.merge(df_session_temp, how="inner", on="center_id")
df_final = df_final.append(df_test_temp, ignore_index=True)
df_final = df_final.drop(columns=["sessions", "vaccine_fees", "slots"])
return df_final
def is_vaccine_available_18(df):
"""
Return True if vaccine is available for 18 plus else return False.
Parameters
----------
df : pandas.core.frame.DataFrame
Pandas Dataframe containing data received from the API.
Returns
-------
bool
True if vaccine is available for 18 plus else False.
"""
if df.empty:
return False
df_available = df[(df["available_capacity"] > 0) & (df["min_age_limit"] == 18)]
return not df_available.empty
def is_vaccine_available_45(df):
"""
Return True if vaccine is available for 45 plus else return False
Parameters
----------
df : pandas.core.frame.DataFrame
Pandas Dataframe containing data received from the API.
Returns
-------
bool
True if vaccine is available for 45 plus else False.
"""
if df.empty:
return False
df_available = df[(df["available_capacity"] > 0) & (df["min_age_limit"] == 45)]
return not df_available.empty
def get_email_content_18(df):
"""
Return html string for 18 plus vaccine availability.
Parameters
----------
df : pandas.core.frame.DataFrame
Pandas Dataframe containing data received from the API.
Returns
-------
html_string_18 : str
Html string for 18 plus vaccine availability.
"""
html_string_18 = ""
if df.empty:
return html_string_18
df_available_18 = df[(df["available_capacity"] > 0) & (df["min_age_limit"] == 18)]
if not df_available_18.empty:
html_string_18 += """<p align="center"><b><i> 18 - 45 age group</i></b></p>
<br>
<br>"""
for index, row in df_available_18.iterrows():
html_string_18 += f"""
<p>{row['available_capacity']} {row['vaccine']} are available at {row['name']}
in {row['district_name']} for {row['min_age_limit']} and above years old.
</p>
<br>
<br>"""
return html_string_18
def get_email_content_45(df):
"""
Return html string for 45 plus vaccine availability.
Parameters
----------
df : pandas.core.frame.DataFrame
Pandas Dataframe containing data received from the API.
Returns
-------
html_string_45 : str
Html string for 45 plus vaccine availability.
"""
html_string_45 = ""
if df.empty:
return html_string_45
df_available_45 = df[(df["available_capacity"] > 0) & (df["min_age_limit"] == 45)]
if not df_available_45.empty:
html_string_45 += """<p align="center"><b><i> 45+ age group</i></b></p>
<br>
<br>"""
for index, row in df_available_45.iterrows():
html_string_45 += f"""
<p>{row['available_capacity']} {row['vaccine']} are available at {row['name']}
in {row['district_name']} for {row['min_age_limit']} and above years old.
</p>
<br>
<br>"""
return html_string_45
def send_email(from_email, to_email, subject, content, credentials, smtp_host):
"""
Send email containing given content to the given addressee from the given
address
Parameters
----------
smtp_host: str
The mail service host address.
from_email : str
The address from which the email will be sent.
to_email : list
The addressee(s).
subject : str
The subject of the email.
content : str
The body of the email.
credentials : dict
Dictionary containing credential information for the email.
Returns
-------
    str
        "Notification sent" if the message was handed to the SMTP server,
        otherwise "Failed to send mail".
"""
msg = EmailMessage()
msg["From"] = from_email
msg["To"] = to_email
msg["Subject"] = subject
msg.set_content(content, subtype="html")
user_name = credentials["user_name"]
password = credentials["password"]
with smtplib.SMTP(smtp_host, port=587) as smtp_server:
smtp_server.ehlo()
smtp_server.starttls()
smtp_server.login(user_name, password)
        # send_message returns a dict of refused recipients (empty when all succeed)
        response = smtp_server.send_message(msg)
    return "Notification sent" if isinstance(response, dict) else "Failed to send mail"
def get_data_for_daily_statistics_table(df):
"""
Return data which is ready to be inserted to the daily_statistics table in
the database.
Parameters
----------
df : pandas.core.frame.DataFrame
Pandas Dataframe containing data received from the API.
Returns
-------
df_daily_statistics_data : pandas.core.frame.DataFrame
Pandas Dataframe containing data to be inserted to the daily_statistics
table in the database.
"""
df_daily_statistics_data = (
df.groupby(["district_name", "min_age_limit", "vaccine"])["available_capacity"]
.sum()
.reset_index()
)
df_daily_statistics_data["vaccine"] = df_daily_statistics_data[
"vaccine"
].str.upper()
df_daily_statistics_data["timestamp"] = datetime.utcnow().strftime(
"%Y-%m-%d %H:%M:%S"
)
return df_daily_statistics_data
def insert_data_to_daily_statistics_table(df, conn):
"""
Insert data to the daily_statistics table in the database.
Parameters
----------
df : pandas.core.frame.DataFrame
Pandas Dataframe containing data to be inserted to the daily_statistics
table in the database.
conn : sqlite3.Connection
Sqlite3 connection object.
Returns
-------
None.
"""
c = conn.cursor()
for index, row in df.iterrows():
try:
            insert_sql_statement = """
                INSERT INTO daily_statistics
                (
                    district_name,
                    min_age_limit,
                    vaccine,
                    available_capacity,
                    timestamp
                )
                VALUES(?, ?, ?, ?, ?)
            """
            # Parameterized query: lets sqlite3 handle quoting and prevents SQL injection
            c.execute(
                insert_sql_statement,
                (
                    row["district_name"],
                    row["min_age_limit"],
                    row["vaccine"],
                    row["available_capacity"],
                    row["timestamp"],
                ),
            )
conn.commit()
except Error as e:
conn.rollback()
print(e)
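# Minimal usage sketch (not part of the original script; the district id, date
# and URL are illustrative -- the URL shown is the public CoWIN
# "calendarByDistrict" endpoint that this parsing logic appears to target):
if __name__ == "__main__":
    districts = {"Jaipur I": 505}  # hypothetical district id
    api_url = ("https://cdn-api.co-vin.in/api/v2/appointment/"
               "sessions/public/calendarByDistrict")
    df = get_vaccine_availability_data("15-05-2021", districts, api_url)
    if is_vaccine_available_18(df):
        print(get_email_content_18(df))
    if is_vaccine_available_45(df):
        print(get_email_content_45(df))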
|
Heather McCartney
Biography
McCartney was born in Tucson, Arizona, to Linda Eastman (later McCartney) and Joseph Melville See Jr. (April 19, 1938 – March 19, 2000), an American geologist. Her parents separated after eighteen months of marriage, and her mother married Paul McCartney in 1969, when Heather was six years old. During this time Heather was formally adopted by McCartney; she also made an appearance in the Beatles film Let It Be. A half-sister, Mary, was born in 1969, followed by another half-sister, Stella, in 1971 and a half-brother, James, in 1977. Although Heather has said that her biological father had a lifelong influence on her, she considers Paul McCartney to be her father. Her mother was Jewish.
McCartney began showing an interest in art, taking up printing at the Photographers' Workshop in Covent Garden and winning the Young Black and White Printer of the Year Award for a photo she called "Waterfall". She later went on to art college, where she focused on pottery and design. McCartney traveled to Mexico, where she lived among natives of the Huichol and Tarahumara tribes. McCartney later moved to Arizona to live with her biological father and eventually returned to England to work as a potter.
In line with the beliefs of her parents and half-siblings, she is a vegetarian and is passionate about animal rights.
In 1999 McCartney launched a line of houseware products called the Heather McCartney Houseware Collection. |
How Long Is the Course for a Pharmacy Tech?
Pharmacy technicians work alongside pharmacists in drugstore and hospital pharmacies. Technician duties include filling prescriptions under the supervision of a pharmacist, handling insurance paperwork and providing customer service to patrons. While many pharmacy technicians receive on-the-job training, some choose to complete a formal educational program in pharmacy technology. Program lengths vary, but many take less than a year to complete. Some pharmacy technicians enroll in two-year associate degree programs that may provide additional career opportunities as well as a foundation for completing a bachelor's degree and eventual admission to pharmacy school.
Community colleges, vocational schools and even hospitals and stand-alone pharmacies offer pharmacy technician training classes in pharmacy operations, mathematics, legal issues and pharmacology. In addition, many programs also incorporate an internship or externship that provides students with experience working in a pharmacy. Program length varies by school or training provider: some schools offer a two-year associate degree in pharmacy technology, while others offer certificate programs that can take only a few months to complete. The American Society of Health-System Pharmacists accredits only those pharmacy technician training programs that include a minimum of 600 hours of training and take 15 weeks or longer to complete.
About 80 percent of the states regulate pharmacy technicians by requiring them to become licensed, register with the state or hold professional certification. According to the Pharmacy Technician Certification Board, as of April 2013, eight states require licensure, while 29 require registration. Licensure and registration requirements vary but may include either the completion of a training program or earning certification from a recognized pharmacy technician professional association. States may also require pharmacy technicians to take and pass a comprehensive examination before working behind the counter at a pharmacy.
National certification of pharmacy technicians is conducted through organizations such as the National Healthcareer Association and the Pharmacy Technician Certification Board. Some states require pharmacy technicians to hold professional certification, while others allow pharmacy technicians to substitute professional certification for formal training.
State governments, along with professional certification boards, often require pharmacy technicians to complete continuing education units as a condition of license or registration renewal. Your state licensing or registration board can provide you with information on continuing education requirements. Both the National Healthcareer Association and the Pharmacy Technician Certification Board require 20 hours of continuing education every two years as a condition of certification renewal.
|
Technological evolution of cyclodextrins in the pharmaceutical field
We herein disclose how global cyclodextrin-based pharmaceutical technologies have evolved since the early 80s, through a dataset of 1998 patents retrieved from the Derwent Innovation Index. We used text-mining techniques based on the patents' semantic content to extract the knowledge contained therein and to analyze technologies related to the principal attributes of CDs: solubility, stability, and taste-masking enhancement. The majority of CD pharmaceutical technologies are directed toward parenteral aqueous solutions. The development of oral and ocular formulations is rapidly growing, while technologies for nasal and pulmonary routes are emerging and seem to be promising. Formulations for topical, transdermal, vaginal, and rectal routes do not account for a high number of patents, but they may be hiding a great potential, representing opportunity research areas. Certainly, the progress in materials sciences, supramolecular chemistry, and nanotechnology will influence the trend of that, apparently neglected, research. The bottom line: CD pharmaceutical technologies are still increasing, and this trend is expected to continue in the coming years. Patent monitoring allows the identification of relevant technologies and trends to prioritize research, development, and investment in both academia and industry. We expect the scope of this approach to be applied in the pharmaceutical field beyond CD technological applications.
Introduction
Cyclodextrins, cyclic oligomers linked by α-1,4 glycosidic bonds, are well known for their truncated-cone structure, comprising a hydrophilic surface and a cavity bearing a hydrophobic microenvironment. CDs have been considered "all-purpose molecular containers" because their cavity can selectively accommodate a diversity of molecules through supramolecular host-guest interactions, giving rise to an IC (Fig. 1). CDs are chemically versatile and can be modified to obtain mono- or polysubstituted derivatives, which can improve their properties (i.e., solubility and stability) and tune their complexation abilities. The complexation process leads to significant changes in the guest's spectral properties, reactivity, volatility, solubility, and stability, thus giving CDs a great potential to be applied in a diversity of technological fields. CDs have been of particular importance in drug delivery and pharmaceutical technologies. Undoubtedly, the most acknowledged application is the enhancement of the aqueous solubility of poorly soluble drugs through the formation of CD/drug ICs. However, complexation can also protect drugs from heat, light, hydrolysis, and oxidation, thereby improving the stability of formulations. In other cases, it allows the manipulation of volatile compounds, reduces unpleasant tastes and odors, and decreases the effect of irritating compounds. Moreover, CDs can modify the release rate of drugs, working as excipients for immediate or sustained release. Native CDs and several CD derivatives are FDA-approved for pharmaceutical use, and their success is evident with more than 50 medications containing CDs currently marketed. The growth in the number of approved formulations over time suggests CDs are still a useful tool in the pharmaceutical field and that their applications could be expanding into a promising future. Comprehensive reviews describing the CDs' abilities to enhance the solubility and stability of drugs, and the mechanisms of CD/drug complexation, have previously been reported. 
As we mentioned before, the first patent of our dataset dates from 1983 and belongs to Teijin Limited. This document deals with the role of CDs as adjuvants for the stabilization of vitamin D3 and its preparation process.
Fig. 2. Workflow showing a) general search to retrieve CD patents from DII; b) specific search to obtain CD patents disclosing solubility, stability, and taste-masking applications; c) text mining strategy to arrange the temporal evolution of patents according to the CD applications and administration routes; d) procedure to analyze the CD patent dataset based on geographical region and assignee.
a A61K: Preparations for medical, dental, or toilet purposes
b A61P: Specific therapeutic activity of chemical compounds or medical preparations
c A61K-047/40: Cyclodextrins and derivatives thereof (medicinal preparations characterized by the non-active ingredients)
d A61K-031/724: Cyclodextrins (medicinal preparations containing organic active ingredients)
It is worth defining a couple of concepts related to patents. On the one hand, the term "priority art" refers to all the knowledge needed to develop the invention. In this case, all the priority art is linked to other patents or scientific papers that have demonstrated how CD complexation modifies the physicochemical properties of a given host, in particular solubility and stability. It also refers to the relevance of the biological and physicochemical characterization of CD derivatives and their complexes. Hence, this patent (No. US 4729895, Teijin Limited) and its corresponding priority art are directly related to the primary application of CDs, which is the solubility enhancement of a guest molecule as a result of the formation of an IC. On the other hand, patent citations are the count of citations of the document in subsequent patents and, therefore, indicative of its impact on the development of new technology. In our search, the most cited patent, accounting for 413 citations, was registered by Kansas University in 1994. This document reports the successful functionalization of α-, β-, and γ-CDs with sulfoalkyl substituents, for purposes of improving the physicochemical features of native CDs, their complexation capacity, and the decrease of their toxicity profile. This patent also mentions how CD derivatives can increase the solubility of drugs and, as a consequence, implement different administration routes such as oral, intranasal, parenteral, and rectal for ICs. As we already mentioned, it was after the year 2000 when a breakthrough in CD patenting activity was observed. In fact, the most cited patent in the period between 2000 and 2019 corresponds to an innovation registered by CyDex Inc. in 2000, with a total of 193 citations. This patent describes sulfoalkyl ether CD-based controlled-release solid pharmaceutical formulations, in which the CD derivatives were used, in combination with other components, to modify the bioavailability and/or rate of bioabsorption of therapeutic agents. We consider this document to be a relevant innovation for several reasons. First, it introduces the terms "controlled release", "sustained release", "delayed release", and "targeted release", which are very popular in current CD scientific papers but were revolutionary at that time, when CDs were pigeonholed as solubilizers/stabilizers. Second, it was based on modified rather than native CDs (sulfoalkyl ether moiety). 
Although the solubility enhancement could be implicit, the main goal of this invention was to control the delivery of drugs through CD-based solid platforms. All the priority art of this patent was published after 1989 and suggests a different driving force in the research and development activity related to CDs: the design of novel materials to control drug release and optimize the drugs' bioavailability by means of chemically modified CD derivatives and the combination of different types of molecules and/or building blocks. Thus, the milestone in CD patenting behavior is parallel to the emergence of intensive research activity in the drug delivery field, which in the last two decades has focused mainly on the design of versatile structures for carrying drugs and releasing them in a controlled manner. To date, this breakthrough has also integrated other disciplines into the field of CDs, such as supramolecular chemistry, materials sciences, and nanotechnology. Despite the novelty projected for CDs in advanced drug delivery, their use in modifying the aqueous solubility of poorly soluble drugs has been, and continues to be, of great importance in the pharmaceutical industry. One instance is a patent assigned to Pfizer Inc. on the use of CDs to solubilize an antineoplastic compound. Fig. 4 summarizes the representative patents over time described above.
In 2004, Szejtli published a comprehensive review of the status of CDs in both industrial and academic research, in which he forecasted that the use of CDs would expand in the coming years, due to more efficient forms of production. Also, he concluded that CDs in the pharmaceutical field would show slow but steady development. Sixteen years later, we can say that the first two hypotheses were right. Contrary to what was thought, the interest in CDs has not grown slowly but, rather, has been rapidly increasing and is still engaging pharmaceutical and drug delivery research, with a boom stage starting in the early 2000s and continuing to the present day.
Solubility, stability, and taste-masking
CDs are primarily used for enhancing the aqueous solubility of poorly soluble drugs, improving the stability, and masking the unpleasant taste/odor of a formulation. Accordingly, we analyzed the CDs dataset to retrieve those patents devoted to each one of the mentioned attributes, to then inspect their behavior over time.
Solubility
Aqueous solubility determines many aspects of drug discovery and development processes, including formulation, administration routes, and pharmacokinetics. A poorly soluble drug cannot be formulated into a solution for parenteral or other administration routes (nasal, ocular, and otic), and its bioavailability is limited. CDs modify the apparent solubility of drugs through the formation of CD/drug ICs. In some cases, the solubility increase arises from non-inclusion aggregates, in which the CDs display the ability to form and stabilize supersaturated drug solutions. Indeed, the supremacy of CDs is given by this attribute, and the evolution of patents associated with solubility enhancement has substantially influenced the global trend of CDs pharmaceutical patents over time (Fig. 5). Between the mid-80s and just before 1998, patenting activity was steady, reaching a peak in 1994, with 24 patents registered. After 2001, an inflection point marks the beginning of a very active period of patenting that is still observed to date; 2013 stands out with 58 filed patents, the highest record in terms of solubility. 
The first patent of our dataset is from 1983 and is related to water-soluble CD polymers substituted by ionic groups which, in addition to complexing diverse guest molecules, form salts, thus broadening their applications. The most cited patent regarding solubility enhancement corresponds to Stella & Rajewski, 1994, the most cited patent of the whole dataset (described in Section 3.1), followed by a patent by Pitha, 1985, with 385 citations. The latter is another landmark in CD history, as it describes the preparation of alkylated CDs, including HPCD, one of the most important CD derivatives, whose use has been approved for any administration route. The patent is also associated with the preparation of drug/alkylated-CD mixtures, emphasizing their amorphous state and high aqueous solubility. Although Fig. 3 points out that 2018 was the year with the highest number of patents, Fig. 5 shows that, in this year, little activity was detected regarding solubility. At some point, the solubility enhancement of a poorly soluble drug was not a novelty anymore, and new effects, associated with or based on improved solubility, had to be found. This may explain why the number of related patents started to decline. Nonetheless, this does not mean that a simple IC to modify the solubility of a drug is not important. On the contrary, high-throughput screening strategies continuously propose new candidates, of which a large majority have low solubility. Therefore, CDs remain a valuable strategy for overcoming the challenges associated with these compounds. The same would happen with the repositioning of drugs, in which a change in their solubility could be a trigger for their use in the treatment of a disease different from that for which they were originally created. Similarly, CDs may enable formulations for the most convenient routes of administration, or reformulations for a relaunch of the product. For example, remdesivir, the drug that could be used to treat SARS-CoV-2 infection, is poorly soluble in water, a limitation that has been overcome by the formation of an IC with SBECD for IV administration. Other examples of patents using CDs for parenteral formulations are discussed in Section 3.3.1.

Stability

The effect of CDs on the chemical and physical stability of drugs has been well documented. In the solid state, some CDs, like MCD, can retard or suppress the degradation of some drugs. In addition, CDs can prevent thermosensitive drugs from degrading into oily products or, in turn, protect oily and volatile therapeutic molecules. Moreover, CDs can be used as a stabilizing agent for the whole formulation. From 2002, an increase in the interest in the use of CDs as stabilizing agents is seen, and 2010 is the year in which the number of patents on CDs as stabilizers slightly exceeds the number of those for solubility. Today, the number of patents related to solubility and stability is comparable (Fig. 5). Of note, a single patent can claim both solubility and stability uses. This may be the case for patents that protect a vast number of drugs or formulations. Although less frequent, this could also happen for patents concerning a drug whose hydrophobic part is also the sensitive part. The first patent in this regard is related to the stabilization, conferred by CDs, of a solid formulation of vitamin D3. An invention granted in 1985 to Janssen Pharmaceutica N.V. concerns the preparation of HPCD.
The patent also describes the use of this derivative in pharmaceutical compositions to overcome the instability or low solubility of a variety of drugs, namely non-steroidal anti-rheumatic agents, steroids, benzodiazepines, imidazoles, and others. HPCD is, so far, the most important CD-based solubilizer used in any type of administration, including the parenteral route, which explains why this patent accounts for a significant number of citations (79) and is the most cited patent concerning stability in our dataset. Strikingly, only a few months later, the similar patent comprising pharmaceutical formulations based on the alkylated CD derivatives, discussed in Section 3.2.1, was filed. The use of CDs as stabilizers to make a product last longer, or to optimize the conditions of manufacturing, packing, and storage, is another example of their applications. When designing a formulation employing CDs as stabilizers, it must be considered that the formation of an IC can make some drugs more stable but others more labile. Furthermore, it has been observed that drugs that are stabilized in aqueous solution by a CD can be destabilized by the same CD in a solid dosage form. Therefore, the use of CDs to improve the stability of a given drug or formulation must be thoroughly studied and the formulation carefully designed.

Taste-masking

Many active pharmaceutical ingredients have an undesirable taste and/or odor, which can lead to low patient compliance, thereby compromising treatment efficiency, especially in geriatric and pediatric populations. As oral administration is the most accepted route, masking the unpleasant taste of drugs to an acceptable degree of palatability is important during formulation. Masking techniques can be grouped into three levels: 1) the formulation level, through sweeteners and flavors; 2) the particle level, by creating a physical barrier between the bitter component and the taste receptors; and 3) the molecular level, through the complexation of the drug with CDs or ion-exchange resins. In general, the global number of patents concerning taste-masking is considerably lower in comparison to solubility and stability applications and has slowly gained interest over time (Fig. 5). The most cited patent (151 citations) describes a formulation of a nicotine lozenge for smoking cessation, designed to release nicotine in the buccal mucosa and reach a maximum systemic level faster than the nicotine transdermal patch. Besides other components, the formulation involves an IC between a CD and an essential oil flavoring. Another patent with a significant number of citations is a technological innovation reporting CD/ibuprofen complexes with an enhanced taste profile and bioavailability in comparison to sodium ibuprofen. A patent published in 2002, entitled "Oral pharmaceutical compositions containing cyclodextrins as taste masking agent", claims that CDs can mask the unpleasant taste of drugs without the preparation of an IC between the CD and the drug, which had been thought to be essential. Besides the scientific contribution to the field, this was considered a breakthrough in terms of manufacturing processes, regarding simplicity and costs. Later, in 2005, it was argued that the preparation of the CD/drug IC might not be necessary if the drug dose is small and the CD is in excess. If so, the CD will dissolve quickly in the saliva, giving rise to a saturated CD solution in which the bitter component instantly forms a complex with the CD.
CDs have also been useful as taste-masking agents for chewable, fast-disintegrating, buccal, and sublingual tablets. Taste-masking is also needed for the nasal and pulmonary administration routes. Several potential drugs for inhalation therapy have an unpleasant taste, which, again, may result in incomplete therapy due to low patient compliance. For these formulations, encapsulation of the bitter molecule is the best option because other methodologies, such as coating, are not feasible. Some examples in this regard are reviewed in Section 3.3.4. The use of CDs for taste-masking is still emerging and represents a great area of opportunity. Besides the improvement of organoleptic properties, CDs can simultaneously modify solubility and impart stability to the formulation, making them exceptional multifunctional excipients.

Administration routes

According to the previous sections, it is clear that CDs can be present in a variety of dosage forms intended for practically all administration routes. Hence, we were interested in knowing how the presence of CDs in formulations for different administration routes has evolved over time (Fig. 6).

Parenteral administration

Certainly, the major strength of CDs is their ability to increase the apparent solubility of a drug through the formation of an IC. Therefore, their most attractive and robust application has been the preparation of aqueous solutions for parenteral administration, as clearly confirmed by the 208 patents observed in Fig. 6. Examples include the development of CD-based formulations for lansoprazole and for the sartan drug family. This trend is expected to continue growing in the future, as the formulation of new drugs, reformulations, and drug repurposing still strongly consider CD technologies an excellent tool for formulating low-water-soluble drugs. This scenario is possible only if research and development of pharmaceutical technology, as well as studies on the pharmacokinetic and toxicological profiles of the ICs, are continuously conducted, as they have been so far. Because not all CDs or CD derivatives can be parenterally administered, the design and synthesis of new CD derivatives are highly desirable. Besides being biocompatible, a derivative must be water-soluble and present good complexation abilities. Furthermore, its production process should be robust and scalable to produce volumes that can fulfill the pharmaceutical industry's demands.

Oral administration

The oral route is the most accepted way to administer medications and, in turn, solid dosage forms are the most common formulations intended for this aim. Despite its popularity, the oral route is challenging or sometimes not possible for several drugs, due to their low solubility/permeability, instability, degradation in the GI tract, extensive metabolism, or unpleasant organoleptic properties. CDs have demonstrated great potential to overcome these limitations directly or indirectly and even to modify a drug's release profile. Hydrophilic CDs can improve the oral absorption of BCS Class II drugs (low solubility, high permeability), as CDs augment their solubility without altering their permeability through biological membranes. In the case of BCS Class IV drugs (low solubility, low permeability), CDs may increase their solubility and improve their availability at the mucosal surface to enhance their absorption. Lipophilic CDs such as MCD are ideal for increasing permeability through membranes, although their use for oral delivery is hampered by their toxicity.
CDs can also be beneficial for BCS Class I drugs (high permeability, high solubility), not for modifying their bioavailability but for reducing gastrointestinal irritation, as in the case of some NSAIDs. The effect of CDs on Class III drugs is negligible. Although oral administration covers liquid and solid forms, the success of CDs is reflected mainly in the latter, with several oral tablets already marketed: CD/cefotiam-hexetil HCl (Pansporin T™), CD/omeprazole (Omebeta™), CD/piroxicam (Brexin™), and CD/tiaprofenic acid (Surgamyl™). Although less common than tablets, capsules are also included in the list of marketed CD oral formulations, like Ulgut™, a product based on a CD/benexate IC. In this context, our analysis is focused on solid forms, including conventional, sublingual, and buccal tablets; from here on, by oral administration we will refer only to these pharmaceutical forms. Patents for CD-based oral formulations account for 173 filings, which makes them second in importance after parenteral formulations. The increasing number of patents (Fig. 6), especially from the early 2000s, clearly indicates that they have been, and still are, a very attractive resource for oral pharmaceutical technologies. A patent published in 1999 discloses the use of CDs in oral tablets for preventing sodium pravastatin degradation and isomerization caused by humidity or temperature before oral administration. A patent issued in 2019 discloses a CD-based oral tablet to enhance the bioavailability of meloxicam. For the successful development of CD-based conventional tablets, some aspects must be carefully considered: for instance, whether CDs are used as ICs or as a physical mixture; their interaction with other components of the formulation; and the type of drug, its dose, and the size of the dosage form. In addition, the technologies used to process them play a fundamental role in determining an outstanding performance. Also, care must be taken with the amount of CD used in the formulation, as an excess of CDs could hinder the absorption of the drug through the GI tract. Nonetheless, the fascinating recent research devoted to oral CD-based pharmaceutical technologies will certainly maintain the increasing trend observed herein. Buccal formulations aim to deliver drugs through the buccal mucosa, which possesses a large surface area for absorption, to achieve a local or systemic effect. On the other hand, sublingual tablets, in which the drug is placed beneath the tongue, seek a more rapid systemic absorption in comparison to the conventional oral route and avoid intestinal and hepatic first-pass metabolism of the drug. In this regard, CDs can enhance drug dissolution in the saliva, improve the organoleptic properties, or work at the absorption and bioavailability levels. In fact, there are some buccal and sublingual medications containing CDs on the market: CD/PGE2 (Prostarmon E™), CD/nitroglycerin (Nitropen™), and CD/nicotine (Nicorette™), also formulated as a chewing gum (Nicogum™). Compelling research shows the promising potential of CDs for developing sublingual dosage forms. Therefore, an important role for innovations in this matter is expected.
The following are two examples of patents for sublingual formulations: 1) the use of CDs to provide faster dissolution times for reaching high levels of apomorphine in plasma to treat female sexual dysfunction; and 2) the use of CDs for the transformation of therapeutic oils into water-soluble dry powders for sublingual administration. The combination with polymers has enabled the development of mucoadhesive buccal films, which appear to be emerging as a trending research area. In this sense, very innovative approaches are being investigated and patented, like a CD-based hydrogel that disintegrates at human body temperature to increase the bioavailability of aurantiin, with a good taste and suitable for children or particular patient groups. CDs are an excellent resource for developing oral formulations, and patents in this matter are expected to continue increasing in the coming years. The progress in mucoadhesive materials is paving the way to design buccal mucoadhesive devices that provide convenient therapies to pediatric and geriatric patients, thus generating a very attractive research opportunity area.

Ocular administration

Ophthalmic preparations must allow the drug to permeate the structure of the corneal epithelium without irritating the ocular surface; otherwise, it will be rapidly cleared from the precorneal area a few minutes after administration, with incomplete absorption. Suspensions, drops, gels, ointments, and solid inserts have been used to deliver drugs to the eye. Aqueous eye drops are the most common because they cause the fewest adverse effects, especially irritation and blurred vision, which may affect medication adherence. In eye drop formulations, the drug must be dissolved in a small aqueous volume but, at the same time, must preserve a moderately lipophilic character to penetrate the corneal epithelium and stroma into the aqueous humor. CDs offer numerous advantages that can facilitate the development of convenient ocular formulations. CDs enhance drug solubility without interfering with the drug's ability to permeate the lipophilic barriers, stabilize the formulation, and decrease irritation to the ocular surface. CDs do not cross the corneal epithelium; however, when complexing a lipophilic drug, they can keep it in the aqueous solution and afford a higher availability at the surface of the corneal barrier. There are currently two marketed ophthalmic drops employing CDs: the antibiotic Clorocil™, containing MCD/chloramphenicol, and the anti-inflammatory Voltaren Ophthalmic™, comprising the HPCD/diclofenac sodium IC. Based on the number of patents, the use of CDs in ocular medications is one of the most important, just below parenteral and oral solid forms. Moreover, the interest in CDs has increased since the year 2000 and is still appealing, as evidenced by the rapid increase in the number of patents in the last 20 years. A recent patent example is a set of nano- and micro-suspensions containing two different CDs, in which one CD forms an IC with cyclosporin A while the other promotes the formation of CD/cyclosporin A IC aggregates. The formulation is intended to treat inflammatory ocular surface disorders and to enhance tear formation. As we have discussed, the research on bioadhesive materials is driving the development of innovative technologies. Proof of this is a patented composition called nanoglue, comprising CDs, one or more bioadhesive polymers, one or more dendrimers, and (optionally) one or more therapeutic, prophylactic, or diagnostic agents.
Upon an external stimulus, such as UV irradiation, the nanoglue forms a hydrogel at the target tissue that seals corneal wounds. Recent research on ocular formulations containing CDs includes sustained-delivery eye drops; in situ gelling systems; mucoadhesive hydrogels; CD/drug-loaded contact lenses; and micro- and nanosystems. Also, CD-based formulations have shown potential to treat diseases of the posterior segment of the eye, such as diabetic retinopathy and age-related macular degeneration, which are commonly treated by intravitreal drug injections. Certainly, the outcomes of such compelling investigations will be reflected in higher patenting growth in the following years.

Nasal, intranasal, and pulmonary administration

The obvious and best way to treat ailments of the nose and paranasal sinuses is through nasally delivered medications. For successful nasal delivery, drugs must dissolve in a very small volume of water, as the volume of the aqueous diffusion layer is small. Permeation enhancers and mucoadhesive components are highly desirable for nasal and intranasal formulations, as they promote the delivery of drugs before their clearance. The promising role of CDs in formulations for nasal administration is associated with the modification of the absorption rate of drugs at the site of delivery, due to an increase in drug solubility and changes in nasal mucosa permeability. One example is a patent disclosing a dry powder formulation of a group of indazoles designed to inhibit the Janus-dependent kinase and thereby block the interplay of multiple inflammatory cells, in which CDs act as both solubilizers and bioadhesive components for the nasal mucosa. Suitable formulations of corticosteroids are needed for rhinitis, sinusitis, asthma, and nasal polyps, among other conditions. Several innovations have responded to this necessity through the implementation of CDs (especially sulfoalkyl ether derivatives) to enhance drug solubility and permeability in nasal medications while improving their organoleptic properties. Intranasal delivery has gained great interest due to its potential to deliver drugs systemically while avoiding phase I and II metabolism. Hence, it is an attractive route for administering peptide drugs and hormones. Moreover, this route has been explored for brain delivery. Recently, a new product called Baqsimi™, used for the treatment of severe hypoglycemia in diabetes mellitus, was approved by the FDA and EMA. It contains CD as an inactive ingredient that improves the stability, solubility, and bioavailability of glucagon. Another commercial product administered nasally is RMCD/17β-estradiol (Aerodiol®). Thus, intranasal technologies using CDs have arisen over time. One example is a powder formulation containing glucagon or a glucagon analog for nasal administration, useful in the treatment of hypoglycemia, in which the CD performs as a filler and as a mucoadhesive agent on the nasal mucosal surface to promote the absorption of the active agent. Another interesting patent is an intranasal formulation of parathyroid hormone, in which the CD enables an aqueous formulation while preventing drug aggregation. Patents on nasal and intranasal preparations employing CDs have evolved slowly over time. Although they have gained strength in the last 20 years, they can still be considered emerging. However, recent investigations will generate new opportunities for innovation, and a slow but steady increase in patents may be coming in the next years.
Although the pulmonary route has been mainly proposed for localized treatments, the large lung surface area and abundant blood supply make this route an alternative for systemic drug delivery. The efficacy of this route relies on the adequate aerosolization properties of the dosage form, as well as the drug's permeability through the lung, its solubility in small aqueous volumes, and its suitable organoleptic properties. Both solid and dissolved CD/drug ICs have been formulated as dry powders and nebulized solutions, improving the aerosolization properties of formulations and enhancing drug dissolution in the lung fluids, and recent research continues to show the promising potential of using CDs for pulmonary medications. Based on technological information, we found, for example, a preparation of a group of fluoroquinolones suitable for aerosolization for the treatment of pulmonary bacterial infections. In this formulation, CDs complex the therapeutic molecules to improve their solubility and stability. Another example involves the CD complexation of compounds that treat inflammatory and fibrotic disorders at the protein kinase level, in which the CD is used as a solubilizer. Innovations in pulmonary formulations containing CDs have progressed more slowly than nasal formulations (Fig. 6). Nonetheless, it is notable that the interest in this regard has strengthened throughout the last decade, which will probably increase the number of patents in the short term.

Other administration routes

The number of patents regarding the topical, transdermal, vaginal, and rectal routes is significantly lower than the number of patents for the administration routes already discussed and has remained unchanged over time (Fig. 6). The following section briefly discusses the role of CDs in each of them.

Topical and transdermal. Topical delivery refers to medications that minimally penetrate the skin layer, creating a local effect. A meticulous selection of the vehicle is necessary for CDs to display suitable performance. For example, hydrophilic CDs can increase the in vitro release rate of corticosteroids from water-based ointments but delay the drug release in oily-based vehicles. Moreover, some components of the ointments can displace the drug from the CD/drug IC. Despite these demanding requirements, fruitful results have been achieved with Glymesason™, an ointment containing dexamethasone and CD. Our study revealed that only 18 patents disclose dermal formulations employing CDs. Examples include a bio-adhesive, film-forming pharmaceutical composition created for application directly to the skin or to a substrate to treat skin disorders, in which CDs perform principally as solubility enhancers. Another invention uses the SBECD/silymarin IC in a composition useful for reducing facial redness in rosacea-prone skin, preventing skin aging, inhibiting oxidative stress in epidermal and dermal cells, and increasing collagen production; here, the CD is used to enhance the solubility and availability of the active compound in the topical formulation. Although the patenting behavior has remained without significant changes over time, the outcomes of recent research could change this trend, as investigations range from ointments to wearable biomimetic films for wound healing, including supramolecular gels and nanosystems. The transdermal route requires a formulation capable of penetrating the skin to exert its effect in deeper tissues or in the systemic circulation.
Transdermal formulations require penetration enhancers to enable the drug to cross the stratum corneum and reach the systemic circulation. In this respect, CDs increase drug availability at the barrier surface, unlike penetration enhancers, which induce physicochemical changes within the barrier. However, the combination of CDs and penetration enhancers results in additive effects. Thus, CDs can support the adequate performance of a transdermal preparation. Only 7 patents were retrieved from the dataset; however, this may change in the future due to the increasing interest in delivering drugs to the systemic circulation with all the advantages that the transdermal route offers. Proof of this is the fascinating research on transdermal delivery using CDs: CD-based hydrogels; CD/drug ICs loaded into microneedles or patches; and ICs with ionic CDs for iontophoretic transdermal delivery.

Vaginal and rectal administration. In vaginal formulations, drug absorption, distribution, and residence time may vary. The most common vaginal formulations are semisolid and fast-dissolving solid dosage forms; notwithstanding, bioadhesive systems have become highly desirable for local or systemic vaginal effects. Drugs administered by this route include hormones, antibiotics, and antimycotics. However, other diseases, like those related to human papillomavirus, herpes simplex virus, and HIV, along with the unfortunate increase in the prevalence of cervical carcinoma, have recently driven the interest in developing vaginal formulations. Several compelling investigations have shown that CDs are useful, as solubilizers, in the development of these types of formulations, such as mucoadhesive gels, creams, and films with antifungal and antiviral activities; gels for contraception; vaginal discs for the controlled delivery of antiretroviral drugs; and mucoadhesive nanosystems for cervical cancer treatment. Also studied are the mucoadhesive properties of CD derivatives in which the CDs per se comprise the delivery systems. From the 15 patents retrieved from the dataset, we selected a recent invention for systemic effect: a vaginal formulation containing MAGL inhibitors, in which CDs are used for solubility and stability enhancement, to treat systemic MAGL-mediated disorders such as pain, inflammatory disorders, traumatic brain injury, depression, anxiety, and Alzheimer's disease, among others. The patenting pattern has remained unchanged; nevertheless, a change in this trend in the coming years is highly desirable. The advances in the development of functional biomaterials will likely make an outstanding contribution to these technologies. Rectal administration is an advantageous alternative to the oral route for children and for patients with difficulty swallowing or with intense nausea and vomiting. The constraints associated with this route are the limited surface area for drug absorption and the small volume of the rectal fluids in which the drug must dissolve. CDs and their derivatives have also been employed to optimize rectal drug delivery. CDs can improve drug stability in the suppository base and decrease the rectal irritation caused by drugs. Also, CDs can modify the release rate of drugs from the vehicles and promote their permeation through the rectal epithelium, with the subsequent optimization of the drug's pharmacokinetic profile. If the formulation comprises a CD/lipophilic-drug IC in an oleaginous vehicle, the IC will be well dispersed in it.
Therefore, drug dissolution at the interface between the oily base and the rectal fluids will improve; at the same time, the reverse diffusion of the drug into the vehicle is hindered. As with ointments, the success of a formulation depends on the vehicle (aqueous or oleaginous), the physicochemical features of the CD in use, the drug, the CD/drug IC, and their interactions with the other components of the preparation. Despite these challenges, some rectal suppositories are currently marketed: CD/piroxicam (Cicladol™ and Brexin™), CD/meloxicam (Mobitil™), and HPCD/cisapride (Prepulsid™). According to our search, the number of patents concerning suppositories is relatively low (Fig. 6); nonetheless, herein we discuss two recent examples of interesting technologies: 1) a novel rectal composition for the treatment of pediatric cancer, in which CDs work as solubilizers; and 2) a rectal composition containing rifaximin, hydrocortisone, and CD, in which the latter is employed as a mucosal permeation enhancer improving the local retention and bioavailability of the drug for the treatment of anal diseases like anal fissures, ulcers, or hemorrhoidal disease. The number of academic publications is also low. A general search in the Scopus database for the last ten years (search criteria: cyclodextrin rectal delivery) revealed that only a few articles per year were published, or even none, as in the case of 2015. Fortunately, after that, compelling research has been done: for example, the study of HPCD/budesonide ICs in the form of thermoreversible gels for ulcerative colitis, or an HPCD/5-fluorouracil IC encapsulated in a thermoreversible gelling film for colorectal cancer. Despite its low popularity, this administration route may still be hiding its potential to deliver drugs locally or systemically, and the use of mucoadhesive and thermoresponsive materials, whose development could be supported by CDs, may bring interesting progress to the field.

CD patents: Where and who?

We aimed to identify the regions with high patenting activity around the world, as these may correspond to the regions with high market potential. This analysis was carried out using the dataset of 1,998 patents. Fig. 7a shows the top ten countries that hold most of the CD pharmaceutical patents. Since 2011, China has been the nation with the highest patent filing activity in practically all technological fields, and CD pharmaceutical innovations are no exception. According to our analysis, 1356 patents were filed in this country, followed by the geographical region represented by WIPO, with 710 records. These numbers position China as the global technological leader in the field of CDs for pharmaceutical applications: the number of patents registered via WIPO is about 47% lower than the number registered in China. Japan and the U.S. also stand out, with 688 and 656 patents, respectively. Besides the regions with the highest market potential, we aimed to provide information about the assignees, that is, the entities that have the right to exploit the patent. In accordance with what we have mentioned, Fig. 7b highlights that 7 out of the top 20 assignees are located in China, including both industry (five pharma companies) and academic institutions (two universities). Japan and Brazil (positions 3 and 8, respectively, in the top-10 countries) also appear among the top-20 assignees, with patenting activity in both universities and pharma companies.
Certainly, the holistic technological knowledge coming from industry and academia is highly relevant for the technological development of a region. China, Japan, the U.S.A., and Brazil, each belonging to the top-10 ranking of regions, are also present in the top-20 ranking of assignees. The U.S.A. has only two assignees in the top-20 list, Pfizer Inc. and CyDex Pharmaceutical Inc. Both companies have been shown to have active CD patent portfolios (some of their patents have been discussed throughout this work). In particular, CyDex Pharmaceutical Inc. has played an important role in the development of CD technologies and owns one of the most influential patents in the area, Patent No. US 6046177, which is still regarded as a breakthrough in the evolution of CD innovations in the pharmaceutical field. There are several channels for spreading knowledge and technology across boundaries. Among them, foreign direct investment (FDI) has been widely studied. FDI refers to an investment made by a firm or individual in one country into a business located in another country. This implies that patents protecting the same invention can be filed in locations different from where they were created, thereby generating different economic phenomena between developed and developing countries. FDI could be the reason why some countries in the top-10 ranking of regions do not have an assignee backing up their position, as could be the case for Australia, the Republic of Korea, Mexico, and Spain. For the WIPO and EPO regions, the dynamic is different because they encompass different cooperation treaties. On the one hand, the Patent Cooperation Treaty enables patents to be registered in the 193 countries that are part of the WIPO through a single procedure. On the other hand, the EPO grants European patents in 44 countries and also facilitates the registration of a patent in different EPO countries in a single grant procedure. This explains why these two entities appear in the second and fifth positions of the ranking of geographical regions. Hence, it is expected that WIPO and EPO will remain in the top positions in terms of regions in which a given technology is protected.

Conclusions

We analyzed the evolution of CD-based pharmaceutical technologies, using patent data as the technical source, through a text-mining approach based on the patents' semantic content. In our dataset, the first-filed patent dates from the early '80s. During that decade, slow growth in CD patents was observed; however, the early 2000s saw very fast growth in the use of CDs for pharmaceutical applications. The abilities of CDs to enhance the solubility and stability of drugs have determined their technological progress. Nonetheless, their ability to modify organoleptic properties is emerging and represents a great area of opportunity. CDs are used in formulations for practically any route of administration. Although patents are mostly associated with parenteral aqueous solutions, oral and ocular formulations are growing significantly, while nasal and pulmonary formulations seem promising. It was also important to review patents associated with formulations for the topical, transdermal, vaginal, and rectal routes. Interest in patenting these technologies seems to be neglected; however, they may be hiding great potential and represent research opportunity areas. Certainly, a better understanding of CDs, along with the progress in materials science, supramolecular chemistry, and nanotechnology, will drive a change in their patenting trend.
In short, the interest in CDs is still increasing, and this trend is expected to continue in the coming years. Patent monitoring allows the identification of relevant technologies and trends to prioritize research, development, and investment. Thus, knowledge mined from patents can be applied to foster technological innovations based on CDs or any other platform.

Declaration of competing interest

The authors declare that they have no known competing financial interests or personal relationships that could have appeared to influence the work reported in this paper. |
package containers
import (
"context"
"fmt"
"os"
"strconv"
"strings"
"github.com/containers/common/pkg/completion"
"github.com/containers/common/pkg/config"
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/types"
"github.com/containers/podman/v3/cmd/podman/common"
"github.com/containers/podman/v3/cmd/podman/registry"
"github.com/containers/podman/v3/cmd/podman/utils"
"github.com/containers/podman/v3/libpod/define"
"github.com/containers/podman/v3/pkg/domain/entities"
"github.com/containers/podman/v3/pkg/specgen"
"github.com/containers/podman/v3/pkg/specgenutil"
"github.com/containers/podman/v3/pkg/util"
"github.com/mattn/go-isatty"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
var (
createDescription = `Creates a new container from the given image or storage and prepares it for running the specified command.
The container ID is then printed to stdout. You can then start it at any time with the podman start <container_id> command. The container will be created with the initial state 'created'.`
createCommand = &cobra.Command{
Use: "create [options] IMAGE [COMMAND [ARG...]]",
Short: "Create but do not start a container",
Long: createDescription,
RunE: create,
Args: cobra.MinimumNArgs(1),
ValidArgsFunction: common.AutocompleteCreateRun,
Example: `podman create alpine ls
podman create --annotation HELLO=WORLD alpine ls
podman create -t -i --name myctr alpine ls`,
}
containerCreateCommand = &cobra.Command{
Args: createCommand.Args,
Use: createCommand.Use,
Short: createCommand.Short,
Long: createCommand.Long,
RunE: createCommand.RunE,
ValidArgsFunction: createCommand.ValidArgsFunction,
Example: `podman container create alpine ls
podman container create --annotation HELLO=WORLD alpine ls
podman container create -t -i --name myctr alpine ls`,
}
)
var (
InitContainerType string
cliVals entities.ContainerCreateOptions
)
func createFlags(cmd *cobra.Command) {
flags := cmd.Flags()
initContainerFlagName := "init-ctr"
flags.StringVar(
&InitContainerType,
initContainerFlagName, "",
"Make this a pod init container.",
)
flags.SetInterspersed(false)
common.DefineCreateFlags(cmd, &cliVals, false)
common.DefineNetFlags(cmd)
flags.SetNormalizeFunc(utils.AliasFlags)
if registry.IsRemote() {
if cliVals.IsInfra {
_ = flags.MarkHidden("infra-conmon-pidfile")
} else {
_ = flags.MarkHidden("conmon-pidfile")
}
_ = flags.MarkHidden("pidfile")
}
_ = cmd.RegisterFlagCompletionFunc(initContainerFlagName, completion.AutocompleteDefault)
}
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
Command: createCommand,
})
createFlags(createCommand)
registry.Commands = append(registry.Commands, registry.CliCommand{
Command: containerCreateCommand,
Parent: containerCmd,
})
createFlags(containerCreateCommand)
}
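// create is the RunE handler shared by "podman create" and
// "podman container create": it resolves the network flags, validates the
// --init-ctr/--pod combination, pulls the image when needed, fills out the
// container spec, creates a pod for "new:"-prefixed pod names, optionally
// replaces an existing container, and finally prints the new container ID
// (unless the passthrough log driver is in use).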
func create(cmd *cobra.Command, args []string) error {
var (
err error
)
flags := cmd.Flags()
cliVals.Net, err = common.NetFlagsToNetOptions(nil, *flags, cliVals.Pod == "" && cliVals.PodIDFile == "")
if err != nil {
return err
}
// Check if initctr is used with --pod and the value is correct
if initctr := InitContainerType; cmd.Flags().Changed("init-ctr") {
if !cmd.Flags().Changed("pod") {
return errors.New("must specify pod value with init-ctr")
}
if !util.StringInSlice(initctr, []string{define.AlwaysInitContainer, define.OneShotInitContainer}) {
return errors.Errorf("init-ctr value must be '%s' or '%s'", define.AlwaysInitContainer, define.OneShotInitContainer)
}
cliVals.InitContainerType = initctr
}
cliVals, err = CreateInit(cmd, cliVals, false)
if err != nil {
return err
}
imageName := args[0]
rawImageName := ""
if !cliVals.RootFS {
rawImageName = args[0]
name, err := PullImage(args[0], cliVals)
if err != nil {
return err
}
imageName = name
}
s := specgen.NewSpecGenerator(imageName, cliVals.RootFS)
if err := specgenutil.FillOutSpecGen(s, &cliVals, args); err != nil {
return err
}
s.RawImageName = rawImageName
if _, err := createPodIfNecessary(s, cliVals.Net); err != nil {
return err
}
if cliVals.Replace {
if err := replaceContainer(cliVals.Name); err != nil {
return err
}
}
report, err := registry.ContainerEngine().ContainerCreate(registry.GetContext(), s)
if err != nil {
return err
}
if cliVals.CIDFile != "" {
if err := util.CreateCidFile(cliVals.CIDFile, report.Id); err != nil {
return err
}
}
if cliVals.LogDriver != define.PassthroughLogging {
fmt.Println(report.Id)
}
return nil
}
func replaceContainer(name string) error {
if len(name) == 0 {
return errors.New("cannot replace container without --name being set")
}
rmOptions := entities.RmOptions{
Force: true, // force stop & removal
Ignore: true, // ignore errors when a container doesn't exist
}
return removeContainers([]string{name}, rmOptions, false)
}
func CreateInit(c *cobra.Command, vals entities.ContainerCreateOptions, isInfra bool) (entities.ContainerCreateOptions, error) {
vals.UserNS = c.Flag("userns").Value.String()
// if user did not modify --userns flag and did turn on
// uid/gid mappings, set userns flag to "private"
if !c.Flag("userns").Changed && vals.UserNS == "host" {
if len(vals.UIDMap) > 0 ||
len(vals.GIDMap) > 0 ||
vals.SubUIDName != "" ||
vals.SubGIDName != "" {
vals.UserNS = "private"
}
}
if cliVals.LogDriver == define.PassthroughLogging {
if isatty.IsTerminal(0) || isatty.IsTerminal(1) || isatty.IsTerminal(2) {
return vals, errors.New("the '--log-driver passthrough' option cannot be used on a TTY")
}
if registry.IsRemote() {
return vals, errors.New("the '--log-driver passthrough' option is not supported in remote mode")
}
}
if !isInfra {
if c.Flag("shm-size").Changed {
vals.ShmSize = c.Flag("shm-size").Value.String()
}
if c.Flag("cpu-period").Changed && c.Flag("cpus").Changed {
return vals, errors.Errorf("--cpu-period and --cpus cannot be set together")
}
if c.Flag("cpu-quota").Changed && c.Flag("cpus").Changed {
return vals, errors.Errorf("--cpu-quota and --cpus cannot be set together")
}
vals.IPC = c.Flag("ipc").Value.String()
vals.UTS = c.Flag("uts").Value.String()
vals.PID = c.Flag("pid").Value.String()
vals.CgroupNS = c.Flag("cgroupns").Value.String()
if c.Flags().Changed("group-add") {
groups := []string{}
for _, g := range cliVals.GroupAdd {
if g == "keep-groups" {
if len(cliVals.GroupAdd) > 1 {
return vals, errors.New("the '--group-add keep-groups' option is not allowed with any other --group-add options")
}
if registry.IsRemote() {
return vals, errors.New("the '--group-add keep-groups' option is not supported in remote mode")
}
vals.Annotation = append(vals.Annotation, "run.oci.keep_original_groups=1")
} else {
groups = append(groups, g)
}
}
vals.GroupAdd = groups
}
if c.Flags().Changed("pids-limit") {
val := c.Flag("pids-limit").Value.String()
// Convert -1 to 0, so that -1 maps to unlimited pids limit
if val == "-1" {
val = "0"
}
pidsLimit, err := strconv.ParseInt(val, 10, 32)
if err != nil {
return vals, err
}
vals.PIDsLimit = &pidsLimit
}
if c.Flags().Changed("env") {
env, err := c.Flags().GetStringArray("env")
if err != nil {
return vals, errors.Wrapf(err, "retrieve env flag")
}
vals.Env = env
}
if c.Flag("cgroups").Changed && vals.CGroupsMode == "split" && registry.IsRemote() {
return vals, errors.Errorf("the option --cgroups=%q is not supported in remote mode", vals.CGroupsMode)
}
if c.Flag("pod").Changed && !strings.HasPrefix(c.Flag("pod").Value.String(), "new:") && c.Flag("userns").Changed {
return vals, errors.Errorf("--userns and --pod cannot be set together")
}
}
if (c.Flag("dns").Changed || c.Flag("dns-opt").Changed || c.Flag("dns-search").Changed) && vals.Net != nil && (vals.Net.Network.NSMode == specgen.NoNetwork || vals.Net.Network.IsContainer()) {
return vals, errors.Errorf("conflicting options: dns and the network mode: " + string(vals.Net.Network.NSMode))
}
noHosts, err := c.Flags().GetBool("no-hosts")
if err != nil {
return vals, err
}
if noHosts && c.Flag("add-host").Changed {
return vals, errors.Errorf("--no-hosts and --add-host cannot be set together")
}
if !isInfra && c.Flag("entrypoint").Changed {
val := c.Flag("entrypoint").Value.String()
vals.Entrypoint = &val
} else if isInfra && c.Flag("infra-command").Changed {
}
// Docker-compatibility: the "-h" flag for run/create is reserved for
// the hostname (see https://github.com/containers/podman/issues/1367).
return vals, nil
}
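// PullImage pulls imageName according to the configured pull policy and the
// --platform/--arch/--os flags. It returns the name under which the backend
// should resolve the image: the raw input name, unless the name includes a
// transport, in which case the resolved name from the pull report is used.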
func PullImage(imageName string, cliVals entities.ContainerCreateOptions) (string, error) {
pullPolicy, err := config.ValidatePullPolicy(cliVals.Pull)
if err != nil {
return "", err
}
if cliVals.Platform != "" || cliVals.Arch != "" || cliVals.OS != "" {
if cliVals.Platform != "" {
if cliVals.Arch != "" || cliVals.OS != "" {
return "", errors.Errorf("--platform option can not be specified with --arch or --os")
}
split := strings.SplitN(cliVals.Platform, "/", 2)
cliVals.OS = split[0]
if len(split) > 1 {
cliVals.Arch = split[1]
}
}
}
skipTLSVerify := types.OptionalBoolUndefined
if cliVals.TLSVerify.Present() {
skipTLSVerify = types.NewOptionalBool(!cliVals.TLSVerify.Value())
}
pullReport, pullErr := registry.ImageEngine().Pull(registry.GetContext(), imageName, entities.ImagePullOptions{
Authfile: cliVals.Authfile,
Quiet: cliVals.Quiet,
Arch: cliVals.Arch,
OS: cliVals.OS,
Variant: cliVals.Variant,
SignaturePolicy: cliVals.SignaturePolicy,
PullPolicy: pullPolicy,
SkipTLSVerify: skipTLSVerify,
})
if pullErr != nil {
return "", pullErr
}
// Return the input name such that the image resolves to correct
// repo/tag in the backend (see #8082). Unless we're referring to
// the image via a transport.
if _, err := alltransports.ParseImageName(imageName); err == nil {
imageName = pullReport.Images[0]
}
return imageName, nil
}
// createPodIfNecessary automatically creates a pod when requested. if the pod name
// has the form new:ID, the pod ID is created and the name in the spec generator is replaced
// with ID.
func createPodIfNecessary(s *specgen.SpecGenerator, netOpts *entities.NetOptions) (*entities.PodCreateReport, error) {
if !strings.HasPrefix(s.Pod, "new:") {
return nil, nil
}
podName := strings.Replace(s.Pod, "new:", "", 1)
if len(podName) < 1 {
return nil, errors.Errorf("new pod name must be at least one character")
}
var err error
uns := specgen.Namespace{NSMode: specgen.Default}
if cliVals.UserNS != "" {
uns, err = specgen.ParseNamespace(cliVals.UserNS)
if err != nil {
return nil, err
}
}
createOptions := entities.PodCreateOptions{
Name: podName,
Infra: true,
Net: netOpts,
CreateCommand: os.Args,
Hostname: s.ContainerBasicConfig.Hostname,
Cpus: cliVals.CPUS,
CpusetCpus: cliVals.CPUSetCPUs,
Pid: cliVals.PID,
Userns: uns,
}
// Unset config values we passed to the pod to prevent them being used twice for the container and pod.
s.ContainerBasicConfig.Hostname = ""
s.ContainerNetworkConfig = specgen.ContainerNetworkConfig{}
s.Pod = podName
podSpec := entities.PodSpec{}
podGen := specgen.NewPodSpecGenerator()
podSpec.PodSpecGen = *podGen
podGen, err = entities.ToPodSpecGen(podSpec.PodSpecGen, &createOptions)
if err != nil {
return nil, err
}
infraOpts := entities.ContainerCreateOptions{ImageVolume: "bind", Net: netOpts, Quiet: true}
imageName := config.DefaultInfraImage
podGen.InfraImage = imageName
podGen.InfraContainerSpec = specgen.NewSpecGenerator(imageName, false)
podGen.InfraContainerSpec.RawImageName = imageName
podGen.InfraContainerSpec.NetworkOptions = podGen.NetworkOptions
err = specgenutil.FillOutSpecGen(podGen.InfraContainerSpec, &infraOpts, []string{})
if err != nil {
return nil, err
}
podSpec.PodSpecGen = *podGen
return registry.ContainerEngine().PodCreate(context.Background(), podSpec)
}
|
/*
* Copyright 2008 CoreMedia AG, Hamburg
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.coremedia.iso.boxes;
import com.coremedia.iso.IsoFile;
import com.coremedia.iso.IsoTypeReader;
import com.googlecode.mp4parser.AbstractBox;
import java.nio.ByteBuffer;
/**
* The Original Format Box contains the four-character-code of the original untransformed sample description.
* See ISO/IEC 14496-12 for details.
*
* @see ProtectionSchemeInformationBox
*/
public class OriginalFormatBox extends AbstractBox {
    public static final String TYPE = "frma";

    // The original format is a four-character code; default to four spaces
    // so the box always serializes to exactly 4 bytes.
    private String dataFormat = "    ";

    public OriginalFormatBox() {
        super("frma");
    }

    public String getDataFormat() {
        return dataFormat;
    }

    public void setDataFormat(String dataFormat) {
        assert dataFormat.length() == 4;
        this.dataFormat = dataFormat;
    }

    protected long getContentSize() {
        return 4;
    }

    @Override
    public void _parseDetails(ByteBuffer content) {
        // Read the four-character code directly; calling parseDetails() here
        // would re-enter the parsing machinery of AbstractBox.
        dataFormat = IsoTypeReader.read4cc(content);
    }

    @Override
    protected void getContent(ByteBuffer byteBuffer) {
        byteBuffer.put(IsoFile.fourCCtoBytes(dataFormat));
    }

    public String toString() {
        return "OriginalFormatBox[dataFormat=" + getDataFormat() + "]";
    }
}
|
The effectiveness of structured exercise in the South Asian population with type 2 diabetes: a systematic review ABSTRACT The evidence on the impact of exercise interventions in South Asians with type 2 diabetes (T2DM), who have a higher T2DM incidence rate than other ethnic groups, is inconclusive. This study aimed to systematically review the effect of exercise interventions in South Asians with T2DM. Five electronic databases were searched up to April 2017 for controlled trials investigating the impact of exercise interventions on South Asian adults with T2DM. The PEDro scale was used to assess the quality of the included studies. Eighteen trials examining the effect of aerobic, resistance, balance, or combined exercise programs met the eligibility criteria. All types of exercise were associated with improvements in glycemic control, blood pressure, waist circumference, blood lipids, muscle strength, functional mobility, quality of life, or neuropathy progression. The majority of the included studies were of poor methodological quality, and few compared different types or doses of exercise. In conclusion, this review supports the benefits of exercise for South Asians with T2DM, although it was not possible to identify the most effective exercise prescription. Further studies of good methodological quality are required to determine the most effective dosage and type of exercise to manage T2DM in this population. |
// Get the global (x,y,z) location of every quadrature (DOF) point on each 2D face
// (dimensions: nDOF x 3), for all faces.
// This function in turn calls the mapRSTtoXYZ functions of the Face class
// (e.g. Face::mapRSTtoXYZQuad() for a quadrilateral face).
void DG::getFaceDOFPointsGlobalLocation(){
	for(Index nface=0; nface<this->noOfFaces; nface++){
		if(faces[nface].getFaceType() == FaceType::Quad){
			FunctionalSpace F(this->order, this->intType);
			int Np = this->order + 1;
			int nDOF = Np * Np; // number of DOF points on a quad face (pow() would return a double)
			faces[nface].setNDOF(nDOF);
			faces[nface].getDOFPointsGlobalLocation()->setSize(nDOF, 3);

			// Reference-element coordinates (r,s) and weights of the LGL points
			TensorO1<double> r(nDOF);
			TensorO1<double> s(nDOF);
			TensorO1<double> w(nDOF);
			F.LGLRootsAndWeights2D(Np, Np, &r, &s, &w);

			Point faceDOFPoint;
			Point refFaceDOFPoint;
			for (Index DOF=0; DOF<nDOF; DOF++){
				refFaceDOFPoint.setX(r.getValue(DOF));
				refFaceDOFPoint.setY(s.getValue(DOF));
				refFaceDOFPoint.setZ(0.0);
				// Map the reference (r,s,0) point to global (x,y,z) coordinates
				faces[nface].mapRSTtoXYZQuad(&refFaceDOFPoint, &faceDOFPoint);
				faces[nface].getDOFPointsGlobalLocation()->setValue(DOF, 0, faceDOFPoint.getX());
				faces[nface].getDOFPointsGlobalLocation()->setValue(DOF, 1, faceDOFPoint.getY());
				faces[nface].getDOFPointsGlobalLocation()->setValue(DOF, 2, faceDOFPoint.getZ());
			};
		}
		else if(faces[nface].getFaceType() == FaceType::Tri){
			// Triangular faces are not handled yet.
		};
	};
} |
package qword.spring.petclinic.controllers;

import org.springframework.stereotype.Controller;
import org.springframework.web.bind.annotation.RequestMapping;

@Controller
@RequestMapping("/owners")
public class OwnerController {

    // Maps the owners index URLs to the owners/index view template.
    @RequestMapping({"", "/", "/index", "/index.html"})
    public String listOwners() {
        return "owners/index";
    }
}
|
A Pilot Study on the Effect of Virgin Coconut Oil on Serum Lipid Profile and hs-CRP Level Among Post Acute Coronary Syndrome Patients: A Randomized Controlled Trial ABSTRACT Introduction: Acute coronary syndrome (ACS) is a leading cause of death in Malaysia and worldwide. Besides, the current treatment, which involves the prescription of statins, has been found to have several side effects on ACS patients. Those side effects guided the authors to introduce virgin coconut oil (VCO) as a supplemental management of ACS. However, its benefits have not been widely tested on humans. Methodology: This study examines the use of VCO among ACS patients via a crossover trial. It seeks to ascertain the effect of VCO on serum lipid profile and hs-CRP level among ACS patients. Result: VCO was found to be statistically significant in reducing serum lipid level and hs-CRP level (p<0.001). These findings measured from small to moderate Cohen's d effect sizes, thus proving the results from this study to be both statistically and clinically significant. Conclusion: These findings suggest that dietary intake of saturated fatty acids (C6 to C12) can improve health condition. Keywords: virgin coconut oil, acute coronary syndrome, saturated fatty acid, medium-chain triglycerides |
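For readers unfamiliar with the statistics reported in the abstract above, the short sketch below shows how a paired analysis of a two-period crossover trial might compute a paired t-test and a Cohen's d effect size. The numbers are hypothetical and are not data from this study.

import numpy as np
from scipy import stats

# Hypothetical end-of-period LDL-cholesterol values (mmol/L) for the same
# eight patients under VCO and under the control condition.
vco = np.array([3.1, 2.8, 3.4, 2.9, 3.0, 3.3, 2.7, 3.2])
control = np.array([3.5, 3.1, 3.6, 3.3, 3.4, 3.7, 3.0, 3.5])

diff = vco - control
t_stat, p_value = stats.ttest_rel(vco, control)  # paired t-test
cohens_d = diff.mean() / diff.std(ddof=1)        # Cohen's d for paired data

print(f"t = {t_stat:.2f}, p = {p_value:.4f}, d = {cohens_d:.2f}")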
We’ve had it backward for the last 30 years. Rich businesspeople like me don’t create jobs. Rather, they are a consequence of an ecosystemic feedback loop animated by middle-class consumers: when those consumers thrive, businesses grow and hire, and owners profit. That’s why taxing the rich to pay for investments that benefit all is a great deal for both the middle class and the rich.
Job growth over the post-war period, meanwhile, has been stronger when the top income tax rate was higher. “If you ranked each year since 1950 by overall job growth, the top five years would all boast marginal tax rates at 70 percent or higher,” Michael Linden, the director of tax and budget policy at the Center for American Progress, wrote last year.
Hanauer’s speech may be “too hot for TED,” but that doesn’t mean it isn’t true.
*Hanauer has been working on tax and inequality issues with the Center for American Progress. |
/*
* Copyright 2020-2020 <NAME> and Contributors (https://gitlab.com/Elypia/magick-image-reader/-/graphs/master)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { MagickBackground } from "../configuration/magick-background";
import { CheckeredBackground } from "../configuration/backgrounds/checkered-background";
import { TransparentBackground } from "../configuration/backgrounds/transparent-background";
import { CustomBackground } from "../configuration/backgrounds/custom-background";
/**
 * Wrapper around the background implementations for convenient and
 * centralized access.
 *
 * @since 0.4.0
 */
export class BackgroundUtils {

  /** A map of all background implementations, keyed by ID. */
  public static readonly All: Map<string, () => MagickBackground> = new Map<string, () => MagickBackground>()
    .set('checkered', () => new CheckeredBackground())
    .set('transparent', () => new TransparentBackground())
    .set('custom', () => new CustomBackground());

  /**
   * Statically obtains the required background configuration.
   *
   * @param id The ID of the background to use.
   */
  public static getBackgroundById(id: string): MagickBackground {
    if (!id)
      throw new Error('Background ID can not be null or blank');

    const background: (() => MagickBackground) | undefined = this.All.get(id);

    if (!background)
      throw new Error('No background option has the specified ID');

    return background();
  }
}
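// Example usage (hypothetical caller):
// const background: MagickBackground = BackgroundUtils.getBackgroundById('checkered');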
|
package it.gulch.linuxday.android.db.manager.impl;
import android.util.Log;
import com.j256.ormlite.dao.Dao;
import com.j256.ormlite.stmt.DeleteBuilder;
import com.j256.ormlite.stmt.PreparedDelete;
import com.j256.ormlite.stmt.QueryBuilder;
import java.sql.SQLException;
import java.util.Collections;
import java.util.Date;
import java.util.List;
import it.gulch.linuxday.android.db.OrmLiteDatabaseHelper;
import it.gulch.linuxday.android.db.manager.BookmarkManager;
import it.gulch.linuxday.android.model.db.Bookmark;
import it.gulch.linuxday.android.model.db.Event;
/**
* Created by paolo on 07/09/14.
*/
public class BookmarkManagerImpl implements BookmarkManager
{
private static final String TAG = BookmarkManagerImpl.class.getSimpleName();
private Dao<Bookmark, Long> dao;
private Dao<Event, Long> eventDao;
private BookmarkManagerImpl()
{
}
public static BookmarkManager newInstance(OrmLiteDatabaseHelper helper) throws SQLException
{
BookmarkManagerImpl bookmarkManager = new BookmarkManagerImpl();
bookmarkManager.dao = helper.getDao(Bookmark.class);
bookmarkManager.eventDao = helper.getDao(Event.class);
return bookmarkManager;
}
@Override
public Bookmark get(Long id)
{
try {
return dao.queryForId(id);
} catch(SQLException e) {
Log.e(TAG, e.getMessage(), e);
return null;
}
}
@Override
public List<Bookmark> getAll()
{
try {
return dao.queryForAll();
} catch(SQLException e) {
Log.e(TAG, e.getMessage(), e);
return Collections.emptyList();
}
}
@Override
public void save(Bookmark object) throws SQLException
{
dao.create(object);
}
@Override
public void saveOrUpdate(Bookmark object) throws SQLException
{
dao.createOrUpdate(object);
}
@Override
public void update(Bookmark object) throws SQLException
{
dao.update(object);
}
@Override
public void delete(Bookmark object) throws SQLException
{
dao.delete(object);
}
@Override
public void truncate() throws SQLException
{
PreparedDelete<Bookmark> preparedDelete = dao.deleteBuilder().prepare();
dao.delete(preparedDelete);
}
@Override
public boolean exists(Long objectId) throws SQLException
{
return dao.idExists(objectId);
}
@Override
public void deleteOldBookmarks(Long minEventId) throws SQLException
{
DeleteBuilder<Bookmark, Long> deleteBuilder = dao.deleteBuilder();
deleteBuilder.where().lt("event_id", minEventId);
PreparedDelete<Bookmark> preparedDelete = deleteBuilder.prepare();
dao.delete(preparedDelete);
}
@Override
public void addBookmark(Event event) throws SQLException
{
Bookmark bookmark = new Bookmark();
bookmark.setEvent(event);
save(bookmark);
}
@Override
public void removeBookmark(Event event) throws SQLException
{
DeleteBuilder<Bookmark, Long> deleteBuilder = dao.deleteBuilder();
deleteBuilder.where().eq("event_id", event.getId());
dao.delete(deleteBuilder.prepare());
}
@Override
public void removeBookmarksByEventId(List<Long> eventIds) throws SQLException
{
DeleteBuilder<Bookmark, Long> deleteBuilder = dao.deleteBuilder();
deleteBuilder.where().in("event_id", eventIds);
dao.delete(deleteBuilder.prepare());
}
@Override
public List<Bookmark> getBookmarks(Date minStartTime) throws SQLException
{
QueryBuilder<Bookmark, Long> queryBuilder = dao.queryBuilder();
if(minStartTime != null) {
QueryBuilder<Event, Long> eventQueryBuilder = eventDao.queryBuilder();
eventQueryBuilder.where().gt("startdate", minStartTime);
queryBuilder.join(eventQueryBuilder);
}
return dao.query(queryBuilder.prepare());
}
}
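// Usage sketch (illustrative only; assumes an OrmLiteDatabaseHelper named
// "helper" and an Event named "event" obtained elsewhere):
//
//   BookmarkManager bookmarks = BookmarkManagerImpl.newInstance(helper);
//   bookmarks.addBookmark(event); // persists a new Bookmark for the event
//   List<Bookmark> upcoming = bookmarks.getBookmarks(new Date()); // events starting after now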
|
Atlantoaxial mobility after screw fixation of the odontoid: a computed tomographic study. Between 1979 and 1989, anterior screw fixation of the odontoid process was performed in 16 patients with fractures of the odontoid. One patient died suddenly 2 days after the operation. Postmortem examination could not disclose the cause of death. No other complication was noted. We followed 13 patients. At examinations 7 to 82 months after injury, all fractures were consolidated in reduced position. In all patients, a functional computed tomographic (CT) examination of the atlantoaxial rotation was performed. Atlantoaxial rotation measurement ranged from 7 to 38 degrees to the right (average: 25.2 degrees) and 7 to 41 degrees (average: 24.1 degrees) to the left side. Five patients presented a normal range of atlantoaxial rotation, 29 to 41 degrees; 3 had a rotation of 20 to 28 degrees; 3 a rotation of 10 to 20 degrees; and in 2, rotation was less than 10 degrees to one side. Our results suggest that anterior screw fixation is the therapy of choice for Type II and cephalad Type III dens fractures. However, significant complications have been reported by other authors. Therefore, a careful surgical technique is mandatory, and contraindications should be respected. |
#include <Python.h>
#include <windows.h>
#include "MyLoadLibrary.h"
#include "python-dynload.h"
/*
This module allows us to dynamically load the python DLL.
We have to #define Py_BUILD_CORE when we compile our stuff,
then the exe doesn't try to link with pythonXY.lib, and also
the following definitions compile.
We use MyGetProcAddress to get the functions from the dynamically
loaded python DLL, so it will work both with the DLL loaded from the
file system as well as loaded from memory.
Problems:
- We cannot use vararg functions that have no va_list counterpart.
- What about the flags or other data exported from Python?
- Error handling MUST be improved...
- Should we use a python script to generate this code
from function prototypes automatically?
*/
static HMODULE hmod_pydll;
/*
The python dll may be loaded from memory or in the usual way.
MyGetProcAddress handles both cases.
*/
#define FUNC(res, name, args) \
static res(*proc)args; \
if (!proc) (FARPROC)proc = MyGetProcAddress(hmod_pydll, #name)
#define DATA(type, name) \
static type pflag; \
if (!pflag) pflag = (type)MyGetProcAddress(hmod_pydll, #name); \
return pflag
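/*
 * Illustrative note (not in the original source): FUNC caches the resolved
 * function pointer in a function-local static on first use, so a wrapper such
 * as Py_IsInitialized() below expands roughly to:
 *
 *   static int (*proc)(void);
 *   if (!proc) (FARPROC)proc = MyGetProcAddress(hmod_pydll, "Py_IsInitialized");
 *   return proc();
 *
 * DATA does the same for pointers to data objects exported by the python DLL.
 */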
////////////////////////////////////////////////////////////////
int *_Py_OptimizeFlag_PTR()
{
DATA(int *, Py_OptimizeFlag);
}
int *_Py_NoSiteFlag_PTR()
{
DATA(int *, Py_NoSiteFlag);
}
int *_Py_VerboseFlag_PTR()
{
DATA(int *, Py_VerboseFlag);
}
char **__Py_PackageContext_PTR()
{
DATA(char **, _Py_PackageContext);
}
PyTypeObject *PyModuleDef_Type_PTR()
{
DATA(PyTypeObject *, PyModuleDef_Type);
}
////////////////////////////////////////////////////////////////
int Py_IsInitialized(void)
{
FUNC(int, Py_IsInitialized, (void));
return proc();
}
PyObject *PyLong_FromVoidPtr(void *p)
{
FUNC(PyObject *, PyLong_FromVoidPtr, (void *));
return proc(p);
}
PyObject *PyErr_SetImportError(PyObject *msg, PyObject *name, PyObject *path)
{
FUNC(PyObject *, PyErr_SetImportError, (PyObject *, PyObject *, PyObject *));
return proc(msg, name, path);
}
void PyErr_SetString(PyObject *type, const char *message)
{
FUNC(void, PyErr_SetString, (PyObject *, const char *));
proc(type, message);
}
int Py_FdIsInteractive(FILE *fp, const char *filename)
{
FUNC(int, Py_FdIsInteractive, (FILE *, const char *));
return proc(fp, filename);
}
int PyRun_InteractiveLoopFlags(FILE *fp, const char *filename, PyCompilerFlags *flags)
{
FUNC(int, PyRun_InteractiveLoopFlags, (FILE *, const char *, PyCompilerFlags *));
return proc(fp, filename, flags);
}
int PyRun_SimpleStringFlags(const char *command, PyCompilerFlags *flags)
{
FUNC(int, PyRun_SimpleStringFlags, (const char *, PyCompilerFlags *));
return proc(command, flags);
}
void PyGILState_Release(PyGILState_STATE state)
{
FUNC(void, PyGILState_Release, (PyGILState_STATE));
proc(state);
}
PyGILState_STATE PyGILState_Ensure(void)
{
FUNC(PyGILState_STATE, PyGILState_Ensure, (void));
return proc();
}
wchar_t *Py_GetPath(void)
{
FUNC(wchar_t *, Py_GetPath, (void));
return proc();
}
void Py_SetPath(const wchar_t *path)
{
FUNC(void, Py_SetPath, (const wchar_t *));
proc(path);
}
void Py_Finalize(void)
{
FUNC(void, Py_Finalize, (void));
proc();
}
void Py_Initialize(void)
{
FUNC(void, Py_Initialize, (void));
proc();
}
void PyErr_Clear(void)
{
FUNC(void, PyErr_Clear, (void));
proc();
}
PyObject *PyErr_Occurred(void)
{
FUNC(PyObject *, PyErr_Occurred, (void));
return proc();
}
void PyErr_Print(void)
{
FUNC(void, PyErr_Print, (void));
proc();
}
void Py_SetProgramName(wchar_t *name)
{
FUNC(void, Py_SetProgramName, (wchar_t *));
proc(name);
}
void PySys_SetArgvEx(int argc, wchar_t **argv, int updatepath)
{
FUNC(void, PySys_SetArgvEx, (int, wchar_t **, int));
proc(argc, argv, updatepath);
}
PyObject *PyImport_AddModule(const char *name)
{
FUNC(PyObject *, PyImport_AddModule, (const char *));
return proc(name);
}
PyObject *PyModule_GetDict(PyObject *m)
{
FUNC(PyObject *, PyModule_GetDict, (PyObject *));
return proc(m);
}
PyObject *PyMarshal_ReadObjectFromString(char *string, Py_ssize_t len)
{
FUNC(PyObject *, PyMarshal_ReadObjectFromString, (char *, Py_ssize_t));
return proc(string, len);
}
PyObject *PySequence_GetItem(PyObject *seq, Py_ssize_t i)
{
FUNC(PyObject *, PySequence_GetItem, (PyObject *, Py_ssize_t));
return proc(seq, i);
}
Py_ssize_t PySequence_Size(PyObject *seq)
{
FUNC(Py_ssize_t, PySequence_Size, (PyObject *));
return proc(seq);
}
PyObject *PyEval_EvalCode(PyObject *co, PyObject *globals, PyObject *locals)
{
FUNC(PyObject *, PyEval_EvalCode, (PyObject *, PyObject *, PyObject *));
return proc(co, globals, locals);
}
int PyImport_AppendInittab(const char *name, PyObject* (*initfunc)(void))
{
FUNC(int, PyImport_AppendInittab, (const char *, PyObject *(*)(void)));
return proc(name, initfunc);
}
PyObject *PyModule_Create2(PyModuleDef *module, int module_api_version)
{
FUNC(PyObject *, PyModule_Create2, (PyModuleDef *, int));
return proc(module, module_api_version);
}
PyObject *PyModuleDef_Init(PyModuleDef *module)
{
FUNC(PyObject *, PyModuleDef_Init, (PyModuleDef *));
return proc(module);
}
int PyType_IsSubtype(PyTypeObject *a, PyTypeObject *b)
{
FUNC(int, PyType_IsSubtype, (PyTypeObject *, PyTypeObject *));
return proc(a, b);
}
PyObject *PyModule_FromDefAndSpec2(struct PyModuleDef* def, PyObject *spec, int module_api_version)
{
FUNC(PyObject *, PyModule_FromDefAndSpec2, (struct PyModuleDef *, PyObject *, int));
return proc(def, spec, module_api_version);
}
PyObject *PyImport_ReloadModule(PyObject *m)
{
FUNC(PyObject *, PyImport_ReloadModule, (PyObject *));
return proc(m);
}
PyObject *PyLong_FromLong(long n)
{
FUNC(PyObject *, PyLong_FromLong, (long));
return proc(n);
}
int PyArg_ParseTuple(PyObject *args, const char *format, ...)
{
int result;
va_list marker;
FUNC(int, PyArg_VaParse, (PyObject *, const char *, va_list));
va_start(marker, format);
result = proc(args, format, marker);
va_end(marker);
    return result;
}
PyObject *PyUnicode_FromFormat(const char *format, ...)
{
PyObject *result;
va_list marker;
FUNC(PyObject *, PyUnicode_FromFormatV, (const char *, va_list));
va_start(marker, format);
result = proc(format, marker);
va_end(marker);
return result;
}
PyObject *PyUnicode_FromWideChar(const wchar_t *w, Py_ssize_t size)
{
FUNC(PyObject *, PyUnicode_FromWideChar, (const wchar_t *, Py_ssize_t));
return proc(w, size);
}
PyObject *PyObject_CallObject(PyObject *callable, PyObject *args)
{
FUNC(PyObject *, PyObject_CallObject, (PyObject *, PyObject *));
return proc(callable, args);
}
PyObject *PyTuple_New(Py_ssize_t len)
{
FUNC(PyObject *, PyTuple_New, (Py_ssize_t));
return proc(len);
}
int PyTuple_SetItem(PyObject *p, Py_ssize_t pos, PyObject *o)
{
FUNC(int, PyTuple_SetItem, (PyObject *, Py_ssize_t, PyObject *));
return proc(p, pos, o);
}
PyObject *PyUnicode_FromString(const char *u)
{
FUNC(PyObject *, PyUnicode_FromString, (const char *));
return proc(u);
}
#undef _Py_Dealloc
void _Py_Dealloc(PyObject *op)
{
destructor dealloc = Py_TYPE(op)->tp_dealloc;
#ifdef Py_TRACE_REFS
_Py_ForgetReference(op);
#else
#if (PY_VERSION_HEX < 0x03090000)
_Py_INC_TPFREES(op);
#endif
#endif
(*dealloc)(op);
}
char *PyBytes_AsString(PyObject *string)
{
FUNC(char *, PyBytes_AsString, (PyObject *));
return proc(string);
}
PyModuleDef *PyModule_GetDef(PyObject *module)
{
FUNC(PyModuleDef *, PyModule_GetDef, (PyObject *));
return proc(module);
}
void *PyModule_GetState(PyObject *module)
{
FUNC(void *, PyModule_GetState, (PyObject *));
return proc(module);
}
int PyModule_ExecDef(PyObject *module, PyModuleDef *def)
{
FUNC(int, PyModule_ExecDef, (PyObject *, PyModuleDef *));
return proc(module, def);
}
#if (PY_VERSION_HEX >= 0x03070000)
PyObject *PyImport_GetModuleDict(void)
{
FUNC(PyObject *, PyImport_GetModuleDict, (void));
return proc();
}
#endif
PyObject *PyModule_New(const char *name)
{
FUNC(PyObject *, PyModule_New, (const char *));
return proc(name);
}
PyObject *PyImport_ImportModule(const char *name)
{
FUNC(PyObject *, PyImport_ImportModule, (const char *));
return proc(name);
}
PyObject *_PyImport_FindExtensionObject(PyObject *a, PyObject *b)
{
FUNC(PyObject *, _PyImport_FindExtensionObject, (PyObject *, PyObject *));
return proc(a, b);
}
#if (PY_VERSION_HEX >= 0x03070000)
int _PyImport_FixupExtensionObject(PyObject *m, PyObject *a, PyObject *b, PyObject *l)
{
FUNC(int, _PyImport_FixupExtensionObject, (PyObject *, PyObject *, PyObject *, PyObject *));
return proc(m, a, b, l);
}
#else
int _PyImport_FixupExtensionObject(PyObject *m, PyObject *a, PyObject *b)
{
FUNC(int, _PyImport_FixupExtensionObject, (PyObject *, PyObject *, PyObject *));
return proc(m, a, b);
}
#endif
int PySys_SetObject(const char *name, PyObject *v)
{
FUNC(int, PySys_SetObject, (const char *, PyObject *));
return proc(name, v);
}
void PyErr_SetObject(PyObject *type, PyObject *value)
{
    FUNC(void, PyErr_SetObject, (PyObject *, PyObject *));
proc(type, value);
}
PyObject *PyBool_FromLong(long v)
{
FUNC(PyObject *, PyBool_FromLong, (long));
return proc(v);
}
int PyObject_SetAttrString(PyObject *o, const char *attr_name, PyObject *v)
{
FUNC(int, PyObject_SetAttrString, (PyObject *, const char *, PyObject *));
return proc(o, attr_name, v);
}
#if (PY_VERSION_HEX < 0x03090000)
PyObject *PyCFunction_NewEx(PyMethodDef *methdef, PyObject *self, PyObject *foo)
{
FUNC(PyObject *, PyCFunction_NewEx, (PyMethodDef *, PyObject *, PyObject *));
return proc(methdef, self, foo);
}
#endif
#if (PY_VERSION_HEX >= 0x03090000)
PyObject *PyCMethod_New(PyMethodDef *ml, PyObject *self, PyObject *module, PyTypeObject *cls)
{
FUNC(PyObject *, PyCMethod_New, (PyMethodDef *, PyObject *, PyObject *, PyTypeObject *));
return proc(ml, self, module, cls);
}
#endif
////////////////////////////////////////////////////////////////
int PythonLoaded(HMODULE hmod)
{
hmod_pydll = hmod;
PyExc_SystemError = *((PyObject **)MyGetProcAddress(hmod, "PyExc_SystemError"));
if (PyExc_SystemError == NULL)
return -1;
PyExc_ImportError = *((PyObject **)MyGetProcAddress(hmod, "PyExc_ImportError"));
if (PyExc_ImportError == NULL)
return -1;
PyExc_RuntimeError = *((PyObject **)MyGetProcAddress(hmod, "PyExc_RuntimeError"));
if (PyExc_RuntimeError == NULL)
return -1;
return 0;
}
PyObject *PyExc_SystemError;
PyObject *PyExc_ImportError;
PyObject *PyExc_RuntimeError;
//Py_VerboseFlag
|
What Can Erode Through Lungs, Bone and Skin? CASE A 51-year-old African American man without significant past history presented with three weeks of persistent cough productive of copious yellow sputum. He denied fevers, chills, hemoptysis, dyspnea, weight or appetite changes, sick contacts, or recent travel. On physical examination, the patient was afebrile and appeared comfortable. He had decreased air entry of the left lower lobe with dullness to percussion. A 5x3 cm fluctuant mass was incidentally found on the left anterior chest wall at the level of the 11th rib with yellow expressible exudate, at which time the patient reported a minor trauma sustained 3 weeks prior. WBC count was 17,300/mcL. CT chest identified a peripherally enhancing fluid-attenuation structure in the left lower lung measuring 11.8 cm x 11.3 cm x 9.6 cm. The collection appeared to be tracking out from the pleural space to the exterior skin, corresponding to the site of the chest wall swelling. There was also a focal lytic lesion of the adjacent ribs. He was empirically started on vancomycin, clindamycin and piperacillin-tazobactam. CT-guided aspiration failed because the material was too viscous to be aspirated; a chest tube drained copious yellow exudate. Blood cultures and respiratory cultures were negative. Gram stain of the purulent material demonstrated clusters of branching gram positive rods. Pathology showed necrotic debris with clusters of filamentous gram negative organisms. Acid fast and Kinyoun stains were negative. He was started on empiric Penicillin G for empyema necessitans with a presumed etiology of actinomyces. Due to development of hypersensitivity drug eruption from PCN, intravenous doxycycline was started for a total of 14 days followed by 6 months of oral therapy. Imaging four weeks after treatment showed significant reduction in size of the lesion. Culture confirmed Actinomyces israelii. DISCUSSION Actinomyces are anaerobic gram positive commensals of the oral cavity notorious for breaching through tissue planes. Thoracic manifestations are varied and can mimic malignancy. Astute microbiology and pathology tests are necessary to make an early diagnosis and prevent invasive surgery, as the organism is a slow-growing anaerobic bacterium. An excellent clinical and radiologic response was noted in our case following treatment with chest wall drainage and antibiotics, thus avoiding invasive thoracic surgery. |
Exemplary embodiments of the present invention relate to a semiconductor design technology, and more particularly, to a technology for terminating internal transmission lines of a semiconductor device.
As semiconductor devices are designed to operate at higher operating frequencies, it is important to ensure a sufficient data window size and timing margin when data is outputted at high speed.
FIG. 1 is a circuit diagram of a conventional semiconductor device.
Referring to FIG. 1, the conventional semiconductor device includes a pre-driving unit 11 and a main driving unit 12.
The pre-driving unit 11 generates a pull-up driving signal PU and a pull-down driving signal PD corresponding to an output data signal DATA_OUT, and transfers the generated pull-up and pull-down driving signals PU and PD to a first transmission line LINE1 and a second transmission line LINE2, respectively. Specifically, a first pre-driving unit PDRV1 generates the pull-up driving signal PU and transfers it to the first transmission line LINE1, and a second pre-driving unit PDRV2 generates the pull-down driving signal PD and transfers it to the second transmission line LINE2.
The main driving unit 12 drives a data input/output pad DQ in response to the pull-up driving signal PU and the pull-down driving signal PD transferred through the first transmission line LINE1 and the second transmission line LINE2. Specifically, a pull-up driving unit MDRV1 or a pull-down driving unit MDRV2 of the main driving unit 12 pulls up or pulls down the data input/output pad DQ to a power supply voltage VDDQ or a ground voltage VSSQ according to the control of the pull-up driving signal PU and the pull-down driving signal PD.
FIG. 2 is a waveform diagram of the pull-up driving signal and the pull-down driving signal in the conventional semiconductor device.
Specifically, FIG. 2 is a waveform diagram of the pull-up driving signal PU and the pull-down driving signal PD that are generated from the pre-driving unit 11 when the output data signal DATA_OUT is “0100101101,” and then transferred to the first transmission line LINE1 and the second transmission line LINE2.
Referring to FIG. 2, a first waveform 21 represents a case where data is outputted at low speed while the semiconductor device operates at a relatively low operating frequency of A Hz, and a second waveform 22 represents a case where data is outputted at high speed while the semiconductor device operates at a relatively high operating frequency of 2×A Hz. For reference, the second waveform 22 is a waveform of the pull-up driving signal PU and the pull-down driving signal PD when the operating frequency is two times higher than the first waveform 21.
If the operating frequency becomes high and data is outputted at high speed, the 1 unit interval (UI) corresponding to the width of 1 data bit gradually becomes narrower, since the unit interval is the reciprocal of the data rate: doubling the operating frequency from A Hz to 2×A Hz halves the 1 UI. However, if the 1 UI becomes too narrow, the full swing of the signals cannot be achieved, as illustrated in the second waveform 22. If the full swing of the signals is not achieved during the 1 UI, pattern jitter may be generated. This pattern jitter reduces the effective data window size and timing margin. |
// src/gamelogic/cards/base/curse.h
#pragma once
#include "card.h"
class Curse : public Card {
public:
int victoryPoints(Deck const*) const override {
return -1;
}
protected:
friend class Supply;
Curse() {
m_info = {
CardId::Curse,
Card::NoType,
NoHints,
Cost{0}
};
}
};
|
from tkinter.scrolledtext import ScrolledText
class output_terminal(ScrolledText):
""" This class aims to pretty print all the outputs. """
def __init__(self, parent):
super().__init__(parent, bg="white", wrap='word')
self.configure(state='disabled')
self.tag_config('blue', foreground="blue")
self.tag_config('red', foreground="red")
self.tag_config('orange', foreground="orange")
self.tag_config('gray', foreground="gray30")
self.tag_config('green', foreground="green")
self.tag_config('black', foreground="black")
def pretty_print(self, s, fg='black'):
self.configure(state='normal')
self.insert('end', s, fg)
self.yview_moveto(1)
self.configure(state='disabled')
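# Usage sketch (illustrative, not part of the original module):
#
#   import tkinter as tk
#
#   root = tk.Tk()
#   terminal = output_terminal(root)
#   terminal.pack(fill='both', expand=True)
#   terminal.pretty_print('All tests passed\n', fg='green')
#   root.mainloop()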
|
from functools import wraps
from typing import Union, Callable
import matplotlib.animation as animation
import matplotlib.pyplot as plt
import numpy as np
import zproc
from matplotlib.lines import Line2D
zproc_ctx = zproc.Context()
ZPROC_INTERNAL_NAMESPACE = "oscilloscope"
class Normalizer:
def __init__(self, output_range: tuple = (0, 100)):
self._input_min = 0
self._input_max = 0
self._output_min, self._output_max = output_range
self._output_diff = self._output_max - self._output_min
self._norm_factor = 0
def _refresh_norm_factor(self):
self._norm_factor = 1 / (self._input_max - self._input_min) * self._output_diff
def _refresh_bounds(self, input_value):
if input_value < self._input_min:
self._input_min = input_value
self._refresh_norm_factor()
elif input_value > self._input_max:
self._input_max = input_value
self._refresh_norm_factor()
def normalize(self, input_value):
self._refresh_bounds(input_value)
return (input_value - self._input_min) * self._norm_factor + self._output_min
def shift(ax, x):
return np.delete(np.append(ax, x), 0)
class AnimationScope:
def __init__(
self,
ax: plt.Axes,
window_sec,
frame_interval_sec,
row_index,
col_index,
intensity,
padding_percent,
):
self.row_index = row_index
self.col_index = col_index
self.ax = ax
self.padding_percent = padding_percent
self.frame_interval_sec = frame_interval_sec
self.num_frames = int(window_sec / self.frame_interval_sec)
self.y_values = np.zeros([1, self.num_frames])
self.x_values = np.linspace(-window_sec, 0, self.num_frames)
self.line = Line2D(self.x_values, self.y_values, linewidth=intensity)
self.ax.add_line(self.line)
self.ax.set_xlim(-window_sec, 0)
        self.y_limits = np.array([0.0, np.finfo(float).eps])
self.ax.set_ylim(self.y_limits[0], self.y_limits[1])
self._internal_state = zproc_ctx.create_state(
namespace=ZPROC_INTERNAL_NAMESPACE
)
def _adjust_ylim(self):
padding = self.padding_percent * (self.y_limits[1] - self.y_limits[0]) / 100
self.ax.set_ylim(self.y_limits[0] - padding, self.y_limits[1] + padding)
def _adjust_ylim_if_req(self, amplitude):
if amplitude < self.y_limits[0]:
self.y_limits[0] = amplitude
self._adjust_ylim()
elif amplitude > self.y_limits[1]:
self.y_limits[1] = amplitude
self._adjust_ylim()
def draw(self, _):
try:
amplitude, kwargs = self._internal_state[(self.row_index, self.col_index)]
except KeyError:
pass
else:
# set the labels
self.ax.set(**kwargs)
try:
size = np.ceil(self.num_frames / len(amplitude))
self.y_values = np.resize(
np.repeat(np.array([amplitude]), size, axis=1), [1, self.num_frames]
)
self._adjust_ylim_if_req(np.min(self.y_values))
self._adjust_ylim_if_req(np.max(self.y_values))
except TypeError:
self.y_values = shift(self.y_values, amplitude)
self._adjust_ylim_if_req(amplitude)
# update line
self.line.set_data(self.x_values, self.y_values)
return [self.line]
def _signal_process(ctx: zproc.Context, fn: Callable, normalize: bool, *args, **kwargs):
if normalize:
normalizer = Normalizer()
def _normalize(val):
return normalizer.normalize(val)
else:
def _normalize(val):
return val
state = ctx.create_state()
_internal_state = state.fork(namespace=ZPROC_INTERNAL_NAMESPACE)
def draw(amplitude, *, row=0, col=0, **kwargs):
amplitude = _normalize(amplitude)
_internal_state[(row, col)] = amplitude, kwargs
state.draw = draw
fn(state, *args, **kwargs)
class Osc:
def __init__(
self,
*,
fps: Union[float, int] = 24,
window_sec: Union[float, int] = 5,
intensity: Union[float, int] = 2.5,
normalize: bool = False,
xlabel: str = "Time (sec)",
ylabel: str = "Amplitude",
nrows: int = 1,
ncols: int = 1,
padding_percent: Union[float, int] = 0,
):
frame_interval_sec = 1 / fps
self.nrows = nrows
self.ncols = ncols
self.normalize = normalize
self.xlabel = xlabel
self.ylabel = ylabel
self.anim_scopes = {}
self.gc_protect = []
fig, axes = plt.subplots(self.nrows, self.ncols, squeeze=False)
for row_index, row_axes in enumerate(axes):
for col_index, ax in enumerate(row_axes):
scope = AnimationScope(
ax=ax,
window_sec=window_sec,
frame_interval_sec=frame_interval_sec,
row_index=row_index,
col_index=col_index,
intensity=intensity,
padding_percent=padding_percent,
)
self.gc_protect.append(
animation.FuncAnimation(
fig, scope.draw, interval=frame_interval_sec * 1000, blit=True
)
)
self.anim_scopes[(row_index, col_index)] = scope
def signal(self, fn=None, **process_kwargs):
        if fn is None:
            # Called with keyword arguments only (e.g. @osc.signal(args=(...))):
            # return a decorator that re-enters signal() with the actual function.
            def wrapper(fn):
                return self.signal(fn, **process_kwargs)

            return wrapper
process_kwargs["start"] = False
process_kwargs["args"] = (fn, self.normalize, *process_kwargs.get("args", ()))
return zproc_ctx.spawn(_signal_process, **process_kwargs)
def start(self):
zproc_ctx.start_all()
plt.show()
zproc_ctx.wait()
def stop(self):
zproc_ctx.stop_all()
plt.close()
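# Usage sketch (illustrative, based only on the API defined above; the signal
# function receives a zproc state object whose draw() method is injected in
# _signal_process):
#
#   import math, time
#
#   osc = Osc(fps=24, window_sec=5)
#
#   @osc.signal
#   def sine_wave(state):
#       t = 0.0
#       while True:
#           state.draw(math.sin(t), row=0, col=0)
#           t += 0.05
#           time.sleep(0.01)
#
#   osc.start()  # spawns the signal process, shows the plot, and blocks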
|
// emu-ex-plus-alpha/imagine/src/data-type/image/png/Quartz2d.cc
/* This file is part of Imagine.
Imagine is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Imagine is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Imagine. If not, see <http://www.gnu.org/licenses/> */
#define thisModuleName "quartzpng"
#include "Quartz2d.hh"
#include <assert.h>
#include <logger/interface.h>
#include <base/Base.hh>
#include <base/iphone/private.hh>
#include <mem/interface.h>
#include <util/pixel.h>
#include <util/strings.h>
#include <CoreGraphics/CGBitmapContext.h>
#include <CoreGraphics/CGContext.h>
uint Quartz2dImage::width()
{
return CGImageGetWidth(img);
}
uint Quartz2dImage::height()
{
return CGImageGetHeight(img);
}
bool Quartz2dImage::isGrayscale()
{
return CGImageGetBitsPerPixel(img) == 8;
}
const PixelFormatDesc *Quartz2dImage::pixelFormat()
{
if(isGrayscale())
return &PixelFormatI8;
else
return hasAlphaChannel() ? &PixelFormatRGBA8888 : &PixelFormatRGB888;
}
CallResult Quartz2dImage::load(const char *name)
{
freeImageData();
CGDataProviderRef dataProvider = CGDataProviderCreateWithFilename(name);
if(!dataProvider)
{
logErr("error creating opening file: %s", name);
return INVALID_PARAMETER;
}
img = CGImageCreateWithPNGDataProvider(dataProvider, nullptr, 0, kCGRenderingIntentDefault);
CGDataProviderRelease(dataProvider);
if(!img)
{
logErr("error creating CGImage from file: %s", name);
return INVALID_PARAMETER;
}
return OK;
}
bool Quartz2dImage::hasAlphaChannel()
{
auto info = CGImageGetAlphaInfo(img);
return info == kCGImageAlphaPremultipliedLast || info == kCGImageAlphaPremultipliedFirst
|| info == kCGImageAlphaLast || info == kCGImageAlphaFirst;
}
CallResult Quartz2dImage::readImage(void* buffer, uint pitch, const PixelFormatDesc &outFormat)
{
int height = this->height();
int width = this->width();
auto colorSpace = isGrayscale() ? Base::grayColorSpace : Base::rgbColorSpace;
auto bitmapInfo = hasAlphaChannel() ? kCGImageAlphaPremultipliedLast : kCGImageAlphaNone;
auto context = CGBitmapContextCreate(buffer, width, height, 8, pitch, colorSpace, bitmapInfo);
CGContextDrawImage(context, CGRectMake(0.0, 0.0, (CGFloat)width, (CGFloat)height), img);
CGContextRelease(context);
return OK;
}
void Quartz2dImage::freeImageData()
{
if(img)
{
CGImageRelease(img);
img = nullptr;
}
}
CallResult PngFile::getImage(Pixmap &dest)
{
return(png.readImage(dest.data, dest.pitch, dest.format));
}
CallResult PngFile::load(const char *name)
{
deinit();
return png.load(name);
}
CallResult PngFile::loadAsset(const char *name)
{
FsSys::cPath fullPath;
string_printf(fullPath, "%s/%s", Base::appPath, name);
return load(fullPath);
}
void PngFile::deinit()
{
png.freeImageData();
}
|
With $1 million of public money, a private firm is installing 100 surveillance cameras on lampposts around Brooklyn's Orthodox Jewish neighborhoods.
The move, which has prompted privacy concerns from residents, came following the brutal abduction and murder last year of 8-year-old Jewish boy, Leiby Kletzky.
Assemblyman Dov Hikind (D-Brooklyn) and state Sen. Dean Skelos (R-Nassau) announced the Leiby Kletzky Security Initiative — with a $1 million state grant to Agudath Israel, a nonprofit group that hired the private firm SecureWatch24 to operate the network.
The surveillance cameras were necessary, officials said, so residents would feel safe in the belief that such a horrible crime would never happen again.
It is understandable that these Brooklyn communities want additional security — but the arrangement sets a terrible precedent.
... Tragic events are often invitations to bad policy judgments. And that is what’s happening here.
First, there is no conclusive research establishing that cameras deter crime (though they do make it easier to solve some crimes after the fact).
Yet untold numbers of cameras have gone up around the city in the years since the 9/11 attacks in the belief they will do exactly that.
Meantime, the cameras catch New Yorkers, at all hours of the day, every day of the year, living their lives — doing everything from walking the dog to visiting a psychiatrist, going into a gay bar or enjoying a romantic interlude. These are all perfectly legal activities, yet we have no idea what becomes of the captured images. |
Immunochemical characterization with monoclonal antibodies of three major caseins and alpha-lactalbumin from rat milk. Rat milk contains at least three major caseins with apparent molecular weights of 41,000 (alpha-casein), 25,000 (beta-casein), and 22,000 (gamma-casein) (estimated in 10% sodium dodecyl sulfate-polyacrylamide gels). These three caseins and alpha-lactalbumin, a major whey protein, were purified from rat milk. The purified caseins and alpha-lactalbumin were used to immunize BALB/c mice, and spleen cells from these mice were hybridized with cells of the mouse myeloma SP-2/0 cell-line. We have isolated a small library of hybridoma cell-lines secreting monoclonal antibodies specific for each of the major caseins and alpha-lactalbumin from rat milk. Antibodies were tested for immunoreactivity with each of the purified milk proteins and with total rat milk proteins separated by sodium dodecyl sulfate-polyacrylamide gel electrophoresis. Some heterogeneity in apparent molecular weight was observed for purified alpha-casein, gamma-casein, and alpha-lactalbumin. Monoclonal antibodies against alpha-casein, gamma-casein, and alpha-lactalbumin recognized all of the molecular weight forms of the antigen for which they were specific. Each monoclonal antibody was specific for one of the caseins or alpha-lactalbumin and did not react with the other caseins or alpha-lactalbumin, suggesting that there is limited structural homology among these proteins. All of the monoclonal antibodies against the rat caseins reacted with components of mouse milk, and the monoclonal antibodies against rat gamma-casein reacted with a component of human milk of apparent molecular weight 27,000. No interspecies reactivity was observed with the antibodies against rat alpha-lactalbumin. These monoclonal antibodies are being used to develop sensitive assays for each of these major rat milk proteins. |
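// Context note (assumptions, since this excerpt omits its package and imports):
// the class appears to rely on static imports for sqrt (e.g. StrictMath.sqrt),
// sin, cos, tan, asin, the TO_RADIANS/TO_DEGREES conversion constants, PIO2,
// the MIN_LON_INCL/MAX_LON_INCL/MIN_LAT_INCL/MAX_LAT_INCL bounds, and
// normalizeLon/normalizeLat from a companion geo-utilities class.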
/**
* Reusable geo-spatial projection utility methods.
*
* @lucene.experimental
*/
public class GeoProjectionUtils {
// WGS84 earth-ellipsoid parameters
/** major (a) axis in meters */
public static final double SEMIMAJOR_AXIS = 6_378_137; // [m]
/** earth flattening factor (f) */
public static final double FLATTENING = 1.0/298.257223563;
/** minor (b) axis in meters */
public static final double SEMIMINOR_AXIS = SEMIMAJOR_AXIS * (1.0 - FLATTENING); //6_356_752.31420; // [m]
/** first eccentricity (e) */
public static final double ECCENTRICITY = sqrt((2.0 - FLATTENING) * FLATTENING);
/** major axis squared (a2) */
public static final double SEMIMAJOR_AXIS2 = SEMIMAJOR_AXIS * SEMIMAJOR_AXIS;
/** minor axis squared (b2) */
public static final double SEMIMINOR_AXIS2 = SEMIMINOR_AXIS * SEMIMINOR_AXIS;
private static final double E2 = (SEMIMAJOR_AXIS2 - SEMIMINOR_AXIS2)/(SEMIMAJOR_AXIS2);
private static final double EP2 = (SEMIMAJOR_AXIS2 - SEMIMINOR_AXIS2)/(SEMIMINOR_AXIS2);
/** min longitude value in radians */
public static final double MIN_LON_RADIANS = TO_RADIANS * MIN_LON_INCL;
/** min latitude value in radians */
public static final double MIN_LAT_RADIANS = TO_RADIANS * MIN_LAT_INCL;
/** max longitude value in radians */
public static final double MAX_LON_RADIANS = TO_RADIANS * MAX_LON_INCL;
/** max latitude value in radians */
public static final double MAX_LAT_RADIANS = TO_RADIANS * MAX_LAT_INCL;
// No instance:
private GeoProjectionUtils() {
}
/**
* Converts from geocentric earth-centered earth-fixed to geodesic lat/lon/alt
* @param x Cartesian x coordinate
* @param y Cartesian y coordinate
* @param z Cartesian z coordinate
* @param lla 0: longitude 1: latitude: 2: altitude
* @return double array as 0: longitude 1: latitude 2: altitude
*/
public static final double[] ecfToLLA(final double x, final double y, final double z, double[] lla) {
boolean atPole = false;
final double ad_c = 1.0026000D;
final double cos67P5 = 0.38268343236508977D;
if (lla == null) {
lla = new double[3];
}
if (x != 0.0) {
lla[0] = StrictMath.atan2(y,x);
} else {
if (y > 0) {
lla[0] = PIO2;
} else if (y < 0) {
lla[0] = -PIO2;
} else {
atPole = true;
lla[0] = 0.0D;
if (z > 0.0) {
lla[1] = PIO2;
} else if (z < 0.0) {
lla[1] = -PIO2;
} else {
lla[1] = PIO2;
lla[2] = -SEMIMINOR_AXIS;
return lla;
}
}
}
final double w2 = x*x + y*y;
final double w = StrictMath.sqrt(w2);
final double t0 = z * ad_c;
final double s0 = StrictMath.sqrt(t0 * t0 + w2);
final double sinB0 = t0 / s0;
final double cosB0 = w / s0;
final double sin3B0 = sinB0 * sinB0 * sinB0;
final double t1 = z + SEMIMINOR_AXIS * EP2 * sin3B0;
final double sum = w - SEMIMAJOR_AXIS * E2 * cosB0 * cosB0 * cosB0;
final double s1 = StrictMath.sqrt(t1 * t1 + sum * sum);
final double sinP1 = t1 / s1;
final double cosP1 = sum / s1;
final double rn = SEMIMAJOR_AXIS / StrictMath.sqrt(1.0D - E2 * sinP1 * sinP1);
if (cosP1 >= cos67P5) {
lla[2] = w / cosP1 - rn;
} else if (cosP1 <= -cos67P5) {
lla[2] = w / -cosP1 - rn;
} else {
lla[2] = z / sinP1 + rn * (E2 - 1.0);
}
if (!atPole) {
lla[1] = StrictMath.atan(sinP1/cosP1);
}
lla[0] = TO_DEGREES * lla[0];
lla[1] = TO_DEGREES * lla[1];
return lla;
}
/**
* Converts from geodesic lon lat alt to geocentric earth-centered earth-fixed
* @param lon geodesic longitude
* @param lat geodesic latitude
* @param alt geodesic altitude
* @param ecf reusable earth-centered earth-fixed result
* @return either a new ecef array or the reusable ecf parameter
*/
public static final double[] llaToECF(double lon, double lat, double alt, double[] ecf) {
lon = TO_RADIANS * lon;
lat = TO_RADIANS * lat;
final double sl = sin(lat);
final double s2 = sl*sl;
final double cl = cos(lat);
if (ecf == null) {
ecf = new double[3];
}
if (lat < -PIO2 && lat > -1.001D * PIO2) {
lat = -PIO2;
} else if (lat > PIO2 && lat < 1.001D * PIO2) {
lat = PIO2;
}
    assert (lat >= -PIO2) && (lat <= PIO2);
if (lon > StrictMath.PI) {
lon -= (2*StrictMath.PI);
}
final double rn = SEMIMAJOR_AXIS / StrictMath.sqrt(1.0D - E2 * s2);
ecf[0] = (rn+alt) * cl * cos(lon);
ecf[1] = (rn+alt) * cl * sin(lon);
ecf[2] = ((rn*(1.0-E2))+alt)*sl;
return ecf;
}
/**
* Converts from lat lon alt (in degrees) to East North Up right-hand coordinate system
* @param lon longitude in degrees
* @param lat latitude in degrees
* @param alt altitude in meters
* @param centerLon reference point longitude in degrees
* @param centerLat reference point latitude in degrees
* @param centerAlt reference point altitude in meters
* @param enu result east, north, up coordinate
* @return east, north, up coordinate
*/
public static double[] llaToENU(final double lon, final double lat, final double alt, double centerLon,
double centerLat, final double centerAlt, double[] enu) {
if (enu == null) {
enu = new double[3];
}
// convert point to ecf coordinates
final double[] ecf = llaToECF(lon, lat, alt, null);
// convert from ecf to enu
return ecfToENU(ecf[0], ecf[1], ecf[2], centerLon, centerLat, centerAlt, enu);
}
/**
* Converts from East North Up right-hand rule to lat lon alt in degrees
* @param x easting (in meters)
* @param y northing (in meters)
* @param z up (in meters)
* @param centerLon reference point longitude (in degrees)
* @param centerLat reference point latitude (in degrees)
* @param centerAlt reference point altitude (in meters)
* @param lla resulting lat, lon, alt point (in degrees)
* @return lat, lon, alt point (in degrees)
*/
public static double[] enuToLLA(final double x, final double y, final double z, final double centerLon,
final double centerLat, final double centerAlt, double[] lla) {
// convert enuToECF
if (lla == null) {
lla = new double[3];
}
// convert enuToECF, storing intermediate result in lla
lla = enuToECF(x, y, z, centerLon, centerLat, centerAlt, lla);
// convert ecf to LLA
return ecfToLLA(lla[0], lla[1], lla[2], lla);
}
/**
* Convert from Earth-Centered-Fixed to Easting, Northing, Up Right Hand System
* @param x ECF X coordinate (in meters)
* @param y ECF Y coordinate (in meters)
* @param z ECF Z coordinate (in meters)
* @param centerLon ENU origin longitude (in degrees)
* @param centerLat ENU origin latitude (in degrees)
* @param centerAlt ENU altitude (in meters)
* @param enu reusable enu result
* @return Easting, Northing, Up coordinate
*/
public static double[] ecfToENU(double x, double y, double z, final double centerLon,
final double centerLat, final double centerAlt, double[] enu) {
if (enu == null) {
enu = new double[3];
}
// create rotation matrix and rotate to enu orientation
final double[][] phi = createPhiTransform(centerLon, centerLat, null);
// convert origin to ENU
final double[] originECF = llaToECF(centerLon, centerLat, centerAlt, null);
final double[] originENU = new double[3];
originENU[0] = ((phi[0][0] * originECF[0]) + (phi[0][1] * originECF[1]) + (phi[0][2] * originECF[2]));
originENU[1] = ((phi[1][0] * originECF[0]) + (phi[1][1] * originECF[1]) + (phi[1][2] * originECF[2]));
originENU[2] = ((phi[2][0] * originECF[0]) + (phi[2][1] * originECF[1]) + (phi[2][2] * originECF[2]));
// rotate then translate
enu[0] = ((phi[0][0] * x) + (phi[0][1] * y) + (phi[0][2] * z)) - originENU[0];
enu[1] = ((phi[1][0] * x) + (phi[1][1] * y) + (phi[1][2] * z)) - originENU[1];
enu[2] = ((phi[2][0] * x) + (phi[2][1] * y) + (phi[2][2] * z)) - originENU[2];
return enu;
}
/**
* Convert from Easting, Northing, Up Right-Handed system to Earth Centered Fixed system
* @param x ENU x coordinate (in meters)
* @param y ENU y coordinate (in meters)
* @param z ENU z coordinate (in meters)
* @param centerLon ENU origin longitude (in degrees)
* @param centerLat ENU origin latitude (in degrees)
* @param centerAlt ENU origin altitude (in meters)
* @param ecf reusable ecf result
* @return ecf result coordinate
*/
public static double[] enuToECF(final double x, final double y, final double z, double centerLon,
double centerLat, final double centerAlt, double[] ecf) {
if (ecf == null) {
ecf = new double[3];
}
double[][] phi = createTransposedPhiTransform(centerLon, centerLat, null);
double[] ecfOrigin = llaToECF(centerLon, centerLat, centerAlt, null);
// rotate and translate
ecf[0] = (phi[0][0]*x + phi[0][1]*y + phi[0][2]*z) + ecfOrigin[0];
ecf[1] = (phi[1][0]*x + phi[1][1]*y + phi[1][2]*z) + ecfOrigin[1];
ecf[2] = (phi[2][0]*x + phi[2][1]*y + phi[2][2]*z) + ecfOrigin[2];
return ecf;
}
/**
* Create the rotation matrix for converting Earth Centered Fixed to Easting Northing Up
* @param originLon ENU origin longitude (in degrees)
* @param originLat ENU origin latitude (in degrees)
* @param phiMatrix reusable phi matrix result
* @return phi rotation matrix
*/
private static double[][] createPhiTransform(double originLon, double originLat, double[][] phiMatrix) {
if (phiMatrix == null) {
phiMatrix = new double[3][3];
}
originLon = TO_RADIANS * originLon;
originLat = TO_RADIANS * originLat;
final double sLon = sin(originLon);
final double cLon = cos(originLon);
final double sLat = sin(originLat);
final double cLat = cos(originLat);
phiMatrix[0][0] = -sLon;
phiMatrix[0][1] = cLon;
phiMatrix[0][2] = 0.0D;
phiMatrix[1][0] = -sLat * cLon;
phiMatrix[1][1] = -sLat * sLon;
phiMatrix[1][2] = cLat;
phiMatrix[2][0] = cLat * cLon;
phiMatrix[2][1] = cLat * sLon;
phiMatrix[2][2] = sLat;
return phiMatrix;
}
/**
* Create the transposed rotation matrix for converting Easting Northing Up coordinates to Earth Centered Fixed
* @param originLon ENU origin longitude (in degrees)
* @param originLat ENU origin latitude (in degrees)
* @param phiMatrix reusable phi rotation matrix result
* @return transposed phi rotation matrix
*/
private static double[][] createTransposedPhiTransform(double originLon, double originLat, double[][] phiMatrix) {
if (phiMatrix == null) {
phiMatrix = new double[3][3];
}
originLon = TO_RADIANS * originLon;
originLat = TO_RADIANS * originLat;
final double sLat = sin(originLat);
final double cLat = cos(originLat);
final double sLon = sin(originLon);
final double cLon = cos(originLon);
phiMatrix[0][0] = -sLon;
phiMatrix[1][0] = cLon;
phiMatrix[2][0] = 0.0D;
phiMatrix[0][1] = -sLat * cLon;
phiMatrix[1][1] = -sLat * sLon;
phiMatrix[2][1] = cLat;
phiMatrix[0][2] = cLat * cLon;
phiMatrix[1][2] = cLat * sLon;
phiMatrix[2][2] = sLat;
return phiMatrix;
}
/**
* Finds a point along a bearing from a given lon,lat geolocation using vincenty's distance formula
*
* @param lon origin longitude in degrees
* @param lat origin latitude in degrees
* @param bearing azimuthal bearing in degrees
* @param dist distance in meters
* @param pt resulting point
* @return the point along a bearing at a given distance in meters
*/
public static final double[] pointFromLonLatBearingVincenty(double lon, double lat, double bearing, double dist, double[] pt) {
if (pt == null) {
pt = new double[2];
}
final double alpha1 = TO_RADIANS * bearing;
final double cosA1 = cos(alpha1);
final double sinA1 = sin(alpha1);
final double tanU1 = (1-FLATTENING) * tan(TO_RADIANS * lat);
final double cosU1 = 1 / StrictMath.sqrt((1+tanU1*tanU1));
final double sinU1 = tanU1*cosU1;
final double sig1 = StrictMath.atan2(tanU1, cosA1);
final double sinAlpha = cosU1 * sinA1;
final double cosSqAlpha = 1 - sinAlpha*sinAlpha;
final double uSq = cosSqAlpha * EP2;
final double A = 1 + uSq/16384D*(4096D + uSq * (-768D + uSq * (320D - 175D*uSq)));
final double B = uSq/1024D * (256D + uSq * (-128D + uSq * (74D - 47D * uSq)));
double sigma = dist / (SEMIMINOR_AXIS*A);
double sigmaP;
double sinSigma, cosSigma, cos2SigmaM, deltaSigma;
do {
cos2SigmaM = cos(2*sig1 + sigma);
sinSigma = sin(sigma);
cosSigma = cos(sigma);
deltaSigma = B * sinSigma * (cos2SigmaM + (B/4D) * (cosSigma*(-1+2*cos2SigmaM*cos2SigmaM)-
(B/6) * cos2SigmaM*(-3+4*sinSigma*sinSigma)*(-3+4*cos2SigmaM*cos2SigmaM)));
sigmaP = sigma;
sigma = dist / (SEMIMINOR_AXIS*A) + deltaSigma;
} while (StrictMath.abs(sigma-sigmaP) > 1E-12);
final double tmp = sinU1*sinSigma - cosU1*cosSigma*cosA1;
final double lat2 = StrictMath.atan2(sinU1*cosSigma + cosU1*sinSigma*cosA1,
(1-FLATTENING) * StrictMath.sqrt(sinAlpha*sinAlpha + tmp*tmp));
final double lambda = StrictMath.atan2(sinSigma*sinA1, cosU1*cosSigma - sinU1*sinSigma*cosA1);
final double c = FLATTENING/16 * cosSqAlpha * (4 + FLATTENING * (4 - 3 * cosSqAlpha));
final double lam = lambda - (1-c) * FLATTENING * sinAlpha *
(sigma + c * sinSigma * (cos2SigmaM + c * cosSigma * (-1 + 2* cos2SigmaM*cos2SigmaM)));
pt[0] = normalizeLon(lon + TO_DEGREES * lam);
pt[1] = normalizeLat(TO_DEGREES * lat2);
return pt;
}
/**
* Finds a point along a bearing from a given lon,lat geolocation using great circle arc
*
* @param lon origin longitude in degrees
* @param lat origin latitude in degrees
* @param bearing azimuthal bearing in degrees
* @param dist distance in meters
* @param pt resulting point
* @return the point along a bearing at a given distance in meters
*/
public static final double[] pointFromLonLatBearingGreatCircle(double lon, double lat, double bearing, double dist, double[] pt) {
if (pt == null) {
pt = new double[2];
}
lon *= TO_RADIANS;
lat *= TO_RADIANS;
bearing *= TO_RADIANS;
final double cLat = cos(lat);
final double sLat = sin(lat);
final double sinDoR = sin(dist / GeoProjectionUtils.SEMIMAJOR_AXIS);
final double cosDoR = cos(dist / GeoProjectionUtils.SEMIMAJOR_AXIS);
pt[1] = asin(sLat*cosDoR + cLat * sinDoR * cos(bearing));
pt[0] = TO_DEGREES * (lon + Math.atan2(sin(bearing) * sinDoR * cLat, cosDoR - sLat * sin(pt[1])));
pt[1] *= TO_DEGREES;
return pt;
}
/**
* Finds the bearing (in degrees) between 2 geo points (lon, lat) using great circle arc
* @param lon1 first point longitude in degrees
* @param lat1 first point latitude in degrees
* @param lon2 second point longitude in degrees
* @param lat2 second point latitude in degrees
* @return the bearing (in degrees) between the two provided points
*/
public static double bearingGreatCircle(double lon1, double lat1, double lon2, double lat2) {
double dLon = (lon2 - lon1) * TO_RADIANS;
lat2 *= TO_RADIANS;
lat1 *= TO_RADIANS;
double y = sin(dLon) * cos(lat2);
double x = cos(lat1) * sin(lat2) - sin(lat1) * cos(lat2) * cos(dLon);
return Math.atan2(y, x) * TO_DEGREES;
}
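  // Usage sketch (illustrative only): round-trip a geodetic point through
  // earth-centered earth-fixed coordinates using the methods above.
  //
  //   double[] ecf = GeoProjectionUtils.llaToECF(-122.0, 37.0, 0.0, null);
  //   double[] lla = GeoProjectionUtils.ecfToLLA(ecf[0], ecf[1], ecf[2], null);
  //   // lla ~ {-122.0, 37.0, 0.0}  (lon degrees, lat degrees, alt meters)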
} |
Resolution of coherent radar targets in the near field of an array antenna In this paper we present the results of the research on radar target elevation estimation in the near field of the vertical line array antenna. In particular, we examine the case of targets at low elevations where coherent reflections from ground prevent accurate measurements of target angular positions. The estimation method used is a modified version of the MUSIC algorithm, adapted for the application in the near field antenna environment. A spatial smoothing preprocessing scheme has been applied in order to resolve close coherent signals. The algorithm has been successfully applied to both simulated and recorded radar data. Controlled Ku-band radar experiments were conducted at the Valley Forge Research Center of the University of Pennsylvania. Estimation of actual target elevations to within 1 standard beamwidth has been achieved for SNRs of about 0 dB. |
// src/main/java/com/leetcode/strings/easy/ReverseWordsInAStringIII_557.java
package com.leetcode.strings.easy;
// Given a string, you need to reverse the order of characters in each word
// within a sentence while still preserving whitespace and initial word order.
//
// Example 1:
//
// Input: "Let's take LeetCode contest"
// Output: "s'teL ekat edoCteeL tsetnoc"
//
//
//
// Note:
// In the string, each word is separated by single space and there will not be
// any extra space in the string.
// Related Topics String
// leetcode submit region begin(Prohibit modification and deletion)
public class ReverseWordsInAStringIII_557 {
public String reverseWords(String s) {
char[] word = s.toCharArray();
int slow = 0;
for (int fast = 0; fast < word.length; fast++) {
if (word[fast] == ' ') {
reverseWord(word, slow, fast - 1);
slow = fast + 1;
}
}
reverseWord(word, slow, word.length - 1);
return new String(word);
}
private void reverseWord(char[] str, int first, int last) {
while (first < last) {
char temp = str[last];
str[last] = str[first];
str[first] = temp;
first++;
last--;
}
}
}
/*
O(n) Runtime: 6 ms, faster than 58.86% of Java online submissions for Reverse Words in a String III.
O(1) Memory Usage: 45.9 MB, less than 21.32% of Java online submissions for Reverse Words in a String III.
*/
// leetcode submit region end(Prohibit modification and deletion)
|
It might be helpful to look at the question of working-class and mass organizing through the lens of the centennial of the Bolshevik Revolution and the anniversary of the assassination of Che Guevara.
In the case of the Bolshevik Revolution, there is hardly a way of discussing it without referring to Lenin’s leadership.
I think a large number of the comrades and friends are familiar with Lenin’s theoretical contributions on imperialism and the national question even if they haven’t had a chance to study them in detail.
What has to be understood is that the theoretical works produced by Lenin were written in the heat of battle, even in exile. There was nothing professorial about them. Lenin wrote as a means to intervene in the struggle, regardless of its stage or development. He was without a doubt a giant theoretician.
On the other hand, there is less discussion of Lenin’s role as the organizer of the Bolshevik party. Everyone knows he was its proponent and architect — but he was also its organizer — rolling up his sleeves, working in the trenches, encouraging and building cadre and, when needed, even arguing with his own party.
Lenin paid immense attention to, and developed an unparalleled understanding of, the working class and peasants of Czarist Russia.
This could not have happened if the Bolsheviks were not deeply engaged in the struggles of the masses and had not recruited workers into the ranks of their party.
Krupskaya, his companion, who was a revolutionary and leader in her own right, wrote about how feverishly Lenin, even in exile, followed every detail of what was happening in the working-class struggle. It literally made him sick if he was unable to get the needed news and feedback from those who were on the front lines inside of Russia.
All of this preparation preceded and prepared the ground for the great October Revolution. Without this work, without the building of the Party, a great revolutionary development might have been missed or delayed.
What were the rallying call and demands of the Bolshevik Revolution? Peace, land and bread, and all power to the Soviets.
The Russian bourgeoisie of 1905 had neither the will nor the ability as defenders of private property to fulfill these rather straightforward demands.
Poor people and the working class were hungry and starving, the peasants despised the landlords, and the soldiers who came from both the working class and the peasants, who were used as cannon fodder in the imperialist war, were weary, exhausted and ready to turn their guns around.
The workers in the cities wanted power!
Revolutions, uprisings and insurrections are perhaps the most democratic expression of the will of the mass of people. As Marxists, we recognize this.
Perhaps most are familiar with Che Guevara’s poignant story of how he and his troops were forced to kill their beloved puppy to keep the enemy from detecting them.
But there is another story that we should familiarize ourselves with. This is how Comandante Che insisted not only on teaching political theory to the peasants and workers in the mountains of Cuba, but also on requiring literacy as a prerequisite for participating in the guerrilla army. He had confidence in the poorest of the poor in Cuba.
This story shines light on the Che quote referenced many times, his statement that revolutionaries are guided by great feelings of love.
I’m positive that the love Che refers to is not the ridiculous Hallmark-card version of love, but rather the deeper sentiment of concern, service and care for the people which involves action and sacrifice — the kind of action that Che took in teaching peasants to read and write.
They came with the prejudices that are the stamp of the old society.
Love is not always about not being angry or not struggling; it is about persevering, hanging in there and not giving up! In this case, love means not giving up on the broad working class, regardless of all of its characteristics. More importantly, love means not giving up on each other.
Our party has an amazing history guided by our founding leaders: Sam Marcy, Vince Copeland, Dorothy Ballan and others. Sam continuously insisted that the party turn its face to the workers and that it had to particularly recruit and defend the oppressed communities.
The All Peoples Congress, which was convened in Detroit on October 18, 1981, in response to the crisis of Reaganism, was the conception of Marcy and was characteristic of many of Sam’s tactics and strategies.
The APC was different from other mass organizations, which may in a sense remain static, whether doing good work or not. The All Peoples Congress was different in that it was developed to enable the working class and the poor to intervene in the particular crisis that existed during that period.
The APC was, in essence, conceived to become an instrument of power for the workers who were beginning to suffer an avalanche of attacks following the Reagan election. The 2,000 plus delegates who convened this congress called for “Days of Resistance” and called for assemblies everywhere. In a very modest way it borrowed its conception from the early Soviets.
I want to end with two urgent appeals: One is immediate, a continuation of the political discussion at our recent plenum on the crisis of the Trump regime and the Democratic Party and the dangers for the working class.
It’s a given that if an imperialist war breaks out against the Democratic People’s Republic of Korea or anywhere else in the world — that all of our attention would have to be immediately refocused.
But here’s the danger: If the bourgeois state takes Trump out and the mass of people both young and old are left out of the equation — it will benefit the far right and the fascists.
We cannot allow the fight against Trump to be co-opted by the Democratic Party. Instead of the sickening, mind-numbing attacks on Russia which have done nothing but sideline the masses, the fight instead must be against white supremacy; against the wave of anti-immigrant attacks; about health care; about women, LGBTQ and trans rights; and about the general misery and state of affairs for the working class. The fight should be to turn the lights back on in Puerto Rico!
The second appeal: There is a capitalist economic crisis that is brewing that will most undoubtedly be deeper than the crisis in 2007. We need to prepare ourselves!
The worst thing I believe we could do is look at the current state of the broad working-class movement and not step back to see how temporary the present situation is. We must understand how quickly and deeply things could turn in a direction that is favorable for revolutionary socialists.
We need to redouble our efforts to recruit workers and the oppressed, and at the same time participate in each and every struggle that we are capable of so that we can learn, prepare and look for every avenue possible to intervene — whether we succeed or make mistakes — to direct the working class against capitalism and towards the fight for revolutionary socialism. We must take a page from the Bolsheviks on this!
In turning ourselves toward these tasks, toward building our Party and toward the working class, which is now global in character, we have to exhibit the same ethical practice and optimistic confidence that both Comandante Che and Fidel had for the people.
Build Workers World Party! Fight for Socialism!
Workers and oppressed of the world unite! |
Molecular phenotypes associated with antipsychotic drugs in the human caudate nucleus

Antipsychotic drugs are the current first-line treatment for schizophrenia and other psychotic conditions. However, their molecular effects on the human brain are poorly studied, due to difficulty of tissue access and confounders associated with disease status. Here we examine differences in gene expression and DNA methylation associated with positive antipsychotic drug toxicology status in the human caudate nucleus. We find no genome-wide significant differences in DNA methylation, but abundant differences in gene expression. These gene expression differences are overall quite similar to gene expression differences between schizophrenia cases and controls. Interestingly, gene expression differences based on antipsychotic toxicology differ between brain regions, potentially due to differences in the cell types affected. We finally assess similarities with effects in a mouse model, which finds some overlapping effects but many differences as well. As a first look at the molecular effects of antipsychotics in the human brain, the lack of epigenetic effects is unexpected, possibly because long-term treatment effects may be relatively stable for extended periods.

Introduction

Schizophrenia is a serious mental illness which is characterized by psychosis as well as other symptoms that disrupt cognitive and social functioning. Antipsychotic drugs are a common first-line treatment for schizophrenia and many other psychotic conditions. Their mechanism of action has been linked primarily with antagonism of dopamine type II receptors 1, but other neurotransmitter receptors are involved in the actions of several of these agents. It is noteworthy, however, that these drugs are imperfect and also known to cause a wide array of neurologic and metabolic side effects, which have driven a mission to create more effective drugs with fewer off-target side effects 2.

A challenge in the pursuit of better antipsychotic treatment has been an overall poor understanding of the molecular underpinnings of the disease. While examining the effects of antipsychotics may not elucidate some of the causative mechanisms of the illness, it might identify mechanisms that are critical for effective treatment, and can further help partition and interpret case-control associations in the context of potentially causal versus consequential effects.

Two important molecular substrates for potentially capturing cellular effects of various pharmacological interventions are gene expression and DNA methylation (DNAm). DNAm is an epigenetic regulator of gene expression. It occurs most commonly at CpG dinucleotides, but in neurons also uniquely occurs at CpH sites (H = A, T, or C). It is thought to be a reflection of the interaction between genes and environment, as various environmental factors including diet 3 and cigarette smoking 4 have been associated with altered methylation patterns at specific sites in the genome. Thus, DNA methylation analysis has the potential to impart the effects of drugs such as antipsychotics on the epigenome.

Previous studies have aimed to uncover the molecular footprint of antipsychotic effects in a variety of ways. In humans, most studies examining molecular effects of antipsychotics have been performed in peripheral tissues such as blood and non-CNS tissues like adipose 8.
These studies have provided some insight into how antipsychotics may alter DNAm and gene expression levels, but it is unclear to what extent these effects are present in brain. Additionally, these DNAm studies used microarray technology, which only captures a small fraction of CpG dinucleotides in the genome and does not target CpH sites. Studies that have been performed in human postmortem brain tissue of individuals with schizophrenia, most of whom had received antipsychotics, have identified many molecular associations with genetic risk 9 10, but have identified relatively few direct case-control differences in gene expression. Minimal differences in DNAm levels have been reported between cases and controls 11. Further, none of the prior studies of gene expression or DNAm in postmortem human brain tissue from donors with schizophrenia have specifically investigated the effects of antipsychotic use. The previous literature contains many studies that examined antipsychotic-induced differences in brains of model organisms such as mice 12, rats 13 and rhesus monkeys 14, but how well these findings translate to the human brain and clinical treatment is unclear.

The caudate nucleus is heavily involved in dopaminergic signaling, with roles in motor, learning, and reward processes. PET studies of dopamine (DA) activity and DRD2 availability in patients with schizophrenia have highlighted the caudate nucleus as the site of primary DA relevance to illness and treatment 15. While many postmortem human brain studies of gene expression in schizophrenia have examined the prefrontal cortex, a region prominently linked with the so-called negative and cognitive features of schizophrenia, its role in the treatment effects of DRD2 antagonists is uncertain, and DA receptors in prefrontal cortex are at least one order of magnitude less abundant than in caudate 16. A recent study of gene expression and schizophrenia genetic risk in caudate has identified far more differential expression by disease status than other brain regions (like hippocampus and frontal cortex), highlighting the need for further investigation of this region 17. Here we examine associations of gene expression and DNA methylation levels with antipsychotic treatment in the human postmortem caudate nucleus to better characterize their molecular landscapes.

Gene expression associations to antipsychotics in the human caudate nucleus

In order to assess the molecular consequences of antipsychotic use in the human brain, we first analyzed RNA-seq data from the caudate nucleus of 380 postmortem brains, including samples from 147 patients diagnosed with schizophrenia and 233 adult neurotypical controls (see Methods) 17. We used postmortem toxicology assays measured at time of death to classify the 147 patients into 100 who were antipsychotic positive at time of death (noted as SCZDAP), and 47 who were antipsychotic negative (noted as SCZD). Antipsychotic toxicology testing was chosen for analysis (as opposed to reported use via next-of-kin or medical records) to ensure that donors were indeed compliant with their antipsychotic prescriptions at time of death. However, interviews with family members and review of past medical records indicated that almost all patients had used antipsychotics at some point in life. Demographics were largely consistent among these three groups (Table 1).
We performed a series of regression analyses to refine the relationship between antipsychotic status, schizophrenia diagnosis and gene expression, including defining schizophrenia effects ignoring APs (147 schizophrenia cases versus 233 controls), AP effects contrasting patient groups (100 SCZDAP versus 47 SCZD) and AP effects independent of diagnosis (100 SCZDAP versus 280 SCZD + Control). All three analyses were performed on the same set of 380 samples. For a frame of reference, we first performed linear modeling to identify genes which were differentially expressed based on schizophrenia diagnosis. We identified 3131 significantly differential genes at FDR < 0.05 (Table S1). Our findings are in line with a previous similar analysis by Benjamin et al. 17. Next, we performed linear modeling to determine which genes were differentially expressed based on antipsychotic status while adjusting for clinical and technical confounders (see Methods). When assessing the differences between the patient groups, SCZDAP and SCZD, we identified 70 genes that were significantly differentially expressed at FDR < 0.05 (Table S2, Figure 1A). Effect sizes were generally small, with a mean fold change of 1.25 (log2 fold change = 0.32). Because this sample size is relatively small and most SCZ patients are on antipsychotics, we expanded our analysis to include neurotypical controls. In this analysis, we compared SCZDAP patients to all samples that were not on antipsychotics (SCZD and controls). We identified 2347 genes differentially expressed between these groups (Table S3, Figure 1B). While the number of significant genes is very different from the analysis within cases (due in part to sample size), 94% of these genes are directionally consistent with the previous case-only analysis (Table S4). We also compared these results to differences between controls and patients on antipsychotics, excluding patients not on antipsychotics. The results are again highly similar (Table S4), but the latter analysis identifies more significant genes despite a smaller sample size, likely because patient samples share some illness-associated DEGs, indicating that our design helps to control for diagnosis- and epiphenomena-driven differences. For the remainder of the results, the SCZDAP vs. SCZD+Control gene set will be examined for antipsychotic differential expression. This gene set was enriched for many gene ontology (GO) terms, with enrichment for terms related to synaptic signaling, development and neurogenesis, processes which have been previously implicated in the pathology of schizophrenia (Figure 1D, Table S5). Generally, the effects on differential expression by antipsychotics and disease status are very similar and highly correlated (Figure 1C), indicating that antipsychotic use may be driving broader differences that have been identified in prior studies between cases and controls. However, 26% of these genes were not differential between cases and controls, providing evidence that some of these differences may be specifically driven by antipsychotic use, at least around the time of death. As with previous findings in case-control differences, effect sizes were subtle, with a mean fold change of 1.12 (log2 fold change = 0.16). 57 genes had a greater difference (log2FC > 0.5, fold change > 1.41).
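For reference, the fold changes quoted above are simply 2 raised to the log2 fold change; a few lines of Python verify the conversions used here:

for log2_fc in (0.32, 0.16, 0.5):
    print(f"log2FC = {log2_fc:.2f} -> fold change = {2 ** log2_fc:.2f}")
# log2FC = 0.32 -> fold change = 1.25
# log2FC = 0.16 -> fold change = 1.12
# log2FC = 0.50 -> fold change = 1.41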
Another factor in assessing effects of antipsychotics is consideration of the differences between generations of the drugs: older "typical" antipsychotics (such as haloperidol and chlorpromazine) and newer "atypical" antipsychotics (including clozapine, olanzapine, and risperidone). Typicals are generally more selective as specific DRD2 antagonists, while atypicals target other receptor systems, particularly 5HT2. We performed several analyses to tease apart the effect differences of typicals and atypicals, but generally we found little to no significant difference between the groups. These analyses were complicated by the fact that some patients were on both generations of antipsychotics at once and that the subgroupings became increasingly underpowered.

Lack of DNAm associations to antipsychotic use

DNA methylation (DNAm) is thought to be a molecular representation of environmental exposures, as well as an effector of gene expression, so we investigated antipsychotic effects on DNAm by examining whole genome bisulfite sequencing (WGBS) data from 296 postmortem caudate nuclei (121 cases, 175 controls). In this sample, 86 schizophrenia patients were antipsychotic positive at time of death as determined by toxicology assays, leaving 35 patients who were not. The vast majority of these samples (291, 98.3%) were also included in the previous RNA-seq analyses, and both DNA and RNA were concurrently extracted from the same tissue aliquot. With WGBS, we modeled differences across 27,812,354 CpG sites and 50,336,332 CpH sites (see Methods). In these analyses, comparing differences within schizophrenia patients as well as including controls, we found no significant differences after adjusting for multiple testing, and overall, p-values were depleted for low values. Further, there was no significant difference in DNAm between the different generations of antipsychotics, and there were no significant differences in DNAm by diagnosis. This indicates that the observed differences in gene expression were not associated with differential DNAm at the time of death, and challenges the idea that DNAm can represent acute differences in environmental exposures.

Antipsychotic-related changes are not consistent between brain regions

To understand how applicable our findings are to other regions of the brain, we performed the same analyses in the dorsolateral prefrontal cortex (DLPFC). As in the caudate, there were no significant differences in DNAm between those on antipsychotics at time of death and those who were not (N = 165). When examining gene expression via RNA-seq, we found that overall there were fewer differences in the DLPFC than in caudate. The DLPFC sample included 117 cases and 197 controls, of which 82 were antipsychotic positive at time of death and 35 were antipsychotic negative. 235 of these samples came from donors who were also included in the caudate analyses. There were no significant differences when examining only SCZ cases grouped by toxicology status. When expanding the analysis to include controls as previously described, we identified 622 differentially expressed (FDR < 0.05) genes, far fewer than the 2347 identified in the caudate (Table S6). The sample sizes between these two brain regions were quite similar, so it is unlikely that this discrepancy can be attributed to differences in power. Further, the genes which are differential in each region do not strongly overlap between regions.
Only 93 (15%) of the genes differentially expressed based on toxicology status in DLPFC were also differentially expressed in caudate (Figure 2A). When examining genes which were differentially expressed in caudate, the majority (79%) do not replicate in DLPFC at p < 0.05 (Figure 2B). One potential reason for these differences is a difference in the affected cell types. Genes differentially expressed by antipsychotic use in caudate are most enriched for specificity to D1 dopaminoceptive neurons (OR = 3.50, P = 1.74e-82), D2 dopaminoceptive neurons (OR = 3.39, P = 1.59e-77), and oligodendrocytes (OR = 3.24, P = 7.84e-71). In DLPFC, they are most enriched for specificity to macrophages (OR = 3.69, P = 8.70e-30), microglia (OR = 7.22, P = 6.99e-84), and T-cells (OR = 2.43, P = 2.94e-12), and depleted for specificity to oligodendrocytes (OR = 0.35, P = 2.60e-6) 18 (see Methods, Table S7, Table S8). These results likely reflect the unique representation of dopaminoceptive medium spiny neurons and cholinergic neurons in the caudate, which are not found in DLPFC. The relative enrichment of immune-related cells in DLPFC raises questions about whether prior findings of similar cell enrichment in differential gene expression in DLPFC in schizophrenia 19 are antipsychotic treatment effects. Altogether, these results indicate that the caudate nucleus is strongly affected by antipsychotic drugs at a molecular level, and that these drugs have different effects in different areas of the brain.

Translational considerations for mouse and human brain studies

A major difficulty in disambiguating antipsychotic and schizophrenia effects on the human brain is the fact that virtually all schizophrenia patients are treated with antipsychotics at some time in their history, making molecular differences hard to tease apart. The toxicology screens are sensitive and accurate for documenting drug in brain at the time of death, but they do not provide a historical reference for prior treatment. For this reason, animal models are a potentially valuable tool in understanding the molecular effects of antipsychotics in the brain. We aimed to assess the translatability of such work by comparing our findings to results from Kim and colleagues 12, who investigated gene expression changes in mice treated with haloperidol, a typical antipsychotic. We found that genes that were significantly differentially expressed by antipsychotic use in human caudate were enriched for being differentially expressed in mouse (OR = 2.13, P = 0.01). The 17 genes that were significantly differentially expressed in both mouse and human were LAMB3, EPHA4, ANXA3, FAT2, DOCK4, PENK, HECTD2, GDPD5, ANO2, HTR2A, REM2, HBA2, GAN, CHD3, CBLN4, CSTB, and PFKL. Further, if we only looked at mouse hits from the striatum (a region that includes the caudate), we found slightly stronger enrichment (OR = 2.18, p = 0.01, see Methods). We then assessed the replication of differentially expressed genes in mouse within our caudate dataset. We found that 34% of significant (q < 0.05) hits in mouse replicated in human caudate at p < 0.05. However, only 52% of the replicated genes are directionally consistent, and overall, effect sizes are largely uncorrelated between mouse and human. Some of these differences may be attributed to the different types of antipsychotics used: the mice were treated with haloperidol, a typical antipsychotic, while only 44% of the human samples were positive for typical antipsychotics.
Thus, better understanding of molecular effects of typical and atypical antipsychotics is needed to interpret these differences. These results indicate that there are some valuable similarities between antipsychotic effects in mice and human brains, but that there are differences which are important to understand as well.

Discussion

Here we have examined the molecular effects of recent antipsychotic use in the human caudate nucleus. We found many changes of small effect in gene expression, which overlap highly with case-control differences independent of toxicology, and a surprising lack of differences in DNAm levels. We also see that these effects are variable between brain regions and when contrasted with mice. The gene ontology insights in caudate point to synaptic signaling and also neurogenesis and neurodevelopment. These biological enrichments are derived from DEGs in adult brain, which, at least in terms of neurogenesis and neurodevelopment, may seem counterintuitive, but at the molecular level the DEGs likely involve diverse functional pathways that are not fully captured by these in silico analyses. It is interesting to note that genes differentially expressed in caudate based on antipsychotic presence at death highlighted synaptic signaling, while in DLPFC they emphasized microglia and other immune cells. While these cell phenotypes in DLPFC have not been implicated in bioinformatic translation of GWAS risk genes, they have been found in differentially expressed gene sets comparing patients with schizophrenia to neurotypicals 19, suggesting that these prior case-control findings represent, at least in large part, antipsychotic treatment epiphenomena. Overall, our data provide an early view of antipsychotic effects in human brain, but the tip of the iceberg that we see is not likely to be the whole story.

There are many challenges to studying antipsychotic use in human brain. At a phenotypic level, we examined antipsychotic status at time of death, but nearly all schizophrenia patients have used antipsychotics at some point, and often for prolonged periods of time, throughout their life. Thus, we are only capturing evidence of acute antipsychotic treatment at a relatively restricted period of time. Toxicology captures this "moment in time", but does not necessarily represent antipsychotic use further than a week or two prior to death. Longer term effects would have been masked here, and this may be the reason we see mostly subtle gene expression differences and virtually no DNA methylation differences. DNAm may more accurately represent long term, cumulative effects, rather than acute environmental changes. This potentially challenges notions about the plasticity of DNAm in response to acute exogenous factors. This may also be the reason why DRD2, a gene which has been shown to have increased expression in response to antipsychotics in animal studies, reaches only near significance (P = 0.004, FDR = 0.0509) for increased expression with antipsychotics. We acknowledge that there are many differences between schizophrenia cases and controls besides just diagnosis and antipsychotic use, and some gene expression differences between cases and controls may be attributed to these. These factors include, among others, differences in smoking habits and BMI.
We note, however, that despite there being differences in BMI between cases and controls (Table 1), there is no significant difference between the BMIs of the SCZDAP group and the SCZD+Control group that serves as the main comparison for this investigation. While there are also differences in smoking rates between groups (Table 1), we see that among controls, smoking does not associate with differential gene expression. Thus, we do not believe that these associated characteristics are major drivers in our analysis. Another difficulty in studying human postmortem brain in this particular study is that the samples used were bulk tissue, a mix of cell types. This could mask cell-type specific effects, and could also contribute to the difficulty of comparing results between different brain regions and organisms. The increasing presence of single-cell datasets will help elucidate whether antipsychotic-induced changes are cell-type specific, and whether these changes are consistent between different tissues and animals. Overall, these findings provide a first look at the molecular genetic effects of antipsychotics in the human brain. It is clear that further investigation is warranted, especially to understand the translatability of tissue and animal studies.

Study samples

Details regarding postmortem human brain collection were described in a previous manuscript 17. Briefly, human brain tissue was obtained from two phases of collection. Retrospective clinical diagnostic reviews were conducted for every brain donor to include data from: autopsy reports, forensic investigations, neuropathological examinations, telephone screening, and psychiatric/substance abuse treatment record reviews and/or supplemental family informant interviews (whenever possible). All data was compiled and summarized in a detailed psychiatric narrative summary, and was reviewed independently by two board-certified psychiatrists in order to determine lifetime psychiatric diagnoses according to DSM-IV/V. For this study, 147 donors met criteria for schizophrenia, and 233 donors were free from all DSM-IV/V psychiatric/substance use disorder diagnoses. Non-psychiatric healthy controls were only included if free from all drugs of abuse by toxicology testing in blood, urine or brain at time of death. Antipsychotic toxicology testing was initially performed at the medical examiner's office as part of routine toxicology testing during autopsy to determine cause of death. Additionally, supplemental toxicology was conducted for donors with schizophrenia, to screen for any prescribed medications being taken at therapeutic levels that medical records, medical examiner reports, or next-of-kin reported at time of death. Supplemental toxicology testing was completed at National Medical Services Laboratories in Horsham, PA (www.nmslabs.com), using postmortem blood or cerebellar tissue. Of the donors who were antipsychotic positive, 29% were on only typical antipsychotics, 56% were on only atypical antipsychotics, and 15% were on both types of antipsychotics at time of death. The caudate nucleus was dissected from the slab containing the caudate and putamen at the level of the nucleus accumbens. The caudate was dissected from the dorsal third of the caudate nucleus, lateral to the lateral ventricle. DNA and RNA were concurrently extracted from ~250 mg of tissue using the QIAGEN AllPrep DNA/RNA Mini Kit.
WGBS data generation

Extracted DNA was subjected to QC via Bioanalyzer and WGBS library construction using the Swift Accel-NGS Methyl-Seq DNA Library Kit (https://swiftbiosci.com/accelngs-methyl-seq-dna-library-kit/), with 0.1% Lambda spike-in (to assess bisulfite conversion rate after sequencing) and 5% PhiX spike-in (to improve sequencing metrics and offset the lower-complexity WGBS libraries). The WGBS libraries were sequenced on an Illumina X Ten platform with 2×150 bp paired-end reads.

RNAseq data generation

Extracted RNA was subjected to QC and library construction as previously described 17. Briefly, libraries were constructed using the TruSeq Stranded Total RNA Library Preparation kit with Ribo-Zero Gold ribosomal RNA depletion, with ERCC Mix 1 spike-ins added to each sample. These paired-end, strand-specific libraries were sequenced on an Illumina HiSeq 3000 using 2×100 bp reads.

WGBS Data Processing

The raw WGBS data were processed using FastQC to control for quality of reads, Trim Galore to trim reads and remove adapter content 20, and Arioc for alignment to the GRCh38.p12 genome (obtained from ftp://ftp.ncbi.nlm.nih.gov/genomes/all/GCA/000/001/405/GCA_000001405.27_GRCh38.p12/GCA_000001405.27_GRCh38.p12_assembly_structure/Primary_Assembly/assembled_chromosomes/) 21. Duplicate alignments were removed with SAMBLASTER 22, and alignments were filtered with samtools 23 (v1.9) to exclude all but primary alignments with a MAPQ >= 5. We used the Bismark methylation extractor to extract methylation data from aligned, filtered reads 24. We then used the bsseq R/Bioconductor package (v1.22) to process and combine the DNA methylation proportions across the samples for all further manipulation and analysis 25. After initial data metrics were calculated, the methylation data for each sample was locally smoothed using BSmooth with default parameters for downstream analyses. CpG results were filtered to those not in blacklist regions (N = 27,812,354). CpHs were filtered to sites which had coverage > 3 and non-zero methylation in at least half the samples. Due to an unidentifiable primary source of variance, 11 samples in the DLPFC were dropped before analysis. We also extracted DNA sequence variants from 740 common exonic/coding sites for comparisons to DNA genotyping data to confirm sample identities, as implemented in our SPEAQeasy RNA-seq software 26.

RNAseq Data Processing

RNA sequencing reads were processed as described in Benjamin et al. 17 using the SPEAQeasy pipeline described in Eagles et al. 26. Briefly, reads were aligned to the human genome using HISAT2 27 and genes were quantified in a strand-specific manner using featureCounts 28. RNA-called coding variants were used to confirm sample identities against corresponding DNA genotyping data. Exonic sequences that were susceptible to RNA degradation from an independent tissue degradation experiment were extracted from coverage-level data following our qSVA algorithm 29, as described in more detail in Benjamin et al.

Differential gene expression analysis

For all differential gene expression analyses, we first filtered to samples which had exonic mapping rate > 0.37, mitochondrial mapping rate < 0.1, and RNA integrity number (RIN) > 6. We then filtered out lowly expressed genes by calculating reads per kilobase per million (RPKM) from the reads assigned during counting, and retaining those genes which had RPKM > 0.1 (a sketch of this filter appears below). We then performed differential expression analysis across various sample subsets for the diagnosis, antipsychotic, and antipsychotic generation variables.
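Returning to the expression filter just described, a minimal numpy sketch (not the code used in the study; the genes-by-samples input shapes and the mean-across-samples retention criterion are assumptions):

import numpy as np

def rpkm(counts, gene_lengths_bp, library_sizes):
    # counts: genes x samples matrix of reads assigned during counting
    # gene_lengths_bp: per-gene length in base pairs
    # library_sizes: per-sample totals of assigned reads
    per_million = library_sizes / 1e6
    per_kb = gene_lengths_bp[:, None] / 1e3
    return counts / (per_kb * per_million[None, :])

def filter_low_expression(counts, gene_lengths_bp, library_sizes, cutoff=0.1):
    # Keep genes whose mean RPKM across samples exceeds the cutoff.
    mean_rpkm = rpkm(counts, gene_lengths_bp, library_sizes).mean(axis=1)
    return counts[mean_rpkm > cutoff]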
For each variable, we performed linear modeling with limma, modeling voom-normalized feature counts (on the log2 scale) across the variable of interest, adjusting for potential confounders including age, sex, mitochondrial mapping rate, rRNA rate, exonic mapping rate, RIN, overall mapping rate, ERCC bias factor, and the top 5 quantitative ancestry factors. We further adjusted for quality surrogate variables (qSVs), which were calculated from the top k principal components (PCs) of degradation-susceptible exonic regions. We selected k = 16 using the BE algorithm with the sva Bioconductor package. While this qSVA was designed to reduce spurious differential expression signal due to RNA quality differences between groups, the latent qSVs can also capture and control for potential cell type composition variation 10 and thus should correct for differences between samples. To analyze differences within cases (SCZDAP vs SCZD), we applied contrasts to a model which separated samples into three groups by diagnosis and antipsychotic use. Linear modeling effects were converted to empirical Bayes-moderated T-statistics, with corresponding p-values, and Benjamini-Hochberg-adjusted (BH-adjusted) p-values using the limma topTable function.

Differential methylation analysis

Before analysis, CpG sites were filtered to those which are outside the ENCODE blacklist 30, which has been shown to have poor data quality in WGBS 9. CpH sites were filtered to those outside the blacklist, and those which have coverage > 3 and non-zero methylation for at least half of samples. Differential methylation analyses for diagnosis and antipsychotic use were performed using linear regression modeling accounting for sex, age, estimated neuronal composition (represented by the top principal component of methylation data), and the top 3 MDS components from genotype data. The regression analyses above were performed using limma, which employed empirical Bayes and returned moderated T-statistics, which were used to calculate P values and estimate the false discovery rate (FDR, via the Benjamini-Hochberg approach).

Cell type enrichment of differentially expressed genes

Cell-type specific gene expression data was taken from Tran et al. 18 for the DLPFC and nucleus accumbens (a region which, like the caudate nucleus, is in the striatum). We selected the top 2000 most cell type-specific genes for each considered cell type, which makes analyses across cell types more comparable (as many cell types had far more than 2000 significant cell type-specific genes). We then performed one Fisher's exact test per cell type to assess the enrichment of these cell type-specific genes among our significant antipsychotic-associated differentially expressed genes. This involved comparing the proportion of differentially expressed genes falling within each cell type-specific gene set to the corresponding proportion among genes that were not differentially expressed (a sketch of this test follows below).

Comparison to mouse

To compare our results to the significantly differential genes previously reported by Kim and colleagues 12 in mouse, we first filtered all results to those which have mouse-human orthologs (using biomaRt getLDS). We identified which genes were significant in both the mouse and human analyses, and then performed enrichment analysis using Fisher's exact test, comparing how many genes were significantly differentially expressed in human with how many genes were in the significant set of mouse genes with human orthologs.
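As a sketch of one such 2x2 test (scipy's implementation; the counts below are illustrative placeholders, not values from this study):

from scipy.stats import fisher_exact

# 2x2 table for one cell type:
#                   in cell-type set   not in set
# DE genes                 a                 b
# non-DE genes             c                 d
a, b = 180, 2167    # hypothetical split of 2347 differentially expressed genes
c, d = 1820, 16000  # hypothetical split of the remaining background genes

odds_ratio, p_value = fisher_exact([[a, b], [c, d]], alternative="greater")
print(f"OR = {odds_ratio:.2f}, P = {p_value:.2e}")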
Data availability

Raw and processed nucleic acid sequencing data generated to support the findings of this study are available via the PsychENCODE Knowledge Portal (https://psychencode.synapse.org/). The PsychENCODE Knowledge Portal is a platform for accessing data, analyses, and tools generated through grants funded by the National Institute of Mental Health (NIMH) PsychENCODE program. Data is available for general research use according to the following requirements for data access and data attribution: (https://psychencode.synapse.org/DataAccess). For access to the WGBS content described in this manuscript, see Synapse ID syn23318163. RNA-seq data used was generated by Benjamin and colleagues 17, who provide the relevant data availability information.

Supplementary Material

Refer to Web version on PubMed Central for supplementary material.
package manipulandoAtributosDeObjetos;

// Simple test of the Automovel class (defined elsewhere in this package):
// each object keeps its own attribute state, so their velocidade values differ.
public class TesteSimplesAutomovel {

    public static void main(String[] args) {
        Automovel meuCarro = new Automovel();
        meuCarro.modelo = "Gol";
        meuCarro.cor = "Azul";
        meuCarro.acelerar(); // accelerated once

        Automovel outroCarro = new Automovel();
        outroCarro.modelo = "Gol";
        outroCarro.cor = "Azul";
        outroCarro.acelerar(); // accelerated twice
        outroCarro.acelerar();

        // Prints the two independent speeds.
        System.out.println(meuCarro.velocidade);
        System.out.println(outroCarro.velocidade);
    }
}
Talk about electric cars today repeatedly boils down to one issue: range. Regardless of the fact that the average person drives just 19 miles each day, today's electric cars' 100-mile range means that sooner or later, driving an electric car is going to inconvenience you. But General Motors (GM) claims it has an answer that gives you the best of both worlds: emissions-free electric running and unlimited range. It's called the Chevrolet Volt and has just gone on sale in the USA. It's going on sale here early in 2012, so on a recent trip to Detroit we tried it out.
Like the Nissan Leaf and Ford Focus Electric, the Volt is primarily powered by a lithium-ion battery pack housed underneath the floor, which you plug in to recharge. But where this car differs from purely electric cars is that once you've depleted the battery (it has a range of 40 to 50 miles) a petrol engine fires up to continue providing power (indirectly) to the wheels. The logic is that, on short trips from home, the car will be emissions free because it runs purely off the battery. But if you suddenly need to get to Land's End tomorrow, it's not going to take you 3 days to get there as you might in an electric car, because you'll just keep filling up with fuel.
So the Volt’s important for its powertrain, but there’s a strong digital aspect to this car, which makes the experience of driving and owning it all the more special - and appealing to people like us.
Jump into the cockpit and the first thing you’ll notice is the white centre stack and trim surrounds. Dave Lyons and Stuart Norris from the interior design team talk about trying to live up to Apple’s “gold standard” in design when describing this interior, and we can see why. The white finish is just like the one on that third gen iPod that’s now sat in your top drawer with a dead battery. It might be a generation out of step with Apple’s industrial design, but it’s still striking and appealing to use.
In fact, the "buttons" on the centre console aren’t buttons at all, but touch sensitive actuators which give a lovely positive feedback "click" when pressed…exactly like the sound made when scrolling through menus on an Apple click wheel. Crucially, they’re nothing like as distracting and hard to use on the move as a pure touch screen.
Instead of dials and gauges, two 7-inch TFT screens act as your dashboard displays. The first, which can be used as a touch-screen, sits on top of the centre stack and displays the air-con, navigation and music information. It’ll also display "power flow", which tells you whether the battery or the petrol engine is providing the power.
In front of the driver, you get a central digital speed read-out, which is surrounded by battery charge level, range, fuel level, and an eco-driving meter. GM’s design and tech team are very proud of the intense blacks they achieved with these screens, and we’ve got to say they are beautifully HD clear and high quality in appearance.
There’s even a neat retro touch, in the eco driving-meter gauge, which has the quality of ball bearing maze game/labyrinth toy. You try to keep the gently spinning green ball in its centre "hole" to which it feels slightly magnetized - but brake or accelerate too hard and it floats up or down the gauge, showing you that you’re no longer driving efficiently.
But it’s not just in the car that GM has created a digital experience. Like the system Ford announced at CES, the Volt app - “OnStar Mylink” - available for iPhone, Android and Blackberry, provides the driver with control of the car via a phone. This allows you to set when the car is charged and to see how full the battery is, remotely. But it also allows unlocking of the doors via your phone, remote heating or cooling of the cabin, and remote start of the car, which is an amusing gimmick.
So what is it like to drive? It doesn’t sound as Jetsons-like as some electric cars we’ve driven, but it’s very refined, comfortable and - best of all - normal and easy to drive. And you can forget any milk-float jokes, because while you might not be worrying Porsches in a traffic light grand-prix, flick the Volt into power mode, and there’s more than enough accelerative shove to out-run today’s petrol-powered cars of a similar size.
If this truly is the future of the car, then we're impressed. GM really seems to be letting us have our cake and eat it. Not only will the average person be driving on electric power most of the time (barely spending any money on fuel), but GM has also removed the worries and constraints involved in driving an electric car. The interior experience and the ownership benefits the digital elements of the car provide are the icing on the cake. So forget the long-mused-about idea of an Apple "iCar": the car for the digital era has arrived, and it's called the Volt.
Regulation of COL1A1 expression in type I collagen producing tissues: Identification of a 49 base pair region which is required for transgene expression in bone of transgenic mice

Previous deletion studies using a series of COL1A1-CAT fusion genes have indicated that the 625 bp region of the COL1A1 upstream promoter between -2295 and -1670 bp is required for high levels of expression in bone, tendon, and skin of transgenic mice. To further define the important sequences within this region, a new series of deletion constructs extending to -1997, -1794, -1763, and -1719 bp has been analyzed in transgenic mice. Transgene activity, determined by measuring CAT activity in tissue extracts of 6- to 8-day-old transgenic mouse calvariae, remains high for all the new deletion constructs and drops to undetectable levels in calvariae containing the -1670 bp construct. These results indicate that the 49 bp region of the COL1A1 promoter between -1719 and -1670 bp is required for high COL1A1 expression in bone. Although deletion of the same region caused a substantial reduction of promoter activity in tail tendon, the construct extending to -1670 bp is still expressed in this tissue. However, further deletion of the promoter to -944 bp abolished activity in tendon. Gel mobility shift studies identified a protein in calvarial nuclear extracts, not found in tendon nuclear extracts, which binds within this 49 bp region. Our study has delineated sequences in the COL1A1 promoter required for expression of the COL1A1 gene in high type I collagen-producing tissues, and suggests that different cis elements control expression of the COL1A1 gene in bone and tendon.
from django.forms import widgets
# RadioSelect that renders with the e-filling app's own templates
# instead of Django's default radio templates.
class RadioSelect(widgets.RadioSelect):
    template_name = "efilling/widgets/radio_input.html"
    option_template_name = "efilling/widgets/radio_option.html"
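A minimal usage sketch (the form and field below are hypothetical, not part of this app):

from django import forms

from .widgets import RadioSelect  # assuming the module above is .widgets

class PaymentMethodForm(forms.Form):
    # ChoiceField rendered with the e-filling radio templates defined above.
    method = forms.ChoiceField(
        choices=[("card", "Card"), ("bank", "Bank transfer")],
        widget=RadioSelect,
    )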
Building a KVM-based Hypervisor for a Heterogeneous System Architecture Compliant System

Heterogeneous System Architecture (HSA) is an architecture developed by the HSA foundation aiming at reducing programmability barriers as well as improving communication efficiency for heterogeneous computing. For example, HSA allows heterogeneous computing devices to share the same virtual address space. This feature allows programmers to bypass explicit data copying between devices, as was required in the past. HSA features such as job dispatching through user level queues and memory based signaling help to reduce communication latency between the host and other computing devices. While the new features in HSA enable more efficient heterogeneous computing, they also introduce new challenges to system virtualization, especially in memory virtualization and I/O virtualization. This work investigates the issues involved in HSA virtualization and implements a KVM-based hypervisor that supports the main features of HSA inside guest operating systems. Furthermore, this work shows that with the newly introduced hypervisor for HSA, system resources in HSA-compliant AMD Kaveri can be effectively shared between multiple guest operating systems.
package com.yechy.handyfont.example;
import android.app.Application;
import android.content.Context;
import com.yechy.handyfont.HandyContextWrapper;
import com.yechy.handyfont.HandyFontConfig;
/**
* Created by cloud on 2019-09-15.
*/
public class ExampleApp extends Application {
    // Font asset paths shipped with the example app.
    private String chilanka_regular = "font/Chilanka-Regular.ttf";
    private String dancingScript_bold = "font/DancingScript-Bold.ttf";
    private String dancingScript_regular = "font/DancingScript-Regular.ttf";

    // System font family names to be replaced at runtime.
    private String sans_serif = "sans-serif";
    private String sans_serif_medium = "sans-serif-medium";
@Override
public void onCreate() {
super.onCreate();
        // Configure global font replacement before any activity inflates views.
        HandyFontConfig.getInstance()
                .setLogEnabled(true)
                .setDebugEnabled(true)
                .setReplaceEnabled(true)
                .addReplacedFontForDefaultFontFamily(dancingScript_bold)
                .addReplacedFont(sans_serif, chilanka_regular)
                .addReplacedFont(sans_serif_medium, dancingScript_regular);
}
    @Override
    protected void attachBaseContext(Context base) {
        // Wrap the base context so HandyFont can intercept font lookups.
        super.attachBaseContext(HandyContextWrapper.wrap(base));
    }
}
// loadConfig loads the client configuration from the given file.
// apiConfig and readConfigFromFile are defined elsewhere in this package;
// errors.New comes from the standard library "errors" import.
func loadConfig(configFile string) (config apiConfig, blobSigningKey string, err error) {
	if configFile == "" {
		err = errors.New("client config file not specified")
		return
	}
	config, blobSigningKey, err = readConfigFromFile(configFile)
	return
}
President Trump set up the controversial issue of "late-term abortion" as a potential 2020 flashpoint on Tuesday, using his State of the Union address to graphically denounce Democrats for permitting "a baby to be ripped from the mother's womb moments from birth."
Driving the news: A pair of state measures that loosened, or sought to loosen, abortion restrictions in New York and Virginia were met with widespread condemnation from Republicans that reached a fever pitch after a radio interview on the topic by Virginia Gov. Ralph Northam last week — before his administration pitched into chaos over a racist medical school yearbook photo.
The legislation: New York's newly passed law allows abortion when there is "an absence of fetal viability, or the abortion is necessary to protect the patient’s life or health" after the 24th week of pregnancy.
And Virginia's proposed law, which failed to make it out of committee in the statehouse, would have eliminated some of the state's restrictions on late-term abortion, including reducing the number of doctors needed to sign off on such a procedure from three to one.
The controversy: Virginia Del. Kathy Tran, a sponsor of her state's bill, answered affirmatively during a state legislative session when she was asked by a Republican lawmaker if it would allow an abortion while a woman was in labor.
When asked by radio station WTOP last week about Tran's statement, Northam, who is a trained pediatric neurologist, said abortions may be "done in cases where there may be severe deformities. There may be a fetus that’s not viable. So in this particular example, if a mother’s in labor, I can tell you exactly what would happen. The infant would be delivered, the infant would be kept comfortable, the infant would be resuscitated if that’s what the mother and the family desired. And then a discussion would ensue between the physicians and the mother."
What they're saying: Both Tran and Northam's comments were condemned by Republicans as "infanticide," prompting backlash from top Republicans, including Trump and Vice President Mike Pence, and clarifications from the Virginia lawmakers.
Northam, via his spokesperson: "Attempts to extrapolate these comments otherwise is in bad faith and underscores exactly why the governor believes physicians and women, not legislators, should make these difficult and deeply personal medical decisions."
Tran: "I should have said: 'Clearly, no, because infanticide is not allowed in Virginia, and what would have happened in that moment would be a live birth.'"
Trump: "Democrats are becoming the Party of late term abortion, high taxes, Open Borders and Crime!"
Pence: "This shameless embrace of a culture of death is startling to every American who cherishes life."
Sen. Ben Sasse (R-Neb.): "In just a few years pro-abortion zealots went from 'safe, legal, and rare' to 'keep the newborns comfortable while the doctor debates infanticide.'"
The facts: According to the CDC, only about 1% of abortions take place after 21 weeks.
Abortion rates among U.S. women in all age groups dropped sharply to a decade low from 2006 to 2015 — and nearly 90% of abortions performed in 2015 were within a woman's first 13 weeks of pregnancy.
In an interview with CNN, Barbara Levy, the vice president of health policy at the American College of Obstetricians and Gynecologists, said, "The phrase 'late-term abortion,' is medically inaccurate and has no clinical meaning."
Jennifer Conti, a fellow with the advocacy group Physicians for Reproductive Health, also told CNN: "In obstetrics, we don't divide pregnancies into terms. 'Late term' is an invention of anti-abortion extremists to confuse, mislead and increase stigma."
The bottom line, via the New York Times: "[A]mong conservatives, the White House's outrage was also greeted as a clear and shrewd political strategy — to rally Republicans with an eye toward the 2020 presidential election, and to close ranks around Mr. Trump, embattled though he may be, as their unequivocal leader."
"It's going to come into play, quite frankly, in the elections next year. We're not going to let it go away," Carol Tobias, the president of the National Right to Life Committee, told the Times. |
#ifndef S3D_VIDEO_CAPTURE_DECKLINK_H
#define S3D_VIDEO_CAPTURE_DECKLINK_H
#include <decklink_sdk/DeckLinkAPI.h>
#include <memory>
// based on:
// https://stackoverflow.com/questions/42488497/using-stl-smart-pointers-with-com-interfaces
// Utilities for smart handling of DeckLink pointers
struct DecklinkPtrDeleter {
template <typename T>
void operator()(T* ptr) {
if (ptr != nullptr) {
ptr->Release();
}
}
};
namespace DecklinkHelpers {
template <class T>
struct decklink_iid_t {
using type = T;
constexpr decklink_iid_t() = default;
};
template <class T>
constexpr decklink_iid_t<T> decklink_iid{};
template <class T>
constexpr void get_iid(decklink_iid_t<T>) = delete; // overload this for your particular types
template <class T>
constexpr REFIID interface_iid = get_iid(decklink_iid<T> /*unused*/);
} // namespace DecklinkHelpers
constexpr REFIID get_iid(DecklinkHelpers::decklink_iid_t<IDeckLinkInput> /*unused*/) {
return IID_IDeckLinkInput;
}
constexpr REFIID get_iid(DecklinkHelpers::decklink_iid_t<IDeckLink> /*unused*/) {
return IID_IDeckLink;
}
constexpr REFIID get_iid(DecklinkHelpers::decklink_iid_t<IDeckLinkConfiguration> /*unused*/) {
return IID_IDeckLinkConfiguration;
}
constexpr REFIID get_iid(
DecklinkHelpers::decklink_iid_t<IDeckLinkVideoFrame3DExtensions> /*unused*/) {
return IID_IDeckLinkVideoFrame3DExtensions;
}
// Thrown when QueryInterface fails for the requested interface type T.
// Inherits publicly so it can be caught as std::bad_alloc.
template <class T>
class DeckLinkBadAlloc : public std::bad_alloc {
 public:
  DeckLinkBadAlloc() : std::bad_alloc() {}
};
template <class U, class T>
std::unique_ptr<U, DecklinkPtrDeleter> make_decklink_ptr(T const& src) {
if (!src) {
return {};
}
void* r = nullptr;
if (src->QueryInterface(DecklinkHelpers::interface_iid<U>, &r) != S_OK) {
throw DeckLinkBadAlloc<U>();
}
return {static_cast<U*>(r), {}};
}
template <class T>
using SmartDecklinkPtr = std::unique_ptr<T, DecklinkPtrDeleter>;
// use it this way
// auto decklLink = make_decklink_ptr<IDeckLinkInput>(deckLink);
#endif // S3D_VIDEO_CAPTURE_DECKLINK_H
//-------------------------------------------------------------------------------------------------------
// Copyright (C) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
//-------------------------------------------------------------------------------------------------------
#pragma once
namespace JsUtil
{
class FBVEnumerator
{
// Data
private:
BVUnit *icur, *iend;
BVIndex curOffset;
BVUnit curUnit;
// Constructor
public:
FBVEnumerator(BVUnit * iterStart, BVUnit * iterEnd);
// Implementation
protected:
void MoveToValidWord();
void MoveToNextBit();
// Methods
public:
void operator++(int);
BVIndex GetCurrent() const;
bool End() const;
};
}
ViSQOL v3: An Open Source Production Ready Objective Speech and Audio Metric

Estimation of perceptual quality in audio and speech is possible using a variety of methods. The combined v3 release of ViSQOL and ViSQOLAudio (for speech and audio, respectively) provides improvements upon previous versions, in terms of both design and usage. As an open source C++ library or binary with permissive licensing (available at https://github.com/google/visqol), ViSQOL can now be deployed beyond the research context into production usage. The feedback from internal production teams at Google has helped to improve this new release, and serves to show cases where it is most applicable, as well as to highlight limitations. The new model is benchmarked against real-world data for evaluation purposes. The trends and direction of future work are discussed.

I. INTRODUCTION

There are numerous objective metrics available, i.e., metrics obtained by measurements on the audio signal, to assess the quality of recorded audio clips. Examples of physical measures include signal-to-noise ratio (SNR), total harmonic distortion (THD), and spectral (magnitude) distortion. When estimating perceived quality, PESQ and POLQA have become standards for speech, and in practice also for general audio, despite being originally designed to target only speech quality. There are other notable examples, e.g., PEAQ and PEMO-Q. Most of these metrics require commercial licenses. ViSQOL and ViSQOLAudio (referred to collectively as ViSQOL below) are freely available alternatives for speech and audio. These metrics are continually being expanded to cover additional domains. For example, the work on AMBIQUAL extends the same principles used in ViSQOLAudio into the ambisonics domain.

Advancements in speech and audio processing, such as denoising and compression, propel the need for improvements in quality estimation. For example, speech and audio codecs reach lower and lower useful bitrates. As such, it may be worthwhile to analyze the performance of ViSQOL for this extended domain. Furthermore, there have been a number of deep neural network (DNN) generative models that recreate the waveform by sampling from a distribution of learned parameters. One example is the WaveNet-based low bitrate coder, which is generative in nature. There are other DNN-based generative models, including SampleRNN and WaveGlow, with promising results that suggest that this trend will continue. These generative models typically do not lend themselves to being analyzed well by existing full reference speech quality metrics. While the work described in this paper does not propose a solution to the generative problem, the limitations of the current model should be acknowledged to encourage development of solutions.

ViSQOL was originally designed with a polynomial mapping of the neurogram similarity index measure (NSIM) to MOS, and ViSQOLAudio was extended to use a model trained for support vector regression. Since then, deep neural network models have emerged and been applied to speech quality models. Such approaches are promising and can potentially resolve some of the issues that the current architectures cannot. While such new directions are clearly interesting and warrant further investigation, they are rapidly evolving. We present a new version of ViSQOL, v3, which contains incremental improvements to the existing framework based on real-world feedback, rather than fundamental changes such as end-to-end DNN modeling.

Since ViSQOL has been presented and benchmarked in a large number of experiments that have validated its application to a number of use cases, we consider it relatively well analyzed for the known datasets, which tend to be smaller and relatively homogeneous. We instead turn our attention to the data and types of problems encountered "in the wild" at Google teams that were independent of ViSQOL development, and the iterative improvements that have come from this analysis. Adapting it to these cases has yielded various improvements to usability and performance, along with feedback and insights about the design of future systems for estimating perceptual quality. Because these improvements fill "blind spots" in the existing datasets, they are not expected to improve its results on those datasets. Until more diverse subjective score datasets are created, real-world validation seems to be a reasonable compromise. Additionally, and alongside improving the quality of MOS
Since ViSQOL has been presented and benchmarked in a large number of experiments that have validated its application to a number of use cases,, - we consider it relatively well analyzed for the known datasets, which tend to be smaller and relatively homogeneous. We instead turn our attention to the data and types of problems encountered "in the wild" at Google teams that were independent of ViSQOL development, and the iterative improvements that have come from this analysis. Adapting it to these cases has yielded various improvements to usability and performance, along with feedback and insights about the design of future systems for estimating perceptual quality. Since the nature these improvements fill the 'blind spots' in the datasets, they are not expected to improve its results on these datasets. Until there is the creation of more diverse subjective score datasets, real-world validation seems to be a reasonable compromise. Additionally and alongside improving the quality of MOS 978-1-7281-5965-2/20/$31.00 ©2020 IEEE estimation from real-world data, we are concerned with how to make ViSQOL more useful to the community from a practical tooling perspective. Even though ViSQOL was available through a MATLAB implementation, there were still unnecessary hurdles to use it in certain cases, e.g. production and continuous integration testing, (which may need to run on a server), or may not have MATLAB licenses available. As a result, we chose to re-implement it in C++ because it is a widely available and extensible language that can be wrapped in other languages. We decided to put the code on GitHub for ease of access and contribution. This paper is structured as follows: in section II, a case study of the findings and challenges encountered when integrating ViSQOL into various Google projects. In section III, we present the general design and algorithmic improvements that are in the new version. Then in section IV, the improvements with respect to the case studies are discussed. Finally, we summarize in a concluding section V. II. CASE STUDIES AND USER FEEDBACK This version of ViSQOL is the result of the integration process of ViSQOL, using real production and integration testing cases at Google. The case studies described in this section were initiated by individual teams that were independent of prior ViSQOL development. They typically consulted with a ViSQOL developer to verify appropriate usage, or read the documentation and integrated ViSQOL on their own. A. Hangouts Meet The Meet team has been successfully using ViSQOL for assessing audio quality in Hangouts Meet. Hangouts Meet is a video communication service that uses WebRTC for transmitting audio. Meet uses a testbed that is able to reliably replicate adverse network conditions to assess the quality of audio during the call. For this use case they have 48 kHz-sampled reference and degraded audio samples and use ViSQOLAudio for calculating the results. In order to ensure that ViSQOL works reliably for this use case, it was compared to an internal no-reference audio quality metric that is based on technical metrics of a WebRTC-based receiver. The metric is on a scale from 0 to 1, with lower scores being better. ViSQOL's MOS is able to correlate to this metric, as seen in Figure 1. In this use case, Meet developers were mostly interested in the sensitivity of ViSQOL to audio degradations from network impairments. 
In Figure 2 there is a comparison between mean ViSQOL scores during a call that shows that the metric is sensitive to how audio quality changed from a good network conditions scenario with scores ranging from 4.21 to 4.28, to a medium impaired scenario with scores ranging from 4.04 to 4.16, to finally an extremely challenging network scenario with scores from 3.72 to 3.94. Although the exact network conditions can not be shared, here good network conditions indicated that the connection should allow for both video and audio to be near perfect in the call, medium conditions indicate that the call might have issues, but the audio should continue to Fig. 1. Hangouts Meet's internal no-reference metric has components to detect audio degradations. ViSQOL successfully detected these degradations in audio that contained them (blocks 1 and 4), while in the audio blocks that were not affected the scores from ViSQOL were higher (blocks 2 and 3). be good, while in extremely challenging conditions we expect to see both video and audio perceptually degraded, but the call would still go through. Some of the calls were run with good network conditions (green), some were simulating average network conditions, where the product should still perform well (yellow), while others were simulating extremely challenging network conditions, where it is expected to for issues to appear (red). In order to ensure that ViSQOL performs reliably, several hundreds of calls were collected from the testbed. The mean values obtained from ViSQOL and the internal metric from these calls were plotted in Figure 3. The results were reliably reproduced. Following the positive results from this investigation, ViSQOL is currently one of the main objective audio quality metrics deployed by the Hangouts Meet product team at Google. B. Opus Codec Google contributes to the development of the Opus codec. ViSQOL and POLQA were used to benchmark the quality of the Opus coder for both speech and music at various bitrates and computational complexities. In previous studies ViSQOLAudio has been shown to perform reasonably on low bitrate audio. However, ViSQOL's speech mode did not specifically target the low bitrate case. Additionally, recent advancements in Opus have pushed the lower bound of the range of bitrates further downwards for a given bandwidth since the time ViSQOL was introduced. For example, Opus 1.3 can produce a wideband signal at 9 kbps, whereas the TCDAudio14, CoreSV14, and AACvOpus15 datasets that ViSQOL's support vector regression was trained on have bitrates that only go as low as 24 kbps. POLQA and the original version of ViSQOL in speech mode display similar trends that are consistent with expectations with respect to the bitrate and complexity settings. The differences in the lower bitrates are more pronounced according to POLQA. The differences in higher bitrates are more pronounced according to ViSQOL. Although subjective scores were not available, the developers expected that MOS should be less sensitive to changes in higher bitrates, giving POLQA a better match. After the improvements described in section 3, ViSQOL v3 MOS was a closer match to the expectation as can be seen in Figure 4. For musical examples, the developers found that both metrics display similar trends with respect to bitrates. However, POLQA shows higher discrimination between 6-8 kbps, 10-12 kbps and 16-24 kbps. 
ViSQOL is able to discriminate between the different bitrates with monotonic behavior, but one point of concern is that this results in ViSQOLAudio being relatively insensitive to differences in complexity settings. In light of this, we would not recommend using ViSQOL for automated regression tests without retraining the model. The improvements made in Section III slightly ameliorate these issues, as can be seen in Figure 5. On the other hand, ViSQOL identified a spurious bandwidth 'bump' at 12 kbps for the 5 and 6 complexity settings (which was perceived as higher quality in informal listening), where POLQA did not. Lastly, ViSQOL was used to analyze the results for both clean and noisy references. This is not a case ViSQOL was designed for, as it presumes a clean reference, similar to PESQ and POLQA. However, it was found to perform in a similar fashion to the clean cases for both speech and audio in the noisy cases. It was concluded that ViSQOL could be used for regression testing for speech. However, formal listening tests would be desirable for two reasons: to better interpret the differences between POLQA and ViSQOLAudio, and to allow training a model that represented the low bitrate ranges.

C. Other Findings

A number of other teams have also adapted ViSQOL for their products. In the majority of cases, their use case vaguely resembles the training data (e.g. wideband speech network degradations or music coding), but often has marked differences. For example, one team chose to analyze the network loop with a digital and analog interface, requiring a rig to be built for continuous automated testing. Typically these teams also had access to PESQ, POLQA or subjective scores for their cases and wanted to evaluate the accuracy of ViSQOL measurements as well as identify limitations. A frequent issue was related to the duration and segmentation of the audio that would be used with ViSQOL when used in an automated framework.

Fig. 5 (cf. Fig. 4). The bitrates follow the same key as Figure 4. The bump at complexity 5, 6, and 10 for 12 kbps is related to Opus deciding to use a 12 kHz bandwidth for some fraction of the files instead of the 8 kHz bandwidth it used for complexities 2-4 and 7-9.

While ViSQOL in speech mode has a voice activity detector, it was found that ViSQOLAudio would perform poorly for segments where the reference was silent, because of either averaging effects or the lack of log-scale thresholding, which left it overly sensitive to small absolute differences in ambient noise levels. To resolve the averaging effects, it was recommended to extract segments of audio of 3 to 10 seconds where there was known activity. A solution to the thresholding issues is discussed in the next section.

III. DESIGN AND IMPROVEMENTS

This section summarizes the previous version and describes the changes made to the new version. Figure 6 shows the overall program flow and highlights the new components of the system that are referred to in the subsections.

A. General Design

The ViSQOL and ViSQOLAudio algorithms share many components by design, such as the gammatone spectrogram and NSIM calculation. It then seems reasonable that the common components be shared and developed together. The differences between the two algorithms are related to differences in the characteristics of speech and music. For example, the use of voice activity detection (VAD) for speech, and analysis of the higher bands (up to 24 kHz) for general audio/music.
The common components of both the speech and audio systems include creating a gammatone spectrogram using equivalent rectangular bandwidth (ERB) filters, creating patches on the order of half a second, aligning the patches, computing NSIM from the aligned patches, and mapping the NSIM values to MOS. There were minor changes to some of these components for practical reasons, such as modifying dependencies or fixing issues found in case studies or test failures. For example, the VAD implementation uses a simple energy-based VAD, which should be sufficient given the requirement of clean references. As another example, window sizes were updated to 80 ms with a hop of 20 ms after discovering an issue with the windowing of previous versions.

B. C++ Library and Binary

To make ViSQOL more widely available, we removed the dependency on MATLAB by implementing a C++ version with only open-source dependencies. The new version, v3, is available as a binary or as a library. The codebase was made available on GitHub because we want it to be easy for the public to use and to invite external contributions. The majority of users were binary users, but some had requirements for finer control. For this purpose we designed a library with protobuf support and error checking, on which the binary depends. This library would also be useful for a user who wishes to wrap the functions in a different language, such as with Python bindings.

There were several changes to the input and output. Verbose output now includes the average NSIM values per frequency band and the mean NSIM per frame. Because ViSQOL is continuously changing to adapt to new problems, a conformance version number is included in the output; whenever the MOS changes for known files, the conformance number is incremented. Lastly, batch processing via comma-separated value (CSV) files is also supported.

A number of Google-related projects were used to build this version. The application binary was implemented using the Abseil C++ application framework. The Google Test C++ testing framework was integrated, and various tests were implemented to ensure correctness, detect regressions, and increase stability for edge cases: 23 test classes with multiple tests were implemented, including not only unit tests but also a test that checks the conformance of the current version to known scores. The Bazel framework was used to handle building and dependency fetching, as well as test development.

C. Fine-scaled Time Alignment

Although the previous versions of ViSQOL did two levels of alignment (global and patch), there were still issues with the patch alignment because the spectrogram frames could be misaligned at a fine scale. To address this, we implemented an additional alignment step that offsets each patch by the lag found in a cross-correlation step on the time-domain regions corresponding to the aligned patches. The gammatone spectrogram is then recomputed for the sample-aligned patch audio, and the NSIM score is taken.

D. Silence Thresholds

To deal with the problem of log-scale amplitudes discussed in Section II-C, we introduce silence thresholds on the gammatone spectrogram. Because NSIM is calculated on log amplitudes, we found that it was too sensitive to different levels of ambient noise. For example, a near-digital-silence reference compared against a very low level of ambient noise would still produce a very low NSIM score, despite the difference being perceptually transparent.
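As an illustration of such flooring, here is a minimal sketch; the max-based rule and the specific threshold values are assumptions for illustration, not the exact ViSQOL v3 formulation:

import numpy as np

def apply_silence_floor(spec_db, abs_floor_db=-80.0, rel_floor_db=-45.0):
    """Floor a log-amplitude spectrogram (frames x bands) at an assumed max-based threshold.

    Each frame is floored at the higher of a global absolute threshold and a
    per-frame threshold set relative to that frame's peak band amplitude.
    """
    frame_peak = spec_db.max(axis=1, keepdims=True)
    floor = np.maximum(abs_floor_db, frame_peak + rel_floor_db)
    return np.maximum(spec_db, floor)

# Applied to both the reference and degraded spectrograms before NSIM,
# small differences in near-silent regions no longer dominate the score.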
The silence threshold introduces an absolute floor, as well as a relative floor that may be higher for high-amplitude frames. The thresholded amplitude y_{t,f}(x) for a time t and frequency band f, given an input spectrogram x, floors the reference and degraded log amplitudes r_{t,f} and d_{t,f} using a global absolute threshold Y_min and a relative per-frame threshold Y_f^min (the flooring sketch above illustrates one possible form).

E. NSIM to MOS Model

The changes above ultimately affect the NSIM scores. This requires training a new SVR model to map the per-frequency-band NSIM values to MOS using libsvm. We conducted a grid search to minimize the 4-way cross-validation loss on the same training set (TCDAudio14, CoreSV14, AACvOpus15). However, we observed in Section II that this model was too specific to the training data and would behave poorly on very low bitrate (6-18 kbps) audio. This appears to be related to the fact that there is no monotonicity constraint in the SVR model used by ViSQOL (for out-of-distribution data, a strictly higher NSIM could produce a lower MOS). To address this issue for the default model, we relaxed the SVR parameters by lowering the cost and gamma parameters, accepting a slightly higher cross-validation error in exchange for behavior closer to monotonic. Additionally, this version includes tooling and documentation that allow users to train their own SVR model from CSV input files, provided they can supply subjective scores for degraded/reference pairs. By following the grid-search methods described by the libsvm authors, users should be able to tailor a model that represents their data.

IV. DISCUSSION

Here we present a discussion of the use cases and feedback in light of the improvements, followed by reflection on trends and the areas that are promising as future work. The case studies mentioned in Section II highlight the challenges of real-world applications of ViSQOL. The findings are generally that ViSQOL can be used for various applications, but careful investigation is required for any use case. The users of these tools are the very developers of new audio processing and coding techniques, and they are often analyzing new types of audio that are "out of distribution". In some cases, we can allow the user to retrain a model to match the new data. We find that developers are reasonably skeptical about how well ViSQOL will apply to their problem, given that it almost always has unique characteristics. Although ViSQOL is not guaranteed to give a meaningful absolute MOS for cases that are significantly different from what it was originally designed for, the developers in our case studies found correlations that were useful for their use case. This conclusion, however, is often facilitated by additional metrics that can validate ViSQOL's application. In other cases, such as generative models, a redesign of the algorithm at a fundamental level may be required, which could include different spectrogram representations or DNNs. While projects like LibriTTS have curated large amounts of freely available speech data, which has been a boon to speech-related DNNs, there is as yet no standard, widely available subjective-score dataset of similar scale. A larger dataset would enable new development, but it would also require rethinking existing tools such as the support vector regression used by ViSQOLAudio, which is intended for smaller datasets on the order of hundreds of points.
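As referenced in Section III-E above, here is a minimal sketch of fitting a band-NSIM-to-MOS mapping with a grid search. It uses scikit-learn's SVR as a stand-in for libsvm, and the file name, column layout, and parameter grid are assumptions:

import numpy as np
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVR

# Assumed CSV layout: one row per degraded/reference pair, per-band mean
# NSIM values in the leading columns and the subjective MOS in the last.
data = np.loadtxt("nsim_mos_pairs.csv", delimiter=",", skiprows=1)
X, y = data[:, :-1], data[:, -1]

grid = GridSearchCV(
    SVR(kernel="rbf"),
    param_grid={"C": 2.0 ** np.arange(-3, 8), "gamma": 2.0 ** np.arange(-8, 2)},
    cv=4,  # 4-way cross validation, matching the procedure described above
    scoring="neg_mean_squared_error",
)
grid.fit(X, y)
print(grid.best_params_, grid.best_score_)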
V. CONCLUSION

We have presented a new version of ViSQOL, which is available for use on GitHub. Its integration into real-world problems by different teams at Google yielded a number of insights and improvements over the previous version. There are a number of promising avenues for future work, including DNN-based approaches, a more general model, and taking the new generative audio approaches into account. |
We cannot take our food for granted. Far from it. For the first time in a decade, the number of hungry in the world is rising due, in part, to climate change and conflict. At the same time, the world’s population is growing, while our natural sources for food are diminishing. The good news is there are concrete efforts afoot to reduce current impacts and prevent possible disasters through the responsible management, use and sharing of the benefits from the world’s life-giving food plants.
This is where the International Treaty on Plant Genetic Resources for Food and Agriculture plays a pivotal role. Through this global pact, member countries work together to conserve and sensibly use the world’s limited precious plant genetic resources for food and agriculture.
The Seventh Session of the Governing Body – the global policy forum overseeing the International Treaty – will bring together 144 member nations, plus experts, observer countries, nongovernmental organizations, scientists and farmers' representatives in Kigali from 30 October to 3 November 2017, to take important decisions to strengthen their pact and to ensure that our children's children can continue to enjoy food and nutrition originating from plants.
80% of our plant food comes from just 64 crops.
All countries depend on other countries for their food crops and forages. Through the International Treaty, countries work together to ensure the availability of the basic material needed to grow food and cultivate agriculture. The number of food and agriculture plants currently covered under the International Treaty is limited to 64,* which provide humans with 80% of their food intake from plants. However, a number of key food plants are yet to be included in this list; bringing them in would allow countries, together, to make sure that these resources benefit us all, not just today, but in the future. This will be a key point of discussion during the Kigali session of the Governing Body.
Equity and Food for All.
Key participants include:
144 Contracting Parties, Farmers’ Organizations, NGOs, Civil Society Organizations, etc.
H.E. José Graziano da Silva, Director-General, FAO
H.E. Geraldine Mukeshimana, Minister for Agriculture and Animal Resources of Rwanda
The event will be held October 30 to November 3, at the Kigali Convention Centre in Kigali, Rwanda. It will be simultaneously translated into Arabic, Chinese, English, French, Spanish, and Russian. For more information, please visit http://www.fao.org/plant-treaty/seventh-governing-body/en/. |
/**
* A simple {@link Fragment} subclass.
*/
public class FavoritesFragment extends Fragment {
private RecyclerView recyclerView;
private Snackbar snackbar;
private AudioManager audio;
public FavoritesFragment() {
// Required empty public constructor
}
@Override
public void onResume() {
super.onResume();
if (recyclerView != null) {
FavoritesAdapter adapter = (FavoritesAdapter) recyclerView.getAdapter();
adapter.setFavorites(loadDeserializedFavs(getActivity()));
}
}
@Override
public View onCreateView(LayoutInflater inflater, ViewGroup container,
Bundle savedInstanceState) {
//Get audio service
audio = (AudioManager) getActivity().getSystemService(Context.AUDIO_SERVICE);
final View view = inflater.inflate(R.layout.fragment_favorites, container, false);
ArrayList<Pair<String, String>> deserializedFavs = loadDeserializedFavs(getActivity());
recyclerView = (RecyclerView) view.findViewById(R.id.favorites_list);
recyclerView.setHasFixedSize(true);
LinearLayoutManager linearLayoutManager = new LinearLayoutManager(getActivity());
DefaultItemAnimator defaultItemAnimator = new DefaultItemAnimator();
DividerItemDecoration dividerItemDecoration = new DividerItemDecoration(getActivity(), linearLayoutManager.getOrientation());
final FavoritesAdapter adapter = new FavoritesAdapter(this, deserializedFavs);
recyclerView.setAdapter(adapter);
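// Swipe-to-delete with undo: swiping an entry left removes it from the
// favorites and shows a Snackbar whose UNDO action restores the entry.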
ItemTouchHelper touchHelper = new ItemTouchHelper(new ItemTouchHelper.SimpleCallback(0, ItemTouchHelper.LEFT) {
Drawable background;
Drawable DeletedIcon;
int DeletedIconMargin;
boolean initiated;
void init() {
background = new ColorDrawable(Utility.setColorByTheme(R.attr.favoriteButton, getActivity()));
DeletedIcon = ContextCompat.getDrawable(getActivity(), R.drawable.ic_close_white_24dp);
DeletedIconMargin = (int) getActivity().getResources().getDimension(R.dimen.deleted_icon_margin);
initiated = true;
}
@Override
public boolean onMove(RecyclerView recyclerView, RecyclerView.ViewHolder viewHolder, RecyclerView.ViewHolder target) {
return false;
}
@Override
public void onSwiped(final RecyclerView.ViewHolder viewHolder, int direction) {
adapter.remove(viewHolder.getAdapterPosition());
snackbar = Snackbar.make(view.findViewById(R.id.favorites_fragment_coordinator), "Removed Element from Favorites", (int) FavoritesAdapter.UNDO_TIMEOUT);
snackbar.setAction("UNDO", new View.OnClickListener() {
@Override
public void onClick(View v) {
UtilitySharedPrefs.addFavs(getActivity(), adapter.getTemp_fav());
adapter.setFavorites(loadDeserializedFavs(getActivity()));
adapter.notifyItemInserted(adapter.getFavorites().indexOf(adapter.getTemp_fav()));
}
});
snackbar.show();
}
@Override
public void onChildDraw(Canvas c, RecyclerView recyclerView, RecyclerView.ViewHolder viewHolder, float dX, float dY, int actionState, boolean isCurrentlyActive) {
super.onChildDraw(c, recyclerView, viewHolder, dX, dY, actionState, isCurrentlyActive);
View itemView = viewHolder.itemView;
// for reasons unclear, this method also gets called for view holders that are already swiped away
if (viewHolder.getAdapterPosition() == -1) {
// not interested in those
return;
}
if (!initiated) {
init();
}
// draw the themed swipe background
background.setBounds(itemView.getRight() + (int) dX, itemView.getTop(), itemView.getRight(), itemView.getBottom());
background.draw(c);
int itemHeight = itemView.getBottom() - itemView.getTop();
int intrinsicWidth = DeletedIcon.getIntrinsicWidth();
int intrinsicHeight = DeletedIcon.getIntrinsicHeight();
int xMarkLeft = itemView.getRight() - DeletedIconMargin - intrinsicWidth;
int xMarkRight = itemView.getRight() - DeletedIconMargin;
int xMarkTop = itemView.getTop() + (itemHeight - intrinsicHeight) / 2;
int xMarkBottom = xMarkTop + intrinsicHeight;
DeletedIcon.setBounds(xMarkLeft, xMarkTop, xMarkRight, xMarkBottom);
DeletedIcon.draw(c);
}
});
recyclerView.addItemDecoration(dividerItemDecoration);
recyclerView.addItemDecoration(new RecyclerView.ItemDecoration() {
// we want to cache this and not allocate anything repeatedly in the onDraw method
Drawable background;
boolean initiated;
private void init() {
background = new ColorDrawable(Utility.setColorByTheme(R.attr.favoriteButton, getActivity()));
initiated = true;
}
@Override
public void onDraw(Canvas c, RecyclerView parent, RecyclerView.State state) {
if (!initiated) {
init();
}
// only if animation is in progress
if (parent.getItemAnimator().isRunning()) {
// some items might be animating down and some items might be animating up to close the gap left by the removed item
// this is not exclusive, both movement can be happening at the same time
// to reproduce this leave just enough items so the first one and the last one would be just a little off screen
// then remove one from the middle
// find first child with translationY > 0
// and last one with translationY < 0
// we're after a rect that is not covered in recycler-view views at this point in time
View lastViewComingDown = null;
View firstViewComingUp = null;
// this is fixed
int left = 0;
int right = parent.getWidth();
// this we need to find out
int top = 0;
int bottom = 0;
// find relevant translating views
int childCount = parent.getLayoutManager().getChildCount();
for (int i = 0; i < childCount; i++) {
View child = parent.getLayoutManager().getChildAt(i);
if (child.getTranslationY() < 0) {
// view is coming down
lastViewComingDown = child;
} else if (child.getTranslationY() > 0) {
// view is coming up
if (firstViewComingUp == null) {
firstViewComingUp = child;
}
}
}
if (lastViewComingDown != null && firstViewComingUp != null) {
// views are coming down AND going up to fill the void
top = lastViewComingDown.getBottom() + (int) lastViewComingDown.getTranslationY();
bottom = firstViewComingUp.getTop() + (int) firstViewComingUp.getTranslationY();
} else if (lastViewComingDown != null) {
// views are going down to fill the void
top = lastViewComingDown.getBottom() + (int) lastViewComingDown.getTranslationY();
bottom = lastViewComingDown.getBottom();
} else if (firstViewComingUp != null) {
// views are coming up to fill the void
top = firstViewComingUp.getTop();
bottom = firstViewComingUp.getTop() + (int) firstViewComingUp.getTranslationY();
}
background.setBounds(left, top, right, bottom);
background.draw(c);
}
super.onDraw(c, parent, state);
}
});
recyclerView.setLayoutManager(linearLayoutManager);
recyclerView.setItemAnimator(defaultItemAnimator);
touchHelper.attachToRecyclerView(recyclerView);
return view;
}
public static ArrayList<Pair<String, String>> loadDeserializedFavs(Context context) {
UtilitySharedPrefs.loadFavs(context);
ArrayList<String> SerializedFavs = new ArrayList<>(MainActivity.FAVORITES);
ArrayList<Pair<String, String>> DeserializedFavs = new ArrayList<>();
Gson gson = new Gson();
for (String element : SerializedFavs) {
SayItPair pair = gson.fromJson(element, SayItPair.class);
DeserializedFavs.add(pair);
}
Collections.sort(DeserializedFavs, new Comparator<Pair<String, String>>() {
@Override
public int compare(Pair<String, String> pair1, Pair<String, String> pair2) {
return pair1.first.compareTo(pair2.first);
}
});
return DeserializedFavs;
}
/*public void startTutorialPlayActivity(FavoritesAdapter.ViewHolder holder) {
MainActivity.showCaseFragmentView = new MaterialShowcaseView.Builder(getActivity())
.setTarget(holder.wordTextView)
.setDismissText(getString(R.string.showcase_str_btn_5))
.setContentText(getString(R.string.showcase_str_5))
.setDelay(100) // optional but starting animations immediately in onCreate can make them choppy
.singleUse(MainActivity.id_showcase_fragments) // provide a unique ID used to ensure it is only shown once
.setDismissOnTouch(true)
.withoutShape()
.show();
}*/
public boolean isVolumeMuted() {
int currentVolume = audio.getStreamVolume(AudioManager.STREAM_MUSIC);
return currentVolume == 0;
}
} |
. The "triple combination" review system provides an opportunity for the transformation of human use experience into new Chinese drugs. However, there are some methodological and technical limitations in the assessment of human experience. Hence, the efficacy and safety evaluation methods should be established in accordance with the characteristics of Chinese herbs. This study summarized some evidence-based methodology to promote the transformation of human use experience to new Chinese drugs, mainly including the individualized pragmatic randomized controlled trial(RCT), cluster RCT, single-case RCT, single arm RCT with objective performance criteria, and partially nested RCT. As the real world data can be used to support the transformation of human experience, attention should be paid to convenient and efficient collection of data, prudent selection of design types, and adoption of appropriate ana-lysis methods to deal with confounding bias, including multi-factor regression model and propensity score. The newly proposed mixed research method can also be utilized to assess the human use experience, which is suitable for mining the theory of traditional Chinese medicine(TCM) and expert experience from different aspects. Meanwhile, considering the study design requirements and TCM cha-racteristics, this study put forward the common problems and solutions in the development of new Chinese drugs based on human use experience, including how to select the feasible outcome indicators, how to collect prescription data in the case of herb and dosage adjustment, and how to evaluate the comprehensive effectiveness of TCM from the perspective of "combination of disease and syndrome". |
The research, carried out by a team of scientists from the University of Oxford (UK) and the University of Queensland (Australia), found that archerfish were able to learn and recognise faces with a high degree of accuracy – an impressive feat, given this task requires sophisticated visual recognition capabilities.
The study is published in the journal Scientific Reports.
First author Dr Cait Newport, Marie Curie Research Fellow in the Department of Zoology at Oxford University, said: 'Being able to distinguish between a large number of human faces is a surprisingly difficult task, mainly due to the fact that all human faces share the same basic features. All faces have two eyes above a nose and mouth, therefore to tell people apart we must be able to identify subtle differences in their features. If you consider the similarities in appearance between some family members, this task can be very difficult indeed.
'It has been hypothesised that this task is so difficult that it can only be accomplished by primates, which have a large and complex brain. The fact that the human brain has a specialised region used for recognising human faces suggests that there may be something special about faces themselves. To test this idea, we wanted to determine if another animal with a smaller and simpler brain, and with no evolutionary need to recognise human faces, was still able to do so.'
">Video of Study reveals archerfish can recognize human faces
The researchers found that fish, which lack the sophisticated visual cortex of primates, are nevertheless capable of discriminating one face from up to 44 new faces. The research provides evidence that fish (vertebrates lacking a major part of the brain called the neocortex) have impressive visual discrimination abilities.
In the study, archerfish – a species of tropical fish well known for its ability to spit jets of water to knock down aerial prey – were presented with two images of human faces and trained to choose one of them using their jets. The fish were then presented with the learned face and a series of new faces and were able to correctly choose the face they had initially learned to recognise. They were able to do this task even when more obvious features, such as head shape and colour, were removed from the images.
The fish were highly accurate when selecting the correct face, reaching an average peak performance of 81% in the first experiment (picking the previously learned face from 44 new faces) and 86% in the second experiment (in which facial features such as brightness and colour were standardised).
Dr Newport said: 'Fish have a simpler brain than humans and entirely lack the section of the brain that humans use for recognising faces. Despite this, many fish demonstrate impressive visual behaviours and therefore make the perfect subjects to test whether simple brains can complete complicated tasks.
'Archerfish are a species of tropical freshwater fish that spit a jet of water from their mouth to knock down insects in branches above the water. We positioned a computer monitor that showed images of human faces above the aquariums and trained them to spit at a particular face. Once the fish had learned to recognise a face, we then showed them the same face, as well as a series of new ones.
'In all cases, the fish continued to spit at the face they had been trained to recognise, proving that they were capable of telling the two apart. Even when we did this with faces that were potentially more difficult because they were in black and white and the head shape was standardised, the fish were still capable of finding the face they were trained to recognise.
'The fact that archerfish can learn this task suggests that complicated brains are not necessarily needed to recognise human faces. Humans may have special facial recognition brain structures so that they can process a large number of faces very quickly or under a wide range of viewing conditions.'
Human facial recognition has previously been demonstrated in birds. However, unlike fish, they are now known to possess neocortex-like structures. Additionally, fish are unlikely to have evolved the ability to distinguish between human faces. |
package net.ninjacat.smooth.iterators;
import net.ninjacat.smooth.collections.Collect;
import net.ninjacat.smooth.functions.Func;
import net.ninjacat.smooth.functions.Function2;
import net.ninjacat.smooth.functions.Predicate;
import net.ninjacat.smooth.functions.Promise;
import org.junit.Test;
import java.util.*;
import static net.ninjacat.smooth.iterators.IterFixtures.*;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertThat;
public class LazyIterTest {
@Test
public void toListShouldReturnOriginalList() throws Exception {
List<String> original = Arrays.asList("1", "1", "Last");
List<String> result = LazyIter.of(original).toList();
assertThat(result, is(original));
}
@Test
public void toSetShouldReturnOnlyUniqueElements() throws Exception {
List<String> original = Arrays.asList("1", "1", "Last");
Set<String> result = LazyIter.of(original).toSet();
assertThat(result, is(Collect.setOf("1", "Last")));
}
@Test
public void mapShouldTransformValues() throws Exception {
LazyIter<Integer> iter = LazyIter.of(1, 2, 3);
Iterable<Integer> transformed = iter.map(new Func<Integer, Integer>() {
@Override
public Integer apply(Integer integer) {
return integer * 2;
}
});
Iterator<Integer> iterator = transformed.iterator();
verifyNext(iterator, 2);
verifyNext(iterator, 4);
verifyNext(iterator, 6);
verifyNoNext(iterator);
}
@Test
public void filterShouldReturnOddValues() throws Exception {
LazyIter<Integer> iter = LazyIter.of(1, 2, 3);
Iterable<Integer> transformed = iter.filter(new Predicate<Integer>() {
@Override
public boolean matches(Integer integer) {
return integer % 2 != 0;
}
});
Iterator<Integer> iterator = transformed.iterator();
verifyNext(iterator, 1);
verifyNext(iterator, 3);
verifyNoNext(iterator);
}
@Test
public void reduceShouldFoldLeft() throws Exception {
LazyIter<Integer> iter = LazyIter.of(1, 2, 3, 4);
Promise<Integer> result = iter.reduce(0, new Function2<Integer, Integer, Integer>() {
@Override
public Integer apply(Integer integer, Integer integer2) {
return integer + integer2;
}
});
assertThat(result.get(), is(10));
}
@Test
public void reduceShouldBeLazy() throws Exception {
LazyIter<Integer> iter = LazyIter.of(1, 2);
final SideEffect sideEffect = new SideEffect();
iter.reduce(0, new Function2<Integer, Integer, Integer>() {
@Override
public Integer apply(Integer integer, Integer integer2) {
sideEffect.sideEffect();
return 0;
}
});
assertThat(sideEffect.hasSideEffects(), is(false));
}
@Test
public void findShouldBeAbleToLocateElement() throws Exception {
LazyIter<Integer> iter = LazyIter.of(1, 2, 3, 4);
Promise<Integer> result = iter.find(new Predicate<Integer>() {
@Override
public boolean matches(Integer integer) {
return integer.equals(3);
}
}, -1);
assertThat(result.get(), is(3));
}
@Test
public void findShouldReturnDefaultValueIfElementNotFound() throws Exception {
LazyIter<Integer> iter = LazyIter.of(1, 2, 3, 4);
Promise<Integer> result = iter.find(new Predicate<Integer>() {
@Override
public boolean matches(Integer integer) {
return integer.equals(5);
}
}, -1);
assertThat(result.get(), is(-1));
}
@Test
public void findShouldBeLazy() throws Exception {
LazyIter<Integer> iter = LazyIter.of(1, 2);
final SideEffect sideEffect = new SideEffect();
iter.find(new Predicate<Integer>() {
@Override
public boolean matches(Integer integer) {
sideEffect.sideEffect();
return integer.equals(3);
}
}, -1);
assertThat(sideEffect.hasSideEffects(), is(false));
}
@Test
public void headShouldReturnFirstElement() throws Exception {
LazyIter<Integer> iter = LazyIter.of(Arrays.asList(1, 2));
assertThat(iter.head(), is(1));
}
@Test(expected = NoSuchElementException.class)
public void headShouldFailOnEmptyCollection() throws Exception {
LazyIter<Integer> iter = LazyIter.of(new ArrayList<Integer>());
iter.head();
}
@Test
public void tailShouldReturnAllButFirstElement() throws Exception {
LazyIter<Integer> iter = LazyIter.of(Arrays.asList(1, 2, 3));
Iterator<Integer> tail = iter.tail().iterator();
verifyNext(tail, 2);
verifyNext(tail, 3);
verifyNoNext(tail);
}
@Test
public void tailShouldReturnEmptyIterableForOneItemCollection() throws Exception {
LazyIter<Integer> iter = LazyIter.of(Arrays.asList(1));
Iterator<Integer> tail = iter.tail().iterator();
verifyNoNext(tail);
}
@Test(expected = NoSuchElementException.class)
public void tailShouldFailOnEmptyCollection() throws Exception {
LazyIter<Integer> iter = LazyIter.of(new ArrayList<Integer>());
iter.tail();
}
@Test
public void anyShouldReturnTrueIfAnyMatchingElementFound() throws Exception {
LazyIter<String> iter = LazyIter.of("First", "Middle", "Last");
Promise<Boolean> promise = iter.any(new Predicate<String>() {
@Override
public boolean matches(String o) {
return o.startsWith("M");
}
});
assertThat(promise.get(), is(true));
}
@Test
public void anyShouldReturnFalseIfNoMatchingElementFound() throws Exception {
LazyIter<String> iter = LazyIter.of("First", "Middle", "Last");
Promise<Boolean> promise = iter.any(new Predicate<String>() {
@Override
public boolean matches(String o) {
return o.startsWith("S");
}
});
assertThat(promise.get(), is(false));
}
@Test
public void anyShouldBeLazy() throws Exception {
LazyIter<String> iter = LazyIter.of("First", "Middle", "Last");
final SideEffect sideEffect = new SideEffect();
iter.any(new Predicate<String>() {
@Override
public boolean matches(String o) {
sideEffect.sideEffect();
return o.startsWith("M");
}
});
assertThat(sideEffect.hasSideEffects(), is(false));
}
@Test
public void allShouldReturnTrueIfAllElementsMatchPredicate() throws Exception {
LazyIter<String> iter = LazyIter.of("Mary", "Molly", "Maria");
Promise<Boolean> promise = iter.all(new Predicate<String>() {
@Override
public boolean matches(String o) {
return o.startsWith("M");
}
});
assertThat(promise.get(), is(true));
}
@Test
public void allShouldReturnFalseIfAtLeastOneElementDoesNotMatchPredicate() throws Exception {
LazyIter<String> iter = LazyIter.of("Mary", "Molly", "Jenny");
Promise<Boolean> promise = iter.all(new Predicate<String>() {
@Override
public boolean matches(String o) {
return o.startsWith("M");
}
});
assertThat(promise.get(), is(false));
}
@Test
public void allShouldBeLazy() throws Exception {
LazyIter<String> iter = LazyIter.of("First", "Middle", "Last");
final SideEffect sideEffect = new SideEffect();
iter.all(new Predicate<String>() {
@Override
public boolean matches(String o) {
sideEffect.sideEffect();
return o.startsWith("M");
}
});
assertThat(sideEffect.hasSideEffects(), is(false));
}
}
|
/*******************************************************************************
* Copyright (c) 2006, 2019 Mountainminds GmbH & Co. KG and Contributors
* This program and the accompanying materials are made available under
* the terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
*
* Contributors:
* <NAME> - initial API and implementation
*
* Adapted by <NAME>
*
******************************************************************************/
package ru.capralow.dt.coverage.internal.core;
import org.eclipse.osgi.util.NLS;
/**
* Text messages for the core plug-in.
*/
public class CoreMessages extends NLS {
private static final String BUNDLE_NAME = "ru.capralow.dt.coverage.internal.core.coremessages";//$NON-NLS-1$
public static String LaunchSessionDescription_value;
public static String Launching_task;
public static String AnalyzingCoverageSession_task;
public static String ExportingSession_task;
public static String ImportingSession_task;
public static String MergingCoverageSessions_task;
public static String StatusNO_LOCAL_AGENTJAR_ERROR_message;
public static String StatusSESSION_LOAD_ERROR_message;
public static String StatusUNKOWN_LAUNCH_TYPE_ERROR_message;
public static String StatusMERGE_SESSIONS_ERROR_message;
public static String StatusEXEC_FILE_CREATE_ERROR_message;
public static String StatusEXEC_FILE_READ_ERROR_message;
public static String StatusAGENT_CONNECT_ERROR_message;
public static String StatusBUNDLE_ANALYSIS_ERROR_message;
public static String StatusEXPORT_ERROR_message;
public static String StatusAGENTSERVER_START_ERROR_message;
public static String StatusAGENTSERVER_STOP_ERROR_message;
public static String StatusEXECDATA_DUMP_ERROR_message;
public static String StatusDUMP_REQUEST_ERROR_message;
public static String StatusNO_COVERAGE_DATA_ERROR_message;
public static String ExportFormatHTML_value;
public static String ExportFormatHTMLZIP_value;
public static String ExportFormatXML_value;
public static String ExportFormatCSV_value;
public static String ExportFormatEXEC_value;
public static String Failed_to_create_injector_for_0;
static {
NLS.initializeMessages(BUNDLE_NAME, CoreMessages.class);
}
}
|
Iterative learning control under parameter uncertainty and failures

This paper develops new results on the design of iterative learning control schemes, using a repetitive process setting for analysis. Iterative learning control is a technique for controlling systems that are required to repeat the same operation over a finite duration, known as the trial duration or trial length; information from previous executions is used to update the control input for the next execution and thereby sequentially improve performance. This paper considers the design of iterative learning control laws for plants modeled by linear discrete systems with uncertain parameters and possible failures. Using a Lyapunov function approach, both state and output feedback based schemes are developed.
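To make the basic mechanism concrete, here is a minimal sketch of a first-order ILC update u_{k+1} = u_k + L*e_k on a simple discrete plant; the plant, learning gain, and trial length are illustrative assumptions, not the uncertain, failure-prone model analyzed in the paper:

import numpy as np

# Plant: y[t] = 0.9*y[t-1] + u[t], repeated over trials of fixed length N.
# Between trials, the input is updated from the previous tracking error.
N, trials, L = 50, 30, 0.5
ref = np.sin(np.linspace(0.0, 2.0 * np.pi, N))
u = np.zeros(N)
for k in range(trials):
    y = np.zeros(N)
    for t in range(1, N):
        y[t] = 0.9 * y[t - 1] + u[t]
    e = ref - y
    u = u + L * e  # first-order learning update between trials
print(f"RMS tracking error after {trials} trials: {np.sqrt(np.mean(e ** 2)):.4f}")
|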
Ethical implications of using biobanks and population databases for genetic suicide research

This article provides a review of the ethical considerations that drive research policy and practice related to the genetic study of suicide. As the tenth leading cause of death worldwide, suicide constitutes a substantial public health concern. Biometrical studies and population-based molecular genetic studies provide compelling evidence of the utility of investigating genetic underpinnings of suicide. International, federal, and institutional policies regulating research are explored through the lenses of the ethical principles of autonomy, beneficence, nonmaleficence, and justice. Trapped between the Common Rule's definition of human subjects and the Health Insurance Portability and Accountability Act's protected information, suicide decedent data occupy an ethical gray area fraught with jurisdictional, legal, and social implications. Two avenues of research, biobanks and psychological autopsies, provide tangible applications for the ethical principles, examining the risks to participants and their families. Additionally, studies surveying public opinion about research methods, especially broad consent, are explored. Our approach of applying the four ethical principles to policy, sample collection, data storage, and secondary research applications can also be applied to genetic research with other populations. We conclude that broad consent for secondary research, as well as next-of-kin consent at the time of autopsy, serves to satisfy privacy and confidentiality under the ethical principle of autonomy. We recommend ongoing ethical evaluation of research policy and practice. |
Two couples hid more than $1.5 million each while collecting tens of thousands of dollars in Medicaid and other benefits as part of an elaborate scheme in Lakewood, according to criminal complaints filed against them by the FBI.
Mordechai and Rachel Sorotzkin were arrested by agents on Monday along with Yocheved and Shimon Nussbaum on federal criminal complaints, since unsealed, that accuse them of stealing government funds from a variety of federal benefit programs, including Medicaid.
The Sorotzkins and Nussbaums were charged as a result of what investigators described as a wide-ranging probe of benefits fraud in the Ocean County community that also led county prosecutors to bring state charges against two other couples, including Rabbi Zalmen Sorotzkin — Mordechai's brother — and his wife Tzipporah.
Here's what court documents say they claimed they needed, but were actually making: |
package axo.features.osm.model;
import java.io.Serializable;
import java.util.AbstractMap;
import java.util.AbstractSet;
import java.util.Collections;
import java.util.Iterator;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
public abstract class OsmPrimitive implements Serializable {
private static final long serialVersionUID = -1492421473520577127L;
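// Key/value pairs are stored interleaved in a single flat array
// ([key0, value0, key1, value1, ...]); null when there are no pairs.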
private final String[] kvps;
OsmPrimitive (final Map<String, String> kvps) {
if (kvps == null || kvps.isEmpty ()) {
this.kvps = null;
} else {
this.kvps = new String[kvps.size () * 2];
int i = 0;
for (final Map.Entry<String, String> entry: kvps.entrySet ()) {
this.kvps[i ++] = entry.getKey ();
this.kvps[i ++] = entry.getValue ();
}
}
}
public Map<String, String> getKvps () {
return new KvpMap (new KvpSet (kvps));
}
private final static class KvpSet extends AbstractSet<Map.Entry<String, String>> {
private final String[] kvps;
public KvpSet (final String[] kvps) {
this.kvps = kvps;
}
@Override
public Iterator<Entry<String, String>> iterator () {
if (kvps == null || kvps.length == 0) {
return Collections.<Entry<String, String>>emptyList ().iterator ();
}
return new Iterator<Map.Entry<String,String>> () {
private int i = 0;
@Override
public boolean hasNext () {
return i < kvps.length;
}
@Override
public Entry<String, String> next () {
final String key = kvps[i ++];
final String value = kvps[i ++];
return new Entry<String, String> () {
@Override
public String getKey () {
return key;
}
@Override
public String getValue () {
return value;
}
@Override
public String setValue (String value) {
throw new UnsupportedOperationException ();
}
};
}
};
}
@Override
public int size() {
return kvps == null ? 0 : kvps.length / 2;
}
}
private final static class KvpMap extends AbstractMap<String, String> {
private final KvpSet kvpSet;
public KvpMap (final KvpSet kvpSet) {
this.kvpSet = kvpSet;
}
@Override
public Set<Map.Entry<String, String>> entrySet() {
return kvpSet;
}
}
}
|
/*-
* ============LICENSE_START=======================================================
* ONAP - SO
* ================================================================================
* Copyright (C) 2017 AT&T Intellectual Property. All rights reserved.
* ================================================================================
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* ============LICENSE_END=========================================================
*/
package org.onap.so.adapters.valet.beans;
import java.io.Serializable;
import java.util.Objects;
import com.fasterxml.jackson.annotation.JsonProperty;
/*
* This class represents the body of a Rollback request on a Valet Placement API call
*/
public class ValetRollbackRequest implements Serializable {
private static final long serialVersionUID = 768026109321305392L;
@JsonProperty("stack_id")
private String stackId;
@JsonProperty("suppress_rollback")
private Boolean suppressRollback = false;
@JsonProperty("error_message")
private String errorMessage;
public ValetRollbackRequest() {
super();
}
public String getStackId() {
return this.stackId;
}
public void setStackId(String stackId) {
this.stackId = stackId;
}
public Boolean getSuppressRollback() {
return this.suppressRollback;
}
public void setSuppressRollback(Boolean suppressRollback) {
this.suppressRollback = suppressRollback;
}
public String getErrorMessage() {
return this.errorMessage;
}
public void setErrorMessage(String errorMessage) {
this.errorMessage = errorMessage;
}
@Override
public int hashCode() {
return Objects.hash(stackId, suppressRollback, errorMessage);
}
@Override
public boolean equals(Object o) {
if (o == this)
return true;
if (!(o instanceof ValetRollbackRequest)) {
return false;
}
ValetRollbackRequest vrr = (ValetRollbackRequest) o;
return Objects.equals(stackId, vrr.stackId)
&& Objects.equals(suppressRollback, vrr.suppressRollback)
&& Objects.equals(errorMessage, vrr.errorMessage);
}
}
|
// Start is called when the manager starts up.
// We ensure that stale jobs are marked as failed so that there is room
// for new jobs.
func (s *Store) Start(b jobqueue.StartupBehaviour) error {
s.stmtOnce.Do(s.initStmt)
if b == jobqueue.MarkAsFailed {
ctx := context.Background()
err := internal.RunInTxWithRetry(ctx, s.db, func(ctx context.Context, tx *sql.Tx) error {
_, err := tx.ExecContext(
ctx,
`UPDATE jobqueue_jobs SET state = ?, completed = ? WHERE state = ?`,
jobqueue.Failed,
time.Now().UnixNano(),
jobqueue.Working,
)
if err != nil {
return err
}
return nil
}, func(err error) bool {
return internal.IsDeadlock(err)
})
if err != nil {
return s.wrapError(err)
}
}
return nil
} |
A Thanksgiving Service for the life of Mr. Kingsley Ernest Nelson will be held at St Mark's Anglican Church, Panton Street, Golden Square, on WEDNESDAY (April 10) at 2 pm, followed by a burial at the White Hills Monumental Cemetery.
Donations can be made in memory of Kingsley to the Peter McCallum Cancer Foundation. Envelopes will be available at the Church. |
#
# Autogenerated by Thrift
#
# DO NOT EDIT
# @generated
#
from thrift.py3lite.sync_client import SyncClient as _fbthrift_py3lite_SyncClient
from thrift.py3lite.async_client import AsyncClient as _fbthrift_py3lite_AsyncClient
import thrift.py3lite.exceptions as _fbthrift_py3lite_exceptions
import my.namespacing.test.hsmodule.lite_types as _my_namespacing_test_hsmodule_lite_types
class HsTestService:
class Sync(_fbthrift_py3lite_SyncClient):
def __init__(self, channel):
super().__init__(channel)
def init(
self,
int1
):
resp = self._send_request(
"HsTestService",
"init",
_my_namespacing_test_hsmodule_lite_types._fbthrift_HsTestService_init_args(
int1=int1,),
_my_namespacing_test_hsmodule_lite_types._fbthrift_HsTestService_init_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
class Async(_fbthrift_py3lite_AsyncClient):
def __init__(self):
super().__init__()
async def init(
self,
int1
):
resp = await self._send_request(
"HsTestService",
"init",
_my_namespacing_test_hsmodule_lite_types._fbthrift_HsTestService_init_args(
int1=int1,),
_my_namespacing_test_hsmodule_lite_types._fbthrift_HsTestService_init_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
|