#include <catch2/catch.hpp>
#include <nirtree/nirtree.h>

TEST_CASE("NIRTree: testPrefixConsistency") {
    auto tree = nirtree::NIRTree(2, 3);

    tree.insert(Point(-14.5, -13.0));
    REQUIRE(Point(-14.5, -13.0) == tree.search(Point(-14.5, -13.0)).front());
    tree.insert(Point(-14.0, -13.3));
    REQUIRE(Point(-14.0, -13.3) == tree.search(Point(-14.0, -13.3)).front());
    tree.insert(Point(-12.5, -14.5));
    REQUIRE(Point(-12.5, -14.5) == tree.search(Point(-12.5, -14.5)).front());
    tree.insert(Point(-15.0, -15.0));
    REQUIRE(Point(-15.0, -15.0) == tree.search(Point(-15.0, -15.0)).front());
    tree.insert(Point(-10.0, -2.0));
    REQUIRE(Point(-10.0, -2.0) == tree.search(Point(-10.0, -2.0)).front());
    tree.insert(Point(-11.0, -3.0));
    REQUIRE(Point(-11.0, -3.0) == tree.search(Point(-11.0, -3.0)).front());
    tree.insert(Point(-9.0, -3.1));
    REQUIRE(Point(-9.0, -3.1) == tree.search(Point(-9.0, -3.1)).front());
    tree.insert(Point(-12.5, -14.5));
    REQUIRE(Point(-12.5, -14.5) == tree.search(Point(-12.5, -14.5)).front());
    tree.insert(Point(-7.0, -3.7));
    REQUIRE(Point(-7.0, -3.7) == tree.search(Point(-7.0, -3.7)).front());
    tree.insert(Point(-10.1, -5.0));
    REQUIRE(Point(-10.1, -5.0) == tree.search(Point(-10.1, -5.0)).front());
    tree.insert(Point(-12.0, -3.4));
    REQUIRE(Point(-12.0, -3.4) == tree.search(Point(-12.0, -3.4)).front());
}
<reponame>ZackDowning/NetInventory from exceptions import NoPhoneReportFound from net_async import multithread from openpyxl import Workbook from openpyxl.worksheet.table import Table, TableStyleInfo from datetime import datetime import re class CdpParser: """ Parses outputs of commands: 'show cdp neighbor', 'show interface switchport', and 'show mac address-table'. Attributes: phones = []\n routers_switches = []\n waps = []\n others = []\n Dictionary format within lists: { 'hostname',\n 'ip_address',\n 'model',\n 'software_version',\n 'neighbor': { (on router_switch, intfs are not in 'neighbor') 'hostname',\n 'ip_address',\n 'remote_intf', (neighbor interface)\n 'local_intf', (local to device, not on wap or phone)\n }\n 'mac_addr', (phone only)\n 'voice_vlan', (phone only) } """ def __init__(self, cdp_neighbors, switchports, mac_addrs, session): nxos = False try: _ = cdp_neighbors[0]['destination_host'] hostname_s = 'destination_host' version_s = 'software_version' mgmt_ip_s = 'management_ip' except KeyError: nxos = True hostname_s = 'dest_host' version_s = 'version' mgmt_ip_s = 'mgmt_ip' self.phones = [] self.routers_switches = [] self.waps = [] self.others = [] def phone_parse(neighbor): """Returns dictionary for CDP neighbor phone""" mgmt_ip = neighbor[mgmt_ip_s] hostname = neighbor[hostname_s].split('.')[0] if nxos: sysname = neighbor['sysname'] if sysname != '': hostname = sysname if mgmt_ip == '': mgmt_ip = neighbor['interface_ip'] l_intf = neighbor['local_port'] intf = re.findall(r'.{2}', l_intf)[0] + re.findall(r'\d.+', l_intf)[0] macreg = re.findall(r'.{4}', hostname.replace('SEP', '')) mac_address = f'{macreg[0]}.{macreg[1]}.{macreg[2]}'.lower() voice_vlan = 'None' software_version = neighbor[version_s].replace('.loads', '') platform = neighbor['platform'] for switchport in switchports: if switchport['interface'] == intf: for mac_addr in mac_addrs: if mac_addr['vlan'] == switchport['voice_vlan']: voice_vlan = mac_addr['vlan'] break break if platform.__contains__('Cisco IP Phone'): platform = neighbor['platform'].replace('Cisco IP Phone ', '') else: platform = neighbor['platform'] phone = { 'hostname': hostname, 'neighbor': { 'hostname': session.hostname, 'ip_address': session.ip_address, 'remote_intf': l_intf }, 'ip_address': mgmt_ip, 'mac_addr': mac_address, 'voice_vlan': voice_vlan, 'software_version': software_version, 'model': platform } self.phones.append(phone) def router_sw_parse(neighbor): """Returns dictionary for CDP neighbor router or switch""" mgmt_ip = neighbor[mgmt_ip_s] hostname = neighbor[hostname_s].split('.')[0] if hostname.__contains__('('): hostname = hostname.split('(')[0] if nxos: sysname = neighbor['sysname'] if sysname != '': hostname = sysname if mgmt_ip == '': mgmt_ip = neighbor['interface_ip'] software_version = neighbor[version_s] platform = neighbor['platform'] for software in software_version.split(','): if software.__contains__('Version'): software_version = software.split('Version')[1].split('REL')[0] if software_version.__contains__(':'): software_version = software_version.replace(': ', '') else: software_version = software_version.replace(' ', '') break if platform.__contains__('cisco '): platform = neighbor['platform'].replace('cisco ', '') elif platform.__contains__('Cisco '): platform = neighbor['platform'].replace('Cisco ', '') else: platform = neighbor['platform'] router_sw = { 'hostname': hostname, 'ip_address': mgmt_ip, 'remote_intf': neighbor['local_port'], 'local_intf': neighbor['remote_port'], 'software_version': software_version, 
'model': platform } self.routers_switches.append(router_sw) def wap_parse(neighbor): """Returns dictionary for CDP neighbor wireless access point""" mgmt_ip = neighbor[mgmt_ip_s] hostname = neighbor[hostname_s].split('.')[0] if nxos: sysname = neighbor['sysname'] if sysname != '': hostname = sysname if mgmt_ip == '': mgmt_ip = neighbor['interface_ip'] software_version = neighbor[version_s] platform = neighbor['platform'] for software in software_version.split(','): if software.__contains__('Version'): software_version = software.split('Version')[1] if software_version.__contains__(':'): software_version = software_version.replace(': ', '') else: software_version = software_version.replace(' ', '') break if platform.__contains__('cisco '): platform = neighbor['platform'].replace('cisco ', '') elif platform.__contains__('Cisco '): platform = neighbor['platform'].replace('Cisco ', '') else: platform = neighbor['platform'] ap = { 'hostname': hostname, 'ip_address': mgmt_ip, 'model': platform, 'neighbor': { 'hostname': session.hostname, 'ip_address': session.ip_address, 'remote_intf': neighbor['local_port'] }, 'software_version': software_version } self.waps.append(ap) def other_parse(neighbor): """Returns dictionary for CDP neighbor that isn't a phone, access point, router, or switch""" mgmt_ip = neighbor[mgmt_ip_s] hostname = neighbor[hostname_s].split('.')[0] if nxos: sysname = neighbor['sysname'] if sysname != '': hostname = sysname if mgmt_ip == '': mgmt_ip = neighbor['interface_ip'] software_version = neighbor[version_s] if software_version.__contains__(','): for software in software_version.split(','): if software.__contains__('Version'): software_version = software.split('Version')[1].split('REL')[0] if software_version.__contains__(':'): software_version = software_version.replace(': ', '') else: software_version = software_version.replace(' ', '') break elif software_version.__contains__('Version'): found_1 = False for x in software_version.split(' '): if x.__contains__('Version'): found_1 = True continue if found_1: software_version = x break elif software_version.__contains__('version'): found_1 = False for x in software_version.split(' '): if x.__contains__('version'): found_1 = True continue if found_1: software_version = x break platform = neighbor['platform'] if platform.__contains__('cisco '): platform = neighbor['platform'].replace('cisco ', '') elif platform.__contains__('Cisco '): platform = neighbor['platform'].replace('Cisco ', '') else: platform = neighbor['platform'] other = { 'hostname': hostname, 'ip_address': mgmt_ip, 'neighbor': { 'hostname': session.hostname, 'ip_address': session.ip_address, 'remote_intf': neighbor['local_port'], 'local_intf': neighbor['remote_port'] }, 'software_version': software_version, 'model': platform } self.others.append(other) def parse(n): """Given TEXTFSM CDP neighbor, checks type of device and runs through corresponding parser function.""" capabilities = n['capabilities'] if n['platform'].__contains__('IP Phone') or capabilities.__contains__('Phone'): phone_parse(n) elif capabilities.__contains__('Router') and capabilities.__contains__('Source-Route-Bridge') or \ capabilities.__contains__('Switch'): router_sw_parse(n) elif capabilities.__contains__('Trans-Bridge'): wap_parse(n) else: other_parse(n) multithread(parse, cdp_neighbors) def cucm_export_parse(file): """Parses CUCM export of phones with fields 'Description', 'Device Name', and 'Directory Number 1' :returns: {'SEP000000000000': {'description', 'directory_number'}}""" phones = 
{} while True: try: with open(file) as phonelist_csv: for line in phonelist_csv: if not line.__contains__('Description,Device Name,Directory Number 1'): info = line.split(',') device_name = info[1] description = info[0] directory_number = info[2] phones[device_name.upper()] = { 'description': description, 'directory_number': directory_number } return phones except FileNotFoundError: raise NoPhoneReportFound('No phone report file found at provided location.') def output_to_spreadsheet(routers_switches, phones, aps, others, failed_devices, file_location): """Parses device lists and outputs to spreadsheet""" # Creates Excel workbook and worksheets wb = Workbook() routers_switches_ws = wb.active routers_switches_ws.title = 'Routers_Switches' phones_ws = wb.create_sheet('Phones') aps_ws = wb.create_sheet('APs') others_ws = wb.create_sheet('Others') failed_ws = wb.create_sheet('Failed') alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' # Checks if phones contain directory number and description from CUCM export merge if any('description' in phone for phone in phones): phone_string = 'CUCMPhone' else: phone_string = 'Phone' neighbor_count = 1 # Sets 'neighbor_count' to length of longest neighbor list in routers_switches dictionaries for rt_sw in routers_switches: if rt_sw['connection_attempt'] == 'Failed': if len(rt_sw['neighbors']) > neighbor_count: neighbor_count = len(rt_sw['neighbors']) def write_header(worksheet, device_type): """ :param device_type: 'RouterSwitch', 'Phone', 'CUCMPhone', 'WAP', 'Other', or 'Failed' :param worksheet: Device worksheet :return: int(header_length), list(header) """ header = ['Hostname', 'IP Address', 'Model', 'Software Version'] if device_type == 'RouterSwitch': header += ['Serial', 'Connection Type', 'ROMMON', 'Connection Attempt', 'Discovery Status'] for n in range(1, neighbor_count + 1): header += [f'Neighbor {n} Hostname', f'Neighbor {n} IP Address', f'Local Interface to Neighbor {n}', f'Neighbor {n} Interface'] elif device_type == 'Phone' or device_type == 'CUCMPhone': header += ['Voice VLAN', 'MAC Address', 'Switch Hostname', 'Switch IP Address', 'Switchport'] if device_type == 'CUCMPhone': header += ['Description', 'Main Directory Number'] elif device_type == 'WAP': header += ['Switch Hostname', 'Switch IP Address', 'Switchport'] elif device_type == 'Other': header += ['Neighbor Hostname', 'Neighbor IP Address', 'Local Interface to Neighbor', 'Neighbor Interface'] elif device_type == 'Failed': header = ['IP Address', 'Connection Type', 'Device Type', 'Connectivity', 'Authentication', 'Authorization', 'Discovery Status', 'Connection Exception'] worksheet.append(header) return len(header), header def write_to_sheet(device_list, worksheet, device_type): """ :param device_type: 'RouterSwitch', 'Phone', 'CUCMPhone', 'WAP', 'Other', or 'Failed' :param device_list: List of devices :param worksheet: Device worksheet :return: list(rows) """ rows = [] for device in device_list: if device_type != 'Failed': row = [device['hostname'], device['ip_address'], device['model'], device['software_version']] if device_type == 'RouterSwitch': if 'serial' in device: serial = device['serial'] connection_type = device['connection_type'] rommon = device['rommon'] else: serial = 'Unknown' connection_type = 'Unknown' rommon = 'Unknown' row += [serial, connection_type, rommon, device['connection_attempt'], device['discovery_status']] if device['connection_attempt'] == 'Failed': for neighbor in device['neighbors']: row += [neighbor['hostname'], neighbor['ip_address'], neighbor['local_intf'], 
neighbor['remote_intf']] if device_type == 'Phone' or device_type == 'CUCMPhone': neighbor = device['neighbor'] row += [device['voice_vlan'], device['mac_addr'], neighbor['hostname'], neighbor['ip_address'], neighbor['remote_intf']] if 'description' in device: row += [device['description'], device['directory_number']] if device_type == 'WAP' or device_type == 'Other': neighbor = device['neighbor'] row += [neighbor['hostname'], neighbor['ip_address'], neighbor['remote_intf']] if device_type == 'Other': row.append(neighbor['local_intf']) else: row = [device['ip_address'], device['connection_type'], device['device_type'], device['connectivity'], device['authentication'], device['authorization'], device['discovery_status'], device['exception']] worksheet.append(row) rows.append(row) return rows def complete_sheet(device_list, worksheet, device_type): """Completes workbook sheet""" column_num = len(device_list) + 1 header_out = write_header(worksheet, device_type) header = header_out[1] header_length = header_out[0] letter = header_length - 1 if letter > 25: column_letter = f'{alphabet[int(letter / 26) - 1]}{alphabet[letter % 26]}' else: column_letter = alphabet[letter] bottom_right_cell = f'{column_letter}{column_num}' rows = write_to_sheet(device_list, worksheet, device_type) # Creates table if there is data in table if len(device_list) != 0: table = Table(displayName=device_type, ref=f'A1:{bottom_right_cell}') style = TableStyleInfo(name='TableStyleMedium9', showFirstColumn=False, showLastColumn=False, showRowStripes=True, showColumnStripes=True) table.tableStyleInfo = style worksheet.add_table(table) # Sets column widths all_data = [header] all_data += rows column_widths = [] for row in all_data: for i, cell in enumerate(row): if len(column_widths) > i: if len(str(cell)) > column_widths[i]: column_widths[i] = len(str(cell)) else: column_widths += [len(str(cell))] for i, column_width in enumerate(column_widths): if i > 25: l1 = f'{alphabet[int(i / 26) - 1]}{alphabet[i % 26]}' else: l1 = alphabet[i] worksheet.column_dimensions[l1].width = column_width + 3 complete_sheet(routers_switches, routers_switches_ws, 'RouterSwitch') complete_sheet(phones, phones_ws, phone_string) complete_sheet(aps, aps_ws, 'WAP') complete_sheet(others, others_ws, 'Other') complete_sheet(failed_devices, failed_ws, 'Failed') # Saves workbook date_time = datetime.now().strftime('%m_%d_%Y-%H_%M_%S') wb.save(f'{file_location}/network_inventory-{date_time}-.xlsx')
Amazon's new Fire Phone does a lot you've never seen a smartphone do before. But that may not be enough to win over many new customers. For starters, there's the price. Amazon (AMZN)sells its Kindle and Kindle Fire tablets relatively cheaply in the hope of making money when customers use them to shop in its online store. The Fire Phone, however, starts at $199 with a two-year contract -- the same as the latest high-end phones from Apple (AAPL) and Samsung (SSNLF). That's too expensive for people who have been holding out on upgrading to smartphones for financial reasons -- they would be better off getting older iPhones or Android devices. The question, then, is what sets the Fire Phone apart from Apple's iPhone 5s and Samsung's Galaxy S5. First, and most obvious, is the "dynamic perspective" feature, which renders graphics in 3D using cameras mounted on the front of the phone that track users' head movements. This makes for beautiful home screen images, but it feels like little more than a gimmick at this point. The 3D-enabled maps app, for example, is nice to look at, but doesn't offer any compelling functions you wouldn't find on Google (GOOG) Maps. Independent software developers can tinker with the feature, so app makers may find some clever new ways to use 3D imaging. Game designers, for example, can build more immersive worlds that respond to players' gestures and head movements. More intriguing is the Firefly feature, which uses the phone's camera to recognize physical objects -- books, video games, food, household products and other items -- and gives you the option to buy all that stuff instantly on Amazon. You can also scan television shows and movies to get more information on them in real time. Firefly is an ingenious way for Amazon to make purchases on its site even more seamless, but whether that solves a big enough problem to convince many Apple and Samsung users to switch remains to be seen. Amazon is sweetening the deal in other ways. For example, every Fire Phone buyer gets a one-year subscription to Amazon Prime, worth $99. With Prime, users get free two-day shipping on Amazon products as well as access to streaming video and music services. There's also the Mayday feature, which gives access to customer service at any time in 15 seconds or less. That's a great way for customers to quickly solve problems and learn how to get the most out of their devices. A limitation for the device is that buyers won't be able choose their wireless provider -- the Fire Phone is, for now at least, being released exclusively with AT&T. The takeaway from all this? If you do a lot of shopping online and are in the market for a new handset, the Fire Phone is worth considering. But if you're happy with your iPhone 5s or Galaxy S5, there's no compelling reason to change.
Confronting the Challenges of Graphs and Networks Many current and emerging Department of Defense (DoD) challenges require the analysis of very large datasets containing millions, billions, or even trillions of entities. While the sheer number of entities presents a significant challenge by itself, the analysis task is further complicated by the fact that the activities, objects, or individuals of interest are commonly tightly embedded within large, noisy background data, have few or no individual distinguishing characteristics, and are changing rapidly. One way to address the challenge of identifying subtle events in large datasets is to consider not only individual entities and their attributes but also relationships between them. Considering relationships naturally leads to the analysis of graphs and networks. Simply, a graph is a mathematical representation of a set of entities (vertices) and their relationships (edges between those vertices). In this issue and other literature, the terms graph and network are often used interchangeably. One way to distinguish between those terms is to use the word graph when referring to the formal mathematical structure and the word network when referring to the specific instantiation. For example, a set of computers and the communications between them form a network. This network can be represented by a graph whose vertices represent the computers and whose edges represent the communication between computers. Graph analysis techniques can be used in a broad range of applications in which graphs and networks arise naturally (Figure 1). In the cyber security domain, graphs representing computer networks, such as the ones described in the previous paragraph and Figure 1a, can be analyzed to identify cyber threats. For intelligence, sur-New application needs, combined with the rapidly increasing sizes of datasets, are driving the development of a new field at the intersection of computer science, mathematics, signal processing, and the social sciences. Lincoln Laboratory has been pioneering research to address the challenges of using graphs and networks to exploit these vast databases. This issue of the Lincoln Laboratory Journal focuses on some of this innovative work.
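To make the vertex-and-edge picture concrete, here is a minimal sketch in Python; the host names and communication pairs are invented purely for illustration, and the degree count is just one simple example of the kind of per-vertex statistic that graph analysis starts from.

# Minimal sketch of representing a computer network as a graph.
# Host names and communication pairs are invented for illustration.
from collections import defaultdict

communications = [
    ("host-a", "host-b"),
    ("host-a", "host-c"),
    ("host-b", "host-d"),
    ("host-c", "host-d"),
]

# Adjacency-list representation: each vertex maps to the set of its neighbors.
graph = defaultdict(set)
for src, dst in communications:
    graph[src].add(dst)
    graph[dst].add(src)

# Degree of each vertex (number of distinct communication partners).
degrees = {vertex: len(neighbors) for vertex, neighbors in graph.items()}
print(degrees)  # {'host-a': 2, 'host-b': 2, 'host-c': 2, 'host-d': 2}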
Explaining vowel systems: dispersion theory vs natural selection Abstract We argue that the cross-linguistic distribution of vowel systems is best accounted for by grammar-external forces of learnability operating in tandem with cognitive constraints on phonological computation, as argued for other phonological phenomena by Blevins. On this view, the range of possible vowel systems is constrained only by what is computable and learnable; the range of attested vowel systems is a subset of this, constrained by relative learnability (Hale and Reiss 2000a, Hale and Reiss 2000b; Newmeyer 2005). A system that is easier to learn (e.g., one whose members are more dispersed in perceptual space) is predicted by our model to become more common cross-linguistically over evolutionary time than its less learnable competitors. This analysis efficiently accounts for both the typological patterns found in vowel systems and the existence of a non-trivial number of unnatural systems in the worlds languages. We compare this model with the leading forms of Dispersion Theory (notably Flemmings implementation in Optimality Theory), which seek to explain sound patterns in terms of interaction between conflicting functional constraints on maximization of perceptual contrast and minimization of articulatory effort. Dispersion Theory is shown to be unable to generate the attested range of vowel systems or predict their interesting properties, such as the centralization typically found in two-vowel systems and the quality of epenthetic segments.
We told you about the plans and simulations last May, but now a couple of bold Swiss adventurers just unveiled a prototype of Solar Impulse, a carbon fiber solar-powered airplane they plan to fly around the world in 2011. The 3000-pound aircraft will have a wingspan that's about the same as the Airbus A380, but instead of streaking around the globe at 560 mph, this one will poke along at a mere 40 mph. At that rate, it'll take four weeks for it to carry its single passenger all the way around the planet. Let's take a look at the details and more pics of the plane. The solar cells on the wings will suck up power for a maximum seven to eight hours a day because of the angle of the sun. The pilot won't have to worry about clouds, though, because the plane will cruise at 42,000 feet. After sunset, the plane's designers hope batteries will power the plane through the night. They admit that battery technology isn't efficient enough now for this to work, but insist that it will be possible by 2011. For now, they're planning to test-fly the prototype in 2009, with a smaller wingspan of 197 feet that flies for 36 hours at 27,000 feet. Will they make it? With $87.5 million backing up the project, who knows if they'll actually fly all the way around the world, but they'll probably have enough cash to at least mount an attempt. [Times UK, via Inhabitat]
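A rough back-of-the-envelope check of that four-week figure, assuming a roughly great-circle route flown nonstop at the quoted cruise speed: Earth's circumference is about 24,900 miles, and 24,900 miles / 40 mph ≈ 620 hours ≈ 26 days, which is just under four weeks.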
use crate::people::{Staff, StaffResult};
use crate::simulator::SimulationContext;
pub use chrono::prelude::{DateTime, Datelike, NaiveDate, Utc};

#[derive(Debug, PartialEq)]
pub enum StaffPosition {
    SportDirector,
    MainCoach,
    Coach,
    Physio,
}

#[derive(Debug, PartialEq)]
pub enum StaffStatus {
    Active,
    ExpiredContract,
}

#[derive(Debug)]
pub struct StaffClubContract {
    expired: NaiveDate,
    pub position: StaffPosition,
    pub status: StaffStatus,
}

impl StaffClubContract {
    pub fn new(expired: NaiveDate, position: StaffPosition, status: StaffStatus) -> Self {
        StaffClubContract {
            expired,
            position,
            status,
        }
    }

    /// A contract counts as expired once the simulation date has reached its expiry date.
    pub fn is_expired(&self, context: &SimulationContext) -> bool {
        context.date.date() >= self.expired
    }

    pub fn simulate(&mut self, context: &SimulationContext) {
        if context.check_contract_expiration() && self.is_expired(context) {
            self.status = StaffStatus::ExpiredContract;
        }
    }
}
package change import ( "testing" "github.com/stretchr/testify/assert" ) func TestParseAnnotations(t *testing.T) { testCases := []string{ "", "=", "test", "=test", "unit=", "unit=test", "unit=test=test", } wantKeys := []string{ "", "", "test", "", "unit", "unit", "unit", } wantValues := []string{ "", "", "", "test", "", "test", "test=test", } for i, test := range testCases { have := []string{test} got := ParseAnnotations(have) wantKey := wantKeys[i] wantValue := wantValues[i] value, ok := got[wantKey] assert.Equal(t, len(got), 1) assert.Truef(t, ok, "found key %q", wantKey) assert.Equal(t, value, wantValue, "got value") } }
Expression and regulation of mRNA coding for acidic and basic fibroblast growth factor and transforming growth factor alpha in cells derived from human skin. We investigated the regulation of mRNAs coding for acidic fibroblast growth factor (aFGF), basic fibroblast growth factor (bFGF), and transforming growth factor-alpha (TGF alpha) in cultures of human neonatal foreskin fibroblasts, keratinocytes, and melanocytes. Each cell type was propagated in an optimized serum-free medium. In rapidly growing fibroblasts, the addition of fetal bovine serum caused a modest induction of aFGF message within 2 h in conjunction with a concomitant elevation of bFGF transcripts. In these same cells, TGF alpha mRNA could not be detected in any experimental condition. In contrast, keratinocytes rapidly growing in the presence of epidermal growth factor (EGF) contained transcripts for TGF alpha that increased substantially when these cells were treated with serum. This observation suggests that factors present in serum can elevate the levels of TGF alpha mRNA beyond the levels already present in keratinocyte cultures growing in the presence of EGF. These same keratinocyte cultures had low to undetectable levels of bFGF or aFGF message, and the levels of these mRNAs were not affected by serum treatment. Treatment of keratinocytes proliferating in the presence of EGF with TGF beta for 48 h caused expression of bFGF mRNA in four of six independent cell strains. TGF beta-enhanced expression of bFGF mRNA occurred as early as 12-24 h after TGF beta exposure. TGF beta did not enhance the expression of mRNA for aFGF or TGF alpha in keratinocytes. Melanocytes failed to express detectable levels of mRNA coding for any of these growth factors in the presence of absence of TGF beta or serum.(ABSTRACT TRUNCATED AT 250 WORDS)
<gh_stars>0 #include <stdio.h> #include <string.h> #include <stdlib.h> #include <math.h> #include "tst.h" #include "dmcommon.h" FILE* fp; wavinfo* wi; dmarg* dminfo; void decodedm(FILE* fpi, wavinfo* wav, dmarg* dminfo, unsigned char* outfile); main(int argc, char** argv) { int i; unsigned char* outfile; printf("leonier's DM decoder\n"); if(argc<2) { printf("Usage: %s DM-file [wav-file]\n",argv[0]); exit(0); } else { outfile=malloc(strlen(argv[1])+5); strcpy(outfile,argv[1]); for(i=strlen(outfile); i>=0; i--) { if(*(outfile+i)=='.') { *(outfile+i)='\0'; break; } } strcat(outfile,".wav"); if(argc>=3) { if(strlen(argv[2])>strlen(outfile)) { free(outfile); outfile=malloc(strlen(argv[2])+1); if(!outfile) { printf("Memory Allocation Error!\n"); exit(0); } } strcpy(outfile, argv[2]); } } dminfo=malloc(sizeof(dmarg)); wi=malloc(sizeof(wavinfo)); if(!dminfo||!wi) { printf("Memory Allocation Error!\n"); exit(0); } fp=fopen(argv[1],"rb"); if(!fp) { printf("Error opening input file %s!\n", argv[1]); exit(0); } fread(dminfo,sizeof(dmarg),1,fp); if(dminfo->magicnum!=MAGICNUM) { printf("not a dm file!\n"); exit(0); } if(dminfo->delta<0x10000) printf("DM delta value %d\n",dminfo->delta); else { int deltal=dminfo->delta&0xffff; int deltar=(dminfo->delta-deltal)/0x10000; printf("DM delta value %d/%d\n",deltal,deltar); } if(dminfo->mode==MODE_TYPE1) printf("Channel Delta Mode\n"); if(dminfo->mode==MODE_TYPE2) printf("L+R L-R Mode\n"); fread(wi,sizeof(wavinfo),1,fp); printf("Sample rate %dHz, %d bits, %d channels\n",wi->srate,wi->bits,wi->channel); printf("Length %d:%d\n",(wi->samples/wi->srate)/60,(wi->samples/wi->srate)%60); decodedm(fp,wi,dminfo,outfile); fclose(fp); free(outfile); free(wi); free(dminfo); } void decodedm(FILE* fpi, wavinfo* wav, dmarg* dminfo, unsigned char* outfile) { FILE *fpo; unsigned char* head,* data; unsigned char samp[4]; int samp1l=0,samp1r=0; int sampl=0,sampr=0; int valuel=0,valuer=0; int delta,i; int wcount=0; unsigned int wbuf=0; unsigned int bmask=0x80000000; head=malloc(36); data=malloc(8); writeriffhead(head, 36+8+wav->datalen); writepcmfmt(head+12, wav->channel, wav->srate, wav->bits); writedatachunkhead(data, wav->channel, wav->bits, wav->samples); fpo=fopen(outfile,"wb"); if(!fpo) { printf("Open destination file failed!\n"); return; } fwrite(head, 36, 1, fpo); fwrite(data, 8, 1, fpo); free(head); free(data); memset(samp, 0, 4); fwrite(samp, 4, 1, fpo); switch(dminfo->mode) { int deltal, deltar; case MODE_TYPE0: if(dminfo->delta<65536) { deltal=deltar=dminfo->delta; } else { deltal=dminfo->delta&0xffff; deltar=(dminfo->delta-deltal)/0x10000; } for(i=0; i<wav->samples-1; i++) { if(!wcount) fread(&wbuf,sizeof(unsigned int),1,fpi); sampl=(wbuf&bmask)?1:0; bmask=bmask>>1; sampr=(wbuf&bmask)?1:0; bmask=bmask>>1; wcount+=2; if(wcount>31) { wcount=0; bmask=0x80000000; } sampl=dmp1bit(sampl,samp1l,deltal); sampr=dmp1bit(sampr,samp1r,deltar); samp[0]=sampl&0xff; samp[1]=(sampl&0xff00)>>8; samp[2]=sampr&0xff; samp[3]=(sampr&0xff00)>>8; fwrite(samp,4,1,fpo); samp1l=sampl; samp1r=sampr; } break; case MODE_TYPE1: for(i=0; i<(wav->samples-1)*2; i++) { if(!wcount) fread(&wbuf,sizeof(unsigned int),1,fpi); sampl=(wbuf&bmask)?1:0; bmask=bmask>>1; wcount++; if(wcount>31) { wcount=0; bmask=0x80000000; } sampl=dmp1bit(sampl,samp1l,dminfo->delta&0xffff); samp[0]=sampl&0xff; samp[1]=(sampl&0xff00)>>8; fwrite(samp,2,1,fpo); samp1l=sampl; } break; case MODE_TYPE2: for(i=0; i<wav->samples-1; i++) { if(!wcount) fread(&wbuf,sizeof(unsigned int),1,fpi); sampl=(wbuf&bmask)?1:0; bmask=bmask>>1; 
sampr=(wbuf&bmask)?1:0; bmask=bmask>>1; wcount+=2; if(wcount>31) { wcount=0; bmask=0x80000000; } sampl=dmp1bit(sampl,samp1l,dminfo->delta&0xffff); sampr=dmp1bit(sampr,samp1r,dminfo->delta&0xffff); valuel=(sampl+sampr); valuer=(sampr-sampl); samp[0]=valuel&0xff; samp[1]=(valuel&0xff00)>>8; samp[2]=valuer&0xff; samp[3]=(valuer&0xff00)>>8; fwrite(samp,4,1,fpo); samp1l=sampl; samp1r=sampr; } break; } fclose(fpo); }
Necessary and Terrible Moments May-June 2013 Together, Kate and I lift the mattress onto the bed. Afterwards I raise my hand to my chest, hoping to coax out the tension gathering there. Kate seems like she is about to say something but stops herself; instead, she falls back, star-fished, onto the mattress. I still can't figure out how to feel about her body, dreaming, vulnerable, on the ex-marital bed. Even more jarring is the thought of Kate having sex on it.
WASHINGTON — Federal Reserve officials, wrapping up a two-day policy meeting on Wednesday, are still thinking about raising the Fed’s benchmark interest rate in 2015, in part because they see strategic advantages in moving earlier than necessary. The Fed is widely expected to announce that it will continue for now to hold its benchmark rate near zero. The case for liftoff has weakened since the summer. The rest of the world is struggling and there are worrying signs of slower domestic growth. But Fed officials are unlikely to rule out an increase at their final meeting of the year, in December, in part because they are still waiting to see whether economic growth weakened in recent months, and in part because they want to start raising rates early so they can raise them slowly. “An advantage to beginning a little bit earlier is that we might have a more gradual path of rate increases,” the Fed’s chairwoman, Janet L. Yellen, told Congress this summer. She explained that moving slowly was prudent, while moving more quickly might be disruptive.
Photoacoustic Spectra of CdInGaS4 with a DC Electric Field Photoacoustic (PA) spectroscopy is used to study the heat generated by nonradiative processes of CdInGaS4 with an external dc electric field transverse to the light path in order to study the correlation between carrier diffusion effects and nonradiative processes in the wavelength range of 310 to 650 nm. Maxima are observed in PA spectra at 2.7 and 2.3 eV, the amplitudes of which values are proportional to the square of the applied voltage. The peak at 2.7 eV shifted toward higher energy when the modulation frequency increased. That at 2.3 eV did not shift. Therefore, there is a possibility that the peaks at 2.7 and 2.3 eV are related to the electron diffusion length and the impurity or deep levels of CdInGaS4, respectively. PA phase spectra showed minima at 2.4 eV, which shifted toward lower energy when the dc electric field strength increased.
import java.util.ArrayList;
import java.util.Scanner;

public class Main {
    public static void main(String[] args) {
        Scanner sc = new Scanner(System.in);
        int a = sc.nextInt();
        int b = sc.nextInt();
        int c = sc.nextInt();
        int d = sc.nextInt();
        int e = sc.nextInt();
        Dishes dishes = new Dishes(a, b, c, d, e);
        System.out.println(dishes.getTotalTime());
    }
}

class Dishes {
    ArrayList<Integer> times = new ArrayList<Integer>();
    ArrayList<Integer> losses = new ArrayList<>();

    Dishes(int... times) {
        for (int t : times) {
            this.times.add(t);
            // Time lost waiting for the next multiple of 10 after this dish.
            int loss = (10 - t % 10) % 10;
            losses.add(loss);
        }
    }

    int getTotalTime() {
        int totalTime = 0;
        int maxLoss = 0;
        for (int time : times) {
            totalTime += time;
        }
        for (int loss : losses) {
            totalTime += loss;
            if (loss > maxLoss) {
                maxLoss = loss;
            }
        }
        // The dish ordered last incurs no waiting, so drop the largest loss.
        totalTime -= maxLoss;
        return totalTime;
    }
}
Dodgers catcher A.J. Ellis offered his analysis throughout the World Series. Ellis, 32, recently completed his second full season as a starter for the Dodgers by batting .333 in a National League division series against the Atlanta Braves and .316 in the NL Championship Series against the St. Louis Cardinals. Ellis is familiar with the Boston Red Sox, a team the Dodgers faced during the regular season.My favorite thing about our schedule is the opportunity to see new cities and play in new ballparks. This year the Dodgers had Baltimore, Toronto and New York on our road slate. While the experience of playing in new parks is always special, I have yet to find a stadium to replace the emotion I felt playing in Fenway Park. I can still remember the contrasting smells of old beer and new hot dogs as I entered the players' entrance behind the right-field gate. Entering the first aisle I saw, I made my way down toward the field. My eyes were immediately attracted to the Green Monster standing tall in left field. It was early enough that I hopped the fence and made my way across the field to the visiting clubhouse, soaking in the atmosphere. Looking back to right field, I saw the single red seat signifying the landing spot of Ted Williams 'final home run in his last at-bat at Fenway. As my teammates arrived we took turns having our picture taken in front of the Monster, traveled above the wall to get a view from the newly created Green Monster seats, and played home run derby in batting practice trying to launch balls over the 37-foot wall. We acted, looked and played like tourists in the series, as the Red Sox swept the three-game set. Nonetheless, the chance to play in front of the passionate Red Sox fans and listen to them sing "Sweet Caroline" in unison was a firsthand example of playing in front of fans who truly believe they are part of the team and help determine the outcome of the game. Even with two previous championships in the last decade, the Red Sox can now reward their fans in person. It hasn't happened since 1918 and the city wants to celebrate to make up for lost time. Shane Victorino had been held hitless through the first three games of the Series and was then forced to sit out the fourth and fifth games with lower-back tightness. The rest healed his back and cleared his head. As in the American League Championship Series, Victorino had the big hit in Game 6. It was his grand slam that knocked out the Tigers and his bases-loaded double that plated the first three runs of the game for the Red Sox. Victorino added a run-scoring single the next inning. Michael Wacha pitched around traffic the first two innings, but the Red Sox got to him in the third. With two on and two out Wacha tried to get a fastball inside past Jonny Gomes. The ball got away and hit Gomes on the elbow. Victorino found a fastball count and hammered a ball high off the Green Monster, clearing the bases while simultaneously putting doubt in the Cardinals' heads and confidence in a Red Sox championship. John Lackey lived up to his status as a big-game pitcher. The right-hander concluded his outstanding Series with 62/3 solid innings, scattering nine hits and striking out five. Lackey and the entire Boston staff did a phenomenal job not allowing baseball's best run-producing team to get the clutch hit. The plan to pitch around a hot hitter can backfire when you aren't able to get the hitters behind him out. 
The Cardinals walked David Ortiz four times, three of them intentionally, but the guys behind him drove in the runs Ortiz wasn't given a chance to. Allen Craig can flat-out hit. "What if?" questions are a favorite pastime of baseball and baseball fans. Similarly to Hanley Ramirez in the National League Championship Series, what if Craig had been healthy enough to play full time in the World Series? Watching this game felt eerily similar to the Dodgers' NLCS Game 6. We went on the road with our best pitcher on the mound confident we were headed to a Game 7. Instead the Cardinals scored early and often, energizing their crowd, and took all the wind out of our sails. In my second attempt providing analysis for the World Series, I appreciate the forum and freedom the Los Angeles Times has given me to share my insights. I also must thank the countless baseball writers who have reached out and commented on the articles. I am continually impressed at the job done by these gifted people and have new understanding of what an unknown outcome with a deadline looming does to your stomach. I finally want to thank the fans who have sent so much positive feedback through Twitter and other avenues. To Dodgers fans who have done nothing but embrace me in my time in Los Angeles, I am so grateful and I love being your catcher.
Accurate Dynamic Model of DFB lasers Efficient and reliable models for semiconductor distributed feedback (DFB) lasers design and optimization are highly desirable. An innovative dynamic model for accurate simulations of DFB lasers is proposed in this paper. The developed algorithm is based on a system of coupled partial differential equations which is solved by the finite element method (FEM). No approximation is assumed and obtained results are affected only by the numerical error of the FEM solver. The code has been validated on the large-signal response of a typical DFB device. Keywords-laser; semiconductor laser; DFB laser; finite element method
The life story of Lou Pearlman, the former multiplatinum boy band manager-turned-convicted felon, is getting the movie treatment. John Stamos, Desmond Child and Andreas Carlsson have acquired the rights to Tyler Gray’s 2008 book The Hit Charade: Lou Pearlman, Boy Bands, and the Biggest Ponzi Scheme in U.S. History and will serve as producers of the film, which is in the early development stage. This comes just weeks after news spread that a limited series about Pearlman was in the works. It will be based on an article in The New Yorker to which Magnet Management and Condé Nast has secured. The article, headlined, “We Live in the Pop-Culture World That Lou Pearlman Created,” details Pearlman’s rise from a blimp-business owner to one of the music industry’s most successful moguls and then to his eventual downfall. Pearlman, who is responsible for developing such boy band sensations as Backstreet Boys and *NSYNC, died last month of undisclosed causes while serving a 25-year prison term for running a complex Ponzi scheme for nearly two decades.
/**
 * Performs the line following challenge specific logic
 */
static void linePeriodic(void) {
    uint8_t reflectance = reflectance_read();

    /* Invert each raw sensor bit to get a boolean "line detected" flag per sensor. */
    uint8_t outer_left_reflectance  = !((reflectance & 8) >> 3);
    uint8_t inner_left_reflectance  = !((reflectance & 4) >> 2);
    uint8_t inner_right_reflectance = !((reflectance & 2) >> 1);
    uint8_t outer_right_reflectance = !(reflectance & 1);

    if (!speed) {
        /* Low-speed profile */
        if (outer_left_reflectance == outer_right_reflectance &&
            inner_left_reflectance == inner_right_reflectance) {
            accelerateToSpeed(100, 100, 2);
        } else if (outer_left_reflectance && !outer_right_reflectance) {
            accelerateToSpeed(0, 0, 2);
            accelerateToSpeed(-30, 50, 5);
            delay(100);
        } else if (outer_right_reflectance && !outer_left_reflectance) {
            accelerateToSpeed(0, 0, 2);
            accelerateToSpeed(50, -30, 5);
            delay(100);
        } else if (inner_left_reflectance && !inner_right_reflectance) {
            accelerateToSpeed(50, 100, 2);
        } else if (inner_right_reflectance && !inner_left_reflectance) {
            accelerateToSpeed(100, 50, 2);
        }
    } else {
        /* High-speed profile */
        if (outer_left_reflectance == outer_right_reflectance &&
            inner_left_reflectance == inner_right_reflectance) {
            accelerateToSpeed(400, 400, 5);
        } else if (outer_left_reflectance && !outer_right_reflectance) {
            accelerateToSpeed(-60, 150, 1);
        } else if (outer_right_reflectance && !outer_left_reflectance) {
            accelerateToSpeed(150, -60, 1);
        } else if (inner_left_reflectance && !inner_right_reflectance) {
            accelerateToSpeed(200, 250, 1);
        } else if (inner_right_reflectance && !inner_left_reflectance) {
            accelerateToSpeed(250, 200, 1);
        }
    }
}
Comparison of mechanical and superconducting properties of titanium-added Nb3Sn composite wire with those of non-added ones The mechanical and superconducting properties of Ti-added Nb3Sn superconducting composite wire were studied experimentally and the results were compared with those of non-added ones. When titanium was added, the growth rate of the Nb3Sn compound was enhanced. The grain size of the compound was reduced when the annealing temperature was high but not when it was low. The fracture behaviour of Ti-added specimens was essentially the same as that of non-added ones: when the amount of Nb3Sn was small, the specimens showed large apparent plastic deformation; when it was large, the specimens showed brittle fracture without apparent plastic deformation, but the strength of the Nb3Sn compound was different between the Ti-added and non-added specimens. The strength of the Ti-added compound was lower than that of non-added ones. Although the strength of the Nb3Sn was different, the strength of both Ti-added and non-added compounds was nearly proportional to the inverse root grain size. The global pinning force of both compounds was proportional to the product of the inverse grain size and f(H,Hc2) which is a function of the applied magnetic field and upper critical magnetic field. The global pinning force was raised by the addition of titanium especially at high magnetic field due to an increase in the upper critical magnetic field.
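Restated in symbols (our notation, not the authors'): with grain size d, the compound strength follows \sigma \propto d^{-1/2}, and the global pinning force follows F_p \propto d^{-1}\, f(H, H_{c2}), where f(H, H_{c2}) is the stated function of the applied and upper critical magnetic fields.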
The role of 99Tcm-tetrofosmin in the evaluation of thyroid nodules Various radionuclides, including 67Ga, 201Tl and 99Tcm-sestamibi, have been used to differentiate benign from malignant thyroid nodules. 99Tcm-tetrofosmin, a lipophilic cationic radiotracer, and 99Tcm-sestamibi have also been reported to accumulate in thyroid tumours. In this study, we evaluated the role of 99Tcm-tetrofosmin in the differentiation of malignant from benign thyroid nodules. Seventy-nine patients with solitary non-functioning thyroid nodules were included in the study. Fine-needle aspiration biopsy was performed in all patients. Sixty patients were subsequently operated on and 19 patients refused surgery. After the injection of 370 MBq 99Tcm-tetrofosmin, static images at 5, 30, 60, 120 and 180 min were acquired. Both visual and semi-quantitative analysis was performed. On visual interpretation, the nodules with late retention were classified as positive for malignancy and nodules without late retention were classified as negative for malignancy. In the semi-quantitative analysis, regions of interests were drawn over the nodule and contralateral normal thyroid tissue. The average number of counts was recorded and tumour-to-normal thyroid tissue ratios calculated. Post-operative histology revealed 19 malignant and 41 benign nodules. Of the benign nodules, adenomas behaved similarly to the malignant nodules with late retention of tracer, while adenomatous nodules revealed no late retention on delayed images and could be differentiated from malignant tumours. In the semi-quantitative analysis, there was a significant difference in tumour-to-normal tissue ratios for adenomatous nodules and malignant tumours as well as adenomas. We conclude that it is not possible to differentiate between malignant and benign thyroid nodules with 99Tcm-tetrofosmin. However, 99Tcm-tetrofosmin scintigraphy is helpful in selecting nodules that can be cured by surgical intervention.
/*
 * Copyright (c) <NAME> 2017
 */
package de.dytanic.cloudnet.lib.user;

import de.dytanic.cloudnet.lib.interfaces.Nameable;
import lombok.AllArgsConstructor;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.ToString;

import java.util.Collection;
import java.util.Map;
import java.util.UUID;

/**
 * Created by Tareko on 11.09.2017.
 */
@Getter
@EqualsAndHashCode
@ToString
@AllArgsConstructor
public class User implements Nameable {

    protected String name;
    protected UUID uniqueId;
    protected String apiToken;
    protected String hashedPassword;
    protected Collection<String> permissions;
    protected Map<String, Object> metaData;

    public SimpledUser toSimple() {
        return new SimpledUser(name, apiToken);
    }

    public boolean hasPermission(String permission) {
        return permissions.contains("*") || permissions.contains(permission);
    }
}
from ..const.const import ( MONTH_TO_NUMBER, SENSOR_LOCATIONS_TO_URL, _LOGGER, ) from datetime import datetime, date from bs4 import BeautifulSoup import urllib.request import urllib.error class VenloAfval(object): def get_date_from_afvaltype(self, tableRows, afvaltype): try: for row in tableRows: garbageDate = row.find("td") garbageType = row.find("span") if garbageDate and garbageType: garbageDate = row.find("td").string garbageType = row.find("span").string #Does the afvaltype match... if garbageType == afvaltype: day = garbageDate.split()[1] month = MONTH_TO_NUMBER[garbageDate.split()[2]] year = str( datetime.today().year if datetime.today().month <= int(month) else datetime.today().year + 1 ) garbageDate = year + "-" + month + "-" + day if datetime.strptime(garbageDate, '%Y-%m-%d').date() >= date.today(): return garbageDate # if nothing was found return "" except Exception as exc: _LOGGER.error("Error occurred while splitting data: %r", exc) return "" def get_data(self, city, postcode, street_number): _LOGGER.debug("Updating Waste collection dates") try: url = SENSOR_LOCATIONS_TO_URL["venlo"][0].format( postcode, street_number ) req = urllib.request.Request(url=url) f = urllib.request.urlopen(req) html = f.read().decode("utf-8") soup = BeautifulSoup(html, "html.parser") html = soup.find("div", {"class": "trash-removal-calendar"}) tableRows = html.findAll("tr") # Place all possible values in the dictionary even if they are not necessary waste_dict = {} # GFT waste_dict["gft"] = self.get_date_from_afvaltype(tableRows, "GFT") # Restafval waste_dict["restafval"] = self.get_date_from_afvaltype(tableRows, "Restafval/PMD") # PMD waste_dict["pbd"] = self.get_date_from_afvaltype(tableRows, "Restafval/PMD") return waste_dict except urllib.error.URLError as exc: _LOGGER.error("Error occurred while fetching data: %r", exc.reason) return False
<filename>betaas-plugins/betaas-adaptation-plugin/src/main/java/eu/betaas/adaptation/plugin/api/IAdaptorPlugin.java<gh_stars>0 /** * Copyright 2014-2015 Converge ICT * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package eu.betaas.adaptation.plugin.api; import java.util.HashMap; import java.util.List; import java.util.Vector; /** * This interface defines the service exposed through OSGI by the ETSI Plugin for TA * * @author Intecs */ public interface IAdaptorPlugin { /** * @param listener When register() is called, this is the listener that will * be notified when the corresponding data will be available. * Once the unregister() is called, the setListener should be * called with a null argument. */ public void setListener(IAdaptorListener listener); /** * @return the set of attributes of the available ETSI devices */ public Vector<HashMap<String, String>> discover(); /** * Register for a specific sensor device. The TA will be notified about * new incoming data from that sensor * @param sensorID unique identifier of the sensor * @return true iff the registration is completed successfully */ public boolean register(String sensorID); /** * Unregister for a specific sensor device. TA will stop receiving notifications. * @param sensorID unique identifier of the sensor * @return true if the unregistration is completed successfully */ public boolean unregister(String sensorID); /** * @param ThingId as the SensorID created. * @return the current measurement of the sensor/thing */ public String getData(String sensorID); /** * @param ThingId as the SensorID created. * @return the current measurement of the sensor/thing */ public String setData(String sensorID, String value); }
/** * Fetch the remaining timing data from the client which were not sent. */ public String fetchPerformanceData() throws JSONException, TimeoutException, CommunicationException, InterruptedException { ClientPerformanceExtensionConnection conn = currentConnection; if (conn == null || !conn.isOpen()) { conn = connector.waitForNextConnection(connectionTimeout); } final JSONObject message = new JSONObject(); message.put("action", "GET_DATA"); message.put("storageTimeout", storageTimeout); return conn.sendRequest(message, messageTimeout).getString("data"); }
def _RunAndGetFileList(self, args, options, file_list, cwd=None): cwd = cwd or self.checkout_path scm.SVN.RunAndGetFileList( options.verbose, args + ['--ignore-externals'], cwd=cwd, file_list=file_list)
package climit

import (
	"testing"
)

func TestNewLimiter(t *testing.T) {
	l := NewLimiter(0)
	if l.Cap() != 1 {
		t.Errorf("got: %v, want: %v\n", l.Cap(), 1)
	}
}

func TestTryGet(t *testing.T) {
	l := NewLimiter(1)
	if !l.TryGet() {
		t.Errorf("Expect obtain slot, but didn't")
	}
	if l.TryGet() {
		t.Errorf("Expect fail to obtain slot, but got a slot")
	}
}

func Test(t *testing.T) {
	const limit = 5
	l := NewLimiter(limit)
	l.Get()
	for i := 1; i <= limit; i++ {
		if l.TryGet() {
			if i >= limit {
				t.Errorf("Shouldn't get a slot, but did")
			}
		} else {
			if i < limit {
				t.Errorf("Should get a slot, but didn't")
			}
		}
	}
	for i := 0; i < limit; i++ {
		l.Done()
	}
	l.Wait()
}
<reponame>thinlizzy/die-tk #include "Label.h" #include <memory> namespace tk { Label::Label(Surface & parent, ControlParams const & controlParams): CustomControl(parent), labelImpl(std::make_shared<LabelImpl>(controlParams)) { redimAutosize(); addToParent(labelImpl); } LabelImpl & Label::controlImpl() { return *labelImpl; } LabelImpl const & Label::controlImpl() const { return *labelImpl; } void Label::setText(NativeString const & text) { labelImpl->text = text; invalidateIfVisible(); if( redimAutosize() ) { invalidateIfVisible(); } } NativeString Label::text() const { return labelImpl->text; } void Label::setColor(RGBColor color) { labelImpl->color = color; invalidateIfVisible(); } RGBColor Label::color() const { return labelImpl->color; } void Label::setBackgroundColor(optional<RGBColor> color) { labelImpl->backgroundColor = color; invalidateIfVisible(); } optional<RGBColor> Label::backGroundColor() const { return labelImpl->backgroundColor; } void Label::setAutosize(bool enable) { if( enable == autosize() ) return; labelImpl->autosize = enable; if( redimAutosize() ) { invalidateIfVisible(); } } bool Label::autosize() const { return labelImpl->autosize; } bool Label::redimAutosize() { if( ! autosize() ) return false; labelImpl->rect = labelImpl->rect.resize(parent.canvas().measureText(labelImpl->text)); return true; } }
<reponame>altugbakan/accelerated-cpp-solutions<filename>Chapter 15/Pic.cpp #include <iostream> #include <vector> #include <string> #include "Pic.h" using std::ostream; using std::endl; using std::vector; using std::string; // code is taken from §15.2.1/279 Picture frame(const Picture& pic, char bordertb, char borderlr, char borderc) { return new Frame_Pic(pic.p, bordertb, borderlr, borderc); } // code is taken from §15.2.1/280 Picture hcat(const Picture& l, const Picture& r) { return new HCat_Pic(l.p, r.p); } // code is taken from §15.2.1/280 Picture vcat(const Picture& t, const Picture& b) { return new VCat_Pic(t.p, b.p); } // code is taken from §15.2.1/280 Picture::Picture(const vector<string>& v): p(new String_Pic(v)) { } // code is taken from §15.2.1/280 ostream& operator<<(ostream& os, const Picture& picture) { const Pic_base::ht_sz ht = picture.p->height(); for (Pic_base::ht_sz i = 0; i != ht; ++i) { picture.p->display(os, i, false); os << endl; } return os; } // code is taken from §15.2.2/281 Pic_base::wd_sz String_Pic::width() const { Pic_base::wd_sz n = 0; for (Pic_base::ht_sz i = 0; i != data.size(); ++i) n = std::max(n, data[i].size()); return n; } // code is taken from §15.2.2/282 void String_Pic::display(ostream& os, ht_sz row, bool do_pad) const { wd_sz start = 0; // write the row if we're still in range if (row < height()) { os << data[row]; start = data[row].size(); } // pad the output if necessary if (do_pad) pad(os, start, width()); } // code is taken from §15.2.4/284 void VCat_Pic::display(ostream& os, ht_sz row, bool do_pad) const { wd_sz w = 0; if (row < top->height()) { // we are in the top subpicture top->display(os, row, do_pad); w = top->width(); } else if (row < height()) { // we are in the bottom subpicture bottom->display(os, row - top->height(), do_pad); w = bottom->width(); } if (do_pad) pad(os, w, width()); } // Q5 void HCat_Pic::display(ostream& os, ht_sz row, bool do_pad) const { if (left->height() >= right->height()) { // find the difference ht_sz diff = left->height() - right->height(); // display left since it is larger left->display(os, row, do_pad); // display right if placement is correct if (row >= diff / 2) right->display(os, row - diff / 2, do_pad); } else { // find the difference ht_sz diff = right->height() - left->height(); // display left if placement is correct, // or display the padding if (row >= diff / 2) left->display(os, row - diff / 2, true); else left->display(os, left->height(), true); // display right since it is larger right->display(os, row, do_pad); } } void Frame_Pic::display(ostream& os, ht_sz row, bool do_pad) const { if (row >= height()) { // out of range if (do_pad) pad(os, 0, width()); } else { if (row == 0 || row == height() - 1) { // top or bottom row os << c << string(width() - 2, tb) << c; // Q2 } else if (row == 1 || row == height() - 2) { // second from top or bottom row os << lr; // Q2 pad(os, 1, width() - 1); os << lr; // Q2 } else { // interior row os << lr << " "; // Q2 p->display(os, row - 2, true); os << " " << lr; // Q2 } } } // Q4 void Frame_Pic::reframe(const char bordertb, const char borderlr, const char borderc) { tb = bordertb; lr = borderlr; c = borderc; p->reframe(bordertb, borderlr, borderc); } // Q4 void HCat_Pic::reframe(const char bordertb, const char borderlr, const char borderc) { left->reframe(bordertb, borderlr, borderc); right->reframe(bordertb, borderlr, borderc); } // Q4 void VCat_Pic::reframe(const char bordertb, const char borderlr, const char borderc) { top->reframe(bordertb, borderlr, 
borderc); bottom->reframe(bordertb, borderlr, borderc); }
/**
 * Created by runqi.wei on 2018/1/8.
 */
public class MessageManager<H> {

    protected String indexPrefix;
    protected OnMessageQueriedListener<H> onMessageQueriedListener;
    protected HashMap<String, ArrayList<H>> registrationMap;
    protected HashMap<String, Pair<String, H>> indexMap;
    protected int autoIncreaseIndex;

    public MessageManager(@NonNull String indexPrefix, OnMessageQueriedListener<H> onMessageQueriedListener) {
        autoIncreaseIndex = 0;
        registrationMap = new HashMap<>();
        indexMap = new HashMap<>();
        this.indexPrefix = indexPrefix;
        this.onMessageQueriedListener = onMessageQueriedListener;
    }

    public void clear() {
        if (registrationMap != null) {
            registrationMap.clear();
        }
        if (indexMap != null) {
            indexMap.clear();
        }
        autoIncreaseIndex = 0;
    }

    /**
     * For test only.
     * @return the key-to-handlers registration map
     */
    public HashMap<String, ArrayList<H>> getRegistrationMap() {
        return registrationMap;
    }

    /**
     * For test only.
     * @return the id-to-(key, handler) index map
     */
    public HashMap<String, Pair<String, H>> getIndexMap() {
        return indexMap;
    }

    public void setOnMessageQueriedListener(OnMessageQueriedListener<H> onMessageQueriedListener) {
        this.onMessageQueriedListener = onMessageQueriedListener;
    }

    protected String getNewIndex() {
        String index = ((indexPrefix == null) ? "" : indexPrefix) + autoIncreaseIndex;
        autoIncreaseIndex++;
        return index;
    }

    public String registerHandler(@NonNull String key, @NonNull H handler) {
        if (registrationMap == null) {
            registrationMap = new HashMap<>();
        }
        if (indexMap == null) {
            indexMap = new HashMap<>();
        }
        ArrayList<H> list = registrationMap.get(key);
        if (list == null) {
            list = new ArrayList<>();
            registrationMap.put(key, list);
        }
        String idx = null;
        if (!list.contains(handler)) {
            // Not registered yet: add the handler and issue a new id.
            list.add(handler);
            idx = getNewIndex();
            indexMap.put(idx, new Pair<>(key, handler));
        } else {
            // Already registered: return the id that was issued the first time.
            Pair<String, H> pair = new Pair<>(key, handler);
            for (Map.Entry<String, Pair<String, H>> entry : indexMap.entrySet()) {
                if (pair.equals(entry.getValue())) {
                    idx = entry.getKey();
                    break;
                }
            }
        }
        return idx;
    }

    public void removeHandler(@NonNull String id) {
        if (indexMap == null || indexMap.isEmpty()) {
            return;
        }
        Pair<String, H> pair = indexMap.get(id);
        if (pair == null || pair.first == null || pair.second == null) {
            return;
        }
        removeHandler(pair.first, pair.second);
    }

    public void removeHandler(@NonNull String key, @NonNull H handler) {
        if (registrationMap == null || registrationMap.isEmpty()) {
            return;
        }
        ArrayList<H> list = registrationMap.get(key);
        if (list == null || list.isEmpty() || !list.contains(handler)) {
            return;
        }
        list.remove(handler);
        removeId(key, handler);
    }

    public void removeHandler(@NonNull H handler) {
        if (registrationMap == null || registrationMap.isEmpty()) {
            return;
        }
        for (Map.Entry<String, ArrayList<H>> entry : registrationMap.entrySet()) {
            ArrayList<H> list = entry.getValue();
            if (list == null || list.isEmpty() || !list.contains(handler)) {
                continue;
            }
            list.remove(handler);
            removeId(entry.getKey(), handler);
        }
    }

    protected void removeId(@NonNull String key, @NonNull H handler) {
        if (indexMap == null || indexMap.isEmpty()) {
            return;
        }
        String idx = null;
        Pair<String, H> pair = new Pair<>(key, handler);
        for (Map.Entry<String, Pair<String, H>> entry : indexMap.entrySet()) {
            if (pair.equals(entry.getValue())) {
                idx = entry.getKey();
                break;
            }
        }
        indexMap.remove(idx);
    }

    public ArrayList<Object> queryMessage(String key, Object parameter) {
        ArrayList<Object> result = new ArrayList<>();
        ArrayList<H> handlerArrayList = registrationMap.get(key);
        if (handlerArrayList != null && !handlerArrayList.isEmpty()) {
            for (H handler : handlerArrayList) {
                if (handler == null) {
                    continue;
                }
                Object res = null;
                if (onMessageQueriedListener != null) {
                    res = onMessageQueriedListener.onMessageQueried(key, parameter, handler);
                }
                if (res != null) {
                    result.add(res);
                }
            }
        }
        return result;
    }

    public interface OnMessageQueriedListener<HA> {
        Object onMessageQueried(String key, Object parameter, HA handler);
    }
}
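The registration and index bookkeeping above is easiest to see in a small usage sketch. This is a hypothetical example, not part of the original source: the Runnable handler type, the lambda listener and the "refresh" key are all made up for illustration, and it assumes OnMessageQueriedListener is usable as a functional interface.

import java.util.ArrayList;

public class MessageManagerDemo {
    public static void main(String[] args) {
        // Listener that turns every query into a string reply (illustrative only).
        MessageManager<Runnable> manager = new MessageManager<>(
                "msg_", (key, parameter, handler) -> key + ":" + parameter);

        Runnable refreshHandler = () -> System.out.println("refreshing");
        String id = manager.registerHandler("refresh", refreshHandler);      // e.g. "msg_0"
        String sameId = manager.registerHandler("refresh", refreshHandler);  // same key/handler pair -> same id

        ArrayList<Object> replies = manager.queryMessage("refresh", "homePage");
        System.out.println(id.equals(sameId)); // true
        System.out.println(replies);           // [refresh:homePage]

        manager.removeHandler(id); // unregister by the id returned at registration
        manager.clear();
    }
}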
Olefins (CnH2n), such as ethylene (C2H4), are produced by pyrolysis of hydrocarbons (naphtha, natural gas, ethane, or the like). In particular, to produce olefinic hydrocarbons (ethylene, propylene, or the like), the hydrocarbons are supplied together with steam into a tube provided in a reacting furnace, and heat is supplied to the hydrocarbons from the outer surface of the tube so that a pyrolysis reaction of the hydrocarbons takes place inside the tube. The tube is made of a high-Cr and high-Ni alloy as represented by a 25% Cr-25% Ni alloy, a 25% Cr-38% Ni alloy or the like, or of a stainless steel as represented by the AISI 304 type.

In the above-described pyrolysis reaction, the heat supplied to the outer surface of the tube must be transferred efficiently to the inner surface, so that unreacted hydrocarbons are not discharged from the furnace. That is, the tube needs a superior "heat exchange characteristic". The heat exchange characteristic can be evaluated by measuring the average temperature of the fluid at the outlet of the tube: when the tube has a superior heat exchange characteristic, the average outlet temperature is higher.

A mixed gas composed of hydrocarbons and steam is supplied into a steel tube from its inlet at low pressure and high speed. Unreacted mixed gas, together with gas newly formed by the reaction, travels a long distance along the ribs provided on the inner surface of the tube, so the gas flow is disturbed depending on the shape of the ribs. In this case, the fluid in the central portion of the tube and the fluid at the bottom of a rib become separated, and mass transfer (and hence reaction) between the central portion of the tube and the bottom of the rib becomes insufficient. Since reaction products then accumulate at the bottoms of the ribs, over-pyrolysis occurs there; conversely, the reaction in the central portion of the tube remains insufficient, leading to yield loss. To solve such problems, the tube should also have superior "pyrolysis reaction characteristics". Because the pyrolysis reaction characteristics depend on the mass flow in the tube, they can be evaluated from the deviation of temperatures at the outlet of the tube.

Patent Document 1 (JP 58-173022A) discloses a production process for a tube with intratubular spiral ribs. In that process, the tube with intratubular spiral ribs is produced by torsional working of a metal tube with intratubular straight ribs, itself produced by hot extrusion. Patent Document 2 (JP 01-127896A) discloses a tube material for a heat exchanger whose inner surface has a wavy cross-section, in which the radius of convex curvature of the crests, RF, and the radius of concave curvature of the valleys, RS, satisfy the relationship RS ≥ RF. Furthermore, Patent Document 3 (JP 08-82494A) discloses a tube for heat exchange provided with fins formed on the inner surface at given pitches and extending in directions that intersect the tube axis; the fins are arranged in one or more areas along the tube axis direction, or over the entire inner surface.
Patent Document 4 (JP2005-533917A) discloses a tube with intratubular spiral fins, used for the pyrolysis of hydrocarbons in the presence of steam.

The tubes with intratubular ribs or fins disclosed in the above Patent Documents, however, cannot balance the "heat exchange characteristic" and the "pyrolysis reaction characteristic", and cannot improve both characteristics sufficiently. Heat-exchange tubes with intratubular ribs in which both characteristics are further improved have therefore been desired.

Meanwhile, regarding the usage conditions of metal tubes used for the pyrolysis reaction in the cracking furnace of an ethylene plant or the like, operating temperatures tend to become higher in pursuit of yield improvement, driven by the recent increase in demand for resins. In metal tubes used for pyrolysis at such higher temperatures, carbon is unavoidably formed by the pyrolysis reaction; it attaches to and deposits on the inner surface of the tube. This phenomenon is called "coking". When coking occurs, the pyrolysis reaction efficiency decreases, since the deposited carbon prevents the heat supplied from the outer surface of the tube from being transferred to the mixed gas. Furthermore, the steel tube becomes brittle, because the accumulated carbon diffuses into the steel and carburizes it, and damage to the tube then starts from the carburized portion. Moreover, when carbon flaked from the deposited layer accumulates in the tube, the gas flow is disturbed, the pyrolysis reaction is likewise inhibited, and the same kind of damage is caused. In addition, when carbon deposits in large amounts, serious accidents such as an explosion may take place. Therefore, in practice, air and steam are periodically passed through the tube to oxidize and remove the deposited carbon ("decoking"). The decoking work, however, brings serious problems such as a shutdown during the operation and an increase in man-hours.

The inner surface of a metal tube for pyrolysis is exposed to a carburizing gas atmosphere containing hydrocarbon gas, CO gas, or the like. Therefore, a heat-resistant material having resistance to carburization and coking in such an atmosphere is required as the tube material. Patent Document 5 (JP 2005-48284A) discloses a stainless steel tube, made of a mother material containing 20 to 35 mass % Cr, that has resistance to carburization and coking. The disclosed tube has a surface layer consisting of a Cr-depleted layer that contains 10 mass % or more Cr and whose thickness is 20 μm or less. Although Patent Document 5 mentions that protrusions, fins or the like may be provided on the inner surface of the tube, no specific configurations are disclosed.

[Patent Document 1] JP 58-173022A
[Patent Document 2] JP 01-127896A
[Patent Document 3] JP 08-82494A
[Patent Document 4] JP2005-533917A
[Patent Document 5] JP 2005-48284A
Where Do You Want to Go Today? Escalating Privileges by Pathname Manipulation We analyze filename-based privilege escalation attacks, where an attacker creates filesystem links, thereby tricking a victim program into opening unintended files. We develop primitives for a POSIX environment, providing assurance that files in safe directories (such as /etc/passwd) cannot be opened by looking up a file by an unsafe pathname (such as a pathname that resolves through a symbolic link in a world-writable directory). In today's UNIX systems, solutions to this problem are typically built into (some) applications and use application-specific knowledge about the (un)safety of certain directories. In contrast, we seek solutions that can be implemented in the filesystem itself (or a library on top of it), thus providing protection to all applications. Our solution is built around the concept of pathname manipulators, which are roughly the users that can influence the result of a file lookup operation. For each user, we distinguish unsafe pathnames from safe pathnames according to whether or not the pathname has any manipulators other than that user or root. We propose a safe-open procedure that keeps track of the safety of the current pathname as it resolves it, and that takes extra precautions while opening files with unsafe pathnames. We prove that our solution can prevent a common class of filename-based privilege escalation attacks, and describe our implementation of the safe-open procedure as a library function over the POSIX filesystem interface. We tested our implementation on several UNIX variants to evaluate its implications for systems and applications. Our experiments suggest that this solution can be deployed in a portable way without breaking existing systems, and that it is effective against this class of pathname resolution attacks.
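The core idea, tracking whether a lookup could have been influenced by an untrusted user while the path is resolved, can be illustrated with a deliberately conservative sketch. This is not the paper's safe-open procedure (which reasons about manipulators, sticky-bit directories and per-user trust); it simply walks the components with java.nio on a POSIX filesystem and flags a path as unsafe if it traverses any symbolic link or any group/world-writable entry.

import java.io.IOException;
import java.nio.file.*;
import java.nio.file.attribute.*;
import java.util.Set;

public final class PathSafetySketch {
    /** Returns true if the pathname should be treated as unsafe under this (over-strict) policy. */
    public static boolean isUnsafe(Path p) throws IOException {
        Path cur = p.isAbsolute() ? p.getRoot() : Paths.get(".").toAbsolutePath();
        for (Path component : p.normalize()) {
            cur = cur.resolve(component);
            PosixFileAttributes attrs = Files.readAttributes(
                    cur, PosixFileAttributes.class, LinkOption.NOFOLLOW_LINKS);
            Set<PosixFilePermission> perms = attrs.permissions();
            boolean writableByOthers = perms.contains(PosixFilePermission.OTHERS_WRITE)
                    || perms.contains(PosixFilePermission.GROUP_WRITE);
            // A traversed symlink, or a component that untrusted users can replace,
            // means the lookup has manipulators other than the owner and root.
            if (Files.isSymbolicLink(cur) || writableByOthers) {
                return true;
            }
        }
        return false;
    }
}

A real safe-open would additionally open components step by step (e.g. with openat-style calls) so that the check and the open happen on the same directory entries, rather than checking first and opening afterwards.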
Multi-UAV Cooperative Mission Assignment Algorithm Based on the ACO Method Against the background of cooperative combat with multiple unmanned aerial vehicles (UAVs), this paper presents a systematic study of a multi-UAV cooperative mission assignment algorithm based on the ant colony optimization (ACO) method. Multi-UAV cooperative mission assignment is an important part of multi-UAV cooperative combat and is the guarantee that a multi-UAV system can accomplish cooperative suppression of enemy air defences. In essence, it is a complicated combinatorial optimization problem with multiple objectives, multiple constraints, non-linearity, strong coupling, and so on. As a multi-parameter, multi-constraint NP-hard problem, it is closely related to task assignment and resource allocation and can be solved effectively by intelligent optimization methods. To address this problem, we introduce an ant colony optimization approach and establish a multi-UAV cooperative mission assignment strategy that meets the cooperative combat task requirements of a multi-UAV system. In the simulation part, using multiple UAVs cooperatively attacking multiple targets as the application background, we give a comprehensive experimental analysis of multi-UAV cooperative mission assignment based on the ant colony optimization method. Finally, the rationality and effectiveness of the proposed algorithm are verified.
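As a rough illustration of the kind of ACO loop such an assignment strategy builds on (not the authors' formulation; the cost data, parameters and single-objective cost below are made up for the sketch), each ant assigns every target to one UAV guided by a pheromone trail and a cost-based heuristic:

import java.util.*;

public class AcoAssignmentSketch {
    static final int UAVS = 4, TARGETS = 8, ANTS = 20, ITERS = 100;
    static final double ALPHA = 1.0, BETA = 2.0, RHO = 0.1;   // pheromone weight, heuristic weight, evaporation
    static double[][] cost = new double[UAVS][TARGETS];       // assumed given (e.g. distance/threat)
    static double[][] tau  = new double[UAVS][TARGETS];       // pheromone on (UAV, target) pairs
    static Random rnd = new Random(42);

    public static void main(String[] args) {
        for (double[] row : cost) Arrays.setAll(row, t -> 1 + rnd.nextDouble() * 9); // demo data
        for (double[] row : tau)  Arrays.fill(row, 1.0);
        int[] best = null;
        double bestCost = Double.MAX_VALUE;
        for (int it = 0; it < ITERS; it++) {
            for (int ant = 0; ant < ANTS; ant++) {
                int[] assign = new int[TARGETS];
                double total = 0;
                for (int t = 0; t < TARGETS; t++) {
                    assign[t] = pickUav(t);
                    total += cost[assign[t]][t];
                }
                if (total < bestCost) { bestCost = total; best = assign.clone(); }
                // deposit pheromone proportional to solution quality
                for (int t = 0; t < TARGETS; t++) tau[assign[t]][t] += 1.0 / total;
            }
            // evaporation once per iteration
            for (double[] row : tau) for (int t = 0; t < TARGETS; t++) row[t] *= (1 - RHO);
        }
        System.out.println("best assignment " + Arrays.toString(best) + " cost " + bestCost);
    }

    /** Roulette-wheel selection of a UAV for one target from pheromone and heuristic desirability. */
    static int pickUav(int t) {
        double[] p = new double[UAVS];
        double sum = 0;
        for (int u = 0; u < UAVS; u++) {
            p[u] = Math.pow(tau[u][t], ALPHA) * Math.pow(1.0 / cost[u][t], BETA);
            sum += p[u];
        }
        double r = rnd.nextDouble() * sum;
        for (int u = 0; u < UAVS; u++) { r -= p[u]; if (r <= 0) return u; }
        return UAVS - 1;
    }
}

A real multi-UAV formulation would replace the scalar cost with the paper's multi-objective, multi-constraint model (weapon loads, timing, coupled routes), but the construct/deposit/evaporate cycle stays the same.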
Against the backdrop of a clampdown on visas by the US and growing antagonism towards foreign workers and immigrants in that country, Infosys Technologies, India's second-largest IT services firm, is mulling an 'extreme offshoring' model to help reduce its dependence on H1 and L1 visas. In the year ended March 31, 2010, Infosys's onsite revenues were around Rs 10,461.32 crore (Rs 104.61 billion), or 46.7 per cent, and offshore revenues were Rs 12,121.5 crore (Rs 121.21 billion), or about 53.3 per cent. The Bangalore-headquartered company says it is capable of increasing its offshore utilisation capabilities to 95 per cent. It said the intent was to prepare the company to face an extreme situation should the negative sentiment brewing in the US intensify further. "There is a cost element (due to the visa fee hike) to what is happening now and there is a philosophical or directional element. The cost is no doubt increasing, but it is manageable. But it is more about what it indicates. If there is a build-up of negativity in sentiment, we have to prepare ourselves (for extreme offshoring) if need be. However, as long as unemployment remains high, the negative sentiment will continue, unfortunately," Kris Gopalakrishnan, CEO and MD, Infosys Technologies, told Business Standard. Infosys has already conducted pilot programmes with a couple of clients in the US with which it has proven the model: working from remote offshore locations, the company successfully transitioned outsourced projects to India. The Infosys extreme offshoring model is expected to have a far-reaching impact on the delivery of IT services, as it could lead to increased hiring at offshore locations like India and fewer jobs being created onsite. Front-end sales and support jobs would have to be primarily manned by US citizens. The US border security legislation will double the visa application fee, and is seen as targeting the Indian IT services industry. Unless further laws are passed in the US that completely halt outsourcing, US companies have no reason to stop shipping work to India. "As long as globalisation is not reversed or stopped, I think the growth of remote delivery of services should continue. And it's an opportunity for countries like India and China," said Gopalakrishnan. The Indian IT industry championed the offshore model in the early 1990s, which received a boost when global clients leveraged offshore work to gain a cost advantage and access to India's vast talent pool. However, today's standard delivery model is a mix of offshore, onsite and nearshore. Despite offshoring being at the forefront of the delivery roadmap, the IT industry has had to send people to the US on H1 and L1 visas on a temporary basis to transition clients' work to India. This process requires gathering first-hand information about a client's business requirements and readying the framework before shifting work offshore. "The technological capability for extreme offshoring is already there with us. You have video conferencing, which allows you to be there virtually, rather than in person, and the quality of video conferencing is extremely high today, with the availability of technologies like telepresence," added Gopalakrishnan. Over the last couple of quarters, Infosys has increased its offshore ratio by a few percentage points. "Our offshore-onsite ratio has shifted towards offshore in the last two to three years, and we can look at further shifting it down to offshore. 
In fact, we have many clients who are pushing us to reduce onsite work further," said S D Shibulal, COO, Infosys Technologies. Infosys has also reduced its dependence on H1 and L1 visas. According to the company, the number of visa renewals has fallen 80 per cent in the last three years. Infosys today employs around 13,000 people in the US, of whom approximately 8,900 hold H-1B visas and around 1,800 hold L-1 visas. This does not include figures for its BPO subsidiary and other wholly-owned subsidiaries.
# encoding: utf-8 """ @author: yp @software: PyCharm @file: run.py @time: 2019/8/5 0005 14:13 """ import unittest from AutoTestPlatform.report.HTMLTestReportCN import HTMLTestRunner with open("report.html", "wb") as f: dis = unittest.defaultTestLoader.discover("E:/pythonProject/newWorld/seleniumDemo/D2AdminTestDemo", pattern="*_test.py") r = HTMLTestRunner(stream=f, title="测试", description="登录测试", tester="yp") r.run(dis)
/** * Informs the UIPM that the values have been updated for particular Session. * * @param sessionIds an object of List&lt;String&gt; specifies sessionIds * @param paths an object of List&lt;String&gt; specifies paths * @param operations an object of List&lt;String&gt; operations * @param values an object of List&lt;String&gt; specifies values * @param props a List&lt;Map&lt;String, String&gt;&gt; of map containing attributes for path e.g. socketElType(var, cmd or ntf), hasDynRes and notification type( alert, error, info). */ void updateValues(List<String> sessionIds, List<String> paths, List<String> operations, List<String> values, List<Map<String, String>> props) { Map<IUIPM, List<String>> uipmSessionIdsMap = getUipmSessionIdsMap(sessionIds); if ( uipmSessionIdsMap == null ) return; for ( Entry<IUIPM, List<String>> entry : uipmSessionIdsMap.entrySet() ) { if ( entry == null ) continue; IUIPM uipm = entry.getKey(); List<String> sessionIdList = entry.getValue(); if ( (uipm == null) || (sessionIdList == null) || (sessionIdList.size() == 0) ) continue; try { uipm.updateValues(sessionIdList, paths, operations, values, props); } catch (UIPMFatalException e) { logger.warning("UIPMFatalException : Going to update values for sessionIds '"+sessionIdList+"'."); } } }
Cholinergic vasodilatation evoked by hypothalamic stimulation in cats could be elicited by stimulation of both the emotional and motor areas of the brain. Tranquilizers affected the cholinergic vasodilatation in different ways: reserpine decreased, and catapresan increased, its amplitude, while diazepam had no effect. The cholinergic vasodilatation seems to be closely connected with the activity of skeletal muscles, but not with emotional reactions. Alpha-adrenergic structures of the central nervous system are the activating mechanism of the central integration of cholinergic vasodilatation.
/* AMT22.cpp - Arduino library for ATM22 series absolute encoders by CUI Devices. Created by <NAME>, December 2020. */ #include "AMT22_lib.h" AMT22::AMT22(uint8_t cs, uint8_t resolution) { digitalWrite(cs, HIGH); //Get the CS line high which is the default inactive state _cs = cs; _resolution = resolution; } /* * This function gets the absolute position from the AMT22 encoder using the SPI bus. The AMT22 position includes 2 checkbits to use * for position verification. Both 12-bit and 14-bit encoders transfer position via two bytes, giving 16-bits regardless of resolution. * For 12-bit encoders the position is left-shifted two bits, leaving the right two bits as zeros. This gives the impression that the encoder * is actually sending 14-bits, when it is actually sending 12-bit values, where every number is multiplied by 4. * Error values are returned as 0xFFFF */ uint16_t AMT22::getPositionSPI(){ uint16_t currentPosition; //16-bit response from encoder bool binaryArray[16]; //after receiving the position we will populate this array and use it for calculating the checksum //get first byte which is the high byte, shift it 8 bits. don't release line for the first byte currentPosition = spiWriteRead(AMT22_NOP, false) << 8; //this is the time required between bytes as specified in the datasheet. delayMicroseconds(3); //OR the low byte with the currentPosition variable. release line after second byte currentPosition |= spiWriteRead(AMT22_NOP,true); //run through the 16 bits of position and put each bit into a slot in the array so we can do the checksum calculation for(int i = 0; i < 16; i++) binaryArray[i] = (0x01) & (currentPosition >> (i)); //using the equation on the datasheet we can calculate the checksums and then make sure they match what the encoder sent if ((binaryArray[15] == !(binaryArray[13] ^ binaryArray[11] ^ binaryArray[9] ^ binaryArray[7] ^ binaryArray[5] ^ binaryArray[3] ^ binaryArray[1])) && (binaryArray[14] == !(binaryArray[12] ^ binaryArray[10] ^ binaryArray[8] ^ binaryArray[6] ^ binaryArray[4] ^ binaryArray[2] ^ binaryArray[0]))) currentPosition &= 0x3FFF; //we got back a good position, so just mask away the checkbits else currentPosition = 0xFFFF; //bad position //If the resolution is 12-bits, and wasn't 0xFFFF, then shift position, otherwise do nothing if ((_resolution == RES12) && (currentPosition != 0xFFFF)) currentPosition = currentPosition >> 2; return currentPosition; } /* * This function does the SPI transfer. sendByte is the byte to transmit. * Use releaseLine to let the spiWriteRead function know if it should release the chip select line after transfer. * The received data is returned. */ uint8_t AMT22::spiWriteRead(uint8_t sendByte, uint8_t releaseLine){ //holder for the received over SPI uint8_t data; //set cs low, cs may already be low but there's no issue calling it again except for extra time setCSLine(LOW); //There is a minimum time requirement after CS goes low before data can be clocked out of the encoder. delayMicroseconds(3); //send the command data = SPI.transfer(sendByte); delayMicroseconds(3); //There is also a minimum time after clocking that CS should remain asserted before we release it setCSLine(releaseLine); //if releaseLine is high set it high else it stays low return data; } /* * This function sets the state of the SPI line. */ void AMT22::setCSLine(uint8_t csLine){ digitalWrite(_cs, csLine); } /* * The AMT22 bus allows for extended commands. The first byte is 0x00 like a normal position transfer, but the second byte is the command. 
*/
void AMT22::setZeroSPI(){
  spiWriteRead(AMT22_NOP, false);

  //this is the time required between bytes as specified in the datasheet.
  delayMicroseconds(3);

  spiWriteRead(AMT22_ZERO, true);
  delay(250); //250 millisecond delay to allow the encoder to reset
}

/*
 * The AMT22 bus allows for extended commands. The first byte is 0x00 like a normal position transfer, but the second byte is the command.
 */
void AMT22::resetAMT22(){
  spiWriteRead(AMT22_NOP, false);

  //this is the time required between bytes as specified in the datasheet.
  delayMicroseconds(3);

  spiWriteRead(AMT22_RESET, true);
  delay(250); //250 millisecond delay to allow the encoder to start back up
}

/*
 * This function sets the resolution of the encoder.
 */
void AMT22::setResolution(uint8_t resolution) {
  _resolution = resolution;
}

/*
 * This function is not a member of the AMT22 class. It sets up communication over SPI
 * and must be called in the setup section of the Arduino sketch.
 */
void setUpSPI(uint8_t mosi, uint8_t miso, uint8_t sclk, uint8_t clk_divider){
  pinMode(sclk, OUTPUT);
  pinMode(mosi, OUTPUT);
  pinMode(miso, INPUT);

  SPI.setClockDivider(clk_divider);
  SPI.begin();
}
NF-κB2/p52 enhances androgen-independent growth of human LNCaP cells via protection from apoptotic cell death and cell cycle arrest induced by androgen deprivation Androgen-deprivation therapy only causes a temporary regression of prostate cancer, as all tumors will eventually progress to a state refractory to hormonal therapy after 1–3 years of treatment. The underlying mechanisms of androgen-refractory progression of prostate cancer are incompletely understood. In this study, we employed in vitro as well as in vivo models to examine the role of NF-κB2/p52 in prostate cancer growth and androgen-independent progression.
North Fork Optical, a three-decade-old Mattituck business, has a fresh new digital presence courtesy of icXpro. The Main Road optical center was the winner of a website giveaway sponsored by ICVM Group of Mattituck and created by Times Review Partners. Launched using the icXpro platform, a service designed specifically for professional services businesses such as law firms, accounting firms, health care providers, consultants, financial planners and others, the new site features updated and better organized information about North Fork Optical and makes it easier for potential customers to find the practice on the web. A big part of the story of North Fork Optical is its history: it launched in 1981 and has followed its clients' eye health throughout the years with a level of personal attention not often matched by big chain competitors. The business has also evolved with technology, offering new diagnostics and screening for a wider variety of conditions than in the past. "It's a modern practice, but with the same friendly and personal attention it's always been known for," Roussan said, adding that it was important to include all those details in the company's website. Roussan said clients enjoy the icXpro website creation process because it challenges them to have conversations that help them better understand and talk about their own business. "It's engaging, and because it doesn't drag on forever you really have the client's full attention for that important window of time," he added. North Fork Optical said its website hadn't been updated in more than a half dozen years. Even as the business evolved, the site had remained the same. For Roussan and ICVM, the launch brings life to another successful project.
Drug repurposing: mebendazole as effective antitumor agent. Are we seeing the whole story? PURPOSE To examine the antitumor effects of mebendazole (MZ) in a model of experimental fibrosarcoma induced by inoculation of BHK-21/C13 cells in the Syrian golden hamster. METHODS Hamsters were inoculated subcutaneously with a suspension of BHK cells and randomly divided into 5 experimental and 2 control groups. Treatment started on the 10th day after inoculation, when the tumor had grown to a diameter of 5 mm. The experimental design was based on distributing the total amount of the drug, MZ(z), across different protocols and routes (oral/intraperitoneal) in the 5 experimental groups. The positive control group received doxorubicin intraperitoneally. The negative control group received olive oil orally. The total amount MZ(z) was chosen as the highest at which the animals would survive during the experiment. For evaluation of the antitumor effect, the main parameters were tumor size, number of mitoses, cytochrome-C immunopositivity and tumor tissue morphology, including cytoarchitecture and the percentage of preserved tumor tissue in the stereologically reconstructed tumor mass. RESULTS The results of this study showed the absence of an objective antitumor effect of MZ on experimental fibrosarcoma. MZ does not exhibit activity similar to DNA-damaging agents in the fibrosarcoma model. CONCLUSIONS It might be postulated that soft tissue tumors in animal models could show a high level of resistance to the effect of MZ.
Non-Uniform Narrow Groove Plasmonic Nano-Gratings for SPR Sensing and Imaging A surface plasmon resonance sensing and imaging platform based on plasmonic non-uniform nano-gratings with narrow (sub-10 nm) grooves is presented. In these nano-gratings, normally incident optical radiation is directly coupled to surface plasmons without the requirement of any other conventional surface plasmon coupling mechanism such as prism-based or grating-based coupling. Theoretical analysis of practically realizable plasmonic non-uniform nano-gratings with rounded tops and slanted sidewalls is carried out numerically to determine the reflectance and differential reflectance signals obtained when the localized refractive index of the medium around the gold layer present in these nano-gratings is changed. This change in the localized refractive index can occur due to the binding of biomolecules to the gold layer. Two kinds of plasmonic non-uniform nano-gratings are studied using finite difference time domain (FDTD) modelling: gold nano-gratings (GNGs) and gold-coated silicon nano-gratings (GSNGs). The plasmonic non-uniform nano-gratings being proposed, more specifically the GSNGs, can be easily fabricated with existing nanofabrication and thin-film deposition methods, as opposed to uniform nano-gratings (with parallel sidewalls), which are very difficult to fabricate. The plasmonic non-uniform nano-gratings with narrow grooves eliminate the strict requirements on the angle of incidence for coupling light into surface plasmons that apply in conventional prism-based coupling mechanisms. By employing FDTD calculations, we demonstrate that these plasmonic non-uniform nano-gratings provide very high differential reflectance amplitudes, which are indicative of high sensitivities of the SPR or SPRi sensors when the localized refractive index around the sensors is varied. Moreover, the sensors proposed in this article provide a maximum sensitivity of localized refractive index sensing (i.e., surface sensitivity, S_S) of 70 nm/nm with a figure of merit of the localized sensor (FOM_S) of 1.5 nm^-1. This sensitivity of localized refractive index sensing is the highest reported thus far in comparison with previously reported plasmonic sensors. Moreover, these plasmonic non-uniform nano-grating based sensors exhibit significantly better performance when compared with conventional SPR or SPRi sensors based on the Kretschmann configuration.
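To make the headline numbers concrete: if FOM_S is defined in the usual way as sensitivity divided by resonance linewidth (an assumption, since the abstract does not state the definition), the quoted values imply a linewidth of roughly 47 nm:

public class FomCheck {
    public static void main(String[] args) {
        double surfaceSensitivity = 70.0; // nm of resonance shift per nm of adsorbed layer (from the abstract)
        double fomS = 1.5;                // localized figure of merit, 1/nm (from the abstract)
        double impliedLinewidth = surfaceSensitivity / fomS; // assumes FOM_S = S_S / FWHM
        System.out.printf("implied resonance linewidth = %.1f nm%n", impliedLinewidth); // about 46.7 nm
    }
}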
/* * Copyright © 2014 <NAME> (<EMAIL>) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.emmalanguage.labyrinth.util; import org.apache.flink.api.common.typeutils.CompatibilityResult; import org.apache.flink.api.common.typeutils.TypeSerializer; import org.apache.flink.api.common.typeutils.TypeSerializerConfigSnapshot; import org.apache.flink.core.memory.DataInputView; import org.apache.flink.core.memory.DataOutputView; import java.io.IOException; import java.io.Serializable; public final class TupleIntInt implements Serializable { public int f0, f1; public TupleIntInt() {} public TupleIntInt(int f0, int f1) { this.f0 = f0; this.f1 = f1; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; TupleIntInt that = (TupleIntInt) o; if (f0 != that.f0) return false; return f1 == that.f1; } @Override public int hashCode() { int result = f0; result = 31 * result + f1; return result; } @Override public String toString() { return "TupleIntInt{" + "f0=" + f0 + ", f1=" + f1 + '}'; } public static TupleIntInt of(int f0, int f1) { return new TupleIntInt(f0, f1); } // ------------------------- Serializers ------------------------- public static final class TupleIntIntSerializer extends TypeSerializer<TupleIntInt> { @Override public TypeSerializerConfigSnapshot snapshotConfiguration() { return null; } @Override public CompatibilityResult<TupleIntInt> ensureCompatibility(TypeSerializerConfigSnapshot configSnapshot) { return null; } @Override public boolean isImmutableType() { return false; } @Override public TypeSerializer<TupleIntInt> duplicate() { return this; } @Override public TupleIntInt createInstance() { return new TupleIntInt(); } @Override public TupleIntInt copy(TupleIntInt from) { return copy(from, new TupleIntInt()); } @Override public TupleIntInt copy(TupleIntInt from, TupleIntInt reuse) { reuse.f0 = from.f0; reuse.f1 = from.f1; return reuse; } @Override public int getLength() { return 8; } @Override public void serialize(TupleIntInt record, DataOutputView target) throws IOException { target.writeInt(record.f0); target.writeInt(record.f1); } @Override public TupleIntInt deserialize(DataInputView source) throws IOException { return deserialize(createInstance(), source); } @Override public TupleIntInt deserialize(TupleIntInt reuse, DataInputView source) throws IOException { reuse.f0 = source.readInt(); reuse.f1 = source.readInt(); return reuse; } @Override public void copy(DataInputView source, DataOutputView target) throws IOException { target.write(source, getLength()); } @Override public boolean equals(Object obj) { return obj instanceof TupleIntIntSerializer; } @Override public boolean canEqual(Object obj) { return obj instanceof TupleIntIntSerializer; } @Override public int hashCode() { return 44; } } }
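A hedged usage sketch of the custom serializer above: round-tripping one value through Flink's in-memory DataOutputSerializer/DataInputDeserializer (class and method names assumed to be available in the Flink version this project builds against).

import org.apache.flink.core.memory.DataInputDeserializer;
import org.apache.flink.core.memory.DataOutputSerializer;
import org.emmalanguage.labyrinth.util.TupleIntInt;

public class TupleIntIntRoundTrip {
    public static void main(String[] args) throws Exception {
        TupleIntInt.TupleIntIntSerializer ser = new TupleIntInt.TupleIntIntSerializer();

        // Serialize into an in-memory buffer (fixed 8-byte record: two ints).
        DataOutputSerializer out = new DataOutputSerializer(16);
        ser.serialize(TupleIntInt.of(7, 42), out);

        // Deserialize from a copy of that buffer.
        DataInputDeserializer in = new DataInputDeserializer(out.getCopyOfBuffer());
        TupleIntInt copy = ser.deserialize(in);
        System.out.println(copy); // TupleIntInt{f0=7, f1=42}
    }
}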
<filename>src/FusionEKF.cpp<gh_stars>0 #include "FusionEKF.h" #include <iostream> #include "Eigen/Dense" #include "tools.h" #include <cmath> using Eigen::MatrixXd; using Eigen::VectorXd; using std::cout; using std::endl; using std::vector; /** * Constructor. */ FusionEKF::FusionEKF() { is_initialized_ = false; previous_timestamp_ = 0; // initializing matrices R_laser_ = MatrixXd(2, 2); R_radar_ = MatrixXd(3, 3); H_laser_ = MatrixXd(2, 4); //measurement covariance matrix - laser R_laser_ << 0.0225, 0, 0, 0.0225; //measurement covariance matrix - radar R_radar_ << 0.09, 0, 0, 0, 0.0009, 0, 0, 0, 0.09; /** * TODO: Finish initializing the FusionEKF. * TODO: Set the process and measurement noises */ H_laser_ << 1, 0, 0, 0, 0, 1, 0, 0; ekf_.P_ = MatrixXd(4, 4); ekf_.P_ << 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1000, 0, 0, 0, 0, 1000; noise_ax = 9.; noise_ay = 9.; } /** * Destructor. */ FusionEKF::~FusionEKF() {} MatrixXd FusionEKF::obtainF(float dt) { MatrixXd F(4,4); F<< 1, 0, dt, 0 , 0, 1, 0 , dt, 0, 0, 1 , 0 , 0, 0, 0 , 1 ; return F; } MatrixXd FusionEKF::obtainQ(float dt) { float dt2 = pow(dt, 2); float dt3 = pow(dt, 3); float dt4 = pow(dt, 4); MatrixXd Q = MatrixXd(4, 4); Q << dt4/4*noise_ax, 0, dt3/2*noise_ax, 0, 0, dt4/4*noise_ay, 0, dt3/2*noise_ay, dt3/2*noise_ax, 0, dt2*noise_ax, 0, 0, dt3/2*noise_ay, 0, dt2*noise_ay; return Q; } void FusionEKF::ProcessMeasurement(const MeasurementPackage &measurement_pack) { /** * Initialization */ cout<<"Measurement: "; if (measurement_pack.sensor_type_ == MeasurementPackage::RADAR) cout<<"Radar"; else cout<<"Lidar"; cout<<endl; cout << "raw measurement = " << measurement_pack.raw_measurements_ << endl; if (!is_initialized_) { previous_timestamp_ = measurement_pack.timestamp_; /** * TODO: Initialize the state ekf_.x_ with the first measurement. * TODO: Create the covariance matrix. * You'll need to convert radar from polar to cartesian coordinates. */ // first measurement cout << "EKF: " << endl; ekf_.x_ = VectorXd(4); ekf_.x_ << 1, 1, 0, 0; if (measurement_pack.sensor_type_ == MeasurementPackage::RADAR) { cout << "Radar Initialization" << endl; float rho = measurement_pack.raw_measurements_[0]; float phi = measurement_pack.raw_measurements_[1]; float rhodot = measurement_pack.raw_measurements_[2]; ekf_.x_(0)= rho*cos(phi); ekf_.x_(1)= rho*sin(phi); // TODO: Convert radar from polar to cartesian coordinates // and initialize state. } else if (measurement_pack.sensor_type_ == MeasurementPackage::LASER) { // TODO: Initialize state. cout << "Lidar Initialization" << endl; ekf_.x_(0) = measurement_pack.raw_measurements_[0]; ekf_.x_(1) = measurement_pack.raw_measurements_[1]; } cout << "x_ = " << ekf_.x_ << endl; cout << "P_ = " << ekf_.P_ << endl; // done initializing, no need to predict or update is_initialized_ = true; return; } /** * Prediction */ /** * TODO: Update the state transition matrix F according to the new elapsed time. * Time is measured in seconds. * TODO: Update the process noise covariance matrix. * Use noise_ax = 9 and noise_ay = 9 for your Q matrix. */ cout<<"Predict"<<endl; float dt = (measurement_pack.timestamp_ - previous_timestamp_)/1000000.0; previous_timestamp_ = measurement_pack.timestamp_; cout<<"dt="<<dt<<endl; if (dt>1e-6){ ekf_.F_ = obtainF(dt); ekf_.Q_ = obtainQ(dt); ekf_.Predict(); } /** * Update */ cout << "x_ = " << ekf_.x_ << endl; cout << "P_ = " << ekf_.P_ << endl; /** * TODO: * - Use the sensor type to perform the update step. * - Update the state and covariance matrices. 
*/ if (measurement_pack.sensor_type_ == MeasurementPackage::RADAR) { // TODO: Radar updates cout << "Radar Update" << endl; ekf_.R_ = R_radar_; ekf_.H_ = tools.CalculateJacobian(ekf_.x_); ekf_.UpdateEKF(measurement_pack.raw_measurements_); } else { // TODO: Laser updates cout << "Lidar Update" << endl; ekf_.R_ = R_laser_; ekf_.H_ = H_laser_; ekf_.Update(measurement_pack.raw_measurements_); } // print the output cout << "x_ = " << ekf_.x_ << endl; cout << "P_ = " << ekf_.P_ << endl; }
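For reference, the state-transition matrix built by obtainF and the process-noise covariance built by obtainQ are the standard constant-velocity-model forms; restated in math (noise_ax and noise_ay play the role of the acceleration variances, sigma_ax^2 = sigma_ay^2 = 9):

F = \begin{bmatrix}
1 & 0 & \Delta t & 0\\
0 & 1 & 0 & \Delta t\\
0 & 0 & 1 & 0\\
0 & 0 & 0 & 1
\end{bmatrix},
\qquad
Q = \begin{bmatrix}
\tfrac{\Delta t^{4}}{4}\sigma_{ax}^{2} & 0 & \tfrac{\Delta t^{3}}{2}\sigma_{ax}^{2} & 0\\
0 & \tfrac{\Delta t^{4}}{4}\sigma_{ay}^{2} & 0 & \tfrac{\Delta t^{3}}{2}\sigma_{ay}^{2}\\
\tfrac{\Delta t^{3}}{2}\sigma_{ax}^{2} & 0 & \Delta t^{2}\sigma_{ax}^{2} & 0\\
0 & \tfrac{\Delta t^{3}}{2}\sigma_{ay}^{2} & 0 & \Delta t^{2}\sigma_{ay}^{2}
\end{bmatrix}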
<filename>database/sql/__init__.py from .insert import insert from .delete import delete from . import select
Emotional competences of teachers in preschool facilities Preschool teachers play an important role in child development: in addition to parents, they are highly influential persons who are in continuous contact with children, represent an authority, and model children's behavior. The knowledge, skills, and abilities, as well as the character traits, of the teacher (competences) are therefore very important, which is why this paper focuses on the competences of teachers in preschool facilities, with special emphasis on their emotional competences, which, as the most important aspects, include recognizing emotions and controlling impulses in oneself and in others. The goal of this research is to determine the level of development of teachers' emotional competences. The research encompassed 108 teachers, who were surveyed using an assessment scale that rates, on grades from 1 to 5, the level of agreement with claims devised on the basis of the theoretical concept by Suzic. The results of the research show that younger teachers (by age and job experience) scored higher on the instrument than older teachers on almost all subscales (except recognizing one's own and others' emotions, and confidence). These data show that younger teachers assess their own emotional competences as better developed than older teachers do. Based on these data, certain recommendations are derived for further research and pedagogic practice.
Effects of tRNALeu1 overproduction in Escherichia coli Strains of Escherichia coli have been produced which express very high levels of the tRNALeu1 isoacceptor. This was accomplished by transforming cells with plasmids containing the leuV operon, which encodes three copies of the tRNALeu1 gene. Most transformants grew very slowly and exhibited a 15-fold increase in cellular concentrations of tRNALeu1. As a result, total cellular tRNA concentration was approximately doubled and 56% of the total was tRNALeu1. We examined a number of parameters which might be expected to be affected by imbalances in tRNA concentration: in vivo tRNA charging levels, misreading, ribosome step time, and tRNA modification. Surprisingly, no increase in intracellular ppGpp levels was detected even though only about 40% of total leucyl tRNA was found to be charged in vivo. Gross ribosomal misreading was not detected, and it was shown that ribosomal step times were reduced between two- and threefold. Analyses of leucyl tRNA isolated from these slow-growing strains showed that at least 90% of the detectable tRNALeu1 was hypomodified, as judged by altered mobility on RPC5 reverse-phase columns and by specific modification assays using tRNA(m1G)methyltransferase and pseudouridylate synthetase. Analysis of fast-growing revertants demonstrated that tRNA concentration per se may not explain growth inhibition, because selected revertants which grew at wild-type growth rates displayed levels of tRNA comparable to that of control strains bearing the leuV operon. A synthetic tRNALeu1 operon under the control of the T7 promoter was prepared which, when induced, produced six- to sevenfold increases in tRNALeu1 levels. This level of tRNALeu1 titrated the modification system as judged by RPC5 column chromatography. Overall, our results suggest that hypomodified tRNA may explain, in part, the observed effects on growth, and that the protein-synthesizing system can tolerate an enormous increase in the concentration of a single tRNA.
// Generated from /backup/apps/grammars/src/main/java/com/datascience9/pb/parse/PowerBuilderParser.g4 by ANTLR 4.9.1 package com.datascience9.pb.parse; import org.antlr.v4.runtime.tree.ParseTreeVisitor; /** * This interface defines a complete generic visitor for a parse tree produced * by {@link PowerBuilderParser}. * * @param <T> The return type of the visit operation. Use {@link Void} for * operations with no return type. */ public interface PowerBuilderParserVisitor<T> extends ParseTreeVisitor<T> { /** * Visit a parse tree produced by {@link PowerBuilderParser#start_rule}. * @param ctx the parse tree * @return the visitor result */ T visitStart_rule(PowerBuilderParser.Start_ruleContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#body_rule}. * @param ctx the parse tree * @return the visitor result */ T visitBody_rule(PowerBuilderParser.Body_ruleContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#forward_decl}. * @param ctx the parse tree * @return the visitor result */ T visitForward_decl(PowerBuilderParser.Forward_declContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#datatype_decl}. * @param ctx the parse tree * @return the visitor result */ T visitDatatype_decl(PowerBuilderParser.Datatype_declContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#type_variables_decl}. * @param ctx the parse tree * @return the visitor result */ T visitType_variables_decl(PowerBuilderParser.Type_variables_declContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#global_variables_decl}. * @param ctx the parse tree * @return the visitor result */ T visitGlobal_variables_decl(PowerBuilderParser.Global_variables_declContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#variable_decl}. * @param ctx the parse tree * @return the visitor result */ T visitVariable_decl(PowerBuilderParser.Variable_declContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#variable_decl_sub}. * @param ctx the parse tree * @return the visitor result */ T visitVariable_decl_sub(PowerBuilderParser.Variable_decl_subContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#variable_decl_sub0}. * @param ctx the parse tree * @return the visitor result */ T visitVariable_decl_sub0(PowerBuilderParser.Variable_decl_sub0Context ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#variable_decl_sub1}. * @param ctx the parse tree * @return the visitor result */ T visitVariable_decl_sub1(PowerBuilderParser.Variable_decl_sub1Context ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#variable_decl_sub2}. * @param ctx the parse tree * @return the visitor result */ T visitVariable_decl_sub2(PowerBuilderParser.Variable_decl_sub2Context ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#variable_decl_event}. * @param ctx the parse tree * @return the visitor result */ T visitVariable_decl_event(PowerBuilderParser.Variable_decl_eventContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#decimal_decl_sub}. * @param ctx the parse tree * @return the visitor result */ T visitDecimal_decl_sub(PowerBuilderParser.Decimal_decl_subContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#array_decl_sub}. 
* @param ctx the parse tree * @return the visitor result */ T visitArray_decl_sub(PowerBuilderParser.Array_decl_subContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#constant_decl_sub}. * @param ctx the parse tree * @return the visitor result */ T visitConstant_decl_sub(PowerBuilderParser.Constant_decl_subContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#constant_decl}. * @param ctx the parse tree * @return the visitor result */ T visitConstant_decl(PowerBuilderParser.Constant_declContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#function_forward_decl}. * @param ctx the parse tree * @return the visitor result */ T visitFunction_forward_decl(PowerBuilderParser.Function_forward_declContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#function_forward_decl_alias}. * @param ctx the parse tree * @return the visitor result */ T visitFunction_forward_decl_alias(PowerBuilderParser.Function_forward_decl_aliasContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#parameter_sub}. * @param ctx the parse tree * @return the visitor result */ T visitParameter_sub(PowerBuilderParser.Parameter_subContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#parameters_list_sub}. * @param ctx the parse tree * @return the visitor result */ T visitParameters_list_sub(PowerBuilderParser.Parameters_list_subContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#functions_forward_decl}. * @param ctx the parse tree * @return the visitor result */ T visitFunctions_forward_decl(PowerBuilderParser.Functions_forward_declContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#function_body}. * @param ctx the parse tree * @return the visitor result */ T visitFunction_body(PowerBuilderParser.Function_bodyContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#on_body}. * @param ctx the parse tree * @return the visitor result */ T visitOn_body(PowerBuilderParser.On_bodyContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#event_forward_decl}. * @param ctx the parse tree * @return the visitor result */ T visitEvent_forward_decl(PowerBuilderParser.Event_forward_declContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#event_body}. * @param ctx the parse tree * @return the visitor result */ T visitEvent_body(PowerBuilderParser.Event_bodyContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#access_type}. * @param ctx the parse tree * @return the visitor result */ T visitAccess_type(PowerBuilderParser.Access_typeContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#access_modif}. * @param ctx the parse tree * @return the visitor result */ T visitAccess_modif(PowerBuilderParser.Access_modifContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#access_modif_part}. * @param ctx the parse tree * @return the visitor result */ T visitAccess_modif_part(PowerBuilderParser.Access_modif_partContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#scope_modif}. * @param ctx the parse tree * @return the visitor result */ T visitScope_modif(PowerBuilderParser.Scope_modifContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#expression}. 
* @param ctx the parse tree * @return the visitor result */ T visitExpression(PowerBuilderParser.ExpressionContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#value}. * @param ctx the parse tree * @return the visitor result */ T visitValue(PowerBuilderParser.ValueContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#expression_list}. * @param ctx the parse tree * @return the visitor result */ T visitExpression_list(PowerBuilderParser.Expression_listContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#boolean_expression}. * @param ctx the parse tree * @return the visitor result */ T visitBoolean_expression(PowerBuilderParser.Boolean_expressionContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#condition_or}. * @param ctx the parse tree * @return the visitor result */ T visitCondition_or(PowerBuilderParser.Condition_orContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#condition_and}. * @param ctx the parse tree * @return the visitor result */ T visitCondition_and(PowerBuilderParser.Condition_andContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#condition_not}. * @param ctx the parse tree * @return the visitor result */ T visitCondition_not(PowerBuilderParser.Condition_notContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#condition_comparison}. * @param ctx the parse tree * @return the visitor result */ T visitCondition_comparison(PowerBuilderParser.Condition_comparisonContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#add_expr}. * @param ctx the parse tree * @return the visitor result */ T visitAdd_expr(PowerBuilderParser.Add_exprContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#mul_expr}. * @param ctx the parse tree * @return the visitor result */ T visitMul_expr(PowerBuilderParser.Mul_exprContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#unary_sign_expr}. * @param ctx the parse tree * @return the visitor result */ T visitUnary_sign_expr(PowerBuilderParser.Unary_sign_exprContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#statement}. * @param ctx the parse tree * @return the visitor result */ T visitStatement(PowerBuilderParser.StatementContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#public_statement}. * @param ctx the parse tree * @return the visitor result */ T visitPublic_statement(PowerBuilderParser.Public_statementContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#throw_statement}. * @param ctx the parse tree * @return the visitor result */ T visitThrow_statement(PowerBuilderParser.Throw_statementContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#goto_statement}. * @param ctx the parse tree * @return the visitor result */ T visitGoto_statement(PowerBuilderParser.Goto_statementContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#statement_sub}. * @param ctx the parse tree * @return the visitor result */ T visitStatement_sub(PowerBuilderParser.Statement_subContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#try_catch_statement}. * @param ctx the parse tree * @return the visitor result */ T visitTry_catch_statement(PowerBuilderParser.Try_catch_statementContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#sql_statement}. 
* @param ctx the parse tree * @return the visitor result */ T visitSql_statement(PowerBuilderParser.Sql_statementContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#sql_insert_statement}. * @param ctx the parse tree * @return the visitor result */ T visitSql_insert_statement(PowerBuilderParser.Sql_insert_statementContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#sql_values}. * @param ctx the parse tree * @return the visitor result */ T visitSql_values(PowerBuilderParser.Sql_valuesContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#sql_delete_statement}. * @param ctx the parse tree * @return the visitor result */ T visitSql_delete_statement(PowerBuilderParser.Sql_delete_statementContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#sql_select_statement}. * @param ctx the parse tree * @return the visitor result */ T visitSql_select_statement(PowerBuilderParser.Sql_select_statementContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#sql_update_statement}. * @param ctx the parse tree * @return the visitor result */ T visitSql_update_statement(PowerBuilderParser.Sql_update_statementContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#sql_connect_statement}. * @param ctx the parse tree * @return the visitor result */ T visitSql_connect_statement(PowerBuilderParser.Sql_connect_statementContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#set_value}. * @param ctx the parse tree * @return the visitor result */ T visitSet_value(PowerBuilderParser.Set_valueContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#where_clause}. * @param ctx the parse tree * @return the visitor result */ T visitWhere_clause(PowerBuilderParser.Where_clauseContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#select_clause}. * @param ctx the parse tree * @return the visitor result */ T visitSelect_clause(PowerBuilderParser.Select_clauseContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#sql_commit_statement}. * @param ctx the parse tree * @return the visitor result */ T visitSql_commit_statement(PowerBuilderParser.Sql_commit_statementContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#execute_statement}. * @param ctx the parse tree * @return the visitor result */ T visitExecute_statement(PowerBuilderParser.Execute_statementContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#close_sql_statement}. * @param ctx the parse tree * @return the visitor result */ T visitClose_sql_statement(PowerBuilderParser.Close_sql_statementContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#declare_procedure_statement}. * @param ctx the parse tree * @return the visitor result */ T visitDeclare_procedure_statement(PowerBuilderParser.Declare_procedure_statementContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#declare_cursor_statement}. * @param ctx the parse tree * @return the visitor result */ T visitDeclare_cursor_statement(PowerBuilderParser.Declare_cursor_statementContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#open_cursor_statement}. * @param ctx the parse tree * @return the visitor result */ T visitOpen_cursor_statement(PowerBuilderParser.Open_cursor_statementContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#close_cursor_statement}. 
* @param ctx the parse tree * @return the visitor result */ T visitClose_cursor_statement(PowerBuilderParser.Close_cursor_statementContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#fetch_into_statement}. * @param ctx the parse tree * @return the visitor result */ T visitFetch_into_statement(PowerBuilderParser.Fetch_into_statementContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#prepare_sql_stateent}. * @param ctx the parse tree * @return the visitor result */ T visitPrepare_sql_stateent(PowerBuilderParser.Prepare_sql_stateentContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#increment_decrement_statement}. * @param ctx the parse tree * @return the visitor result */ T visitIncrement_decrement_statement(PowerBuilderParser.Increment_decrement_statementContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#assignment_rhs}. * @param ctx the parse tree * @return the visitor result */ T visitAssignment_rhs(PowerBuilderParser.Assignment_rhsContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#describe_function_call}. * @param ctx the parse tree * @return the visitor result */ T visitDescribe_function_call(PowerBuilderParser.Describe_function_callContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#assignment_statement}. * @param ctx the parse tree * @return the visitor result */ T visitAssignment_statement(PowerBuilderParser.Assignment_statementContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#variable_name}. * @param ctx the parse tree * @return the visitor result */ T visitVariable_name(PowerBuilderParser.Variable_nameContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#return_statement}. * @param ctx the parse tree * @return the visitor result */ T visitReturn_statement(PowerBuilderParser.Return_statementContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#function_call_expression_sub}. * @param ctx the parse tree * @return the visitor result */ T visitFunction_call_expression_sub(PowerBuilderParser.Function_call_expression_subContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#function_name}. * @param ctx the parse tree * @return the visitor result */ T visitFunction_name(PowerBuilderParser.Function_nameContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#function_event_call}. * @param ctx the parse tree * @return the visitor result */ T visitFunction_event_call(PowerBuilderParser.Function_event_callContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#function_virtual_call_expression_sub}. * @param ctx the parse tree * @return the visitor result */ T visitFunction_virtual_call_expression_sub(PowerBuilderParser.Function_virtual_call_expression_subContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#open_call_sub}. * @param ctx the parse tree * @return the visitor result */ T visitOpen_call_sub(PowerBuilderParser.Open_call_subContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#close_call_sub}. * @param ctx the parse tree * @return the visitor result */ T visitClose_call_sub(PowerBuilderParser.Close_call_subContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#function_call_statement}. 
* @param ctx the parse tree * @return the visitor result */ T visitFunction_call_statement(PowerBuilderParser.Function_call_statementContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#ancestor_function_call}. * @param ctx the parse tree * @return the visitor result */ T visitAncestor_function_call(PowerBuilderParser.Ancestor_function_callContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#call_statement}. * @param ctx the parse tree * @return the visitor result */ T visitCall_statement(PowerBuilderParser.Call_statementContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#super_call_statement}. * @param ctx the parse tree * @return the visitor result */ T visitSuper_call_statement(PowerBuilderParser.Super_call_statementContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#ancestor_event_call_statement}. * @param ctx the parse tree * @return the visitor result */ T visitAncestor_event_call_statement(PowerBuilderParser.Ancestor_event_call_statementContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#event_call_statement_sub}. * @param ctx the parse tree * @return the visitor result */ T visitEvent_call_statement_sub(PowerBuilderParser.Event_call_statement_subContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#event_call_statement}. * @param ctx the parse tree * @return the visitor result */ T visitEvent_call_statement(PowerBuilderParser.Event_call_statementContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#create_call_sub}. * @param ctx the parse tree * @return the visitor result */ T visitCreate_call_sub(PowerBuilderParser.Create_call_subContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#create_call_statement}. * @param ctx the parse tree * @return the visitor result */ T visitCreate_call_statement(PowerBuilderParser.Create_call_statementContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#destroy_call_sub}. * @param ctx the parse tree * @return the visitor result */ T visitDestroy_call_sub(PowerBuilderParser.Destroy_call_subContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#destroy_call_statement}. * @param ctx the parse tree * @return the visitor result */ T visitDestroy_call_statement(PowerBuilderParser.Destroy_call_statementContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#for_loop_statement}. * @param ctx the parse tree * @return the visitor result */ T visitFor_loop_statement(PowerBuilderParser.For_loop_statementContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#do_while_loop_statement}. * @param ctx the parse tree * @return the visitor result */ T visitDo_while_loop_statement(PowerBuilderParser.Do_while_loop_statementContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#do_loop_while_statement}. * @param ctx the parse tree * @return the visitor result */ T visitDo_loop_while_statement(PowerBuilderParser.Do_loop_while_statementContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#if_statement}. * @param ctx the parse tree * @return the visitor result */ T visitIf_statement(PowerBuilderParser.If_statementContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#if_simple_statement}. 
* @param ctx the parse tree * @return the visitor result */ T visitIf_simple_statement(PowerBuilderParser.If_simple_statementContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#continue_statement}. * @param ctx the parse tree * @return the visitor result */ T visitContinue_statement(PowerBuilderParser.Continue_statementContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#continue_sub}. * @param ctx the parse tree * @return the visitor result */ T visitContinue_sub(PowerBuilderParser.Continue_subContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#post_event}. * @param ctx the parse tree * @return the visitor result */ T visitPost_event(PowerBuilderParser.Post_eventContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#exit_statement}. * @param ctx the parse tree * @return the visitor result */ T visitExit_statement(PowerBuilderParser.Exit_statementContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#choose_statement}. * @param ctx the parse tree * @return the visitor result */ T visitChoose_statement(PowerBuilderParser.Choose_statementContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#choose_case_value_sub}. * @param ctx the parse tree * @return the visitor result */ T visitChoose_case_value_sub(PowerBuilderParser.Choose_case_value_subContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#choose_case_cond_sub}. * @param ctx the parse tree * @return the visitor result */ T visitChoose_case_cond_sub(PowerBuilderParser.Choose_case_cond_subContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#choose_case_else_sub}. * @param ctx the parse tree * @return the visitor result */ T visitChoose_case_else_sub(PowerBuilderParser.Choose_case_else_subContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#label_stat}. * @param ctx the parse tree * @return the visitor result */ T visitLabel_stat(PowerBuilderParser.Label_statContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#identifier}. * @param ctx the parse tree * @return the visitor result */ T visitIdentifier(PowerBuilderParser.IdentifierContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#string_literal}. * @param ctx the parse tree * @return the visitor result */ T visitString_literal(PowerBuilderParser.String_literalContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#identifier_array}. * @param ctx the parse tree * @return the visitor result */ T visitIdentifier_array(PowerBuilderParser.Identifier_arrayContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#operator}. * @param ctx the parse tree * @return the visitor result */ T visitOperator(PowerBuilderParser.OperatorContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#identifier_name_ex}. * @param ctx the parse tree * @return the visitor result */ T visitIdentifier_name_ex(PowerBuilderParser.Identifier_name_exContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#identifier_name}. * @param ctx the parse tree * @return the visitor result */ T visitIdentifier_name(PowerBuilderParser.Identifier_nameContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#bind_param}. 
* @param ctx the parse tree * @return the visitor result */ T visitBind_param(PowerBuilderParser.Bind_paramContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#atom_sub}. * @param ctx the parse tree * @return the visitor result */ T visitAtom_sub(PowerBuilderParser.Atom_subContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#atom_sub_call1}. * @param ctx the parse tree * @return the visitor result */ T visitAtom_sub_call1(PowerBuilderParser.Atom_sub_call1Context ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#atom_sub_member1}. * @param ctx the parse tree * @return the visitor result */ T visitAtom_sub_member1(PowerBuilderParser.Atom_sub_member1Context ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#array_access_atom}. * @param ctx the parse tree * @return the visitor result */ T visitArray_access_atom(PowerBuilderParser.Array_access_atomContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#data_type_name}. * @param ctx the parse tree * @return the visitor result */ T visitData_type_name(PowerBuilderParser.Data_type_nameContext ctx); /** * Visit a parse tree produced by {@link PowerBuilderParser#dataTypeSub}. * @param ctx the parse tree * @return the visitor result */ T visitDataTypeSub(PowerBuilderParser.DataTypeSubContext ctx); }
import pickle from model.entity_quotes import * from model.complete_quote import * from model.utils import * from itertools import * from pathlib2 import Path from requests.utils import requote_uri import re import rdflib from rdflib.namespace import CSVW, DC, DCAT, DCTERMS, DOAP, FOAF, ODRL2, ORG, OWL, \ PROF, PROV, RDF, RDFS, SDO, SH, SKOS, SOSA, SSN, TIME, \ VOID, XMLNS, XSD from rdflib import Namespace from rdflib import Graph from rdflib import URIRef, BNode, Literal import urllib all_languages = ["aa","ab","ae","af","ak","am","an","ar","as","av","ay","az","ba","be","bg","bh","bi","bm","bn","bo","br","bs","ca","ce","ch","co","cr","cs","cu","cv","cy","da","de","dv","dz","ee","el","en","eo","es","et","eu","fa","ff","fi","fj","fo","fr","fy","ga","gd","gl","gn","gu","gv","ha","he","hi","ho","hr","ht","hu","hy","hz","ia","id","ie","ig","ii","ik","io","is","it","iu","ja","jv","ka","kg","ki","kj","kk","kl","km","kn","ko","kr","ks","ku","kv","kw","ky","la","lb","lg","li","ln","lo","lt","lu","lv","mg","mh","mi","mk","ml","mn","mr","ms","mt","my","na","nb","nd","ne","ng","nl","nn","no","nr","nv","ny","oc","oj","om","or","os","pa","pi","pl","ps","pt","qu","rm","rn","ro","ru","rw","sa","sc","sd","se","sg","si","sk","sl","sm","sn","so","sq","sr","ss","st","su","sv","sw","ta","te","tg","th","ti","tk","tl","tn","to","tr","ts","tt","tw","ty","ug","uk","ur","uz","vi","vo","wa","wo","xh","yi","yo","za","zh","zu"] current_date = (2021, 4, 20) # this needs to be synchronised with erroneous results from datefinder pattern_brackets = re.compile(r'\[[^\]]*\]') # https://stackoverflow.com/questions/640001/how-can-i-remove-text-within-parentheses-with-a-regex pattern_brackets2 = re.compile(r'\([^\)]*\)') # https://stackoverflow.com/questions/640001/how-can-i-remove-text-within-parentheses-with-a-regex pattern_ref = re.compile(r'<ref[^<]*</ref>') pattern_ref2 = re.compile(r'<ref>[^<]*$') pattern_ref3 = re.compile(r'<ref[^>]*/>') pattern_small = re.compile(r'<small[^<]*</small>') pattern_small2 = re.compile(r'<small>[^<]*$') pattern_small3 = re.compile(r'<small[^>]*/>') pattern_multiple_spaces = re.compile(r' +') class RDFEntity: def __init__(self, uri): self.uri = uri self.types = set() self.wikiquoteIds = dict() self.wikiquotePageIds = dict() self.wikidata_id = None self.labels = dict() def cleanText(text, isQuote=False): # remove everything in [] text = re.sub(pattern_brackets, "", text) # remove everything in "()" if it is quote text (not in context) if isQuote: text = re.sub(pattern_brackets2, "", text) # remove everything in "<ref" text = re.sub(pattern_ref, "", text) text = re.sub(pattern_ref2, "", text) text = re.sub(pattern_ref3, "", text) # remove everything in "<small" text = re.sub(pattern_small, "", text) text = re.sub(pattern_small2, "", text) text = re.sub(pattern_small3, "", text) # remove quotation marks at the start and end if text.endswith('".'): text = text[:-2] + '."' if text.endswith('».'): text = text[:-2] + '.»' text = text.strip('\"') text = text.strip('\\"') text = text.strip('"-”„«»') # remove any sequences of spaces text = re.sub(pattern_multiple_spaces, ' ', text) # strip spaces at begin and end text = text.strip() if text == "{{Citace monografie": text = None return text def createDateString(selected_date): year = str(selected_date[0]) if len(year) > 4: return None while len(year) != 4: year = "0" + year month = selected_date[1] if month < 1 or month > 12: print("Error month:",month) return None month = str(month) if len(month) < 2: month = "0" + month day = selected_date[2] if day 
< 1 or day > 31: print("Error day:",day) return None day = str(day) if len(day) < 2: day = "0" + day return year + "-" + month + "-" + day print(cleanText("\"W1 W2 [2] [3] <ref>test</ref> W3 <ref mode='ccc'/> W4 <ref mode='aaa'>xyz</ref> W5 <small>"), flush=True) # a completeQuote object contains a quotes dictionary attribute, and an id attribute # the quotes dictionary is composed of {language : quote} pairs # the quote value is an object of the class untemplatedQuote or of the class templatedQuote # the quote object can contain different attributes depending on what was extracted, but # the string of the quote will always be in the quote attribute of the object languages = ["si","is","sa","vi","da","ka","hi","uz","eu","ku","ro","kn","sq","ml","no","cy","be","te","ur","th","gl","gu","simple","sr","sah","ta","la","ja","nl","ko","hu","li","sv","id","nn","su","el","hy","hr","ar","bg","et","zh","eo","lt","sl","az","fi","ca","tr","bs","sk","he","uk","fr","es","pt","de","fa","cs","ru","pl","it","en"] languages.reverse() QKG = Namespace("http://quotekg.l3s.uni-hannover.de/resource/") SO = Namespace("https://schema.org/") WD = Namespace("http://www.wikidata.org/entity/") DBO = Namespace("http://dbpedia.org/ontology/") wqCurid = dict() wq = dict() dbp = dict() for lang in languages: wqCurid[lang] = URIRef("https://"+lang+".wikiquote.org/wiki?curid=") wq[lang] = URIRef("https://"+lang+".wikiquote.org/wiki/") dbp[lang] = "http://"+lang+".dbpedia.org/resource/" g = Graph() g.bind("qkg", QKG) g.bind("rdf", RDF) g.bind("rdfs", RDFS) g.bind("so", SO) g.bind("owl", OWL) g.bind("wd", WD) g.bind("skos", SKOS) g.bind("dbo", DBO) g.bind("dcterms", DCTERMS) g.bind("xsd", XSD) file = "/home/gottschalk/quotekg/rdf/corpus_evaluation_fast_0.8.pkl" with open(file,"rb") as f: corpus = pickle.load(f) # choose n to get list of all completeEntity objects with n aligned quotes print("Data loaded.", flush=True) completeQuoteId = 1 quotationId = 1 personId = 1 entityId = 1 contextId = 1 entity_dict = dict() existing_quotes = dict() # convert URLs in an RDF-friendly format def cleanURL(url): return requote_uri(url) # create "Context" object for templated quotations def processTemplateContext(g, templateObject, contextURI, page_language): if isinstance(templateObject,str): text = cleanText(templateObject) if text: g.add((contextURI, SO.text, Literal(text, page_language))) elif isinstance(templateObject, Line): text = cleanText(templateObject.text) if text: g.add((contextURI, SO.text, Literal(text, page_language))) if templateObject.external_links: for external_link in templateObject.external_links: g.add((contextURI, SO.source, URIRef(cleanURL(external_link.uri)))) # collect triples for all quotations for completeQuote in corpus.completeQuotes.values(): if completeQuoteId % 500 == 0: print("Quotation",completeQuoteId,flush=True) # each complete quotation needs to have at least one valid quotation valid = False for lang, quotes in completeQuote.quotes.items(): for quote in quotes: if not quote.about: valid = True break if valid: break if not valid: continue completeEntity = completeQuote.entity # quotation quotationURI = URIRef(QKG)+"Quotation" + str(completeQuoteId) completeQuoteId += 1 g.add((quotationURI, RDF.type, SO.Quotation)) # person wikidata_id = completeEntity.wikidata_id if wikidata_id not in existing_quotes: existing_quotes[wikidata_id] = dict() if wikidata_id not in entity_dict: personURI = URIRef(QKG)+"Person" + str(personId) person = RDFEntity(personURI) person.wikidata_id = wikidata_id for lang in 
completeEntity.wikiquoteIds: person.wikiquoteIds[lang] = completeEntity.wikiquoteIds[lang] person.labels[lang] = completeEntity.wikiquoteIds[lang] for lang in completeEntity.wikiquotePageIds: person.wikiquotePageIds[lang] = completeEntity.wikiquotePageIds[lang] entity_dict[wikidata_id] = person person.types.add("Person") personId += 1 else: person = entity_dict[wikidata_id] personURI = person.uri person.types.add("Person") g.add((quotationURI, SO.spokenByCharacter, personURI)) misattributed = False date_candidates_with_year = set() date_candidates_with_month = set() date_candidates_with_day = set() found_dates = False years = set() complete_dates=set() for dates in completeQuote.dates: for date in dates: if date: if date == current_date: continue if isinstance(date, tuple): if len(date) == 3: complete_dates.add(date) else: years.add(date[0]) else: years.add(date) found_dates = True selected_year = None selected_date = None if found_dates: # if there is any conflict: ignore dates if len(years) <= 1 and len(complete_dates) <= 1: if len(complete_dates) == 0: selected_year = years.pop() elif len(years) == 0: selected_date = complete_dates.pop() selected_year = selected_date[0] else: selected_year = years.pop() selected_date = complete_dates.pop() if selected_year != selected_date[0]: selected_year = None selected_date = None if selected_date: date_string = createDateString(selected_date) if date_string: g.add((quotationURI, SO.dateCreated,Literal(date_string, datatype=XSD.date))) if selected_year: g.add((quotationURI, DBO.year,Literal(str(selected_year), datatype=XSD.gYear))) for lang, quotes in completeQuote.quotes.items(): if lang not in all_languages: continue for quote in quotes: if quote.misattributed: misattributed = True if quote.about: continue if "http" in quote.quote: # TODO: Remove and do this in pre-processing continue # linked entities if quote.entities: for entity in quote.entities: if entity.wikidata_id: wikidata_id = entity.wikidata_id if wikidata_id not in entity_dict: if "Person" in entity.types: entityURI = URIRef(QKG)+"Person" + str(personId) personId += 1 else: entityURI = URIRef(QKG)+"Entity" + str(entityId) entityId += 1 rdf_entity = RDFEntity(entityURI) rdf_entity.wikidata_id = wikidata_id entity_dict[wikidata_id] = rdf_entity else: rdf_entity = entity_dict[wikidata_id] rdf_entity.wikiquoteIds[quote.page_language] = entity.wikiquote_id rdf_entity.labels[quote.page_language] = entity.wikiquote_id for entity_type in entity.types: rdf_entity.types.add(entity_type) g.add((quotationURI, SO.mentions, rdf_entity.uri)) if quote.footnotes: for footnote in quote.footnotes: # footnotes are text-only contexts text = cleanText(footnote) if text: contextURI = URIRef(QKG)+"Context" + str(contextId) g.add((contextURI, RDF.type, QKG.Context)) g.add((contextURI, SO.text, Literal(text, quote.page_language))) g.add((contextURI, SO.inLanguage, Literal(quote.page_language, datatype=XSD.language))) g.add((quotationTextURI, QKG.hasContext, contextURI)) contextId += 1 text = quote.quote if lang not in existing_quotes[completeEntity.wikidata_id]: existing_quotes[completeEntity.wikidata_id][lang] = set() if text in existing_quotes[completeEntity.wikidata_id][lang]: # Remove duplicates. TODO: Which of the duplicates to keep? What if quotation is empty? 
continue existing_quotes[completeEntity.wikidata_id][lang].add(text) quotationTextURI = URIRef(QKG)+"QuotationText" + str(quotationId) quotationId += 1 g.add((quotationURI, QKG.hasText, quotationTextURI)) g.add((quotationTextURI, RDF.type, QKG.QuotationText)) # text = cleanText(quote.quote, isQuote = True) # TODO: check if text is non-empty g.add((quotationTextURI, SO.text, Literal(cleanText(quote.quote), lang))) g.add((quotationTextURI, SO.inLanguage, Literal(lang, datatype=XSD.language))) # print(quote.section_titles) # TODO: Section titles for section_title in quote.section_titles: section_title_text = cleanText(section_title) if section_title: if len(section_title) >= 3 and section_title != 'Quotes': if quote.page_language not in person.wikiquoteIds or person.wikiquoteIds[quote.page_language] != section_title: g.add((quotationTextURI, DCTERMS.description, Literal(section_title, quote.page_language))) if quote.contexts: for context in quote.contexts: contextURI = URIRef(QKG)+"Context" + str(contextId) #print(text) #text = cleanText(context.text) text = context.text g.add((contextURI, RDF.type, QKG.Context)) g.add((quotationTextURI, QKG.hasContext, contextURI)) if text: g.add((contextURI, SO.text, Literal(text, quote.page_language))) g.add((contextURI, SO.inLanguage, Literal(quote.page_language, datatype=XSD.language))) contextId += 1 for external_link in context.external_links: g.add((contextURI, SO.source, URIRef(cleanURL(external_link.uri)))) if quote.source or quote.comment or quote.explanation or quote.notes or quote.title: contextURI = URIRef(QKG)+"Context" + str(contextId) contextId += 1 # addTemplateContext() if quote.source: processTemplateContext(g, quote.source, contextURI, quote.page_language) if quote.comment: processTemplateContext(g, quote.comment, contextURI, quote.page_language) if quote.explanation: processTemplateContext(g, quote.explanation, contextURI, quote.page_language) if quote.notes: processTemplateContext(g, quote.notes, contextURI, quote.page_language) if quote.title: processTemplateContext(g, quote.title, contextURI, quote.page_language) g.add((contextURI, RDF.type, QKG.Context)) g.add((quotationTextURI, QKG.hasContext, contextURI)) #if found_dates: # print("") g.add((quotationURI, QKG.isMisattributed, Literal(misattributed, datatype=XSD.boolean))) print("Open Wikidata->DBpedia mapping") wikidata_to_dbpedia = dict() used_wikidata_ids = entity_dict.keys() wikidata_to_dbpedia_file = open("sameas-all-wikis.csv") for line in wikidata_to_dbpedia_file: wikidata_id, dbpedia_id = line.split() if wikidata_id in used_wikidata_ids: if wikidata_id not in wikidata_to_dbpedia: wikidata_to_dbpedia[wikidata_id] = [] if dbpedia_id.startswith("http://dbpedia.org/resource/"): # English wikidata_to_dbpedia[wikidata_id].append(dbpedia_id) else: for lang in languages: if dbpedia_id.startswith("http://" + lang + ".dbpedia.org/resource/"): wikidata_to_dbpedia[wikidata_id].append(dbpedia_id) print("Create entity triples",flush=True) for entity in entity_dict.values(): if "Person" in entity.types: g.add((entity.uri, RDF.type, SO.Person)) for entity_type in entity.types: g.add((entity.uri, RDF.type, URIRef(DBO)+entity_type)) g.add((entity.uri, OWL.sameAs, URIRef(WD)+entity.wikidata_id)) #for lang, wikiquotePageId in entity.wikiquotePageIds.items(): # g.add((entity.uri, OWL.sameAs, wqCurid[lang] + str(wikiquotePageId))) for lang, wikiquoteId in entity.wikiquoteIds.items(): g.add((entity.uri, OWL.sameAs, wq[lang] + requests.utils.quote(wikiquoteId))) g.add((entity.uri, RDFS.label, 
Literal(wikiquoteId, lang))) if entity.wikidata_id in wikidata_to_dbpedia: for dbpedia_url in wikidata_to_dbpedia[entity.wikidata_id]: g.add((entity.uri, OWL.sameAs, URIRef(dbpedia_url))) # Take the label of the most prioritised language for lang in languages: if lang in entity.wikiquoteIds: label = entity.wikiquoteIds[lang] break if label: g.add((entity.uri, SKOS.prefLabel, Literal(label))) print("Write file",flush=True) filename = "quotekg.ttl" file = open(filename, "w") file.write(g.serialize(format='ttl').decode("utf-8")) file.close() # fix rdflib issue https://github.com/RDFLib/rdflib/issues/747 print("Fix gYear bug", flush=True) path = Path(filename) text = path.read_text() text = text.replace('-01-01"^^xsd:gYear', '"^^xsd:gYear') path.write_text(text) print("Done")
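As a quick sanity check on the export, the serialized file can be re-loaded and queried with rdflib. The snippet below is a minimal sketch and not part of the original pipeline: it assumes the quotekg.ttl file written above and simply counts the schema.org Quotation resources that the script adds; the query text and variable names are illustrative.

# Minimal sketch (assumption): re-load quotekg.ttl and count so:Quotation resources.
import rdflib

check_graph = rdflib.Graph()
check_graph.parse("quotekg.ttl", format="turtle")

count_query = """
PREFIX so: <https://schema.org/>
SELECT (COUNT(?q) AS ?n) WHERE { ?q a so:Quotation . }
"""
for row in check_graph.query(count_query):
    print("Quotations in the exported graph:", row.n)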
Genome-wide analysis and identification of KT/HAK/KUP potassium transporter gene family in peach (Prunus persica). The KT/HAK/KUP family members encoding high-affinity potassium (K(+)) transporters mediate K(+) transport across the plasma membranes of plant cells to maintain normal plant growth and metabolic activities. In this paper, we identified 16 potassium transporter genes in the peach (Prunus persica) using the Hidden Markov model scanning strategy and searching the peach genome database. Utilizing the Arabidopsis KT/HAK/KUP family as a reference, phylogenetic analysis indicates that the KT/HAK/KUP family in the peach can be classified into 3 groups. Genomic localization indicated that the 16 KT/HAK/KUP family genes were well distributed on 7 scaffolds. Gene structure analysis showed that the KT/HAK/KUP family genes have 6-9 introns. In addition, all of the KT/HAK/KUP family members were hydrophobic proteins; they exhibited similar secondary structure patterns and homologous tertiary structures. Putative cis-elements involved in abiotic stress adaptation, Ca(2+) response, light and circadian rhythm regulation, and seed development were observed in the promoters of the KT/HAK/KUP family genes. Subcellular localization prediction indicated that the KT/HAK/KUP members were mainly located in the plasma membrane. Expression levels of the KT/HAK/KUP family genes were much higher in the fruit and flower than those in the other 7 tissues examined, indicating that the KT/HAK/KUP family genes may have important roles in K(+) uptake and transport, which mainly contribute to flower formation and fruit development in the peach.
import re
from pprint import pprint

from bs4 import BeautifulSoup


def parseGoals(shape=None, weights=[.2, .2, .2, .2, .2], verbose=False):
    # shape and weights are currently unused; they are kept for API compatibility.
    dpath = "../data/Goals/"
    fnames = ["100 Life Goals - 1.html", "100 Life Goals - 2.html", "100 Life Goals - 3.html"]
    # Topic headings expected in the source pages.
    known_topics = ['financial', 'spiritual', 'social', 'knowledge', 'personal',
                    'health', 'adventure', 'career', 'unknown']
    goal_json = []
    for fname in fnames:
        path = dpath + fname
        if verbose:
            print("Parsing {}...".format(path))
        with open(path, 'r') as f:
            source = f.read()
        parsed = BeautifulSoup(source, 'lxml')
        cur_topic = ''
        for child in parsed.recursiveChildGenerator():
            if not child.name:
                continue
            # <h2> headings mark the start of a new topic section.
            if child.name == "h2":
                if verbose:
                    print(">> {}".format(child.text))
                cur_topic = child.text.lower()
            # <p> elements hold individual goals, prefixed with "<number>. ".
            if child.name == 'p':
                tmp = re.sub(r"(\d+)\. ", "", child.text, 0, re.MULTILINE)
                tmp = re.sub(r"\s+", " ", tmp)
                goal = tmp.strip().replace(r"\n", "")
                if cur_topic not in known_topics and verbose:
                    print("!! Unknown goal topic: {}".format(cur_topic))
                goal_json.append({"topic": cur_topic, "goal": goal})
    if verbose:
        pprint(goal_json)
        print("Total goals: {}".format(len(goal_json)))
    return goal_json
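A minimal usage sketch for parseGoals follows; the HTML files under ../data/Goals/ are assumed to be present, and the per-topic tally is only an illustration of the returned structure.

# Illustrative usage of parseGoals (assumes the "../data/Goals/" HTML files exist).
from collections import Counter

goals = parseGoals(verbose=False)
by_topic = Counter(goal["topic"] for goal in goals)
for topic, count in by_topic.most_common():
    print("{}: {} goals".format(topic, count))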
A study of vocal cord palsy. Vocal cord paralysis is a clinical problem that comes to the internist's attention frequently, but most recent reviews have been in the otolaryngologic literature. For this reason, and because interdisciplinary investigation is commonly required, the present study was undertaken with the following objectives: examination of the array of causes of cord paralysis as currently seen; examination of the diagnostic usefulness of the various elements of the modern workup of this presenting syndrome; and inquiry into the prognosis of patients in whom no cause for cord palsy can be identified at the time of the initial workup. Paralysis of one or both vocal cords is usually due to a lesion involving the vagus somewhere in its course, from the lower motor neurons arising in the nucleus ambiguus in the medulla, through the jugular foramen, the neck and mediastinum. The vagus supplies the motor innervation to the intrinsic muscles of the larynx by way of paired superior and recurrent laryngeal nerves. The superior laryngeal nerve branches from the vagus high in the neck and passes downward medial to the carotid artery and along the pharynx toward the thyroid cartilage. A large internal branch supplies sensation to the larynx above the vocal cords; a small external branch innervates the cricothyroid muscle, which tenses and lengthens the vocal cord. The recurrent nerves are the major motor nerves to the intrinsic muscles of the larynx. (Figure 1) The right recurrent nerve branches from the vagus adjacent to the right subclavian artery and passes dorsal to this vessel; the left recurrent nerve arises from the vagus anterior to the aortic arch and passes dorsally beneath the arch adjacent to the attachment of the ligamentum arteriosum. Both recurrent nerves run cephalad in the groove between the trachea and the esophagus near the posterior aspect of the thyroid gland and the inferior thyroid artery, continuing as the inferior laryngeal nerves. They enter the larynx behind the cricothyroid joint and supply the principal abductor and adductor muscles of the larynx. The human larynx serves three functions. First, it provides an airway
YOUR DOCTOR IS NOT IN, BY JANE M. ORIENT The government has signed a contract to pay $850,000 for development of "practice guidelines" and "protocols" to tell doctors how to treat an ear infection, a $20 problem. If the Clinton administration has its way, there will be protocols for the treatment of virtually every ailment. Yet there is no evidence that protocols save money or improve quality. Nurses, for instance, outperform protocols in deciding how to treat abdominal pain. So why aren't doctors raising a cry of alarm? Many have been browbeaten into submission, or have discovered that it's easier to play the game than to buck the system. But also, a different type of person is entering medical practice these days. Although the evidence is largely anecdotal, Dr. Orient says that the best students are avoiding medical schools and the schools are lowering their standards. (In 1990, 16% of medical graduates flunked the national boards, compared with 9% in 1984.)
1. Field of the Invention Embodiments disclosed herein relate generally to wellbore fluids that may include silicate-based additives for stabilizing an unconsolidated formation. 2. Background Art Hydrocarbon fluids, such as oil and natural gas, and other desirable formation fluids are obtained from a subterranean geologic formation, i.e., a reservoir, by drilling a well that penetrates the formation zone that contains the desired fluid. Once a wellbore has been drilled, the well must be completed. A well “completion” involves the design, selection, and installation of equipment and materials in or around the wellbore for conveying, pumping, or controlling the production or injection of fluids. After the well has been completed, production of the formation fluids can begin. When the subterranean formation is “soft” or poorly consolidated, small particulates (typically sand) present in the formation may dislodge and travel along with the produced fluid to the wellbore. Production of sand is highly undesirable as it tends to cause erosion of surface and subterranean equipment, and therefore, it must be removed from the produced fluids before they can be processed. In addition, the migrating sand can plug the flow channels in the formation, thereby necessitating other stimulation techniques, such as acid stimulation, to restore the well's performance. Various types of unconsolidated formations include dune sands, alluvial deposits of sand and gravel, and unconsolidated marine deposits. The challenges in drilling and completing wells in these types of formations are to keep the borehole open and prevent caving, and to avoid reducing the hydraulic conductivity of the near-well formation by introducing irrecoverable mud or smearing clay at the well/aquifer interface during the drilling process. Drilling fluids for unconsolidated formations are typically water-based and generally include clean fresh water, water with clay additives, water with polymeric additives and water with a mixture of clay and polymeric additives. One method of controlling loose sands in unconsolidated formations involves placing a filtration bed of gravel near the wellbore in order to present a physical barrier to the transport of unconsolidated formation fines with the production of hydrocarbons. Typically, such so-called “gravel packing operations” involve the pumping and placement of a quantity of a desired particulate into the unconsolidated formation adjacent the wellbore. Such packs are time consuming and expensive to install. Another method used to control loose sands in unconsolidated formations involves consolidating or stabilizing the unconsolidated subterranean producing zones into hard permeable masses by pre-flushing the formation, applying a hardenable resin composition, applying a spacer fluid, applying an external catalyst to cause the resin to set, and applying an afterflush fluid to remove excess resin from the pore spaces of the zones. Such multiple-component applications, however, often result in uncertainty and create a risk for undesirable results. For example, when an insufficient amount of spacer fluid is used between the application of the hardenable resin and the application of the external catalyst the resin may come into contact with the external catalyst in the wellbore itself rather than in the unconsolidated subterranean producing zone. When resin is contacted with an external catalyst an exothermic reaction occurs that may result in rapid polymerization. 
The polymerization may damage the formation by plugging the pore channels, may halt pumping when the wellbore is plugged with solid material, or may even result in a downhole explosion as a result of the heat of polymerization. Also, using these conventional processes to treat long intervals of unconsolidated regions is not practical due to the difficulty in determining whether the entire interval has been treated with both the resin and the activation agent. These techniques typically involve the injection of a consolidating fluid, such as a resin-based consolidating fluid, through the wellbore and into the formation surrounding the interval of interest. Resin-based consolidating fluids generally include an organic resin, a curing agent, a catalyst and an oil wetting agent. The resin system hardens in the formation, thereby consolidating it. Examples of such resin-based consolidating fluids and methods for using them are described in, for example, U.S. Pat. Nos. 4,291,766; 4,427,069; 4,669,543; 5,199,492; and 5,806,593. Resin-based consolidation systems may be complicated to apply, especially those involving multiple treatment stages, and the treatment results may be erratic. When the individual components of the consolidating fluid are pumped at different stages into the formation they may or may not come together in the right order, or in the right amounts, or they may not even come together at all. And, even when they do come together, good mixing of the components is not assured, helping to explain the erratic and unreliable results that operators have experienced using such multi-stage consolidating fluids. In an effort to improve performance, other well treatments have been proposed which use inorganic systems, specifically the use of components which form silica gels, to modify the formation and thereby reduce the production of formation fines. For example, U.S. Pat. No. 3,593,796 describes a multi-stage process in which the following components are injected sequentially into the formation: (1) an aqueous solution containing a silicate adapted to wet the fine sand grain particles, (2) an aqueous solution of a silicate-precipitating agent capable of reacting with the silicate in solution (1) so as to form a solidifying material and therein to bind the fine sand grain particles, and (3) a solution containing an oil-wetting agent. This treatment is designed to immobilize the fine particles in the formation and prevent their migration when subjected to subsequent fluid flow. The patent states that aqueous solutions of alkaline earth metal salts (e.g., calcium chloride), acidic iron salts, and certain other metal salts can be used as the silicate-precipitating agent. In another instance, U.S. Pat. No. 3,741,308 describes a method of converting an unconsolidated sand formation into a consolidated, permeable formation by flowing volumes of aqueous calcium hydroxide (or compounds which hydrolyze or react with each other to form calcium hydroxide) through the pores of the unconsolidated formation. The patent states that the calcium hydroxide solution could be formed by adding sodium hydroxide to a solution of calcium chloride. The patent also states that during the practice of the process the sand particles in the formation become coated with calcium silicates of unknown or indefinite composition, and proposes that the coating cements the individual grains together and increases the structural strength of the sand assemblage.
Yet another approach has been described in two companion cases (U.S. Pat. Nos. 5,088,555 and 5,101,901). In U.S. Pat. No. 5,088,555, a sand consolidation method was described involving sequential injections of (a) an aqueous solution of an alkali metal silicate and (b) certain organic solutions of a calcium salt (e.g., calcium chloride hydrate or chelated calcium) through perforations in the casing of a borehole. The components of these two solutions are said to react to form a calcium silicate cement with permeability retention characteristics in the formation interval being treated that prevents sand from being produced during the production of hydrocarbon fluids from the well. However, the use of a silicate-precipitating agent that is in solution may give rise to short gellation times upon contact with silicates. Accordingly, there exists a need for consolidation or stabilization methods which allow for longer and/or controllable gellation times.
Report on the ADAC Astronomical Catalog Data Service System. Our development and rearrangement of the system of the Astronomical Data Analysis Center (ADAC) are described. We have begun by mirroring, automatically or semi-automatically, some of the other astronomical data centers, such as CDS in France and the ADC in the USA. ADAC has now become the center with the greatest number of astronomical catalogs in Eastern Asia. Several mechanisms for the effective management of the catalog center have been developed. They include a WWW server and an anonymous FTP service for searching and retrieving catalog data from ADAC, and software for automatically constructing a list of existing catalogs.
<filename>db_dump/etender2mongoscripts/myconfig.py
import os

import pymongo

# Folder holding the JSON dumps, resolved relative to this config module.
jsonfolder = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../data")


def getMongoDb():
    """Return a handle to the local 'md' MongoDB database."""
    client = pymongo.MongoClient('localhost', 27017)
    return client.md
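A short usage sketch for the helper above; it assumes a MongoDB instance listening on localhost:27017, and listing the collection names is only an illustration.

# Illustrative use of getMongoDb(); assumes a local MongoDB on port 27017.
if __name__ == "__main__":
    db = getMongoDb()
    print("Collections in md:", db.list_collection_names())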
<gh_stars>0 package com.jdbc.beans; public class Store { String storeNumber; String storeName; String contactNumber; String emailAddress; String addressLine1; String addressLine2; String city; String state; String zip; String country; public String getStoreNumber() { return storeNumber; } public void setStoreNumber(String storeNumber) { this.storeNumber = storeNumber; } public String getStoreName() { return storeName; } public void setStoreName(String storeName) { this.storeName = storeName; } public String getContactNumber() { return contactNumber; } public void setContactNumber(String contactNumber) { this.contactNumber = contactNumber; } public String getEmailAddress() { return emailAddress; } public void setEmailAddress(String emailAddress) { this.emailAddress = emailAddress; } public String getAddressLine1() { return addressLine1; } public void setAddressLine1(String addressLine1) { this.addressLine1 = addressLine1; } public String getAddressLine2() { return addressLine2; } public void setAddressLine2(String addressLine2) { this.addressLine2 = addressLine2; } public String getCity() { return city; } public void setCity(String city) { this.city = city; } public String getState() { return state; } public void setState(String state) { this.state = state; } public String getZip() { return zip; } public void setZip(String zip) { this.zip = zip; } public String getCountry() { return country; } public void setCountry(String country) { this.country = country; } @Override public String toString() { return "Store [storeNumber=" + storeNumber + ", storeName=" + storeName + ", contactNo=" + contactNumber + ", emailAddress=" + emailAddress + ", addressLine1=" + addressLine1 + ", addressLine2=" + addressLine2 + ", city=" + city + ", state=" + state + ", zip=" + zip + ", country=" + country + "]"; } }
<reponame>Catorpilor/LeetCode package intersection func Intersection(nums1, nums2 []int) []int { if len(nums1) == 0 || len(nums2) == 0 { return nil } hm := make(map[int]int) for _, v := range nums1 { if _, ok := hm[v]; !ok { hm[v] = 1 } else { hm[v] += 1 } } var ret []int for _, k := range nums2 { if _, ok := hm[k]; ok { ret = append(ret, k) if hm[k] == 1 { delete(hm, k) } else { hm[k] -= 1 } } } return ret }
/* * JasperReports - Free Java Reporting Library. * Copyright (C) 2001 - 2013 Jaspersoft Corporation. All rights reserved. * http://www.jaspersoft.com * * Unless you have purchased a commercial license agreement from Jaspersoft, * the following license terms apply: * * This program is part of JasperReports. * * JasperReports is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * JasperReports is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with JasperReports. If not, see <http://www.gnu.org/licenses/>. */ package net.sf.jasperreports.engine; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Set; import java.util.TreeMap; import net.sf.jasperreports.charts.JRAreaPlot; import net.sf.jasperreports.charts.JRBar3DPlot; import net.sf.jasperreports.charts.JRBarPlot; import net.sf.jasperreports.charts.JRBubblePlot; import net.sf.jasperreports.charts.JRCandlestickPlot; import net.sf.jasperreports.charts.JRCategoryDataset; import net.sf.jasperreports.charts.JRCategorySeries; import net.sf.jasperreports.charts.JRDataRange; import net.sf.jasperreports.charts.JRGanttDataset; import net.sf.jasperreports.charts.JRGanttSeries; import net.sf.jasperreports.charts.JRHighLowDataset; import net.sf.jasperreports.charts.JRHighLowPlot; import net.sf.jasperreports.charts.JRLinePlot; import net.sf.jasperreports.charts.JRMeterPlot; import net.sf.jasperreports.charts.JRPieDataset; import net.sf.jasperreports.charts.JRPieSeries; import net.sf.jasperreports.charts.JRScatterPlot; import net.sf.jasperreports.charts.JRThermometerPlot; import net.sf.jasperreports.charts.JRTimePeriodDataset; import net.sf.jasperreports.charts.JRTimePeriodSeries; import net.sf.jasperreports.charts.JRTimeSeries; import net.sf.jasperreports.charts.JRTimeSeriesDataset; import net.sf.jasperreports.charts.JRTimeSeriesPlot; import net.sf.jasperreports.charts.JRValueDataset; import net.sf.jasperreports.charts.JRXyDataset; import net.sf.jasperreports.charts.JRXySeries; import net.sf.jasperreports.charts.JRXyzDataset; import net.sf.jasperreports.charts.JRXyzSeries; import net.sf.jasperreports.charts.util.JRMeterInterval; import net.sf.jasperreports.crosstabs.JRCellContents; import net.sf.jasperreports.crosstabs.JRCrosstab; import net.sf.jasperreports.crosstabs.JRCrosstabBucket; import net.sf.jasperreports.crosstabs.JRCrosstabCell; import net.sf.jasperreports.crosstabs.JRCrosstabColumnGroup; import net.sf.jasperreports.crosstabs.JRCrosstabDataset; import net.sf.jasperreports.crosstabs.JRCrosstabMeasure; import net.sf.jasperreports.crosstabs.JRCrosstabParameter; import net.sf.jasperreports.crosstabs.JRCrosstabRowGroup; import net.sf.jasperreports.crosstabs.design.JRDesignCrosstab; import net.sf.jasperreports.engine.analytics.dataset.DataAxis; import net.sf.jasperreports.engine.analytics.dataset.DataAxisLevel; import net.sf.jasperreports.engine.analytics.dataset.DataLevelBucket; import 
net.sf.jasperreports.engine.analytics.dataset.DataLevelBucketProperty; import net.sf.jasperreports.engine.analytics.dataset.DataMeasure; import net.sf.jasperreports.engine.analytics.dataset.MultiAxisData; import net.sf.jasperreports.engine.analytics.dataset.MultiAxisDataset; import net.sf.jasperreports.engine.component.Component; import net.sf.jasperreports.engine.component.ComponentCompiler; import net.sf.jasperreports.engine.component.ComponentKey; import net.sf.jasperreports.engine.component.ComponentManager; import net.sf.jasperreports.engine.component.ComponentsEnvironment; /** * An expression collector traverses a report and collects report expressions * out of it. * * <p> * The expressions are then included into evaluator classes which are compiled * and used at report fill time to evaluate expressions. * * @author <NAME> (<EMAIL>) * @version $Id: JRExpressionCollector.java 6934 2014-02-27 13:24:34Z lucianc $ */ public class JRExpressionCollector { public static JRExpressionCollector collector(JasperReportsContext jasperReportsContext, JRReport report) { JRExpressionCollector collector = new JRExpressionCollector(jasperReportsContext, null, report); collector.collect(); return collector; } public static List<JRExpression> collectExpressions(JasperReportsContext jasperReportsContext, JRReport report) { return collector(jasperReportsContext, report).getExpressions(); } public static JRExpressionCollector collector(JasperReportsContext jasperReportsContext, JRReport report, JRCrosstab crosstab) { JRExpressionCollector collector = new JRExpressionCollector(jasperReportsContext, null, report); collector.collect(crosstab); return collector; } public static List<JRExpression> collectExpressions(JasperReportsContext jasperReportsContext, JRReport report, JRCrosstab crosstab) { return collector(jasperReportsContext, report, crosstab).getExpressions(crosstab); } /** * @deprecated Replaced by {@link #collector(JasperReportsContext, JRReport)}. */ public static JRExpressionCollector collector(JRReport report) { return collector(DefaultJasperReportsContext.getInstance(), report); } /** * @deprecated Replaced by {@link #collectExpressions(JasperReportsContext, JRReport)}. */ public static List<JRExpression> collectExpressions(JRReport report) { return collectExpressions(DefaultJasperReportsContext.getInstance(), report); } /** * @deprecated Replaced by {@link #collector(JasperReportsContext, JRReport, JRCrosstab)}. */ public static JRExpressionCollector collector(JRReport report, JRCrosstab crosstab) { return collector(DefaultJasperReportsContext.getInstance(), report, crosstab); } /** * @deprecated Replaced by {@link #collectExpressions(JasperReportsContext, JRReport, JRCrosstab)}. 
*/ public static List<JRExpression> collectExpressions(JRReport report, JRCrosstab crosstab) { return collectExpressions(DefaultJasperReportsContext.getInstance(), report, crosstab); } private final JasperReportsContext jasperReportsContext; private final JRReport report; private final JRExpressionCollector parent; private Map<JRExpression,Integer> expressionIds; private LinkedList<Object> contextStack; private Map<JRExpression, Object> expressionContextMap; protected static class GeneratedIds { private final TreeMap<Integer, JRExpression> ids = new TreeMap<Integer, JRExpression>(); private int nextId; private List<JRExpression> expressions; public JRExpression put(Integer id, JRExpression expression) { expressions = null; return ids.put(id, expression); } public Integer nextId() { Integer id = Integer.valueOf(nextId); while(ids.containsKey(id)) { id = Integer.valueOf(++nextId); } return id; } public List<JRExpression> expressions() { if (expressions == null) { expressions = new ArrayList<JRExpression>(ids.values()); } return expressions; } public JRExpression expression(int id) { return ids.get(Integer.valueOf(id)); } } private GeneratedIds generatedIds = new GeneratedIds(); private Map<JRCrosstab,Integer> crosstabIds; /** * Collectors for sub datasets indexed by dataset name. */ private Map<String,JRExpressionCollector> datasetCollectors; /** * Collectors for crosstabs. */ private Map<JRCrosstab,JRExpressionCollector> crosstabCollectors; private final Set<JRStyle> collectedStyles; /** * @deprecated Replaced by {@link #JRExpressionCollector(JasperReportsContext, JRExpressionCollector, JRReport)}. */ protected JRExpressionCollector(JRExpressionCollector parent, JRReport report) { this(DefaultJasperReportsContext.getInstance(), parent, report); } protected JRExpressionCollector(JasperReportsContext jasperReportsContext, JRExpressionCollector parent, JRReport report) { this.jasperReportsContext = jasperReportsContext; this.parent = parent; this.report = report; if (parent == null) { expressionIds = new HashMap<JRExpression,Integer>(); datasetCollectors = new HashMap<String,JRExpressionCollector>(); crosstabCollectors = new HashMap<JRCrosstab,JRExpressionCollector>(); contextStack = new LinkedList<Object>(); expressionContextMap = new HashMap<JRExpression, Object>(); crosstabIds = new HashMap<JRCrosstab,Integer>(); } else { expressionIds = this.parent.expressionIds; contextStack = this.parent.contextStack; expressionContextMap = this.parent.expressionContextMap; crosstabIds = parent.crosstabIds; } collectedStyles = new HashSet<JRStyle>(); } /** * Collects an expression. 
* * @param expression the expression to collect */ public void addExpression(JRExpression expression) { if (expression != null) { Integer id = getExpressionId(expression); if (id == null) { id = generatedIds.nextId(); setGeneratedId(expression, id); generatedIds.put(id, expression); } else { JRExpression existingExpression = generatedIds.put(id, expression); if (existingExpression != null && !existingExpression.equals(expression)) { Integer newId = generatedIds.nextId(); updateGeneratedId(existingExpression, id, newId); generatedIds.put(newId, existingExpression); } } setExpressionContext(expression); } } private void setGeneratedId(JRExpression expression, Integer id) { Object existingId = expressionIds.put(expression, id); if (existingId != null && !existingId.equals(id)) { throw new JRRuntimeException("Expression \"" + expression.getText() + "\" has two generated IDs"); } } private void updateGeneratedId(JRExpression expression, Integer currentId, Integer newId) { Object existingId = expressionIds.put(expression, newId); if (existingId == null || !existingId.equals(currentId)) { throw new JRRuntimeException("Expression \"" + expression.getText() + "\" not found with id " + currentId); } } protected void pushContextObject(Object context) { contextStack.addLast(context); } protected Object popContextObject() { return contextStack.removeLast(); } protected void setExpressionContext(JRExpression expression) { if (!contextStack.isEmpty()) { Object context = contextStack.getLast(); expressionContextMap.put(expression, context); } } /** * Returns the expression collector to which expressions in an element * dataset belong. * * <p> * If the element dataset includes a subdataset run, a (sub) expression * collector that corresponds to the subdataset will be returned. * Otherwise, this/the main expression collector will be returned. * * @param elementDataset an element dataset * @return the expression collector to be used for the element dataset */ public JRExpressionCollector getCollector(JRElementDataset elementDataset) { JRExpressionCollector collector; JRDatasetRun datasetRun = elementDataset.getDatasetRun(); if (datasetRun == null) { collector = this; } else { collector = getDatasetCollector(datasetRun.getDatasetName()); } return collector; } /** * Returns the expression collector for a report subdataset. * * @param datasetName the subdataset name * @return the expression collector for the subdataset */ public JRExpressionCollector getDatasetCollector(String datasetName) { JRExpressionCollector collector; if (parent == null) { collector = datasetCollectors.get(datasetName); if (collector == null) { collector = new JRExpressionCollector(jasperReportsContext, this, report); datasetCollectors.put(datasetName, collector); } } else { collector = parent.getDatasetCollector(datasetName); } return collector; } /** * Returns the expression collector for a dataset. * * @param dataset the dataset * @return the dataset expression collector */ public JRExpressionCollector getCollector(JRDataset dataset) { JRExpressionCollector collector; if (parent == null) { if (dataset.isMainDataset() || datasetCollectors == null) { collector = this; } else { collector = getDatasetCollector(dataset.getName()); } } else { collector = parent.getCollector(dataset); } return collector; } /** * Returns the expression collector for a crosstab. 
* * @param crosstab the crosstab * @return the crosstab expression collector */ public JRExpressionCollector getCollector(JRCrosstab crosstab) { JRExpressionCollector collector; if (parent == null) { collector = crosstabCollectors.get(crosstab); if (collector == null) { collector = new JRExpressionCollector(jasperReportsContext, this, report); crosstabCollectors.put(crosstab, collector); } } else { collector = parent.getCollector(crosstab); } return collector; } /** * Returns the collected expressions. * * @return the collected expressions */ public List<JRExpression> getExpressions() { return new ArrayList<JRExpression>(generatedIds.expressions()); } /** * Return all the expressions collected from the report. * * @return all the expressions collected from the report */ public Collection<JRExpression> getReportExpressions() { return Collections.unmodifiableSet(expressionIds.keySet()); } /** * Returns the expressions collected for a dataset. * * @param dataset the dataset * @return the expressions */ public List<JRExpression> getExpressions(JRDataset dataset) { return getCollector(dataset).getExpressions(); } /** * Returns the expressions collected for a crosstab. * * @param crosstab the crosstab * @return the expressions */ public List<JRExpression> getExpressions(JRCrosstab crosstab) { return getCollector(crosstab).getExpressions(); } public Integer getExpressionId(JRExpression expression) { return expressionIds.get(expression); } public JRExpression getExpression(int expressionId) { return generatedIds.expression(expressionId); } public Integer getCrosstabId(JRCrosstab crosstab) { return crosstabIds.get(crosstab); } public Object getExpressionContext(JRExpression expression) { return expressionContextMap.get(expression); } /** * */ public Collection<JRExpression> collect() { collectTemplates(); collect(report.getDefaultStyle()); collect(report.getMainDataset()); JRDataset[] datasets = report.getDatasets(); if (datasets != null && datasets.length > 0) { for (int i = 0; i < datasets.length; i++) { JRExpressionCollector collector = getCollector(datasets[i]); collector.collect(datasets[i]); } } collect(report.getBackground()); collect(report.getTitle()); collect(report.getPageHeader()); collect(report.getColumnHeader()); collect(report.getDetailSection()); collect(report.getColumnFooter()); collect(report.getPageFooter()); collect(report.getLastPageFooter()); collect(report.getSummary()); collect(report.getNoData()); return getExpressions(); } protected void collectTemplates() { JRReportTemplate[] templates = report.getTemplates(); if (templates != null) { for (int i = 0; i < templates.length; i++) { JRReportTemplate template = templates[i]; collect(template); } } } protected void collect(JRReportTemplate template) { addExpression(template.getSourceExpression()); } /** * Collects expressions used in a style definition. 
* * @param style the style to collect expressions from */ public void collect(JRStyle style) { if (style != null && collectedStyles.add(style)) { JRConditionalStyle[] conditionalStyles = style.getConditionalStyles(); if (conditionalStyles != null && conditionalStyles.length > 0) { for (int i = 0; i < conditionalStyles.length; i++) { addExpression(conditionalStyles[i].getConditionExpression()); } } collect(style.getStyle()); } } /** * */ private void collect(JRParameter[] parameters) { if (parameters != null && parameters.length > 0) { for(int i = 0; i < parameters.length; i++) { addExpression(parameters[i].getDefaultValueExpression()); } } } /** * */ private void collect(JRVariable[] variables) { if (variables != null && variables.length > 0) { for(int i = 0; i < variables.length; i++) { JRVariable variable = variables[i]; addExpression(variable.getExpression()); addExpression(variable.getInitialValueExpression()); } } } /** * */ private void collect(JRGroup[] groups) { if (groups != null && groups.length > 0) { for(int i = 0; i < groups.length; i++) { JRGroup group = groups[i]; addExpression(group.getExpression()); collect(group.getGroupHeaderSection()); collect(group.getGroupFooterSection()); } } } /** * */ private void collect(JRSection section) { if (section != null) { JRBand[] bands = section.getBands(); if (bands != null && bands.length > 0) { for(int i = 0; i < bands.length; i++) { collect(bands[i]); } } } } /** * */ private void collect(JRBand band) { if (band != null) { addExpression(band.getPrintWhenExpression()); JRElement[] elements = band.getElements(); if (elements != null && elements.length > 0) { for(int i = 0; i < elements.length; i++) { elements[i].collectExpressions(this); } } } } /** * */ private void collectElement(JRElement element) { collect(element.getStyle()); addExpression(element.getPrintWhenExpression()); collectPropertyExpressions(element.getPropertyExpressions()); } public void collectPropertyExpressions( JRPropertyExpression[] propertyExpressions) { if (propertyExpressions != null && propertyExpressions.length > 0) { for (int i = 0; i < propertyExpressions.length; i++) { collectPropertyExpression(propertyExpressions[i]); } } } protected void collectPropertyExpression( JRPropertyExpression propertyExpression) { addExpression(propertyExpression.getValueExpression()); } /** * */ private void collectAnchor(JRAnchor anchor) { addExpression(anchor.getAnchorNameExpression()); } public void collectHyperlink(JRHyperlink hyperlink) { if (hyperlink != null) { addExpression(hyperlink.getHyperlinkReferenceExpression()); addExpression(hyperlink.getHyperlinkWhenExpression()); addExpression(hyperlink.getHyperlinkAnchorExpression()); addExpression(hyperlink.getHyperlinkPageExpression()); addExpression(hyperlink.getHyperlinkTooltipExpression()); JRHyperlinkParameter[] hyperlinkParameters = hyperlink.getHyperlinkParameters(); if (hyperlinkParameters != null) { for (int i = 0; i < hyperlinkParameters.length; i++) { JRHyperlinkParameter parameter = hyperlinkParameters[i]; collectHyperlinkParameter(parameter); } } } } protected void collectHyperlinkParameter(JRHyperlinkParameter parameter) { if (parameter != null) { addExpression(parameter.getValueExpression()); } } /** * */ public void collect(JRBreak breakElement) { collectElement(breakElement); } /** * */ public void collect(JRLine line) { collectElement(line); } /** * */ public void collect(JRRectangle rectangle) { collectElement(rectangle); } /** * */ public void collect(JREllipse ellipse) { collectElement(ellipse); } /** * 
*/ public void collect(JRImage image) { collectElement(image); addExpression(image.getExpression()); collectAnchor(image); collectHyperlink(image); } /** * */ public void collect(JRStaticText staticText) { collectElement(staticText); } /** * */ public void collect(JRTextField textField) { collectElement(textField); addExpression(textField.getExpression()); addExpression(textField.getPatternExpression()); collectAnchor(textField); collectHyperlink(textField); } /** * */ public void collect(JRSubreport subreport) { collectElement(subreport); addExpression(subreport.getParametersMapExpression()); JRSubreportParameter[] parameters = subreport.getParameters(); if (parameters != null && parameters.length > 0) { for(int j = 0; j < parameters.length; j++) { addExpression(parameters[j].getExpression()); } } addExpression(subreport.getConnectionExpression()); addExpression(subreport.getDataSourceExpression()); addExpression(subreport.getExpression()); } /** * */ public void collect(JRChart chart) { collectElement(chart); collectAnchor(chart); collectHyperlink(chart); addExpression(chart.getTitleExpression()); addExpression(chart.getSubtitleExpression()); chart.getDataset().collectExpressions(this); chart.getPlot().collectExpressions(this); } /** * */ public void collect(JRPieDataset pieDataset) { collect((JRElementDataset) pieDataset); JRPieSeries[] pieSeries = pieDataset.getSeries(); if (pieSeries != null && pieSeries.length > 0) { JRExpressionCollector collector = getCollector(pieDataset); for(int j = 0; j < pieSeries.length; j++) { collector.collect(pieSeries[j]); } } JRExpressionCollector collector = getCollector(pieDataset); collector.addExpression(pieDataset.getOtherKeyExpression()); collector.addExpression(pieDataset.getOtherLabelExpression()); collector.collectHyperlink(pieDataset.getOtherSectionHyperlink()); } /** * */ public void collect(JRCategoryDataset categoryDataset) { collect((JRElementDataset) categoryDataset); JRCategorySeries[] categorySeries = categoryDataset.getSeries(); if (categorySeries != null && categorySeries.length > 0) { JRExpressionCollector collector = getCollector(categoryDataset); for(int j = 0; j < categorySeries.length; j++) { collector.collect(categorySeries[j]); } } } /** * */ public void collect(JRXyDataset xyDataset) { collect((JRElementDataset) xyDataset); JRXySeries[] xySeries = xyDataset.getSeries(); if (xySeries != null && xySeries.length > 0) { JRExpressionCollector collector = getCollector(xyDataset); for(int j = 0; j < xySeries.length; j++) { collector.collect(xySeries[j]); } } } /** * */ public void collect( JRTimeSeriesDataset timeSeriesDataset ){ collect((JRElementDataset) timeSeriesDataset); JRTimeSeries[] timeSeries = timeSeriesDataset.getSeries(); if( timeSeries != null && timeSeries.length > 0 ){ JRExpressionCollector collector = getCollector(timeSeriesDataset); for( int i = 0; i < timeSeries.length; i++ ){ collector.collect(timeSeries[i]); } } } /** * */ public void collect( JRTimePeriodDataset timePeriodDataset ){ collect((JRElementDataset) timePeriodDataset); JRTimePeriodSeries[] timePeriodSeries = timePeriodDataset.getSeries(); if( timePeriodSeries != null && timePeriodSeries.length > 0 ){ JRExpressionCollector collector = getCollector(timePeriodDataset); for( int i = 0; i < timePeriodSeries.length; i++ ){ collector.collect(timePeriodSeries[i]); } } } /** * */ public void collect(JRGanttDataset ganttDataset) { collect((JRElementDataset) ganttDataset); JRGanttSeries[] ganttSeries = ganttDataset.getSeries(); if (ganttSeries != null && 
ganttSeries.length > 0) { JRExpressionCollector collector = getCollector(ganttDataset); for(int j = 0; j < ganttSeries.length; j++) { collector.collect(ganttSeries[j]); } } } /** * */ public void collect( JRValueDataset valueDataset ){ collect((JRElementDataset) valueDataset); JRExpressionCollector collector = getCollector(valueDataset); collector.addExpression(valueDataset.getValueExpression()); } /** * */ private void collect(JRXySeries xySeries) { addExpression(xySeries.getSeriesExpression()); addExpression(xySeries.getXValueExpression()); addExpression(xySeries.getYValueExpression()); addExpression(xySeries.getLabelExpression()); collectHyperlink(xySeries.getItemHyperlink()); } /** * */ private void collect(JRPieSeries pieSeries) { addExpression(pieSeries.getKeyExpression()); addExpression(pieSeries.getValueExpression()); addExpression(pieSeries.getLabelExpression()); collectHyperlink(pieSeries.getSectionHyperlink()); } /** * */ private void collect(JRCategorySeries categorySeries) { addExpression(categorySeries.getSeriesExpression()); addExpression(categorySeries.getCategoryExpression()); addExpression(categorySeries.getValueExpression()); addExpression(categorySeries.getLabelExpression()); collectHyperlink(categorySeries.getItemHyperlink()); } /** * */ private void collect(JRGanttSeries ganttSeries) { addExpression(ganttSeries.getSeriesExpression()); addExpression(ganttSeries.getTaskExpression()); addExpression(ganttSeries.getSubtaskExpression()); addExpression(ganttSeries.getStartDateExpression()); addExpression(ganttSeries.getEndDateExpression()); addExpression(ganttSeries.getPercentExpression()); addExpression(ganttSeries.getLabelExpression()); collectHyperlink(ganttSeries.getItemHyperlink()); } /** * */ public void collect(JRBarPlot barPlot) { addExpression(barPlot.getCategoryAxisLabelExpression()); addExpression(barPlot.getValueAxisLabelExpression()); addExpression(barPlot.getDomainAxisMinValueExpression()); addExpression(barPlot.getDomainAxisMaxValueExpression()); addExpression(barPlot.getRangeAxisMinValueExpression()); addExpression(barPlot.getRangeAxisMaxValueExpression()); } /** * */ public void collect(JRBar3DPlot barPlot) { addExpression(barPlot.getCategoryAxisLabelExpression()); addExpression(barPlot.getValueAxisLabelExpression()); addExpression(barPlot.getRangeAxisMinValueExpression()); addExpression(barPlot.getRangeAxisMaxValueExpression()); } /** * */ public void collect( JRLinePlot linePlot ){ addExpression( linePlot.getCategoryAxisLabelExpression() ); addExpression( linePlot.getValueAxisLabelExpression() ); addExpression(linePlot.getDomainAxisMinValueExpression()); addExpression(linePlot.getDomainAxisMaxValueExpression()); addExpression(linePlot.getRangeAxisMinValueExpression()); addExpression(linePlot.getRangeAxisMaxValueExpression()); } /** * */ public void collect( JRTimeSeriesPlot timeSeriesPlot ){ addExpression( timeSeriesPlot.getTimeAxisLabelExpression() ); addExpression( timeSeriesPlot.getValueAxisLabelExpression() ); addExpression(timeSeriesPlot.getDomainAxisMinValueExpression()); addExpression(timeSeriesPlot.getDomainAxisMaxValueExpression()); addExpression(timeSeriesPlot.getRangeAxisMinValueExpression()); addExpression(timeSeriesPlot.getRangeAxisMaxValueExpression()); } /** * */ public void collect( JRScatterPlot scatterPlot ){ addExpression( scatterPlot.getXAxisLabelExpression() ); addExpression( scatterPlot.getYAxisLabelExpression() ); addExpression(scatterPlot.getDomainAxisMinValueExpression()); 
addExpression(scatterPlot.getDomainAxisMaxValueExpression()); addExpression(scatterPlot.getRangeAxisMinValueExpression()); addExpression(scatterPlot.getRangeAxisMaxValueExpression()); } /** * */ public void collect( JRAreaPlot areaPlot ){ addExpression( areaPlot.getCategoryAxisLabelExpression() ); addExpression( areaPlot.getValueAxisLabelExpression() ); addExpression(areaPlot.getDomainAxisMinValueExpression()); addExpression(areaPlot.getDomainAxisMaxValueExpression()); addExpression(areaPlot.getRangeAxisMinValueExpression()); addExpression(areaPlot.getRangeAxisMaxValueExpression()); } /** * */ private void collect(JRTimeSeries timeSeries) { addExpression(timeSeries.getSeriesExpression()); addExpression(timeSeries.getTimePeriodExpression()); addExpression(timeSeries.getValueExpression()); addExpression(timeSeries.getLabelExpression()); collectHyperlink(timeSeries.getItemHyperlink()); } /** * */ private void collect(JRTimePeriodSeries timePeriodSeries ){ addExpression(timePeriodSeries.getSeriesExpression()); addExpression(timePeriodSeries.getStartDateExpression()); addExpression(timePeriodSeries.getEndDateExpression()); addExpression(timePeriodSeries.getValueExpression()); addExpression(timePeriodSeries.getLabelExpression()); collectHyperlink(timePeriodSeries.getItemHyperlink()); } /** * */ public void collect(JRXyzDataset xyzDataset) { collect((JRElementDataset) xyzDataset); JRXyzSeries[] xyzSeries = xyzDataset.getSeries(); if (xyzSeries != null && xyzSeries.length > 0) { JRExpressionCollector collector = getCollector(xyzDataset); for(int j = 0; j < xyzSeries.length; j++) { collector.collect(xyzSeries[j]); } } } /** * */ private void collect(JRXyzSeries xyzSeries) { addExpression(xyzSeries.getSeriesExpression()); addExpression(xyzSeries.getXValueExpression()); addExpression(xyzSeries.getYValueExpression()); addExpression(xyzSeries.getZValueExpression()); collectHyperlink(xyzSeries.getItemHyperlink()); } /** * */ public void collect(JRBubblePlot bubblePlot) { addExpression(bubblePlot.getXAxisLabelExpression()); addExpression(bubblePlot.getYAxisLabelExpression()); addExpression(bubblePlot.getDomainAxisMinValueExpression()); addExpression(bubblePlot.getDomainAxisMaxValueExpression()); addExpression(bubblePlot.getRangeAxisMinValueExpression()); addExpression(bubblePlot.getRangeAxisMaxValueExpression()); } /** * */ public void collect(JRHighLowPlot highLowPlot) { addExpression(highLowPlot.getTimeAxisLabelExpression()); addExpression(highLowPlot.getValueAxisLabelExpression()); addExpression(highLowPlot.getDomainAxisMinValueExpression()); addExpression(highLowPlot.getDomainAxisMaxValueExpression()); addExpression(highLowPlot.getRangeAxisMinValueExpression()); addExpression(highLowPlot.getRangeAxisMaxValueExpression()); } /** * */ public void collect(JRDataRange dataRange) { if (dataRange != null) { addExpression(dataRange.getLowExpression()); addExpression(dataRange.getHighExpression()); } } /** * */ public void collect(JRMeterPlot meterPlot) { List<JRMeterInterval> intervals = meterPlot.getIntervals(); if (intervals != null) { Iterator<JRMeterInterval> iter = intervals.iterator(); while (iter.hasNext()) { JRMeterInterval interval = iter.next(); collect(interval.getDataRange()); } } collect(meterPlot.getDataRange()); } /** * */ public void collect(JRThermometerPlot thermometerPlot) { collect(thermometerPlot.getDataRange()); collect(thermometerPlot.getLowRange()); collect(thermometerPlot.getMediumRange()); collect(thermometerPlot.getHighRange()); } /** * */ public void collect(JRHighLowDataset 
highLowDataset) { collect((JRElementDataset) highLowDataset); JRExpressionCollector collector = getCollector(highLowDataset); collector.addExpression(highLowDataset.getSeriesExpression()); collector.addExpression(highLowDataset.getDateExpression()); collector.addExpression(highLowDataset.getHighExpression()); collector.addExpression(highLowDataset.getLowExpression()); collector.addExpression(highLowDataset.getOpenExpression()); collector.addExpression(highLowDataset.getCloseExpression()); collector.addExpression(highLowDataset.getVolumeExpression()); collector.collectHyperlink(highLowDataset.getItemHyperlink()); } /** * */ public void collect(JRCandlestickPlot candlestickPlot) { addExpression(candlestickPlot.getTimeAxisLabelExpression()); addExpression(candlestickPlot.getValueAxisLabelExpression()); addExpression(candlestickPlot.getDomainAxisMinValueExpression()); addExpression(candlestickPlot.getDomainAxisMaxValueExpression()); addExpression(candlestickPlot.getRangeAxisMinValueExpression()); addExpression(candlestickPlot.getRangeAxisMaxValueExpression()); } /** * Collects expressions from a crosstab. * * @param crosstab the crosstab */ public void collect(JRCrosstab crosstab) { collectElement(crosstab); createCrosstabId(crosstab); JRCrosstabDataset dataset = crosstab.getDataset(); collect(dataset); JRExpressionCollector datasetCollector = getCollector(dataset); JRExpressionCollector crosstabCollector = getCollector(crosstab); crosstabCollector.collect(report.getDefaultStyle()); addExpression(crosstab.getParametersMapExpression()); JRCrosstabParameter[] parameters = crosstab.getParameters(); if (parameters != null) { for (int i = 0; i < parameters.length; i++) { addExpression(parameters[i].getExpression()); } } if (crosstab.getTitleCell() != null) { crosstabCollector.collect(crosstab.getTitleCell().getCellContents()); } crosstabCollector.collect(crosstab.getHeaderCell()); JRCrosstabRowGroup[] rowGroups = crosstab.getRowGroups(); if (rowGroups != null) { for (int i = 0; i < rowGroups.length; i++) { JRCrosstabRowGroup rowGroup = rowGroups[i]; JRCrosstabBucket bucket = rowGroup.getBucket(); datasetCollector.addExpression(bucket.getExpression()); crosstabCollector.pushContextObject(rowGroup); //order by expression is in the crosstab context crosstabCollector.addExpression(bucket.getOrderByExpression()); addExpression(bucket.getComparatorExpression()); crosstabCollector.collect(rowGroup.getHeader()); crosstabCollector.collect(rowGroup.getTotalHeader()); crosstabCollector.popContextObject(); } } JRCrosstabColumnGroup[] colGroups = crosstab.getColumnGroups(); if (colGroups != null) { for (int i = 0; i < colGroups.length; i++) { JRCrosstabColumnGroup columnGroup = colGroups[i]; JRCrosstabBucket bucket = columnGroup.getBucket(); datasetCollector.addExpression(bucket.getExpression()); crosstabCollector.pushContextObject(columnGroup); //order by expression is in the crosstab context crosstabCollector.addExpression(bucket.getOrderByExpression()); addExpression(bucket.getComparatorExpression()); crosstabCollector.collect(columnGroup.getCrosstabHeader()); crosstabCollector.collect(columnGroup.getHeader()); crosstabCollector.collect(columnGroup.getTotalHeader()); crosstabCollector.popContextObject(); } } JRCrosstabMeasure[] measures = crosstab.getMeasures(); if (measures != null) { for (int i = 0; i < measures.length; i++) { datasetCollector.addExpression(measures[i].getValueExpression()); } } crosstabCollector.collect(crosstab.getWhenNoDataCell()); collectCrosstabCells(crosstab, crosstabCollector); } 
private void createCrosstabId(JRCrosstab crosstab) { crosstabIds.put(crosstab, Integer.valueOf(crosstabIds.size())); } private void collectCrosstabCells(JRCrosstab crosstab, JRExpressionCollector crosstabCollector) { if (crosstab instanceof JRDesignCrosstab) { List<JRCrosstabCell> cellsList = ((JRDesignCrosstab) crosstab).getCellsList(); if (cellsList != null) { for (Iterator<JRCrosstabCell> iter = cellsList.iterator(); iter.hasNext();) { JRCrosstabCell cell = iter.next(); crosstabCollector.collect(cell.getContents()); } } } else { JRCrosstabCell[][] cells = crosstab.getCells(); if (cells != null) { for (int i = 0; i < cells.length; ++i) { for (int j = 0; j < cells[i].length; j++) { if (cells[i][j] != null) { crosstabCollector.collect(cells[i][j].getContents()); } } } } } } /** * Collects expressions from a dataset. * * @param dataset the dataset * @return collected expressions */ public Collection<JRExpression> collect(JRDataset dataset) { JRExpressionCollector collector = getCollector(dataset); collector.collect(dataset.getParameters()); collector.collect(dataset.getVariables()); collector.collect(dataset.getGroups()); collector.addExpression(dataset.getFilterExpression()); return getExpressions(dataset); } /** * Collects expressions from an element dataset. * * @param dataset the element dataset */ public void collect(JRElementDataset dataset) { collect(dataset.getDatasetRun()); JRExpression incrementWhenExpression = dataset.getIncrementWhenExpression(); if (incrementWhenExpression != null) { JRExpressionCollector datasetCollector = getCollector(dataset); datasetCollector.addExpression(incrementWhenExpression); } } /** * Collects expressions from a subdataset run object. * * @param datasetRun the subdataset run */ public void collect(JRDatasetRun datasetRun) { if (datasetRun != null) { addExpression(datasetRun.getParametersMapExpression()); addExpression(datasetRun.getConnectionExpression()); addExpression(datasetRun.getDataSourceExpression()); JRDatasetParameter[] parameters = datasetRun.getParameters(); if (parameters != null && parameters.length > 0) { for (int i = 0; i < parameters.length; i++) { addExpression(parameters[i].getExpression()); } } } } protected void collect(JRCellContents cell) { if (cell != null) { collect(cell.getStyle()); JRElement[] elements = cell.getElements(); if (elements != null && elements.length > 0) { for(int i = 0; i < elements.length; i++) { elements[i].collectExpressions(this); } } } } public void collect(JRFrame frame) { collectElement(frame); JRElement[] elements = frame.getElements(); if (elements != null) { for (int i = 0; i < elements.length; i++) { elements[i].collectExpressions(this); } } } /** * Collects expressions from a component element wrapper. * * <p> * Common element expressions are collected, and then the component * compiler's * {@link ComponentCompiler#collectExpressions(Component, JRExpressionCollector)} * is called to collect component expressions. * * @param componentElement the component element */ public void collect(JRComponentElement componentElement) { collectElement(componentElement); ComponentKey componentKey = componentElement.getComponentKey(); ComponentManager manager = ComponentsEnvironment.getInstance(jasperReportsContext).getManager(componentKey); Component component = componentElement.getComponent(); manager.getComponentCompiler(jasperReportsContext).collectExpressions(component, this); } /** * Collects expressions from a generic element. 
* * @param element the generic element */ public void collect(JRGenericElement element) { collectElement(element); JRGenericElementParameter[] parameters = element.getParameters(); for (int i = 0; i < parameters.length; i++) { JRGenericElementParameter parameter = parameters[i]; addExpression(parameter.getValueExpression()); } } public void collect(MultiAxisData data) { if (data == null) { return; } MultiAxisDataset dataset = data.getDataset(); collect(dataset); JRExpressionCollector datasetCollector = getCollector(dataset); List<DataAxis> axisList = data.getDataAxisList(); for (DataAxis dataAxis : axisList) { for (DataAxisLevel level : dataAxis.getLevels()) { collect(level, datasetCollector); } } for (DataMeasure measure : data.getMeasures()) { addExpression(measure.getLabelExpression()); datasetCollector.addExpression(measure.getValueExpression()); } } protected void collect(DataAxisLevel level, JRExpressionCollector datasetCollector) { addExpression(level.getLabelExpression()); DataLevelBucket bucket = level.getBucket(); datasetCollector.addExpression(bucket.getExpression()); addExpression(bucket.getComparatorExpression()); List<DataLevelBucketProperty> bucketProperties = bucket.getBucketProperties(); if (bucketProperties != null) { for (DataLevelBucketProperty bucketProperty : bucketProperties) { datasetCollector.addExpression(bucketProperty.getExpression()); } } } public JasperReportsContext getJasperReportsContext() { return jasperReportsContext; } }
<gh_stars>0 package fhws.minichess.gamecomponents; import com.sun.istack.internal.Nullable; import java.io.*; import java.util.ArrayList; import java.util.HashMap; import java.util.Stack; import java.util.Vector; /** * Copyright © 2017 <NAME> * <p> * [This program is licensed under the "MIT License"] * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: * The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.Copyright © 2017 <NAME> * <p> * [This program is licensed under the "MIT License"] * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: * The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /** * Providing a mini chess board for the game */ public class Board { public static char king = 'k'; public static char queen = 'q'; public static char bishop = 'b'; public static char night = 'n'; public static char rook = 'r'; public static char FREEPOSITION = '.'; public static char PRAWN_BLACK = 'p'; public static char PRAWN_WHITE = 'P'; public static int ROWS = 6; public static int COLUMNS = 5; private char[][] squares = new char[ROWS][COLUMNS]; private int movNumber; private char onMove; private Stack<Move> lastMove = new Stack<>(); private Stack<Character> lastValue = new Stack<>(); private Stack<Character> actualValue = new Stack<>(); private Stack<Character> lastOnMove = new Stack<>(); private Stack<Integer> lastMoveNumber = new Stack<>(); public Board() { movNumber = 1; onMove = 'W'; initBoard(null); } public Board(String state) { initBoard(state); } /** * Initialise the board with / without a given state * if the state is null , the board will initialised with a default board. * * @param state actual state of a gamecomponents. 
*/ private void initBoard(@Nullable String state) { if (state == null) { for (int row = squares.length - 1; row >= 0; row--) { if (row == squares.length - 1) squares[row] = makeBlackLine(); else if (row == 1 || row == 4) addPrawns(row); else if (row == 0) squares[row] = makeWhiteLine(); else addFreeLine(row); } } else { String[] lines = state.split("\n"); if (lines.length != 10) throw new IllegalArgumentException("The String has not the correct number of lines"); String[] firstline = lines[0].split(" "); movNumber = Integer.valueOf(firstline[0]); onMove = firstline[1].charAt(0); for (int y = 0; y < squares.length; y++) { String[] parts = lines[y + 2].split(" | "); for (int x = 0; x < squares[y].length; x++) { squares[ROWS - y - 1][x] = parts[2 + x * 2].charAt(0); } } } } /** * Making the line for white player * @return a array[] with the white start line */ private char[] makeWhiteLine() { return new char[]{(char) (rook - 32), (char) (night - 32), (char) (bishop - 32), (char) (queen - 32), (char) (king - 32)}; } /** * Making the line for black player * @return a array[] with the black start line */ private char[] makeBlackLine() { return new char[]{king, queen, bishop, night, rook}; } /** * Adding free Lines to the array * @param row actual row */ private void addFreeLine(int row) { for (int index = 0; index < squares[row].length; index++) { squares[row][index] = FREEPOSITION; } } /** * Adding Prawns to the board * @param column actual column on the board */ private void addPrawns(int column) { for (int index = 0; index < squares[column].length; index++) { if (column == 4) squares[column][index] = PRAWN_BLACK; if (column == 1) squares[column][index] = PRAWN_WHITE; } } /** * Generates a board with a given Reader * @param reader board as FileReader */ public Board(Reader reader) { BufferedReader bufferedReader = new BufferedReader(reader); String line = null; try { line = bufferedReader.readLine(); String[] parts = line.split(" "); movNumber = Integer.getInteger(parts[0]); onMove = parts[1].charAt(0); int index = 0; while ((line = bufferedReader.readLine()) != null) { squares[index++] = line.toCharArray(); } } catch (IOException exception) { exception.printStackTrace(); } } /** * Overriden toString method * @return the board as String */ @Override public String toString() { StringBuilder builder = new StringBuilder() .append(getFirstLineString() + "\n"); for (int y = squares.length - 1; y >= 0; y--) { builder.append(y + 1 + " | "); for (int x = 0; x < squares[y].length; x++) { builder.append(squares[y][x] + " | "); } builder.append("\n"); } builder.append("----------------------- \n"); builder.append(" | a | b | c | d | e | \n"); return builder.toString(); } /** * Generate a String for the toString method to print the first Line with * moveNumber and actual turnColor. * * @return a string with the movenumber and the Color, who has the next turn. */ private String getFirstLineString() { return movNumber + " " + onMove + "\n"; } /** * Check if a piece from my color and not free. * If the check is ok, the board is doing the move. * If the check fails we threw a exception. * Also Changing the Color for the next move and bump the movNumber if required. * * @param move doing the move which is giving as parameter. 
*/ public char move(Move move) { char objekt = squares[move.getFrom().getRow()][move.getFrom().getCol()]; char nextPosition = squares[move.getTo().getRow()][move.getTo().getCol()]; setRerollPostions(move, objekt, nextPosition); squares[move.getFrom().getRow()][move.getFrom().getCol()] = '.'; if (onMove == 'W') { onMove = 'B'; } else { onMove = 'W'; movNumber++; } squares[move.getTo().getRow()][move.getTo().getCol()] = objekt; if (isSquareKing(nextPosition)) { if (onMove == 'W') { return 'B'; } return 'W'; } if (isPrawn(objekt) && isPrawnOnEdge(move.getTo().getRow())) { squares[move.getTo().getRow()][move.getTo().getCol()]++; } if (movNumber == 41) return '='; return '?'; } /** * Setting all parameters for a correct reroll * * @param move lastMove * @param objekt value from the from Square on Move * @param nextPosition value of the position where we want to go */ private void setRerollPostions(Move move, char objekt, char nextPosition) { lastMoveNumber.push(movNumber); lastOnMove.push(onMove); lastMove.push(move); lastValue.push(objekt); actualValue.push(nextPosition); } /** * Doing a Reroll on he Board and reset all params */ public void rerollBoard() { onMove = lastOnMove.pop(); movNumber = lastMoveNumber.pop(); Move lastMove = this.lastMove.pop(); squares[lastMove.getFrom().getRow()][lastMove.getFrom().getCol()] = lastValue.pop(); squares[lastMove.getTo().getRow()][lastMove.getTo().getCol()] = actualValue.pop(); } /** * check if it is a Prawn. * * @param objekt current piece * @return */ private boolean isPrawn(char objekt) { return objekt == 'p' || objekt == 'P'; } /** * check if it is a King.. * * @param c current piece * @return */ private boolean isSquareKing(char c) { return c == 'k' || c == 'K'; } /** * check if the Prawn reaches the opponent edge * * @param row row of the prawn * @return */ private boolean isPrawnOnEdge(int row) { return row == 0 || row == 5; } /** * Check if the move is Possible and than doing the move, * else we threw a Exception. * * @param move String with the next move */ public char move(String move) { Move actualMove = new Move(move); if (isNotFree(actualMove.getFrom()) && isPieceFromActualColor(squares[actualMove.getFrom().getRow()][actualMove.getFrom().getCol()])) { ArrayList<Move> possibleMoves = Algorithm.moveList(this, actualMove.getFrom().getRow(), actualMove.getFrom().getCol()); if (isMovePossible(possibleMoves, actualMove)) { return this.move(actualMove); } } throw new IllegalArgumentException("gamecomponents.Move is not possible!"); } /** * Check if the move isPossible * * @param possibleMoves List of all possibleMoves for this piece. * @param actualMove the giving move * @return boolean after checking the move */ private boolean isMovePossible(ArrayList<Move> possibleMoves, Move actualMove) { if (possibleMoves != null && !possibleMoves.isEmpty()) { return possibleMoves.contains(actualMove); } return false; } /** * Check if the actual position is not a free place * * @param from gamecomponents.Square with the actual position * @return boolean with result */ private boolean isNotFree(Square from) { return squares[from.getRow()][from.getCol()] != '.'; } private boolean isNotFree(char element) { return element != '.'; } /** * Generate all possible Moves for all pieces * * @return List of all possible Pieces */ public ArrayList<Move> genMoves() { ArrayList<Move> moves = new ArrayList<>(); for (int y = 0; y < ROWS; y++) { for(int x = 0;x < COLUMNS;x++) if(squares[y][x] != '.' 
&& isPieceFromActualColor(squares[y][x])){ moves.addAll(Algorithm.moveList(this, y, x)); } } return moves; } /** * giving a piece and check if he is on turn. * * @param c is the actual positon * @return boolean */ private boolean isPieceFromActualColor(char c) { if (onMove == 'W') { if (c != '.' && (c > 'A' && c < 'Z')) return true; } else if (onMove == 'B') { if (c != '.' && (c > 'a' && c < 'z')) return true; } return false; } public static int getCOLUMNS() { return COLUMNS; } public char getOnMove() { return onMove; } public char[][] getSquares() { return squares; } }
<gh_stars>1-10 use bevy::prelude::*; use bevy_ecs_tilemap::prelude::*; use noise::{Fbm, MultiFractal, NoiseFn, Seedable}; use pathfinding::prelude::{absdiff, astar}; fn get_island_shape(x: f64, y: f64) -> f64 { let a = 1.0; let b = 1.2; let value = x.abs().max(y.abs()); value.powf(a) / value.powf(a) + (b - b * value).powf(a) } pub fn generate_road( ground_layer: &LayerBuilder<TileBundle>, road_layer: &mut LayerBuilder<TileBundle>, ) -> Vec<IVec2> { let mut road_points = Vec::new(); let angle_increment: u32 = 15; let random_angle = fastrand::u32(0..360) as f32; let mut angles = Vec::new(); for angle_index in 0..(360 / angle_increment) { let mut current_angle = (angle_increment * angle_index) as f32 + random_angle; current_angle = current_angle % 360.0; if current_angle < 0.0 { current_angle += 360.0; } angles.push(current_angle); } angles.sort_by(|a, b| a.partial_cmp(b).unwrap()); let map_center = (ground_layer.settings.map_size * ground_layer.settings.chunk_size).as_f32() / 2.0; for angle in angles { let current_direction = Vec2::new(angle.to_radians().cos(), angle.to_radians().sin()).normalize(); for ray_index in 2..1000 { let check_position: Vec2 = map_center + (current_direction * ray_index as f32); if let Ok(tile_bundle) = ground_layer.get_tile(check_position.as_u32()) { if tile_bundle.tile.texture_index == 19 { let range = 30..(ray_index - 2); let random_ray_position = fastrand::i32(range); let road_position: Vec2 = map_center + current_direction * (random_ray_position as f32); road_points.push(road_position); break; } } else { break; } } } for road_point in road_points.iter() { let tile = Tile { texture_index: 36, ..Default::default() }; road_layer.set_tile(road_point.as_u32(), tile.into(), true).unwrap(); } road_points.push(road_points[0]); let road_path = find_road_path(ground_layer, road_layer, &road_points); road_path } pub fn find_road_path( ground_layer: &LayerBuilder<TileBundle>, road_layer: &mut LayerBuilder<TileBundle>, road_points: &Vec<Vec2>, ) -> Vec<IVec2> { let mut road_path = Vec::new(); let mut starting_point = (road_points[0].x as i32, road_points[0].y as i32); for road_point_index in 1..road_points.len() { let goal = ( road_points[road_point_index].x as i32, road_points[road_point_index].y as i32, ); // Do pathfinding let path = astar( &starting_point, |&(x, y)| { let neighbors = ground_layer.get_tile_neighbors(UVec2::new(x as u32, y as u32)); neighbors .iter() .filter(|(_, tile_data)| { if let Some((_, tile_bundle)) = tile_data { // 19 is water. 
if tile_bundle.tile.texture_index != 19 { return true; } } return false; }) .map(|(p, _)| ((p.x, p.y), 1)) .collect::<Vec<((i32, i32), i32)>>().into_iter() }, |&(x, y)| absdiff(x, goal.0) + absdiff(y, goal.1), |&p| p == goal, ).unwrap().0.iter().map(|(x, y)| IVec2::new(*x, *y)).collect::<Vec<IVec2>>(); road_path.extend(path); starting_point = goal.clone(); } for road_point in road_path.iter() { let has_no_tile = road_layer.get_tile(road_point.as_u32()).is_err(); if has_no_tile { let tile = Tile { texture_index: 7, ..Default::default() }; road_layer.set_tile(road_point.as_u32(), tile.into(), true).unwrap(); } } let half_map_size = (ground_layer.settings.map_size * ground_layer.settings.chunk_size).as_i32() / 2; road_path = road_path.iter().map(|vec| IVec2::new(vec.x - half_map_size.x, vec.y - half_map_size.y)).collect(); road_path } pub fn generate_map( ground_layer: &mut LayerBuilder<TileBundle>, ) { // Generate a seed for the map let seed: u32 = fastrand::u32(..); fastrand::seed(seed as u64); // Create fbm noise let mut fbm = Fbm::new(); fbm = fbm.set_seed(seed); fbm = fbm.set_frequency(0.2); let chunk_width = ground_layer.settings.chunk_size.x; let chunk_height = ground_layer.settings.chunk_size.y; let map_width = ground_layer.settings.map_size.x; let map_height = ground_layer.settings.map_size.x; let actual_width = map_width * chunk_width; let actual_height = map_height * chunk_height; let half_actual_width = actual_width / 2; let half_actual_height = actual_height / 2; for x in 0..actual_width { for y in 0..actual_height { let high_x = x as f64 - half_actual_width as f64; let high_y = y as f64 - half_actual_height as f64; let mask = get_island_shape(high_x / 60.0, high_y / 60.0); let noise_value = fbm.get([high_x / 15.0, high_y / 15.0]) - (1.0 - mask); // Create Tile let mut tile = Tile { texture_index: 19, // Water ..Default::default() }; if noise_value > 0.0 { if noise_value > 0.9 { tile.texture_index = 23; // Snow } else if noise_value > 0.7 { tile.texture_index = 22; // Rock 2 } else if noise_value > 0.6 { tile.texture_index = 21; // Rock 1 } else if noise_value > 0.4 { tile.texture_index = 20; // Forest } else { tile.texture_index = 18; // Grass } } let _ = ground_layer.set_tile(UVec2::new(x, y), tile.into(), true); } } }
// Now we create a function that will run the OCR.
// Note: this snippet assumes a tesseract::TessBaseAPI instance named `ocr` is
// declared at file scope (e.g. `tesseract::TessBaseAPI ocr;`), and that
// <tesseract/baseapi.h> and OpenCV (cv::Mat) are available.
char* identifyText(Mat input, const char* language = "eng")
{
    ocr.Init(NULL, language, tesseract::OEM_TESSERACT_ONLY);
    ocr.SetPageSegMode(tesseract::PSM_SINGLE_BLOCK);
    // Single-channel (grayscale) image: bytes per pixel = 1, stride = input.step
    ocr.SetImage(input.data, input.cols, input.rows, 1, input.step);

    char* text = ocr.GetUTF8Text();
    cout << "Text:" << endl;
    cout << text << endl;
    cout << "Confidence: " << ocr.MeanTextConf() << endl << endl;

    // The caller is responsible for freeing the returned buffer (delete [] text).
    return text;
}
/*Copyright 2020 <NAME> Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "hotkeyPP.h" using namespace HKPP::extra; namespace HKPP { key_deskriptor::key_deskriptor() { Key = 0; Injected = injected_status_enm::UNDEFINED_INJECTION_STATUS; } key_deskriptor::key_deskriptor(DWORD key_ARG, injected_status_enm injected_ARG) { Key = key_ARG; Injected = injected_ARG; } key_deskriptor::key_deskriptor(DWORD key_ARG) { Key = key_ARG; Injected = injected_status_enm::UNDEFINED_INJECTION_STATUS; } bool key_deskriptor::operator== (key_deskriptor& s) { if ((this->Injected == injected_status_enm::UNDEFINED_INJECTION_STATUS || s.Injected == injected_status_enm::UNDEFINED_INJECTION_STATUS)) //if we do not know injection status return (s.Key == this->Key); //compare just codes else //if we know injection status return ((s.Key == this->Key) && (s.Injected == this->Injected)); //full comparation } bool key_deskriptor::operator!= (key_deskriptor& s) { return !(operator==(s)); } bool key_deskriptor::operator> (key_deskriptor& s) { if ((this->Injected == injected_status_enm::UNDEFINED_INJECTION_STATUS || s.Injected == injected_status_enm::UNDEFINED_INJECTION_STATUS)) return (s.Key > this->Key); else return ((s.Key > this->Key) && (s.Injected == this->Injected)); } bool key_deskriptor::operator< (key_deskriptor& s) { return !(operator>(s)); } bool key_deskriptor::operator<= (key_deskriptor& s) { if ((this->Injected == injected_status_enm::UNDEFINED_INJECTION_STATUS || s.Injected == injected_status_enm::UNDEFINED_INJECTION_STATUS)) return (s.Key <= this->Key); else return ((s.Key <= this->Key) && (s.Injected == this->Injected)); } bool key_deskriptor::operator>= (key_deskriptor& s) { return (operator<=(s)); } bool key_deskriptor::operator== (DWORD& s) { return (s == this->Key); } bool key_deskriptor::operator!= (DWORD& s) { return !(operator==(s)); } bool key_deskriptor::operator> (DWORD& s) { return (s > this->Key); } bool key_deskriptor::operator< (DWORD& s) { return !(operator>(s)); } bool key_deskriptor::operator<= (DWORD& s) { return (s <= this->Key); } bool key_deskriptor::operator>= (DWORD& s) { return !(operator<=(s)); } }
Type 2 diabetes, which has been found to impact dexterity and sensory function in the hands, may also impact the short-term memory of those living with the disease. Stacey Gorniak is an assistant professor in the University of Houston Department of Health and Human Performance. She studies the impact of changes in the brain due to chronic health conditions, movement disorders and aging. Currently, she is studying cognitive, sensory and motor changes in middle-aged and older adults who control their type 2 diabetes with medication. When patients were asked to perform a simple activity with their hands, such as holding an object like a smartphone, while repeating a set of words, those with type 2 diabetes exhibited difficulty both in recalling the words and in performing the activity. Gorniak’s next step is to identify changes to brain structures that are involved in cognitive, sensory and motor functions. “By better understanding both structural and functional brain changes with type 2 diabetes, clinicians will have a better idea of how to modify treatment plans to better accommodate the challenges faced by diabetic patients,” she said. Another ongoing study examines how the disease may affect the sense of touch. It builds on her previous work, which found that type 2 diabetes affected dexterity and sensory function in the hands. As part of this new study, participants with and without the disease will be administered a light anesthetic at the wrist and elbow and asked to perform a number of activities. She hopes to learn whether the diminished tactile sensation brought on by type 2 diabetes impacts hand dexterity.
Extensive marine anoxia during the terminal Ediacaran Period

Extensive marine anoxia in the terminal Ediacaran ocean was associated with the decline of the Ediacara biota.

Mass balance description of uranium isotopes in the ocean

Uranium is a redox-sensitive trace metal with a residence time of ~500 kyr in the modern ocean. Uranium occurs in two redox states in natural waters: soluble U(VI) under oxygenated conditions and insoluble U(IV) under anoxic conditions. Isotope fractionation between U(IV) and U(VI) is driven by the dominance of nuclear volume effects. As a result, during reduction of U(VI) to U(IV), the reduced U(IV) is enriched in the heavier 238U isotope, thus enriching the remaining dissolved U(VI) reservoir in the lighter 235U isotope. This is observed in the Black Sea. Microbially mediated reduction of U(VI) to U(IV) under anoxic conditions is associated with a large isotopic fractionation ranging between 0.68 and 0.99‰. The only major source of U to the ocean is oxidative mobilization of U from the upper continental crust and transport of dissolved U(VI) to the oceans via rivers. The δ238U value of dissolved U in rivers is dominated by the U concentration and δ238U of the source lithologies. The estimated average δ238U of the world's major rivers ranges between −0.26 and −0.34‰, which reflects the estimated average δ238U of the continental crust. In our U isotope modeling calculation below, we adopted a riverine δ238U of −0.34‰.

There are multiple sinks for U in the ocean. The major sinks are sediments deposited beneath anoxic/euxinic bottom waters, sediments deposited beneath weakly oxygenated bottom waters, and marine carbonates. Minor sinks include ferromanganese oxides and the hydrothermal alteration of oceanic crust. The largest expression of U isotope fractionation (~0.4 to ~1.2‰) in the marine environment occurs during U burial in anoxic/euxinic sediments, such as those of the Black Sea, the Saanich Inlet, and the Framvaren Fjord. By contrast, the fractionation of U isotopes during removal to suboxic sediments is only ~0.1‰, based on observations from the Peruvian continental margin and off the coast of Washington State, where sediments underlying weakly oxygenated waters have an average δ238U of −0.28 ± 0.19‰ and −0.23 ± 0.19‰, respectively. Both natural and laboratory observations suggest at most a small offset between the δ238U of primary carbonate precipitates and seawater. Sedimentary carbonates may incorporate U(IV) from sulfidic pore waters, leading to values that are 0.2 to 0.4‰ higher compared with seawater, although this process can potentially be monitored by examining local depositional redox conditions where the carbonates precipitated. The fractionation of U isotopes during removal to Mn nodules and metalliferous sediments is −0.24‰, and is well constrained by both natural samples and adsorption experiments. Seafloor alteration at high temperatures is assumed to have no isotope fractionation, and seafloor alteration at low temperatures is estimated to have a fractionation factor of 0.25‰.

Non-anoxic sinks and the associated fractionation factors

A simplified schematic representation of the major source and sinks of U in the modern ocean, along with their isotopic compositions (sources) or associated isotopic fractionations (sinks), is presented in fig. S1 (modified after Wang et al. and Tissot et al.).
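Written out, the balance that fig. S1 encodes takes the generic steady-state form sketched below in LaTeX notation. The symbols f_i and Δ_i (the flux fraction and effective seawater-to-sink fractionation of sink i) are generic labels introduced here only for illustration; the specific simplified form actually used in our calculations is given in the mass balance section later in this text.

% Generic steady-state U isotope mass balance implied by fig. S1: the riverine
% input is balanced by the flux-weighted sum of the sinks, each offset from
% seawater by its effective fractionation factor.
\delta^{238}\mathrm{U}_{\mathrm{riv}}
  = \sum_{i} f_i \left( \delta^{238}\mathrm{U}_{\mathrm{sw}} + \Delta_i \right),
\qquad \sum_{i} f_i = 1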
In order to simplify our mass balance calculations, several types of sinks are lumped into a single oxic sink, including Fe-Mn crusts, pelagic clays, low temperature and high temperature oceanic crust alteration, marine carbonates, and coastal retention. Additionally, the oxic sink and suboxic sink are lumped together into a single "other" sink to make the estimation of U removal associated with anoxic/euxinic sinks solvable. The overall U isotope fractionation factor for the oxic sink and the "other" sink are calculated as a weighted average of the fractionation factors for the individual components. The fractionation factors between the oxic sink and seawater and between the "other" sink and seawater are 0.01 (oxic) and 0.04 (other), respectively. Geological background of the studies sites The geological and stratigraphic background of the Dengying Formation in the Yangtze Gorges area was detailed in Chen et al. and was summarized by Meyer et al.. To briefly summarize, the upper Ediacaran Dengying Formation overlies the lowermiddle Ediacaran Doushantuo Formation and underlies the Yanjiahe Formation, which contains the Ediacaran-Cambrian boundary. The Dengying Formation was deposited on a shallow marine carbonate platform in an inner shelf environment, and its age is constrained between 551.1±0.7 Ma and ~541 Ma based on available radiometric dates and stratigraphic correlations. The Dengying Formation is divided into three members. These are the Hamajing, Shibantan, and Baimatuo members from bottom to top (fig. S2 in the supplementary information and Fig. 1 in the main text). The Hamajing Member consists of peritidal dolostone. The Shibantan Member is composed of dark gray, thin-bedded, bituminous limestone interpreted to have been deposited in a subtidal environment. The Baimatuo Member is composed of light gray, massive peritidal dolostone. The geological and stratigraphic background of the Dengying Formation in the Gaojiashan area was detailed in Cai et al. and Cui et al.. To briefly summarize, the study area is located in the northwestern margin of the Yangtze Platform. Ediacaran successions in the Gaojiashan area consist of the Doushantuo and the Dengying formations, similar to classical Ediacaran successions in the southeastern margin of the Yangtze Platform. The Dengying Formation is divided into three members. These are the Algal Dolomite, Gaojiashan, and Beiwan members from bottom to top, which are typically correlated to the Hamajing, Shibantan, and Baimatuo members, respectively, in the Three Gorges area. The Algal Dolomite Member is characterized by light gray, peritidal dolostone. The overlying Gaojiashan Member is characterized by thin-bedded, subtidal, fossiliferous calcisiltite-siltstone and mudstone with limestone interbeds. Microbial laminae and rip-up clasts are common in limestones of the upper Gaojiashan Member, which is capped by a thick sandstone bed. The overlying Beiwan Member consists of thick-bedded, peritidal dolostone with stromatactis-like structures and Cloudina fossils. The studied Gaojiashan Member is 55 m thick and can be divided into three units. The lower Gaojiashan Member is characterized by 19 m of greenish and brownish siltstone, greenish silicified tuffaceous siltstone, and silty shale. 
The middle Gaojiashan Member consists of 8 m of non-fossiliferous, interbedded calcisiltite-siltstone and calcilutite-mudstone, followed stratigraphically up-section by 12 m of fossiliferous calcisiltite-siltstone-calcilutite-mudstone interbeds that contain abundant pyritized fossils (Conotubus and Gaojiashania), calcareous microfossils (Protolagena), and horizontal trace fossils towards the upper part of this unit. In the succeeding 14 m of strata upsection, limestone becomes increasingly dominant over siltstone, fossils become increasingly scarce and are dominated by Cloudina, but wrinkled microbial sedimentary structures and rip-up clasts are common. Evaluation of carbonate diagenesis Marine carbonate sediments can faithfully record chemical signatures of seawater provided that post-depositional processes have not caused significant alteration. To assess diagenesis, we used a combination of sedimentary petrography and standard geochemical criteria. Specifically, we compared our U isotope data to standard diagenetic indicators such as Mn content, Sr content, Mn/Sr ratios, and O isotope composition to evaluate the influence of meteoric or burial fluids on preserved U isotope signatures. To provide a framework for our interpretation, we briefly summarize the way in which petrography and geochemistry can be used to assess diagenesis (after Gilleaudeau et al. ). Broadly, grain size and the degree of preservation of primary textural features can be indicative of fluid composition during diagenesis. Fabric-retentive micritic to microsparitic fabrics that preserve original textural details indicate that recrystallization occurred in the presence of fluids similar in composition to seawater, leading to the inference that diagenesis was early-either synsedimentary or during shallow burial. Dolomitization may also occur during early diagenesis in the presence of seawaterbuffered fluids, resulting in a high degree of fabric retention. By contrast, diagenesis in the presence of fluids very different in composition than seawater (meteoric or deep burial fluids) commonly results in crystal coarsening and destruction of primary textural details. Altered carbonate phases (both calcite and dolomite) are often sparry and characterized by planar grain boundaries. The isotopic composition of oxygen can also be altered during diagenesis, and because of the high concentration of oxygen in diagenetic fluids, the oxygen isotopic composition of carbonate minerals will be reset at relatively low water/rock ratios (< 10; ). Diagenetic alteration tends to decrease 18 O values, although the oxygen isotopic composition of meteoric fluids is highly variable depending on geographic location. Compilation of data for petrographically well-preserved Proterozoic carbonate phases indicates 18 O values generally > −9, although Kaufman and Knoll suggested that values > −5 may be a more reliable indicator of relatively pristine oxygen isotope compositions in Neoproterozoic settings. These values can be used as an initial benchmark for assessing diagenesis. Trace elements (Sr, Mn, and Fe) substituted into the lattice of carbonate minerals can also be used as diagenetic indicators. The incorporation of trace elements into the carbonate lattice is governed by the distribution coefficient, and different types of diagenetic fluids have different trace element compositions. 
Modern, well-oxygenated seawater is high in Sr relative to Mn and Fe such that primary precipitates and early diagenetic phases formed in the presence of seawater are generally enriched in Sr relative to Mn and Fe. This is particularly true for aragonite because of the high distribution coefficient for Sr into aragonite compared to other carbonate minerals. Early fabric-retentive dolostone can also be enriched in Sr relative to Mn and Fe, although dolomite generally has a lower preference for Sr and a higher preference for Mn and Fe compared to calcite. Meteoric and burial fluids, by contrast, tend to be depleted in Sr relative to seawater. The recrystallization process also acts to expel Sr from the lattice of carbonate minerals because of its relatively large ionic radius compared to Ca. As a result, meteoric or burial diagenetic phases are often depleted in Sr relative to precursor marine phases. Burial fluids can also be substantially enriched in Mn and Fe, particularly under reducing conditions. This commonly leads to Mn and Fe enrichment in burial diagenetic phases. Meteoric fluids are variable in their Mn and Fe content-depending largely on redox conditions-such that meteoric calcite phases can be characterized by enrichment or depletion of Mn and Fe. These general relationships have led to the establishment of traditional criteria such as Mn/Sr ratio to assess the fidelity of primary geochemical signatures in carbonate rocks. For example, Kaufman and Knoll suggested that both limestone and dolostone with Mn/Sr ratios < 10 can be expected to retain their primary carbon isotopic signatures. In this study, we use a conservative Mn/Sr ratio of 2.5 as a benchmark for assessing diagenesis. For the Hamajing Member at Wuhe, we examined four thin sections and we provide photomicrographs of samples HMJ-14 and HMJ-19 at various magnifications in fig. S3. Overall, the Hamajing Member is comprised of relatively homogeneous micritic to microsparitic fabric-retentive dolomite with volumetrically insignificant veins and small voids filled with dolomite spar. For the overlying Shibantan Member, we examined ten thin sections and we provide photomicrographs of samples SBT-26, SBT-42, SBT-89, and SBT-107 at various magnifications in fig. S4. The Shibantan Member is composed of micritic to microsparitic calcite that is fabric-retentive, preserving primary textural features such as thin, microbial laminations. Strata are generally organic-rich and preserve alternating organic-poor and organic-rich mm-scale laminations, as well as occasional intervals with small, dispersed mud clasts. For the Baimatuo Member, we examined four thin sections and we provide photomicrographs of samples BMT-172, fig. S5A-C. In the Baimatuo Member, dolomite microspar is also fabric-retentive, preserving primary mm-scale laminations and small mud rip-up clasts. Lastly, for the Yanjiahe Formation that sits above the Dengying Formation, we examined five thin sections and we provide photomicrographs for samples YJH-2, YJH-21, and YJH-40 in fig. S5D-F. Limestone of the Yanjiahe Formation is composed of micritic to microsparitic calcite that is generally fabric-retentive. Some intervals are organic-rich and preserve thin, microbial laminations. Intraformational conglomerates composed of sub-mm-scale mud rip-up clasts are common. In summary, none of the samples examined in this study show the degree of recrystallization observed by Hood et al. in the Neoproterozoic Balcanoona reef complex, South Australia. 
The generally fabric-retentive nature of our samples is suggestive of primary marine precipitation or early stage diagenesis in the presence of seawater (e.g., ). With respect to geochemical characteristics, the Hamajing Member is characterized by relatively low Mn concentrations (< 100 ppm with the exception of two samples), as well as relatively low Sr concentrations (< 100 ppm). Mn/Sr ratios are < 2, with the exception of two samples that have been excluded from further consideration ( fig. S6). Relatively low Sr concentrations are not uncommon in early fabric-retentive dolostone, and therefore are not taken to indicate late-stage diagenetic alteration. Oxygen isotope values are > −6, which also argues against late-stage diagenesis. In summary, petrographic, trace element, and isotopic characteristics suggest that the Hamajing Member has the potential to record seawater geochemical signatures. In the Shibantan Member, Mn concentrations are exceptionally low (< 15 ppm) and Sr concentrations are strongly elevated (up to ~2,700 ppm). As a result, Mn/Sr ratios are generally below 0.01 ( fig. S6). This is strong evidence for the preservation of seawater geochemical signatures. Oxygen isotope values are > −7, which also suggests the lack of substantial meteoric or deep burial diagenesis. In the Baimatuo Member, dolostone is characterized by relatively low Mn contents (< 150 ppm with the exception of three samples) and relatively low Sr concentrations (< 65 ppm) ( fig. S6). Sr is easily expelled from the crystal lattice during recrystallization and dolomite has a generally lower preference for Sr than calcite, such that early fabricretentive dolomite formed in the presence of seawater is often depleted in Sr. The low Mn contents and generally low Mn/Sr ratios of these samples (< 2.5 with the exception of six samples that have been excluded) indicate a high degree of preservation of seawater geochemistry, despite these low Sr contents. This hypothesis is also supported by oxygen isotope data ( 18 O values > −6 ). In summary, petrography and geochemistry both indicate that limestone and dolostone of the Shibantan and Baimatuo members are either primary precipitates or formed during early seafloor diagenesis. In the Yanjiahe Formation, we excluded three samples based on high Mn contents, low Sr contents, and therefore, high Mn/Sr ratios (two samples with Mn/Sr > 15). The remaining samples are characterized by Mn contents < 100 ppm, Sr contents > 250 ppm, and oxygen isotope values > −7.5, suggesting preservation of seawater geochemistry ( fig. S6). In addition to absolute Mn and Sr contents, we have also investigated Mn/(Mg+Ca) and Sr/(Mg+Ca) ratios ( fig. S7). Mn/(Mg+Ca) data strongly mirror the previously-discussed Mn contents, and suggest that no anomalous Mn enrichment occurred in the Wuhe section that can be attributed to late-stage diagenesis. In the Gaojiashan section, there is a trend of decreasing Mn/(Mg+Ca) upward in the section ( fig. S7D). If this were caused by late-stage burial diagenesis or pore water anoxia during early diagenesis, then we would also expect systematic differences in 238 U, with higher 238 U in the lower interval compared to the upper interval. This is not observed, however, suggesting that 238 U values were not systematically altered by early pore water anoxia or late-stage burial diagenesis. 
We also note that 238 U in the Gaojiashan section is identical to the wellpreserved, coeval Shibantan Member at the Wuhe section, suggesting that both sections record primary values. Sr/(Mg+Ca) values also mirror the previously-discussed Sr contents, and show expected trends related to carbonate mineralogy. In the Wuhe section as a whole, there is no correlation between carbon and oxygen isotope values (R 2 = 0.009; fig. S8). 13 C and 18 O tend to co-vary if both systems have been influenced by proportional mixing with an external (diagenetic) fluid, and in the case of the Wuhe section, the lack of co-variation between these parameters is another line of evidence supporting preservation of early, seawater-derived geochemical signatures. Lastly, in the Gaojiashan section, the lower 30 meters are characterized by Mn > 300 ppm (with the exception of one sample), Sr < 150 ppm (with the exception of two samples), and highly variable oxygen isotope values ranging from −1.78 to −8.39 ( fig. S6). Mn/Sr ratios are uniformly > 2.5, and we have therefore excluded eight samples from this interval. By contrast, the upper part of the section is characterized by low Mn contents (< 300 ppm with the exception of one sample), relatively high Sr contents (most samples > 400 ppm), low Mn/Sr ratios (< 1.5), and oxygen isotope values > −7.5 (with the exception of one sample). There is also no co-variation between carbon and oxygen isotopes for the entire Gaojiashan section (R 2 = 0.017; fig. S8), which would be expected if both parameters were influenced by proportional mixing with an external fluid. These data, along with generally fabric-retentive petrographic characteristics, strongly indicate that the upper 20 meters of the Gaojiashan section have the potential to record seawater geochemistry. Using these diagenetic criteria, 2, 0, 6, and 2 outliers have been identified from the Hamajing Member (total sample number #8), the Shibantan Member (total sample number #24), the Baimatuo Member (total sample number #23), and the Yanjiahe Formation (total sample number #7) at the Wuhe section. Eight outliers have been identified from the Gaojiashan Member (total sample number #27) at the Gaojiashan section. Because the main conclusions of our study are based on the very light U isotope compositions recorded in the Shibantan and Gaojiashan members, we further investigated the extent of correlation between 238 U and 18 O, Sr concentration, Mn concentration, and Mn/Sr for samples in these units with Mn/Sr ratios < 2.5 (table S2). There is no systematic correlation between diagenetic indicators and 238 U in the Shibantan and Gaojiashan members, suggesting that late-stage diagenesis did not progressively alter U isotope values. It is also important to note that these very light U isotope values are found in two geographically disparate, but coeval sections, which also argues against systematic alteration of U isotopes. Detrital contaminations When evaluating detrital contamination, we used detrital indicators such as Rb/Sr ratios and Al contents (e.g., Ling et al. ). Among samples with Mn/Sr < 2.5, only those samples with Rb/Sr < 0.02 and Al (wt.%) < 0.35 % were used in our main text discussion. The Rb/Sr and Al content plots are shown in fig. S9. In addition, for samples with Mn/Sr < 2.5, Rb/Sr < 0.02 and Al < 0.35%, we further looked at U/Al ratios to confirm that our dissolution procedure primarily targets carbonate-bound (and not detrital) U. 
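For readers who wish to apply the same sample screening to the data tables, the short sketch below encodes the cut-offs just described (Mn/Sr < 2.5, Rb/Sr < 0.02, Al < 0.35 wt.%) together with the upper-bound detrital-U estimate discussed in the next paragraph, which uses the upper-crustal U/Al ratio of ~0.331 ppm per wt.%. The function names and input layout are illustrative conveniences, not part of the original data files.

# Illustrative screening of carbonate samples using the diagenetic/detrital
# cut-offs described in the text, plus an upper-bound estimate of detrital U.
# Function names and the input layout are hypothetical.

CRUSTAL_U_PER_AL = 0.331  # ppm U per wt.% Al in average upper continental crust

def passes_screening(mn_ppm, sr_ppm, rb_ppm, al_wt_pct):
    """Apply the Mn/Sr < 2.5, Rb/Sr < 0.02 and Al < 0.35 wt.% cut-offs."""
    return (mn_ppm / sr_ppm < 2.5
            and rb_ppm / sr_ppm < 0.02
            and al_wt_pct < 0.35)

def max_detrital_u_fraction(u_ppm, al_wt_pct):
    """Upper bound on detrital U, assuming all Al is detrital with crustal U/Al."""
    return CRUSTAL_U_PER_AL * al_wt_pct / u_ppm

# Hypothetical sample with values in the range reported for the Shibantan Member:
print(passes_screening(mn_ppm=12.0, sr_ppm=2100.0, rb_ppm=1.5, al_wt_pct=0.10))  # True
print(f"{max_detrital_u_fraction(u_ppm=2.0, al_wt_pct=0.10):.1%}")               # ~1.7 %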
The U/Al ratio of the upper continental crust is ~0.331 ppm/wt.%, and U/Al ratios in our samples are substantially enriched above crustal values by approximately two orders of magnitude ( fig. S9), indicating that the dissolution procedure is effective at isolating carbonate-bound U. We have also used Al content (wt.%) data to estimate the possible amount of contribution of U from detrital sources. Specifically, assuming all the measured Al in our samples is from detrital minerals, and using the U/Al ratio of upper continental crust, we estimate that detrital U accounts for <2 % of total U for Wuhe samples, and < 10 % of total U for Gaojiashan samples. Thus, we conclude that detrital influence on our 238 U signals are minimal. We also note that the estimated amount of U associated with detrital material is different between the Shibantan Member at Wuhe and its equivalent Gaojiashan Member. However, the 238U signals of these two members are identical. Ce anomalies and dolomitization Romaniello et al. point out that in the modern Bahamas, bulk carbonate sediments can incorporate U with a 238 U value that is 0.2-0.4 heavier than seawater due to the incorporation of 238 U-enriched U(IV) under locally pore water euxinic conditions. If this were true in our Dengying carbonates, then we could have potentially underestimated the extent of U removal associated with anoxic sedimentary sinks. First, we examined local water column redox conditions by looking at the Ce anomaly (Ce/Ce*) recorded in our carbonate samples. Unlike the other REEs, which are strictly trivalent in the oceans, cerium (Ce) can exist in either trivalent or tetravalent forms depending on redox conditions. The redox state of Ce is modulated by the presence of manganese oxides and/or bacteria, where Ce is oxidized by and adsorbed onto mineral surfaces. Thus, the concentration of Ce relative to the other REEs is associated with dissolved oxygen concentrations, and can be used to infer redox conditions of the overlying water column. Ce anomalies are calculated following Ling et al.. The Ce anomalies at Wuhe range between 0.29 and 0.79 with a mean of 0.5, and the Ce anomalies at Gaojiashan range between 0.70 and 0.98 with a mean of 0.76 ( fig. S10). The Ce anomalies indicate that the local water column at both Wuhe and Gaojiashan were likely dominated by oxic conditions. This confirms that our carbonates can be considered an oxic sink for U, and thus can passively capture the 238 U signal of seawater. Second, we examined the possibility of pore water euxinia using Mo and U concentrations, as well as correlations between 238 U and U/(Mg+Ca) ratios and Mo/(Mg+Ca) ratios ( fig. S10). This is in contrast to carbonate sediments from the modern Bahamas, most of which are characterized by Mo concentrations between 1.8 and 28 ppm. This indicates that pore water euxinia was less prevalent during deposition of our samples than on the modern Bahamian carbonate platform. In addition, U/(Mg+Ca) and Mo/(Mg+Ca) are extremely low (excluding two samples, BM-1 and YJH-40) and there is no systematic stratigraphic variation. Furthermore, there are no statistically significant correlations between 238 U and U/(Mg+Ca) and Mo/(Mg+Ca) in our carbonates (table S2). We note that although the two "outliers", the BM-1 and the YJH-40, were likely affected by pore water anoxia during early diagenesis, their 238 U did not depart from the surrounding samples with low U concentrations. 
If there were U addition (compared to the surrounding low-U-concentration samples) associated with pore-water anoxia during early diagenesis, then 60-100% of the U in these two samples would be related to pore-water anoxia, and these samples should therefore show distinctly high δ238U values. However, this is in contrast to our observation that the δ238U of these two samples does not obviously depart from that of the surrounding low-U-concentration samples. Thus, these high U concentrations may instead reflect other factors that did not affect δ238U. Romaniello et al. also observed a U-isotope change associated with dolomitization in one Bahamian tidal pond. There was a strong correlation between δ238U and Mg/Ca ratio (R² = 0.96), suggesting that the U-isotope change was possibly associated with dolomitization. This seems to be a special, spatially restricted case, however. In our samples from Wuhe, no statistically significant correlation is observed between δ238U and Mg/Ca molar ratio (R² = 0.25), suggesting that dolomitization has not systematically altered the primary isotopic record. Further confidence that dolomitization may not have been an issue for paleo-δ238U records comes from a global compilation of δ238U studies across the Permian-Triassic boundary.

Uranium isotope mass balance constraints on U removal to anoxic/euxinic sinks

The implied changes to the extent of U removal into anoxic sediments can be described by a mass balance equation for the fraction of anoxic/euxinic sinks and their isotopic composition (following Montoya-Pino et al.; the balance is written out below). Here, the subscripts input, anoxic, and other denote the riverine input, the anoxic/euxinic sink, and the other sedimentary sinks, respectively, and fanoxic represents the fraction of the riverine U input that is deposited in anoxic/euxinic sediments.

Anoxic seafloor area modeling calculation

The fraction of U removed into anoxic/euxinic sediments can be coupled to the extent of seafloor covered by anoxic/euxinic waters, as described by Wang et al. The implied changes to the extent of bottom-water anoxia can be described by differential mass balance equations for the seawater uranium inventory and its isotopic composition, respectively (also written out below). δ238Usw and δ238Uriv are the U isotope compositions of seawater and the riverine source, respectively. δ238Uanoxic, δ238Usuboxic, and δ238Uoxic are the U isotope compositions of the anoxic sedimentary sink, the suboxic sedimentary sink, and the sum of the other sinks, respectively. Here, we simplify the inputs to Jriv, the riverine flux, whose modern value is ~4 × 10^7 moles U/yr. The outputs are assumed to consist of the anoxic sediment sink (Janoxic), the suboxic sediment sink (Jsuboxic), and the sum of the other sinks (Joxic). Δanoxic is the effective fractionation factor associated with anoxic sediment deposition, Δsuboxic is the effective fractionation factor associated with suboxic sediment deposition, and Δoxic is the effective fractionation factor associated with the remaining other sinks (+0.005‰, calculated to maintain isotopic steady state in the modern ocean; e.g., Brennecka et al. and Wang et al.). We further define Fanox as the total fraction of seafloor area overlain by anoxic waters, where Aocean is the total seafloor area of the modern ocean. Model parameterization was based on studies of the modern U cycle and is summarized in table S3, where Aanoxic + Asuboxic + Aoxic = Aocean. In this modeling, we adopted a value of −0.34‰ for the δ238U of rivers.
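In the notation defined above, these balances take the following standard form (after the formulations of Montoya-Pino et al. and Wang et al.; the expressions below are a restatement in that notation rather than a verbatim reproduction):

% Steady-state U isotope mass balance (after Montoya-Pino et al.):
\delta^{238}U_{input} = f_{anoxic}\,\delta^{238}U_{anoxic} + (1 - f_{anoxic})\,\delta^{238}U_{other}

% Dynamic mass balances for the seawater U inventory N_{sw} and its isotopic composition (after Wang et al.):
\frac{dN_{sw}}{dt} = J_{riv} - J_{anoxic} - J_{suboxic} - J_{oxic}

\frac{d\left(N_{sw}\,\delta^{238}U_{sw}\right)}{dt} = J_{riv}\,\delta^{238}U_{riv} - J_{anoxic}\left(\delta^{238}U_{sw} + \Delta_{anoxic}\right) - J_{suboxic}\left(\delta^{238}U_{sw} + \Delta_{suboxic}\right) - J_{oxic}\left(\delta^{238}U_{sw} + \Delta_{oxic}\right)

% Fraction of the seafloor overlain by anoxic waters:
F_{anox} = \frac{A_{anoxic}}{A_{ocean}}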
As stated earlier, riverine input is the single major source of U into the ocean. The weighted average δ238U of riverine input is −0.34‰. An exception not included in this average is the Yangtze River in China, where two reported measurements yield an average δ238U value of ~−0.70‰. Although these data and their ability to represent the entire Yangtze catchment need to be confirmed, this "outlier" is interpreted to reflect local U contributions from evaporite minerals (halite) that are abundant near the source of the Yangtze River. If so, this is an unusual situation, because evaporites are not major sources of U to the oceans overall. Previous global riverine estimates yielded values of −0.30 to −0.27‰. It thus appears that the riverine composition is indistinguishable from average continental crust, which has been measured as −0.30 ± 0.04‰ and −0.31 ± 0.05‰. In this modeling exercise, we have simplified the ocean into oxic, suboxic, and anoxic states. Because the fractionation factor of U isotopes under suboxic conditions is derived from δ238U measurements on the Peruvian continental margin and off the coast of Washington State, by referring to suboxic conditions we are describing a situation similar to the suboxic waters of those two settings. Bottom-water O2 concentrations at both the Peruvian continental margin and the Washington State margin are within the range of 0.2 to 2 ml L-1 that has previously been used to define suboxic depositional environments. Similarly, the fractionation factor of U isotopes under anoxic conditions is primarily based on observations from the modern Black Sea and the modern Saanich Inlet, and therefore, by referring to anoxic conditions, we are describing a situation similar to these two modern anoxic environments. Here, bottom-water O2 concentrations are < 0.2 ml L-1 and H2S concentrations are > 400 μM. We first varied the areal extent of anoxic/euxinic and oxic seafloor while keeping the areal extent of suboxic seafloor the same as the modern value. This modeling exercise suggests that essentially the entire seafloor would need to be covered by anoxic/euxinic sediments (assuming a fractionation factor of 0.6‰ between seawater and anoxic/euxinic sediments) for terminal Ediacaran seawater δ238U to approach values as low as −0.95‰. In reality, however, suboxic seafloor area is likely to co-vary with anoxic/euxinic seafloor area. We tested various suboxic areal extents, the results of which tell us that it is difficult or even impossible to generate seawater δ238U of −0.95‰ with large suboxic seafloor areas (assuming an anoxic/euxinic sink-seawater fractionation of 0.6‰). Thus, variations in suboxic seafloor area have a very small effect on our basic conclusion that significantly expanded anoxic/euxinic seafloor area is likely the only major process that can cause terminal Ediacaran seawater δ238U to reach values as low as −0.95‰. Second, we varied the fractionation factor between seawater and anoxic/euxinic sediments and kept the suboxic seafloor area fixed at 0%. These results are summarized in Fig. 3B in the main text. Our results suggest that the inferred extent of ocean anoxia largely depends on the assumed average fractionation factor between seawater and anoxic/euxinic sediments.
If we use larger fractionation factors of 0.68‰ and 0.99‰ (the two end-member values observed for reduction of U(VI) to U(IV) by metal-reducing bacteria), modeling results suggest that ~33% and ~8.5% of anoxic/euxinic seafloor area, respectively, would be required to drive terminal Ediacaran seawater δ238U to values as low as −0.95‰. We also ran our model with a Δanoxic of 0.835‰, which is an "average" fractionation factor representing microbially mediated U reduction, and is close to the maximum Δanoxic observed both in the modern Saanich Inlet (0.79‰) and in the Black Sea (0.83‰). Here, we calculate that fanoxic = 0.7, meaning that a minimum of 70% of the global riverine U input was removed into anoxic/euxinic sediments when the Shibantan/Gaojiashan members were deposited. This fraction of U removal into anoxic/euxinic sediments is estimated to occur over an anoxic/euxinic seafloor area of ~14%. In reality, the suboxic seafloor area would not be 0% and would co-vary with expanded anoxic/euxinic seafloor area. If we assume that fsuboxic was at least as large as fanoxic in the latest Ediacaran ocean, then a combination of fsuboxic = 21% and fanoxic = 21% is minimally required in order to account for a latest Ediacaran seawater average δ238U of −0.95‰ (fig. S12). In reality, 21% of the seafloor being overlain by anoxic waters would likely require an even larger seafloor area overlain by suboxic waters; for example, fanoxic = ~0.35% while fsuboxic = 6% in the modern ocean. We therefore conclude that at least 42% of the seafloor was covered by oxygen-deficient (anoxic + suboxic) waters. In the abstract of the main text, however, we focus on anoxic seafloor area. This simple modeling exercise thus gives us the lowest estimate of anoxic/euxinic seafloor area. Parameters used in the modeling exercise are summarized in table S3.

Supplementary tables

table S1. The sample-dissolving procedure.
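For reference, the fanoxic values quoted in this section follow from the steady-state balance above when each sink composition is written as seawater plus its effective fractionation; the back-of-the-envelope check below uses the values given in this section and is illustrative only:

f_{anoxic} = \frac{\delta^{238}U_{input} - \delta^{238}U_{sw} - \Delta_{other}}{\Delta_{anoxic} - \Delta_{other}}

% With \delta^{238}U_{sw} = -0.95‰, \delta^{238}U_{input} = -0.34‰, \Delta_{other} = +0.005‰:
%   \Delta_{anoxic} = 0.835‰  =>  f_{anoxic} ≈ 0.605 / 0.830 ≈ 0.73 (roughly 0.7, as stated above)
%   \Delta_{anoxic} = 0.6‰    =>  f_{anoxic} ≈ 0.605 / 0.595 ≈ 1.0 (essentially all riverine U removed to anoxic/euxinic sinks)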
/* Exported interface, documented in data-avg.h */ uint32_t data_avg_proc( void *pw, unsigned channel, uint32_t sample) { struct data_avg_ctx *ctx = pw; struct channel_data *c; uint32_t value; assert(channel < ctx->count); c = &ctx->channel[channel]; data_avg__add_sample(c, sample); if (ctx->normalise) { value = data_avg__get_normalised(c, sample); } else { value = data_avg__get_average(c); } if (c->utilisation == c->capacity) { data_avg__drop_sample(c); } return value; }
Device interaction between cardiac contractility modulation (CCM) and subcutaneous defibrillator (SICD)

Combined implantation of cardiac contractility modulation (CCM) with a subcutaneous implantable cardioverter-defibrillator (SICD) appears to be a suitable option to reduce the number of intracardiac leads and complications for patients. Here we report on a patient with ischemic cardiomyopathy carrying an SICD in whom a CCM device was implanted. During crosstalk testing after CCM implantation, the SICD misannotated QRS complexes and T waves. The problem was solved by reprogramming the CCM device, while preserving SICD functionality and improving heart failure symptoms. In conclusion, SICD combined with CCM seems to be a good and safe option for patients when device interference has been ruled out.

| INTRODUCTION

Cardiac contractility modulation (CCM) is a device-based treatment option for patients with chronic heart failure with reduced ejection fraction (HFrEF), improving exercise capacity and reducing the rates of cardiovascular death and hospitalization for heart failure. 1,2 The device is implanted similarly to a pacemaker, with two bipolar leads positioned at the right ventricular septum (local sense and right ventricle), delivering non-excitatory high-voltage biphasic electrical impulses during the absolute refractory period. Since many patients receiving CCM present with a left ventricular ejection fraction (LVEF) ≤ 35%, concomitant and prophylactic placement of an implantable cardioverter-defibrillator (ICD) needs to be considered according to guideline recommendations. 3 However, no currently available device combines CCM and ICD capabilities in a single unit, requiring the transvenous implantation of at least two leads into the heart. Combining transvenous implantation of CCM with the subcutaneous placement of an ICD (S-ICD) appears to be a suitable option for those patients who do not require pacemaker stimulation and who may benefit from a reduction in lead-related complications. In the following, we report a case of unexpected electrical interference when combining CCM and S-ICD placement in one patient. Inadequate noise annotation by the S-ICD, when combined with CCM in the same patient, is a rare and usually noncritical malfunction. 4 When noise occurs, the respective annotation (N) should not limit tachycardia detection by the S-ICD, but there is no experience of noise annotation coinciding with T-wave oversensing. By modifying the CCM programming (see description above), the problem was resolved and the therapy nevertheless remained clinically effective. Probably, the reduced CCM output, although excitatory, resulted in a less deformed QRS complex morphology (Figure 3), which was then adequately recognized by the S-ICD. In our patients, reduction of pacing amplitude does not influence the clinical outcome of CCM. The lowest threshold of CCM voltage output sufficient to impose a therapeutic effect has not been tested systematically in trials.

| CASE REPORT

However, we and others have seen that CCM voltage amplitude can occasionally be reduced to prevent palpitations due to high-output pacing without jeopardizing the clinical benefit. In these cases, we empirically increase the duration of daily delivered CCM therapy (from 8 to 10 h) to compensate for the lower amplitude, in accordance with the manufacturer's recommendation. We acknowledge that Kloppe et al.
5 described CCM therapy as being similarly effective over a range of shorter (5 h) to longer (12 h) daily periods of stimulation, but limited clinical data exist. In our patient, the defibrillation threshold (DFT) was tested after the initial S-ICD placement in 2017. Following the voltage adaptation of CCM pacing that eliminated the interference with S-ICD sensing during crosstalk testing, we did not repeat DFT testing. The CCM device contains a built-in algorithm that inhibits CCM stimulation when irregular electrical activity, including ventricular extrasystoles and ventricular tachycardia, is detected. Roeger et al. 4 reported that, during S-ICD testing, CCM signal delivery stopped immediately upon VT/VF detection and the S-ICD recognized the arrhythmia adequately in all 20 patients. For this reason, we did not regard repeat DFT testing as necessary, but we acknowledge that other operators may prefer to repeat the test. The manufacturer does not offer an official recommendation for such rare complications. In conclusion, S-ICD combined with CCM appears to be an effective and safe option for patients suffering from chronic HFrEF. Crosstalk testing to ensure adequate S-ICD detection is of high importance when CCM and S-ICD are combined.

ACKNOWLEDGMENT

This study was supported by internal funds. Open access funding enabled and organized by Projekt DEAL.
Précis of Realizing Reason: A Narrative of Truth and Knowing

In the seventeenth century, Descartes introduced a radically new form of mathematical practice, one that he came to think was the work of pure reason. Kant saw that it was not: Descartes' mathematical practice no less than ancient diagrammatic practice constitutively involves paper-and-pencil reasoning, not, to be sure, images or drawn figures but written signs nonetheless, in particular, equations in the symbolic language of arithmetic and algebra. While the ancient paradigm of knowing as perception, whether with one's bodily eyes or with the eyes of the mind, was indeed superseded with the appearance of Descartes' Geometry in 1637, it was not pure reason but only the understanding that was now to be regarded as the power of knowing. In 1637 Descartes fundamentally transformed the practice of mathematics. Fifty years later with the publication of his Principia, Newton transformed the practice of physics. It was left to Kant, nearly a century on, to transform the practice of philosophy in the Critique of Pure Reason (1781/1787). Over the course of the nineteenth century, mathematical practice was again transformed to become, as it remains today, a practice of deductive reasoning directly from concepts. And in the twentieth century the practice of fundamental physics was again transformed as well. Philosophy has not had its revolution but remains merely Kantian. And it does so because although Frege in fact saw (pace Kant) that deductive reasoning can nonetheless be ampliative, and thereby that pure reason had been realized as a power of knowing, Frege's logic to show this was systematically misunderstood. As a result, the potential of Frege's Begriffsschrift to revolutionize philosophical practice has, for over a century, lain dormant. Realizing Reason: A Narrative of Truth and Knowing aims to complete what Frege began, to catalyze the needed revolution in philosophy, thereby ushering in a new, profoundly post-Kantian era in philosophy. A central aim of Realizing Reason is to trace developments over the course of the intellectual history of the West that have culminated, so I argue, in the
// Code generated by goctl. DO NOT EDIT!
// Source: order.proto

package server

import (
	"context"

	"go-bc/app/order/cmd/rpc/internal/logic"
	"go-bc/app/order/cmd/rpc/internal/svc"
	"go-bc/app/order/cmd/rpc/pb"
)

type OrderServer struct {
	svcCtx *svc.ServiceContext
}

func NewOrderServer(svcCtx *svc.ServiceContext) *OrderServer {
	return &OrderServer{
		svcCtx: svcCtx,
	}
}

// Place a homestay order
func (s *OrderServer) CreateHomestayOrder(ctx context.Context, in *pb.CreateHomestayOrderReq) (*pb.CreateHomestayOrderResp, error) {
	l := logic.NewCreateHomestayOrderLogic(ctx, s.svcCtx)
	return l.CreateHomestayOrder(in)
}

// Homestay order details
func (s *OrderServer) HomestayOrderDetail(ctx context.Context, in *pb.HomestayOrderDetailReq) (*pb.HomestayOrderDetailResp, error) {
	l := logic.NewHomestayOrderDetailLogic(ctx, s.svcCtx)
	return l.HomestayOrderDetail(in)
}

// Update the trade state of a homestay order
func (s *OrderServer) UpdateHomestayOrderTradeState(ctx context.Context, in *pb.UpdateHomestayOrderTradeStateReq) (*pb.UpdateHomestayOrderTradeStateResp, error) {
	l := logic.NewUpdateHomestayOrderTradeStateLogic(ctx, s.svcCtx)
	return l.UpdateHomestayOrderTradeState(in)
}

// List a user's homestay orders
func (s *OrderServer) UserHomestayOrderList(ctx context.Context, in *pb.UserHomestayOrderListReq) (*pb.UserHomestayOrderListResp, error) {
	l := logic.NewUserHomestayOrderListLogic(ctx, s.svcCtx)
	return l.UserHomestayOrderList(in)
}
// -------------------------------------------------------------------------------- // Copyright 2002-2021 Echo Three, LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // -------------------------------------------------------------------------------- package com.echothree.ui.cli.database.util; import com.echothree.ui.cli.database.util.current.CurrentColumn; import com.echothree.ui.cli.database.util.current.CurrentForeignKey; import com.echothree.ui.cli.database.util.current.CurrentIndex; import com.echothree.ui.cli.database.util.current.CurrentTable; import java.util.ArrayList; import java.util.HashSet; import java.util.List; import java.util.Set; public class DatabaseUpdateTasks { List<Table> tablesNeeded; List<Column> columnsNeeded; List<Column> incorrectColumns; Set<Index> indexesNeeded; Set<Column> foreignKeysNeeded; Set<CurrentForeignKey> extraForeignKeys; Set<CurrentIndex> extraIndexes; List<CurrentTable> extraTables; List<CurrentColumn> extraColumns; /** Creates new DatabaseUpdateTasks */ /** Creates a new instance of DatabaseUpdateTasks */ public DatabaseUpdateTasks() { tablesNeeded = new ArrayList<>(); columnsNeeded = new ArrayList<>(); incorrectColumns = new ArrayList<>(); indexesNeeded = new HashSet<>(); foreignKeysNeeded = new HashSet<>(); extraForeignKeys = new HashSet<>(); extraIndexes = new HashSet<>(); extraTables = new ArrayList<>(); extraColumns = new ArrayList<>(); } public void addTable(Table table) { tablesNeeded.add(table); indexesNeeded.addAll(table.getIndexes()); foreignKeysNeeded.addAll(table.getForeignKeys()); } public List<Table> getTables() { return tablesNeeded; } public void addColumn(Column column) { columnsNeeded.add(column); if(column.getType() == ColumnType.columnForeignKey) { addForeignKey(column); } } public List<Column> getColumns() { return columnsNeeded; } public void addIndex(Index index) { indexesNeeded.add(index); } public List<Column> getIncorrectColumns() { return incorrectColumns; } public void addIncorrectColumn(Column column) { incorrectColumns.add(column); } public Set<Index> getIndexes() { return indexesNeeded; } public void addForeignKey(Column foreignKey) { foreignKeysNeeded.add(foreignKey); } public Set<Column> getForeignKeys() { return foreignKeysNeeded; } public void addExtraForeignKey(CurrentForeignKey cfk) { extraForeignKeys.add(cfk); } public Set<CurrentForeignKey> getExtraForeignKeys() { return extraForeignKeys; } public void addExtraIndex(CurrentIndex ci) { extraIndexes.add(ci); } public Set<CurrentIndex> getExtraIndexes() { return extraIndexes; } public void addExtraTable(CurrentTable ct) { // Also need to treat FKs targeting columns in ct as extras. 
ct.getColumns().values().stream().forEach((cc) -> { extraForeignKeys.addAll(cc.getTargetForeignKeys()); }); extraTables.add(ct); } public List<CurrentTable> getExtraTables() { return extraTables; } public void addExtraColumn(CurrentColumn cc) { cc.getIndexes().stream().forEach((ci) -> { addExtraIndex(ci); }); cc.getForeignKeys().stream().forEach((cfk) -> { addExtraForeignKey(cfk); }); cc.getTargetForeignKeys().stream().forEach((cfk) -> { addExtraForeignKey(cfk); }); extraColumns.add(cc); } public void addExtraColumns(List<CurrentColumn> ccs) { ccs.stream().forEach((cc) -> { addExtraColumn(cc); }); } public List<CurrentColumn> getExtraColumns() { return extraColumns; } }
package dmo.fs.spa.db; import dmo.fs.spa.utils.SpaLogin; import io.vertx.core.Future; import io.vertx.mutiny.core.Promise; public interface SpaDatabase { SpaLogin createSpaLogin(); Promise<SpaLogin> getLogin(SpaLogin spaLogin); Promise<SpaLogin> addLogin(SpaLogin spaLogin); Promise<SpaLogin> removeLogin(SpaLogin spaLogin); Future<Void> databaseSetup(); static <T> void setupSql(T pool) {}; }
José Luis García-López

Early life
José Luis García-López was born on March 26, 1948, in Pontevedra, Spain, and lived in Argentina from the age of three. He was inspired by artists such as Alex Raymond, Harold Foster, Milton Caniff, José Luis Salinas, and Alberto Breccia.

Career
During the 1960s, García-López worked for Charlton Comics. In 1974 he moved to New York, where he met DC Comics editor Joe Orlando. His first interior art credit for DC was June 1975's "Nightmare In Gold" back-up in Action Comics #448, where he inked the pencils of artist Dick Dillin. The following month, he inked the pencils of Curt Swan on a "Private Life of Clark Kent" backup story in Superman #289, before graduating to full pencils on a back-up story written by E. Nelson Bridwell in Detective Comics #452 (October 1975). The following month, García-López and writer Gerry Conway created the Hercules Unbound series, and in April 1977, he and writer Michael Fleisher launched the Jonah Hex ongoing series. García-López and Conway collaborated on a Superman vs. Wonder Woman story in All-New Collectors' Edition #C-54 (1978). DC Comics Presents, a team-up title starring Superman, was launched in 1978 by writer Martin Pasko and García-López. He drew the first appearance of the Snowman in Batman #337 (July 1981) and a DC–Marvel crossover between Batman and the Hulk in DC Special Series #27 (Fall 1981). He penciled five issues of The New Teen Titans in 1985, and writer Marv Wolfman later commented that "I knew that I had this incredible artist who could draw almost anything that I wanted...So I decided to make the story just the biggest spectacle I could come up with." Other notable works include Atari Force, Road to Perdition, Deadman, and various DC superheroes. His work on the DC series Twilight received an Eisner Award nomination. His work on the Cinder and Ashe limited series was praised by ComicsAlliance in 2014, which noted "His characters are never in a static position; they're always stretching, or crunched up, or twisting. There is constant dramatic content in their movements." During his exclusive contract with DC Comics, he has been responsible for penciling the style guides used by the company to provide official artwork for merchandise licenses around the world. García-López illustrated the 1982 guide, still used today as part of a DC Comics retro line, a 1992 guide focused on the Batman Returns movie, the Superman-related guides from 1991, 1994, and 2006, and other DC Universe guides in 1998, 2004, and 2012. His 2000s work includes JLA: Classified and a 2009 story arc in Batman Confidential, which introduced the King Tut character. He drew the Metal Men story in Wednesday Comics, which was written by Dan DiDio. In 2011, he drew one of the stories in The Spirit #17. DC Comics published a collection of his Superman stories, Adventures of Superman: José Luis García-López, in 2013. He and Len Wein produced a comics adaptation of a Two-Face story written by Harlan Ellison that was originally intended for the Batman television series. García-López drew the "Actionland!" chapter in Action Comics #1000 (June 2018) and the Superman story in DC Nation #0 (July 2018).

Awards
1992: Nominated for "Best Artist" Eisner Award, for Twilight.
1997: Nominated for "Best Penciller/Inker or Penciller/Inker Team" Eisner Award, with Kevin Nowlan, for Doctor Strangefate
Alabama Atty. Gen. Bill Pryor filed a lawsuit in an effort to ban gambling machines that give winners coupons good for cash or prizes. Pryor filed the suit in Montgomery County Circuit Court asking that a statute known as the "Chuck E. Cheese law" be declared unconstitutional. The law was passed in 1996 to allow arcade games, such as those found at the popular restaurant for kids, to award coupons for prizes. But the law also has been used to allow adult gambling machines that award coupons for cash or merchandise to pop up in convenience stores and arcades statewide.
/* eslint-disable */ export type MsgVpnRestDeliveryPointQueueBinding = { /** * Enable or disable whether the authority for the request-target is replaced with that configured for the REST Consumer remote. When enabled, the broker sends HTTP requests in absolute-form, with the request-target's authority taken from the REST Consumer's remote host and port configuration. When disabled, the broker sends HTTP requests whose request-target matches that of the original request message, including whether to use absolute-form or origin-form. This configuration is applicable only when the Message VPN is in REST gateway mode. The default value is `false`. Available since 2.6. */ gatewayReplaceTargetAuthorityEnabled?: boolean; /** * The name of the Message VPN. */ msgVpnName?: string; /** * The request-target string to use when sending requests. It identifies the target resource on the far-end REST Consumer upon which to apply the request. There are generally two common forms for the request-target. The origin-form is most often used in practice and contains the path and query components of the target URI. If the path component is empty then the client must generally send a "/" as the path. When making a request to a proxy, most often the absolute-form is required. This configuration is only applicable when the Message VPN is in REST messaging mode. The default value is `""`. */ postRequestTarget?: string; /** * The name of a queue in the Message VPN. */ queueBindingName?: string; /** * The name of the REST Delivery Point. */ restDeliveryPointName?: string; } export namespace MsgVpnRestDeliveryPointQueueBinding { /** * the discriminator for the model if required for more complex api's */ export const discriminator = 'MsgVpnRestDeliveryPointQueueBinding'; }
Length-Dependent Diblock DNA with Poly-cytosine (Poly-C) as High-Affinity Anchors on Graphene Oxide. DNA-functionalized graphene oxide (GO) is a popular system for biosensor development and directed materials assembly. Compared to covalent attachment, simple physisorption of DNA has been more popular, and a DNA sequence with a strong affinity on GO is highly desirable. Recently, we found that poly-cytosine (poly-C) DNA can strongly adsorb on many common nanomaterials, including GO. To identify an optimal length of poly-C DNA, we herein designed a series of diblock DNA sequences containing between 0 and 30 cytosines. The displacement of a random sequenced DNA by poly-C DNA was demonstrated, confirming the desired diblock structure on GO with the poly-C block anchoring on the surface and the other block available for hybridization. The adsorption density of poly-C containing DNA did not vary much as the length of the poly-C block increased, suggesting the conformation of the anchoring DNA on the GO was quite independent of the DNA length. With a longer poly-C block, the efficiency of surface hybridization of the other block increased, while nonspecific adsorption of noncomplementary DNA was inhibited more. Compared to poly-adenine (poly-A)-containing DNAs, which were previously used for the same purpose, poly-C DNA adsorption is more stable. Using four types of 15-mer DNA homopolymers as the intended anchoring sequences, the C15 DNA had the best hybridization efficiency. This work has suggested the optimal length for the poly-C block to be 15-mer or longer, and it has provided interesting insights into the DNA/GO biointerface.
Postoperative Hypoparathyroidism after Completion Thyroidectomy for Well-Differentiated Thyroid Cancer. Objective Thyroid surgery may lead to postoperative complications. The aim of this paper was to determine whether the rate of postoperative hypoparathyroidism (HPT) is influenced by whether surgery is staged. Design Single-institution retrospective observational study. Methods The clinical records of 786 patients treated at the Otolaryngology Unit of the Azienda USL-IRCCS di Reggio Emilia between January 1990 and December 2015 were reviewed. Patients were divided into two groups according to the surgical treatment received: Group TT (637 patients, 81.04%) underwent single-stage total thyroidectomy; Group cT (149 patients, 18.96%) underwent loboisthmusectomy and delayed completion total thyroidectomy. Transient and permanent HPT, assessed after 6 months of follow-up, were the primary endpoints. Risk factors for postoperative HPT were also analysed as secondary outcomes. Results Rates of transient HPT in Group TT were higher than those observed in Group cT (P = 0.0057). Analysis of risk factors identified sex as an independent risk factor for transient HPT only in Group TT (P = 0.0012) and the number of parathyroid glands remaining in situ (PGRIS) as an independent risk factor for transient and permanent HPT in Group TT (P < 0.0001 and P = 0.0002, respectively). Conclusions This study suggests that the risk of transient postoperative HPT is lower in patients who undergo completion thyroidectomy. Further independent risk factors for postoperative HPT are female sex and PGRIS score. In light of the growing use of conservative surgery for thyroid neoplasms, these findings could help to adequately plan surgery in order to reduce endocrine complications.
<reponame>sschepis/libsyshj /** * Copyright 2011 Google Inc. * Copyright 2014 <NAME> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.bitcoinj.core; import org.libdohj.core.AltcoinNetworkParameters; import com.google.common.base.Preconditions; import javax.annotation.Nullable; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.OutputStream; import java.math.BigInteger; import java.security.GeneralSecurityException; import java.util.BitSet; import java.util.List; import static org.bitcoinj.core.Coin.FIFTY_COINS; import org.libdohj.core.ScryptHash; import static org.libdohj.core.Utils.scryptDigest; import static org.bitcoinj.core.Utils.reverseBytes; import org.libdohj.core.AuxPoWNetworkParameters; /** * <p>A block is a group of transactions, and is one of the fundamental data structures of the Bitcoin system. * It records a set of {@link Transaction}s together with some data that links it into a place in the global block * chain, and proves that a difficult calculation was done over its contents. See * <a href="http://www.bitcoin.org/bitcoin.pdf">the Bitcoin technical paper</a> for * more detail on blocks. <p/> * * To get a block, you can either build one from the raw bytes you can get from another implementation, or request one * specifically using {@link Peer#getBlock(Sha256Hash)}, or grab one from a downloaded {@link BlockChain}. */ public class AltcoinBlock extends org.bitcoinj.core.Block { private static final int BYTE_BITS = 8; private boolean auxpowParsed = false; private boolean auxpowBytesValid = false; /** AuxPoW header element, if applicable. */ @Nullable private AuxPoW auxpow; /** * Whether the chain this block belongs to support AuxPoW, used to avoid * repeated instanceof checks. Initialised in parseTransactions() */ private boolean auxpowChain = false; private ScryptHash scryptHash; /** Special case constructor, used for the genesis node, cloneAsHeader and unit tests. * @param params NetworkParameters object. */ public AltcoinBlock(final NetworkParameters params, final long version) { super(params, version); } /** Special case constructor, used for the genesis node, cloneAsHeader and unit tests. * @param params NetworkParameters object. */ public AltcoinBlock(final NetworkParameters params, final byte[] payloadBytes) { this(params, payloadBytes, 0, params.getDefaultSerializer(), payloadBytes.length); } /** * Construct a block object from the Bitcoin wire format. * @param params NetworkParameters object. * @param serializer the serializer to use for this message. * @param length The length of message if known. Usually this is provided when deserializing of the wire * as the length will be provided as part of the header. 
If unknown then set to Message.UNKNOWN_LENGTH * @throws ProtocolException */ public AltcoinBlock(final NetworkParameters params, final byte[] payloadBytes, final int offset, final MessageSerializer serializer, final int length) throws ProtocolException { super(params, payloadBytes, offset, serializer, length); } public AltcoinBlock(NetworkParameters params, byte[] payloadBytes, int offset, Message parent, MessageSerializer serializer, int length) throws ProtocolException { super(params, payloadBytes, serializer, length); } /** * Construct a block initialized with all the given fields. * @param params Which network the block is for. * @param version This should usually be set to 1 or 2, depending on if the height is in the coinbase input. * @param prevBlockHash Reference to previous block in the chain or {@link Sha256Hash#ZERO_HASH} if genesis. * @param merkleRoot The root of the merkle tree formed by the transactions. * @param time UNIX time when the block was mined. * @param difficultyTarget Number which this block hashes lower than. * @param nonce Arbitrary number to make the block hash lower than the target. * @param transactions List of transactions including the coinbase. */ public AltcoinBlock(NetworkParameters params, long version, Sha256Hash prevBlockHash, Sha256Hash merkleRoot, long time, long difficultyTarget, long nonce, List<Transaction> transactions) { super(params, version, prevBlockHash, merkleRoot, time, difficultyTarget, nonce, transactions); } private ScryptHash calculateScryptHash() { try { ByteArrayOutputStream bos = new UnsafeByteArrayOutputStream(HEADER_SIZE); writeHeader(bos); return new ScryptHash(reverseBytes(scryptDigest(bos.toByteArray()))); } catch (IOException e) { throw new RuntimeException(e); // Cannot happen. } catch (GeneralSecurityException e) { throw new RuntimeException(e); // Cannot happen. } } public AuxPoW getAuxPoW() { return this.auxpow; } public void setAuxPoW(AuxPoW auxpow) { this.auxpow = auxpow; } /** * Returns the Scrypt hash of the block (which for a valid, solved block should be * below the target). Big endian. */ public ScryptHash getScryptHash() { if (scryptHash == null) scryptHash = calculateScryptHash(); return scryptHash; } /** * Returns the Scrypt hash of the block. */ public String getScryptHashAsString() { return getScryptHash().toString(); } @Override public Coin getBlockInflation(int height) { final AltcoinNetworkParameters altParams = (AltcoinNetworkParameters) params; return altParams.getBlockSubsidy(height); } /** * Get the chain ID (upper 16 bits) from an AuxPoW version number. */ public static long getChainID(final long rawVersion) { return rawVersion >> 16; } /** * Return chain ID from block version of an AuxPoW-enabled chain. */ public long getChainID() { return getChainID(this.getRawVersion()); } /** * Return flags from block version of an AuxPoW-enabled chain. * * @return flags as a bitset. */ public BitSet getVersionFlags() { final BitSet bitset = new BitSet(BYTE_BITS); final int bits = (int) (this.getRawVersion() & 0xff00) >> 8; for (int bit = 0; bit < BYTE_BITS; bit++) { if ((bits & (1 << bit)) > 0) { bitset.set(bit); } } return bitset; } /** * Return block version without applying any filtering (i.e. for AuxPoW blocks * which structure version differently to pack in additional data). */ public final long getRawVersion() { return super.getVersion(); } /** * Get the base version (i.e. Bitcoin-like version number) out of a packed * AuxPoW version number (i.e. one that contains chain ID and feature flags). 
*/ public static long getBaseVersion(final long rawVersion) { return rawVersion & 0xff; } @Override public long getVersion() { // TODO: Can we cache the individual parts on parse? if (this.params instanceof AltcoinNetworkParameters) { // AuxPoW networks use the higher block version bits for flags and // chain ID. return getBaseVersion(super.getVersion()); } else { return super.getVersion(); } } protected void parseAuxPoW() throws ProtocolException { if (this.auxpowParsed) return; this.auxpow = null; if (this.auxpowChain) { final AuxPoWNetworkParameters auxpowParams = (AuxPoWNetworkParameters)this.params; if (auxpowParams.isAuxPoWBlockVersion(this.getRawVersion()) && payload.length >= 160) { // We have at least 2 headers in an Aux block. Workaround for StoredBlocks this.auxpow = new AuxPoW(params, payload, cursor, this, serializer); } } this.auxpowParsed = true; this.auxpowBytesValid = serializer.isParseRetainMode(); } @Override protected void parseTransactions(final int offset) { this.auxpowChain = params instanceof AuxPoWNetworkParameters; parseAuxPoW(); if (null != this.auxpow) { super.parseTransactions(offset + auxpow.getMessageSize()); optimalEncodingMessageSize += auxpow.getMessageSize(); } else { super.parseTransactions(offset); } } @Override void writeHeader(OutputStream stream) throws IOException { super.writeHeader(stream); if (null != this.auxpow) { this.auxpow.bitcoinSerialize(stream); } } /** Returns a copy of the block, but without any transactions. */ @Override public Block cloneAsHeader() { AltcoinBlock block = new AltcoinBlock(params, getRawVersion()); super.copyBitcoinHeaderTo(block); block.auxpow = auxpow; return block; } /** Returns true if the hash of the block is OK (lower than difficulty target). */ protected boolean checkProofOfWork(boolean throwException) throws VerificationException { if (params instanceof AltcoinNetworkParameters) { BigInteger target = getDifficultyTargetAsInteger(); if (params instanceof AuxPoWNetworkParameters) { final AuxPoWNetworkParameters auxParams = (AuxPoWNetworkParameters)this.params; if (auxParams.isAuxPoWBlockVersion(getRawVersion()) && null != auxpow) { return auxpow.checkProofOfWork(this.getHash(), target, throwException); } } final AltcoinNetworkParameters altParams = (AltcoinNetworkParameters)this.params; BigInteger h = altParams.getBlockDifficultyHash(this).toBigInteger(); if (h.compareTo(target) > 0) { // Proof of work check failed! if (throwException) throw new VerificationException("Hash is higher than target: " + getHashAsString() + " vs " + target.toString(16)); else return false; } return true; } else { return super.checkProofOfWork(throwException); } } /** * Checks the block data to ensure it follows the rules laid out in the network parameters. Specifically, * throws an exception if the proof of work is invalid, or if the timestamp is too far from what it should be. * This is <b>not</b> everything that is required for a block to be valid, only what is checkable independent * of the chain and without a transaction index. * * @throws VerificationException */ @Override public void verifyHeader() throws VerificationException { super.verifyHeader(); } }
It’s a device that brings to mind the bodyless hand, Thing, from The Addams Family: a human-like robotic hand, engineered by scientists at the University of Washington, that can learn on its own as it handles a specific task. The hand has five fingers, tendons, joints, over a hundred sensors, and is capable of moving faster than its human counterpart. In a video the university released, the hand can be seen delicately rotating a tube full of coffee beans– an activity that the robot can improve iteratively, the university said. “Hand manipulation is one of the hardest problems that roboticists have to solve,” Vikash Kumar, a doctoral student at the University of Washington and the lead author on a new paper about the robot hand, said in a statement. “A lot of robots today have pretty capable arms but the hand is as simple as a suction cup or maybe a claw or a gripper.” But besides the impressive dexterity of this robotic hand, which is powered by a pneumatic system and cost about $300,000, machine learning algorithms allow it to learn along the way. “What we are using is a universal approach that enables the robot to learn from its own movements and requires no tweaking from us,” Emo Todorov, an associate professor at the University of Washington, said. The next step for the device, the university said, is for the robot hand to not just learn while performing a known task on an object it has worked with before, but to figure out how to manipulate new objects or handle new situations. By Rob Verger More from FoxNews.com Tech:
The California Senate candidate’s new Web video—portraying her primary rival as a demonic sheep—has gone viral, and been viciously mocked. Benjamin Sarlin talks to Fiorina’s camp about the response. Candidates take all sorts of abuse in campaign ads, but it's not every day that they're likened to menacing, red-eyed demon sheep. A surreal Web video from Republican Senate candidate Carly Fiorina making exactly that comparison is taking the Internet by storm this week, becoming one of the most buzzed-about political ads since the presidential election. The video cuts between live and animated footage of docile sheep and images of Fiorina's primary opponent former Representative Tom Campbell, while a voice-over accuses him of being a “FCINO” – a fiscal conservative in name only (a play on “RINO,” or “Republican in name only,” a common rallying cry from conservatives who are unhappy with ideologically suspect members of the GOP). The payoff comes about 2:30 in, when the audience is treated to menacing images of a human—presumably Campbell—dressed as an evil sheep that appears to have clawed its way out of Stephen King's Pet Sematary. “The demon sheep at the end is meant to be a wolf in sheep's clothing,” she said. “That’s the whole point." The ad, which has already garnered over 100,000 views on Youtube, is attracting endless mockery on social media sites and from Fiorina's political opponents, with #demonsheep becoming a trending topic on Twitter. One of Fiorina's opponents, Chuck Devore, quickly started a mock Web site, demonsheep.org, asking Californians to stop these “Jawa-like, Terminator-esque, Demon Sheep from taking over California,” while Campbell happily passed the ad along to supporters in a fundraising email. "This bizarre video is just the latest sign that the Fiorina campaign is in complete meltdown mode,” a spokesman for Campbell, James Fisfis, told The Daily Beast. “Tom Campbell will stick to talking about how to rein in federal spending. We're happy to let Carly Fiorina talk to California voters about demon sheep." Or is Fiorina's campaign crazy like a fox? A spokeswoman for Fiorina, Julie Soderlund, told The Daily Beast that they were “energized” by the ad's response, citing its YouTube traffic as “a great success” and said that Campbell “played right into our hands” by sending out the video himself. The ad's myriad critics are an asset—helping to spread the word—rather than a hindrance, this argument goes. The man behind the Web video is none other than Fred Davis, the Republican ad guru who cooked up John McCain's legendary “Celebrity” ad that juxtaposed Barack Obama with images of Paris Hilton and Britney Spears in 2008. Don’t be surprised if the Fiorina camp does more in this vein in the weeks ahead. “We can expect to see equally if not more shocking Web-based ads or videos coming from our campaign moving forward,” Soderlund said. Critics have suggested that sheep might not be the best metaphor for the ideal sort of Republican—since it typically connotes politicians who march, unthinkingly, in lockstep with their leaders. But Soderlund says the naysayers have it wrong. And for those wondering how to pronounce “FCINO” or whether to spell it out in its entirety, the campaign has settled on an official pronunciation.
NagareDB: A Resource-Efficient Document-Oriented Time-Series Database

The recent great technological advance has led to a broad proliferation of Monitoring Infrastructures, which typically keep track of specific assets over time, ranging from factory machinery and device locations to even people. Gathering this data has become crucial for a wide number of applications, like exploration dashboards or Machine Learning techniques, such as Anomaly Detection. Time-Series Databases, designed to handle these data, grew in popularity, becoming the fastest-growing database type since 2019. In consequence, keeping track of and mastering those rapidly evolving technologies became increasingly difficult. This paper introduces the holistic design approach followed for building NagareDB, a Time-Series database built on top of MongoDB, the most popular NoSQL database, which is typically discouraged in the Time-Series scenario. The goal of NagareDB is to ease access to three of the essential resources needed for building time-dependent systems: Hardware, since it is able to work on commodity machines; Software, as it is built on top of an open-source solution; and Expert Personnel, as its foundation database is considered the most popular NoSQL DB, lowering its learning curve. Concretely, NagareDB is able to outperform MongoDB's recommended implementation by up to 4.7 times when retrieving data, while also offering stream ingestion up to 35% faster than InfluxDB, the most popular Time-Series database. Moreover, by relaxing some requirements, NagareDB is able to reduce disk space usage by up to 40%.

Introduction

The great progress in the technological field has led to a dramatic increase in deployed monitoring devices. Those devices, commonly called sensors, are employed in a broad number of scenarios, ranging from traditional factories and commercial malls to the largest experiment on Earth. The continuous polling of the sensors' readings is considered beneficial, as it helps not only in supervising the status of the monitored assets, but also in understanding the behavior of the monitored systems by collecting insights. Moreover, sensor data started being used in Data Analysis and Machine Learning approaches such as Industrial Predictive Maintenance, in order to anticipate future failures, and Anomaly Detection, aimed at identifying rare events or observations. In general, in order to optimize the value extracted from data, it is necessary to keep a large historical record. In consequence, Monitoring Infrastructures are expected to work closely with database management systems, whose goal is to efficiently store and manipulate data for its later retrieval. While General-purpose database management systems (GDB), such as Relational Database Management Systems, have historically been capable of managing a wide range of scenarios, monitoring workloads place particular demands on three essential resources:

Hardware: Handling real-time data requires computing resources in line with the Monitoring Infrastructure's size. Moreover, as time passes and more data is gathered, the storage requirements grow accordingly. A common approach to tackle this problem is to just keep a fixed amount of data, following a FIFO method: as new data is stored, the oldest is removed. This is therefore a double-edged sword, since it prevents storage costs from growing, but it also implies that data is being discarded, and potentially relevant information is lost.

Software: While some databases are open-source, most popular databases offer both a limited free version and a commercial enterprise edition.
However, license pricing is only one of the many considerations to take into account: for example, large-scale Monitoring Infrastructures typically require the software to be horizontally scalable, that is, able to scale out by adding more machines.

Expert Personnel: Following Monitoring Infrastructures' rising interest, TSDB became the fastest-growing database category, leading to a plethora of new databases. Furthermore, each database typically has a different query language and a completely different way of thinking associated with its usage. However, Data Engineers are expected to select and master the most appropriate solution for each situation. Consequently, experts are not easy to find or train, nor cheap to hire.

The main contribution of this paper is towards relieving the problems explained above, helping in the democratization of Monitoring Infrastructures by lowering the barriers to employing a Time-Series Database. Concretely, we demonstrate and benchmark the novel approach followed for implementing NagareDB, a resource-efficient Time-Series database built on top of MongoDB, a database typically discouraged in the Time-Series scenario. NagareDB intends to ease access to the essential resources needed: first, by working on top of an open-source and broadly known database such as MongoDB, which relieves SMEs from licensing costs, and personnel from learning yet another database from scratch; secondly, by offering a fair trade-off between efficiency and requirements, which makes it able to be deployed on commodity machines while offering outstanding performance. More precisely, our experiments show that NagareDB is able to outperform MongoDB's time series data model, providing up to a 4.7x speedup when querying, while also offering a 35% faster synchronous real-time ingestion in comparison with InfluxDB, the most popular Time Series Database, whose non-scalable version is open source. Moreover, thanks to the optional usage of a naive-but-efficient data type approximation, NagareDB is able to provide further querying speedup while reducing disk space consumption by up to 40%, which makes it able to store an almost 1.7 times longer historical period in the same disk space.

Solutions Categorization

Concerning Time-series data management, databases can be efficiently implemented following a wide range of data models, such as key-value or column-oriented data models. However, this research focuses on their outcomes and purpose, regardless of their internal implementation. Consequently, Time-series databases could be classified either as General-Purpose DBs or as Purpose-Built Time-Series DBs, which, in turn, could be considered either Native TSDBs or Adapted TSDBs.

General-purpose databases (GDB). GDB intend to meet the needs of as many applications as possible. In consequence, GDB are designed to be independent with regard to the nature of the data to be handled. Thanks to this Swiss army knife behavior, GDB are typically the most popular DBMS, which makes it easier to find expert personnel in their usage. However, this flexibility is gained at the expense of efficiency, since GDB are not tailored to benefit from the specifics of any particular scenario. Hence, system performance is limited, and strongly attached to the design decisions made by the database engineers when fitting the particular scenario into the GDB.

Native Time-Series databases (N-TSDB).
N-TSDB are DBMS that are optimized for storing and retrieving time series data, such as the data produced by sensors or smart meters. As TSDB are tailored to the specific requirements of Time series data, they can be offered as an out-of-the-box solution, meaning that not many design decisions have to be taken, speeding up deployment time. However, as a consequence of their intrinsic specialization, their popularity is substantially reduced in comparison to General-purpose DBMS.

Adapted Time-Series databases (A-TSDB). This specific case of TSDB does not employ a new database engine, but borrows one from a GDB. Specific functionalities and design decisions, with respect to the time series nature, have been implemented on top of a GDB, offering the outcome as an out-of-the-box solution. Thus, the newly created database loses the ability to handle scenarios that its foundation GDB was typically able to. As the foundation data model is inherited from the GDB, the optimization approaches that can be performed are limited. Thus, A-TSDB rely on the popularity and robustness of the chosen GDB, while providing a scenario-optimized solution.

Time-Series Properties

Time-Series DBs are tailored to the specifics of Time-Series data, which empowers their efficient data handling. Some of the most fundamental properties of time-series data are:
Triple-based Data Model. Time series data is mainly composed of three parts: the subject to be measured (e.g., sensor ID), the measurement, and the timestamp at which the measurement was read.
Smooth and continuous stream. The writing of time series data is relatively stable, and its generation is typically done at a fixed time frequency.
Immutable data. Once data is read and written, it is never updated, except in case of manual revisions.
Decaying query probability. Recent data is more likely to be queried. Thus, as newer data is ingested, the older data has fewer chances of being consulted.

TSDBs Requirements

Time-series databases can be implemented following a wide range of approaches in order to favor one feature or another, or specific use-case requirements. For example, a given TSDB could be designed in order to maximize the ingestion speed, while another might intend to speed up data retrieval. While optimizing several of these requirements at the same time might be possible, sometimes it is necessary to find a trade-off. In addition, some non-functional requirements might depend on the target users or even the business model of the database developers. This research classifies TSDB requirements according to the resource that they benefit or compromise the most, as explained in Section 1. Concretely:
Software. Requirements regarding software characteristics or database functionalities, involving query or data types, allowed operations, etc.
Hardware. Requirements regarding the ability of the database to reduce or optimize the machine(s) speed or resource usage.
Expert personnel. These requirements describe the different ways the user is able to interact with the database, including the ease of its usage or the compatibility of the database with the user's environment.
Thus, some of the most relevant requirements on time series databases are:
- Continuous calculations. The TSDB is able to resolve functions continuously, taking into account the recently ingested data and the historical information, keeping the outcomes internally. An example is the continuous calculation of the last-hour average value for a given item.
- Time Granularity.
It defines the smallest time unit precision in which a timestamp can be stored and interpreted. For example, a given TSDB might only be able to store timestamps up to the second, being unable to keep information regarding the millisecond in which the data was generated.
- Aggregation. When aggregating, the database is able to group multiple values and perform operations over them, returning a single result. The retrieval of the minimum/maximum value during a given time period is an example of aggregation.
- Downsampling. It is the process of reducing the sampling rate of a given data source or sensor, taking into account a specific time granularity or sample interval. For example, if the database stored sensor readings on a minute basis, it should be able to retrieve the data in hourly intervals, showing, for example, the average of the sensor readings for each hour.
- License. A license regulates, among other things, who can use the database and how. Since this research focuses on being resource-efficient, the price or cost of licenses for using a given database is especially relevant.
- Distribution/Clusterability. Scalability and load balancing features are able to compensate for machine failures, preventing system downtime. Moreover, by scaling horizontally, the database is able to increase its storage or its performance by adding further nodes or machines to the cluster.
- Retention Policy. In a TSDB, a data Retention Policy specifies for how long data should be kept in the system before being deleted. The possibility of setting up retention policies is crucial for TSDB, as keeping the data forever is typically not affordable for most users, since hardware storage might be limited and expensive.
- Storage Approach and Compression Algorithms. The approach followed for implementing the data persistence will directly affect the storage usage of the database and its compression capability. For example, databases implemented following column-oriented data models are likely able to compress data more efficiently, by means of Run Length Encoding. Moreover, each database employs a given compression algorithm, reducing either its disk usage or its compression time, or finding a trade-off between them.
- Ability to support highly concurrent writes. Data is typically ingested at a regular pace, following the Smooth and continuous stream property explained in Section 2.2. However, it is important for the database to be able to ingest it as fast as possible, as this enables a wider range of scenarios, including more demanding ones.
- Ability to retrieve data speedily. Queries should be answered as fast as possible, as the TSDB might be the cornerstone of further systems or operations, such as data exploration or visualization, data analysis, or machine learning techniques such as predictive maintenance or anomaly detection.
- Database and Query Language Popularity. As a database raises more interest, it becomes easier to find expert personnel in its usage, clear documentation, and even courses or training material. The same effect happens with the query language: while some databases use their own language, others mimic, inherit, or support a more popular, external query language in order to facilitate querying.
- Interfaces. Interfaces can be used by programming languages to communicate with a database. Thus, the more interfaces a database provides, the easier it becomes to adapt to personnel expertise.
- Operating Systems.
As happens with interfaces and query languages, users might be specialized in a given operating system. Moreover, some companies could promote the usage of a given operating system. Thus, the more operating systems the database can be deployed on, the more possibilities it will have to fit its users' environments.

Related Work

The problem of handling time-series data has been addressed by employing or developing different database solutions lying in one of the three categories explained in Section 2.1. Concretely, DB-ENGINES alone, the Knowledge Base of Relational and NoSQL Database Management Systems, keeps track of more than 35 Time Series Databases, such as InfluxDB, kdb+, Prometheus, Graphite, or TimescaleDB. While their shared goal is to empower data management, their approaches, strengths and weaknesses are different, with InfluxDB being the most popular native time-series database and TimescaleDB the most popular adapted time-series database. Thus, some of the most relevant technologies related to this research are:

MongoDB. It is a general-purpose open-source database written in C++. It offers an extremely flexible data model, since its base structure is document-oriented, that is, made out of JSON-like documents. These documents act like independent dictionaries where the user can freely add or remove fields, freeing the database from up-front constraints. Thus, there is no need to set up or alter any enforced global schema, as would happen in relational databases. However, this flexibility implies constant metadata repetition, such as the key of the key-value JSON dictionary pairs. This negative impact is partially palliated by its data compression mechanisms. It is able to scale horizontally freely, by means of shards and replicas, which makes it possible to create a database cluster composed of commodity machines. Regarding its interaction methods, it has its own query language and a very wide range of interfaces to work with. It is able to perform continuous queries when retrieving new values by means of change streams, and to aggregate and downsample data. Last, it can be installed on Linux-based systems, OS X, and Windows, which makes it able to reach a great number of users. However, although it is considered the most popular NoSQL DBMS, its usage in the time-series domain has typically been discouraged due to its time-expensive query answering, and its timestamps are limited to milliseconds, which might be insufficient for high-demanding use cases. Last, although it provides optional retention policies, in the form of capped collections, they are tied to the insertion date of a given sensor reading, and not to its generation date, which might be problematic for some time-series use cases, in case of delays or non-chronological insertions.

InfluxDB. It is a native purpose-built time-series database written in Go. Since 2016, it has been considered the most popular TSDB. It supports plenty of programming languages and two different querying approaches: Flux, its own query language, and InfluxQL, an SQL-like language, each having different limitations. It is able to efficiently perform a wide range of operations, such as continuous querying, downsampling and aggregations. Moreover, it is able to efficiently reduce and limit disk usage, by means of its compression mechanisms and its data retention policy. However, it provides a commercial enterprise version and an open-source version, with some limitations.
Among others, the open-source version is not able to grow horizontally, so the deployment is limited to one single machine, not being able to carry out data sharding or replication, which strongly limits the performance while also reducing system availability and fault tolerance. Regarding its potential users' operating systems, it can be installed on Linux-based systems and OS X, but not on Windows itself. Last, although it is the most popular TSDB, its popularity score is almost twenty times smaller than that of other general-purpose databases, such as MongoDB.

TimescaleDB. It is an adapted open-source TSDB built over PostgreSQL, one of the most popular general-purpose DBMSs. Thus, it inherits PostgreSQL's broadly known SQL query language and its powerful querying features and interfaces, which lowers its learning curve. Moreover, it is able to run on Windows, OS X, and Linux, which makes it able to reach a wide number of potential users. However, due to the limitations of the underlying rigid relational data model, its scalability might be compromised, and its performance might vary depending on the query. Moreover, as its underlying data model is row-oriented, its disk-usage consumption is significantly greater than that of other TSDBs, such as InfluxDB, and its compression mechanisms are not likely to demonstrate their full potential.

To sum up, on the one hand, MongoDB is a general-purpose and open-source database, but despite being considered the most popular NoSQL DBMS, its usage in the time-series scenario has been discouraged. On the other hand, TimescaleDB relies on a well-known SQL solution and offers good optimizations, but generally worse than native TSDBs. Last, InfluxDB offers outstanding performance, but its usage is limited to Linux-based systems and OS X, while its full version is commercially licensed, which reduces the number of users that could benefit from it. In addition, as it is a native TSDB, it becomes necessary to learn a new technology from scratch. NagareDB's goal is to provide a fair trade-off between efficiency and resource demands, offering an optimized TSDB solution that relies on a moldable, open-source, and well-known NoSQL general-purpose DBMS.

Design Approach

This section describes the holistic and most relevant design decisions materialized in NagareDB, with the goal of creating an efficient and balanced adapted time-series database. In addition, it states the main differences between the MongoDB Recommended Implementation for Time-Series (referred to as MongoDB-RI) and other related solutions.

Data Model

As in any adapted database, the adaptability of the overlying data model is limited by the malleability of the foundation data model. Taking this into account, our time-series data model approach has the following key features:

Medium-sized time-shaped bucketing. Sensor readings are packed together in medium-sized buckets or documents, following the nature of time. Concretely, a document clusters together the readings of three consecutive units of time. For instance, if the frequency unit in which a sensor is reading values is set to minute (first unit), all readings belonging to the same hour (second unit) will be packed together, and afterwards bucketed into a daily document (third unit). This structure can be seen in Figure 1: the document shown represents the 15th day of month 02/2000 (third time unit), stacking together all 24 hours of the day (second time unit), where each hour has 60 readings, one per minute (first/base time unit). A simplified sketch of such a document is shown below.
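The following is a minimal, illustrative sketch (in Python) of how such a daily bucket document could look, based on the description above and on Figure 1; the field names (sensorID, day, readings) are assumptions for illustration, not NagareDB's exact schema.

# Illustrative sketch only: one daily bucket document for one sensor,
# pre-allocating 24 hourly arrays of 60 minutely slots (field names assumed).
daily_bucket = {
    "_id": "S0001_2000-02-15",      # data-driven identifier: sensor + reading day
    "sensorID": "S0001",
    "day": "2000-02-15",
    "readings": {                   # rigid, pre-defined structure (schema-full)
        str(hour): [None] * 60      # one fixed-size slot per minute
        for hour in range(24)
    },
}
# A reading taken at 10:37 would be written into readings["10"][37], so the
# minute is implicit in the array position rather than stored as an explicit key.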
By contrast, MongoDB-RI packs readings together in small-sized buckets, taking just two time units. This can be seen in Figure 2, where a document represents just an hour and its minutely readings. While this could be efficient for short-ranged queries, it severely penalizes medium- and high-ranged historical queries, as the storage device is asked to retrieve a large number of documents that could be scattered. Since long queries are more resource-consuming than small ones, this approach is considered more balanced.

Time rigidity. Following the smoothness property explained in Section 2.2, sensor readings are organized via a rigid schema-full approximation, meaning that there is a pre-defined rigid structure for their storage, where each reading has a specific allocation and position. This bucket structure, consisting of a dictionary of arrays, is created as a whole when a sensor reading belonging to it is received. This structure can be seen in Figure 1, where the document representing the 15th day of 02/2000 has several pre-defined and fixed-size data structures. As one of the most important features of MongoDB is its schema-less design, enforcing a schema could be seen as counter-intuitive at first sight; however, imposing a structure provides two important benefits, perfectly suited for time-dependent data: first, it allows storing time-sorted data on disk and, second, it allows leveraging implicit information inherent to the structure design, such as the position within the value array. Conversely, MongoDB's recommended data model (Figure 2), following its schema-less nature, keeps sensor readings in dictionaries, where the key provides recurrent explicit information about time (e.g., the minute when the reading was performed), and the value contains the sensor reading itself.

Sensor elasticity. With respect to the sensor dimension, it follows the schema-less approximation inherent to MongoDB. Consequently, new sensors can be incorporated or removed in an elastic way, without having to alter any global schema, as could happen in rigid data models such as relational ones.

Pre-existing timestamps. Every sensor reading is implicitly assigned to an already existing timestamp. Thus, timestamps are not calculated on the fly, as happens in MongoDB-RI.

Data-driven bucket identification. Each bucket is identified and sorted by the sensor's reading time. By contrast, MongoDB-RI identifies and sorts buckets by metadata, such as insertion time. However, in time-series scenarios, sorting by insertion time is not necessarily equal to sorting by data-generation time, as data could be delayed or even ingested disorderly.

Access Structures and Layered Bucketing

Sensor readings, containerized in buckets as explained in Section 4.1 and in Figure 1, are hash-distributed and grouped, according to time, in so-called MongoDB collections. More precisely, a MongoDB collection, containing a set of documents sorted by a B-Tree, is intended to keep the data produced in a given month of a specific year. For example, as can be seen in Figure 1, the bucket containing the readings of sensor 0001 for the day 15 February 2000 is classified in the collection Month 2000_02. This bucketing approach intrinsically enables, on the one hand, the possibility of performing efficient lazy-querying, eventually performing several small queries (one per bucket) instead of a big one (whole database query).
Moreover, querying can be performed by means of chained queries, that is, performing several time-consecutive queries, relieving the system from searching or holding data that is not yet needed. On the other hand, when querying speed is crucial, this bucketing approach also enables efficient parallel querying, as data is already naturally grouped, making it possible to perform several queries against different buckets at the same time. Last, benefiting from both the decaying and immutability properties of time series (Section 2.2), this bucketing approach allows the natural compaction of already filled-up buckets, which are not likely to be updated and which also have fewer chances of being queried.

Using a more granular bucket distribution, such as grouping data by its generation day instead of its generation month, while tempting for high-granularity data, is currently discarded in this approach, but subject to future reconsideration. This is due to the fact that MongoDB's WiredTiger Storage Engine requires the operating system to open two files per collection, plus one per each additional index, which could overwhelm the operating system's open files table. This is, actually, a recurrent problem found in InfluxDB, which makes it necessary for database administrators to apply patches, for example with the ulimit command. However, as NagareDB is intended to be a fast-deploying and resource-compromised solution, this self-imposed limitation was preferred.

By contrast, MongoDB-RI's strategy is to keep data stored as a whole, accessing it via a single B-tree. However, this B-tree is intended to be kept in RAM, independently of the time range of the query to be performed. While this benefits efficiency, it potentially misallocates RAM resources. Conversely, the approach proposed in this research intends to save resources by selectively loading and replacing small indexes based on the time range of the queries, following a Least Recently Used approach.

Retention Policies

Retention policies are crucial in time-series databases, as the amount of data to be kept is limited by the available resources. Concretely, retention policies describe for how long a record needs to be stored in the system. In order to tackle this problem, NagareDB proposes a flexible retention policy strategy, with the aim of finding a good trade-off between resource-saving and efficiency. Concretely, the flexible retention policy is configured with a maximum and a minimum retention time. Thus, data will eventually be bulk-deleted at some point between the minimum and the maximum allowed time. The main advantage of this strategy is its instant and inexpensive bucket delete operations. For instance, if the retention time is set in terms of months (so, the number of buckets), the oldest data can be deleted as a whole by dropping the monthly bucket (a minimal sketch of such a monthly drop is shown at the end of this subsection). By contrast, in fixed retention policy strategies, such as those of MongoDB-RI or InfluxDB, when a new record is received, the oldest one is removed, meaning that each insert operation potentially triggers an implicit delete operation, which reduces insert performance while also overloading the system. Moreover, MongoDB's retention policy is based on capped collections and takes into account the last inserted record. However, in time-series scenarios, insertion time order is not necessarily equal to data-generation order, as data could be received disorderly.
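As a minimal, illustrative sketch of the flexible retention policy (not NagareDB's actual code), the monthly-collection naming and the database name below are assumptions; only standard pymongo calls are used.

# Illustrative sketch: enforcing a flexible retention policy by dropping whole
# monthly collections at once (collection naming "YYYY_MM"-like is assumed).
from pymongo import MongoClient

def enforce_retention(db, max_months_to_keep):
    monthly = sorted(c for c in db.list_collection_names() if c[:4].isdigit())
    for name in monthly[:-max_months_to_keep]:
        db.drop_collection(name)   # instant, inexpensive bulk delete of the oldest month

client = MongoClient("mongodb://localhost:27017")
enforce_retention(client["nagare"], max_months_to_keep=120)   # e.g., keep roughly 10 years

Dropping a whole collection avoids the record-by-record deletes that a fixed retention policy would trigger on every insert.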
Data Types

MongoDB has a wide number of available data types, which can be inherited by any specific-purpose database built on top of it. In spite of the wide variety of data types provided by MongoDB, decimal values always request at least 64 bits for storage. While the usage of 64-bit double-precision decimals might be required for some high-precision scenarios, more modest and resource-limited scenarios might find a more balanced solution by limiting the number of decimal digits, using a 32-bit data type for their representation. Furthermore, this would allow users to store the same historical period of data using, theoretically, up to half of the disk storage resources or, said in another way, to keep up to two times more historical data in the same disk space. As 32-bit decimals are not implemented in MongoDB, and taking into account that one of the main goals of this research is to provide a resource-balanced solution, the proposed approach includes one further, optional, and naive data type, understood as an on-query-time limited decimal. This naive data type relies on two different data types:

32-bit signed Integer: It keeps the decimal number without the decimal point. Thus, the integer part and the fractional part of the number are stored together, without separation.

BSON Document: It is a metadata configuration document, functioning as a dictionary, that keeps, for each sensor, the desired maximum number of decimal digits. Also, it keeps a default setting that will be used if no specific configuration is set for a given sensor.

This naive-but-effective approach is intended to enable the storage of decimal numbers in 32 bits, while limiting the foreseen overhead produced by the type casting (a minimal illustrative sketch of this encoding is given below). Data rounding is automatically done at insertion time, and the consequent type casting is accordingly performed during ingestion and query time. Taking into account that a 32-bit signed integer is able to represent a maximum value of 2^31 − 1 and a minimum value of −2^31, this on-query-time limited decimal is able to represent, for example, a maximum number of 21,474.9999 when using four decimals, given that the number of decimal digits has to be static and each decimal digit should be able to range from 0 to 9. This self-imposed limitation is optional, and targeted at sensors with a low or medium order-of-magnitude variability. Concretely, its target scenarios are the ones in which Monitoring Infrastructures set up a retention policy, as explained in Section 4.3. For example, for resource-limited scenarios involving anomaly detection or predictive maintenance, in which real anomalies or failures rarely occur, it will likely be more relevant to keep more historical data, if the sensor data fits in this naive decimal data type, than to keep more decimal digits.
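The following is a minimal sketch of the on-query-time limited decimal described above; it is not NagareDB's actual code, and the dictionary standing in for the BSON configuration document, as well as the helper names, are assumptions for illustration.

# Illustrative sketch: a reading is scaled to a 32-bit integer at ingestion time and
# scaled back when queried, using a per-sensor decimal-digit setting (names assumed).
decimal_digits = {"default": 4, "S0001": 2}   # stands in for the BSON configuration document

def encode(sensor_id, value):
    digits = decimal_digits.get(sensor_id, decimal_digits["default"])
    scaled = round(value * 10 ** digits)      # rounding happens at insertion time
    assert -2**31 <= scaled <= 2**31 - 1, "value outside the 32-bit range"
    return scaled

def decode(sensor_id, stored):
    digits = decimal_digits.get(sensor_id, decimal_digits["default"])
    return stored / 10 ** digits              # type cast performed at query time

assert decode("S0001", encode("S0001", 21.47)) == 21.47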
Regarding distribution and clusterability, it is inherited from MongoDB, provided via shards and/or replicas. By contrast, InfluxDB is only able to grow horizontally in its commercial version.

Compression

MongoDB uses snappy compression by default, which intends to minimize the compression time. However, NagareDB is set up to use Zstandard compression. ZSTD is able to offer higher compression rates, while slightly reducing query performance. However, as one of the main objectives of the proposed approach is to reduce resource requirements, this option is preferred.

Timestamps

MongoDB's date type is limited to milliseconds. Thus, NagareDB is also limited to it. While it would be possible to create a new data type for storing nanoseconds, we consider it enough to keep up to milliseconds, as NagareDB is intended to provide a good trade-off between resources and the features offered, not specifically targeting the most demanding use cases. Conversely, InfluxDB uses nanosecond precision. This makes InfluxDB a more time-precise database, but it also implies that in a not-that-precise scenario it will keep unnecessary date information.

Query Parallelization

The bucketing technique explained in Section 4.2 enables intrinsic query parallelization, as data is already equally distributed in buckets. However, as NagareDB is intended to provide a good resource-outcomes compromise, query parallelization is only enabled for queries whose nature is CPU-DISK balanced, and limited to half of the available threads. For instance, queries that request a historical period will not use query parallelization, as their CPU usage is low. Conversely, queries involving data aggregation, which require higher CPU usage, are parallelized.

Time-Series Granularity and Frequency

NagareDB is intended for discrete time series, with stable frequency and round timestamps, following the smooth property explained in Section 2.2. For instance, users are expected to define the baseline granularity for each sensor, and/or a default one. Thus, when receiving a reading, the timestamp will be truncated to the desired granularity. By contrast, InfluxDB does allow non-truncated timestamps, but strongly recommends truncating them, as otherwise efficiency drops significantly. This self-imposed limitation provides extended performance, while also preventing users from inefficient practices.

Experimental Setup

The experimental setup is intended to enable the evaluation of the performance of NagareDB in moderate-demand use cases, as well as the effects of implementing more lightweight data types, such as the one explained in Section 4.4. Concretely, the experimental setup is performed against two different solutions: first, the MongoDB recommended implementation (MongoDB-RI), as a reference point; second, InfluxDB, as it is considered the most popular time-series database.

Virtual Machine

The experiment is conducted in a Virtual Machine (VM) that emulates a commodity PC, in accordance with NagareDB's goals, as explained in Section 1.

Data Set

In order to generate synthetic time series, we simulate a Monitoring Infrastructure based on the industrial settings of some external collaborators of our institution. Concretely, it is composed of 500 sensors, equally segmented into five different monitoring areas. Each sensor is globally identified by a number between 1 and 500, and ships data every minute. Sensor readings (R) follow the trend of a Normal Distribution with mean μ and standard deviation σ, where each sensor's μ and σ are uniformly distributed. The simulation is run until obtaining a 10-year historical period, from year 2000 until year 2009, both included, and no retention policy is set. In consequence, the total amount of triples is 2,628,000,000. Other configurations, such as ones including a bigger number of sensors, are likely to provide similar results. This is due to the fact that seek times in SSD devices are typically a constant latency, in contrast with HDD devices. This effect makes HDD devices discouraged for database applications, to the extent that not even InfluxData has tested InfluxDB on them.
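As a minimal, illustrative sketch of the synthetic workload just described: the per-sensor ranges of the uniform distributions for μ and σ are not stated in the text, so the bounds below are assumptions; the ingestion call is left as a placeholder.

# Illustrative sketch of the synthetic data set (parameter ranges assumed).
import numpy as np

rng = np.random.default_rng(0)
n_sensors = 500
mu = rng.uniform(0, 100, n_sensors)     # per-sensor mean, assumed range
sigma = rng.uniform(1, 10, n_sensors)   # per-sensor standard deviation, assumed range

minutes_per_year = 60 * 24 * 365
years = 10
for t in range(minutes_per_year * years):      # one reading per sensor per minute
    readings = rng.normal(mu, sigma)           # R ~ N(mu, sigma) for each of the 500 sensors
    # ... ingest the 500 (sensor, timestamp, value) triples into the database under test ...

# 500 sensors x 525,600 minutes/year x 10 years = 2,628,000,000 triples in total,
# matching the figure reported above.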
Evaluation and Benchmarking

This section demonstrates the performance of NagareDB in comparison to other database solutions, as explained in Section 5. Concretely, the evaluation and benchmarking is done in three different aspects: Storage Usage, Data Retrieval Speed, and Data Ingestion Speed. Thanks to this complete evaluation, it is possible to analyze the performance of the different solutions during the persistent data life-cycle, with regard to the database scope: from being ingested, to being stored and, lastly, retrieved.

Storage Usage

After ingesting the data, as explained in Section 5.3, the disk space usage of the different database solutions is as shown in Figure 3. On the one hand, MongoDB-RI is the implementation that requires the most disk space. This could be explained by its schema-less implementation and by its snappy compression mechanism, intended to improve query performance while reducing its compression ratio, following the implications explained in Section 4.5.2. On the other hand, both InfluxDB and NagareDB-64b require the same amount of disk space, which could be explained by their shared pseudo-column-oriented data representation and by their powerful compression mechanisms. Last, NagareDB-32b is able to reduce the disk usage by 40%, in comparison to both InfluxDB and NagareDB-64b, thanks to its lightweight data type, explained in Section 4.4. In consequence, NagareDB-32b is able to store an approximately 1.7 times bigger historical period in the same disk space.

Data Retrieval

The testing query set is composed of 12 queries (Table 1), intended to cover a wide range of use-case scenarios, while providing insights into the databases' performance and behavior. They lie in four different categories: historical, timestamped, aggregation, and inverted querying. To ensure the fairness of the results, the cache is cleaned and the databases rebooted after the evaluation of each query.

Historical Querying

As can be seen in Figure 4, NagareDB is able to retrieve historical data up to 5 times faster than MongoDB-RI, while also outperforming InfluxDB in every historical query. In addition, the plot shows some interesting insights: MongoDB is faster when retrieving small historical ranges than when retrieving big ones. Concretely, NagareDB speeds up MongoDB by a factor of 2.5 in daily queries (Q1, Q4), while doubling the speedup when requesting a larger historical period. In contrast, InfluxDB performs better when retrieving more historical data. NagareDB-32b is generally faster than NagareDB-64b, but the difference is almost negligible in this category. This is due to the fact that, while it handles smaller data, it also performs internal type castings, as explained in Section 4.4. NagareDB slightly reduces its performance when retrieving sparse data (Q7). This effect also occurs in InfluxDB, but more noticeably.
Timestamped Querying

Timestamped querying requests all sensor values for a given timestamp. Hence, it does not benefit from the columnar design that NagareDB and InfluxDB follow, being penalized by it. Thus, MongoDB, based on small buckets, is able to outperform them. However, as can be seen in Figure 5, NagareDB is able to outperform InfluxDB using any of its data types. Moreover, NagareDB-32b is able to provide much better performance than NagareDB-64b. This is due to the fact that the data buckets that have to be loaded into RAM are much smaller, and only one value is requested per bucket, so the data type parsing overhead is greatly reduced. Finally, it is important to take into account that this kind of query is answered fast, even in the case of InfluxDB, which shows the worst speedup. Concretely, NagareDB-32b only needs 0.065 seconds to answer the query. Thus, despite the fact that MongoDB outperforms all three alternatives, the response times are still perfectly acceptable.

Aggregation Querying

Both NagareDB and InfluxDB greatly surpass MongoDB-RI. This behavior is even more notable in downsampling queries (Q9-10), as seen in Figure 6. More precisely: InfluxDB is more efficient when performing queries that involve big amounts of data but whose outcome is calculated by reducing it, such as downsampling queries. However, when the result consists of one single value, such as minimum-value detection queries (Q11), NagareDB is able to outperform it. NagareDB-32b outperforms NagareDB-64b as NagareDB-32b is able to read values slightly faster, without the negative impact of performing numerous type castings. Concretely, unlike when querying historical data, NagareDB-32b needs to process all the data, but it is only required to perform one type casting per every 60 sensor readings (R) (on the average result, in this case), as the base granularity is minute, but the target granularity is hour:
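The expression that originally followed the colon above did not survive extraction; based on the surrounding text, the implied relation appears to be simply one cast per aggregated hour, i.e.

    castings = readings / 60,

so, for instance, downsampling one year of minutely readings from a single sensor (525,600 readings) would require only 8,760 type castings, one per hourly average.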
Figure 6. Aggregation querying response times.

Inverted Querying

In inverted querying, databases are asked to read the sensor values in a given range, but to only process those that meet certain conditions. In this case, databases are requested to retrieve outlier triples, as shown in Table 1. This kind of query can potentially benefit from inverted indexes. These indexing structures are meant to store a mapping from the value itself to its location (or timestamp). Moreover, they are typically sorted, so finding the timestamps corresponding to a range of values would be rapidly answered. However, while these indexes are available in MongoDB, they are not present in InfluxDB. Despite the exceptional performance that inverted indexes could provide, they are not included by default in NagareDB. This is due to NagareDB's goals with respect to resource-saving, as inverted indexes can require high amounts of disk space and RAM. Thus, queries on values have to scan all sensor readings in the specified time range, which is the same behavior as InfluxDB, which also lacks these indexes. Regarding its comparative speedup against MongoDB-RI, as can be seen in Figure 7, both NagareDB and InfluxDB are able to provide a speedup greater than 10, with InfluxDB being the fastest. Also, NagareDB-32b slightly outperforms NagareDB-64b, as it has to carry out type castings only on a small subset of the data.

Figure 7. Inverted querying response times.

Summary

The experiments show that NagareDB is able to greatly outperform the MongoDB Recommended Implementation, our baseline, in 11 out of the total 12 queries. Concretely, it extensively outperforms MongoDB when performing middle or high time-ranged queries, which are the most time-consuming ones. However, when queries involve a tiny amount of consecutive readings for a big number of different sensors, MongoDB-RI is able to retrieve results faster. This is mainly because of its small-sized bucketing approach and its lightweight compression mechanisms. Nonetheless, as can be seen in Table 2, these kinds of queries are answered really fast, in a tenth of a second, even in a worst-case scenario such as the one provided by Query 8, which turns this drawback into an unimportant obstacle for most scenarios. In comparison to InfluxDB, the most popular time-series database, NagareDB has been shown to be faster when retrieving historical data and timestamped data, while falling a little behind when performing aggregation queries and inverted queries.
Performance Metrics and Set Up

The simulation is run along with 1 to 5 ingestion jobs, each handling an equal number of sensors, while recording the average writes/second metric. It is performed simulating a synchronized, distributed and real-time stream-ingestion approach, meaning that the sensors' data streaming is decentralized, data is stored when received, without waiting, and each write is not considered finished until the database acknowledges its correct reception and physically persists the write-ahead log. Thus, this scenario intends to guarantee write operation durability while simulating an accurate real-time Monitoring Infrastructure.

Results

Regarding stream data ingestion, as seen in Figure 8, MongoDB-RI provides the fastest writes/second ratio. This is mainly due to two reasons: first, MongoDB-RI uses snappy compression, which provides a lighter but faster compression in comparison to any compression technique that NagareDB or InfluxDB uses. Second, MongoDB-RI's data model follows a document-oriented data model which is, in fact, a key-value approximation, where the value is a document that stores a small bucket, considered as a small column of sensor readings according to time. Conversely, InfluxDB provides the slowest ratio in this scenario. This could be partially explained by its columnar data model design. This data model benefits batch writes to single columns (or sensors), so it is really fast when inserting many readings of one single sensor at the same time. However, this behavior is distant from a real-time scenario, where all sensors ship their readings together and they have to be inserted immediately. Lying in the middle, NagareDB uses an intermediate data model: while it uses a document-oriented (so, key-value) approximation as MongoDB-RI does, it holds much bigger columns than MongoDB-RI, but not as extensive as InfluxDB. In addition, NagareDB uses ZSTD compression, which provides a better compression ratio at the expense of slightly slowing down insertion time, following NagareDB's resource-saving goals. This makes NagareDB's data model somewhat of a hybrid between MongoDB-RI and InfluxDB, thus providing intermediate performance. In addition, NagareDB-32b is able to slightly surpass NagareDB-64b, as the data types that it uses are smaller than in its high-precision alternative version. Finally, all databases have demonstrated an efficient scaling speedup, as they did not reach the parallel slowdown point (where adding more parallel jobs implies a speedup decay), not even with five parallel jobs.

Conclusions

We introduced the obstacles that users or organizations that lack resources might face when dealing with time-series databases, as well as the requirements that a good TSDB should fulfill. In order to address this problem, and to lower the barriers to building Monitoring Infrastructures, we introduced the novel approach followed to create NagareDB, a resource-compromised and efficient time-series database built on top of MongoDB, the most popular NoSQL open-source database. Thus, thanks to the improvements and adaptations performed in NagareDB, and to the inherent MongoDB features and popularity, NagareDB is able to satisfy all modern TSDB requirements, while being an easy-to-master solution.
Concretely, our experiment results show that NagareDB is able to smoothly execute any typical TSDB query or operation, and to comfortably work on commodity PCs, consuming less disk space than MongoDB's recommended implementation while also outperforming it by up to 377% when retrieving data. Moreover, when comparing NagareDB with top-tier databases, such as InfluxDB, the most popular time-series database, our experiments show that NagareDB is able to compete against it, providing similar global query results. In addition, when ingesting real-time data, NagareDB is able to outperform InfluxDB by 35%. Furthermore, NagareDB is built on top of MongoDB's Community Edition, which is able to freely scale horizontally, while InfluxDB has this feature restricted to its commercial version, making it mandatory to follow a monolithic approach, limiting the database to one single machine. Finally, our experiments show that NagareDB is able to provide further speedup, and to reduce its storage consumption by up to 40%, when relaxing some requirements with regard to decimal data precision, providing an even better resource-outcome trade-off.

Future Work

We have preliminarily tested NagareDB in demanding and resource-limited real-world scenarios. We aim to improve it by working out any deficiencies we might identify, and to continue adding further optimizations and features, extensively testing them in new challenging scenarios, until releasing it as an out-of-the-box solution. Currently, its official version is only used internally, at the Barcelona Supercomputing Center, and in some projects with external collaborators. Moreover, we expect this approach to encourage more studies with regard to the democratization of Monitoring Infrastructures, as many small organizations could venture to improve their efficiency thanks to these kinds of systems, which currently might feel intimidating.

Author Contributions: C.G.C. has designed and implemented NagareDB, implemented the MongoDB recommended implementation following MongoDB's official design approach, and performed the evaluation and benchmarking on both NagareDB and MongoDB-RI, as well as the initial version of this research paper. C.D.C. has implemented and executed the evaluation and benchmarking for InfluxDB, providing further comparison insights. Y.B.F. and F.M.C. have been supervising the research during the whole process, providing insights, corrections, reviews, and proposing best practices. They were also in charge of funding acquisition. All authors have read and agreed to the published version of the manuscript.

Data Availability Statement: The dataset used for performing this benchmark, as well as the code itself, is freely available upon request. Please reach us at nagaredb@bsc.es, and we will be glad to help you in case you are interested in benchmarking NagareDB on your own machine or hardware ecosystem.

Conflicts of Interest: The authors declare no conflict of interest.
import java.io.*;
import java.util.*;

public class CF_520C {
    public static void main(String[] args) throws IOException {
        InputStream in = System.in;
        PrintStream out = System.out;
        /*
        in = new FileInputStream("in.txt");
        out = new PrintStream("out.txt");
        */
        Scanner sc = new Scanner(in);
        int n = sc.nextInt();
        TreeMap<Character, Integer> tm = new TreeMap<>();
        String s = sc.next();
        for (int i = 0; i < n; i++)
            tm.put(s.charAt(i), 1 + (tm.containsKey(s.charAt(i)) ? tm.get(s.charAt(i)) : 0));
        ArrayList<Integer> al = new ArrayList<>(tm.values());
        Collections.sort(al, Collections.reverseOrder());
        int mult = 1;
        for (int i = 1; i < al.size(); i++)
            if (al.get(i).equals(al.get(0)))
                mult++;
        long mod = 1000_000_007;
        long res = 1;
        for (int i = 0; i < n; i++)
            res = (res * mult) % mod;
        out.println(res);
    }
}
from pyspark import SparkConf, SparkContext


def action_exercise():
    """
    simple action exercise
    :return:
    """
    # create spark conf
    conf = SparkConf().setAppName("action_exercise").setMaster("local[*]")
    # create a spark context
    sc = SparkContext(conf=conf)
    # create a simple rdd, parallelize a list of numbers
    df = sc.parallelize([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
    # use a lambda function to get the sum of all numbers in the list
    total = df.reduce(lambda a, b: a + b)
    print(total)
    sc.stop()


if __name__ == "__main__":
    action_exercise()
#pragma once

#include "GpEmailHeaderType.hpp"
#include "../Common/Headers/GpHeaders.hpp"
#include "../Common/Enums/GpEnums.hpp"

namespace GPlatform {

class GPNETWORK_API GpEmailHeaders final: public GpProtoHeaders
{
public:
    CLASS_DECLARE_DEFAULTS(GpEmailHeaders)
    TYPE_STRUCT_DECLARE("debc2d8e-4184-4de4-950f-6641c9275466"_sv)

public:
                        GpEmailHeaders  (void) noexcept;
                        GpEmailHeaders  (const GpEmailHeaders& aHeaders);
                        GpEmailHeaders  (GpEmailHeaders&& aHeaders) noexcept;
    virtual             ~GpEmailHeaders (void) noexcept override final;

    GpEmailHeaders&     operator=       (const GpEmailHeaders& aHeaders);
    GpEmailHeaders&     operator=       (GpEmailHeaders&& aHeaders) noexcept;

    GpEmailHeaders&     Replace         (const GpEmailHeaderType::EnumT aType, std::string_view aValue);
    GpEmailHeaders&     Replace         (const GpEmailHeaderType::EnumT aType, std::string&& aValue);
    GpEmailHeaders&     Replace         (const GpEmailHeaderType::EnumT aType, const u_int_64 aValue);

    GpEmailHeaders&     Add             (const GpEmailHeaderType::EnumT aType, std::string_view aValue);
    GpEmailHeaders&     Add             (const GpEmailHeaderType::EnumT aType, std::string&& aValue);
    GpEmailHeaders&     Add             (const GpEmailHeaderType::EnumT aType, const u_int_64 aValue);

    GpEmailHeaders&     SetContentType  (const GpContentType::EnumT aContentType);
    GpEmailHeaders&     SetContentType  (const GpContentType::EnumT aContentType, const GpCharset::EnumT aCharset);
    GpEmailHeaders&     SetContentType  (std::string aContentType);

public:
    static const GpArray<std::string, GpEmailHeaderType::SCount().As<size_t>()> sHeadersNames;
};

}//namespace GPlatform
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.

from __future__ import print_function

import os
import io
import json
import tempfile
import pickle

import flask
import torch

from glob import glob
from mmdet3d.apis import init_model, inference_detector

prefix = '/opt/ml/'
model_path = os.path.join(prefix, 'model')

# A singleton for holding the model. This simply loads the model and holds it.
# It has a predict function that does a prediction based on the model and the input data.

"""
The model artifact must contain the configuration file and the model checkpoint.
The configuration file should contain absolute paths to /mmdetection3d if it points to base files.
The prediction method accepts a point cloud byte stream, and returns a pickled version of the response.
"""


class PredictService(object):
    model = None  # Where we keep the model when it's loaded

    @classmethod
    def get_model(cls):
        """Get the model object for this instance, loading it if it's not already loaded."""
        if cls.model == None:
            device = "cuda" if torch.cuda.is_available() else "cpu"
            config_file = glob(f'{model_path}/*.py')[0]
            checkpoint_file = glob(f"{model_path}/*.pth")[0]
            print(f"Loading config file {config_file} from path {model_path}")
            cls.model = init_model(config_file, checkpoint_file, device=device)
        return cls.model

    @classmethod
    def predict(cls, input):
        clf = cls.get_model()
        f = io.BytesIO(input)
        tfile = tempfile.NamedTemporaryFile(delete=False)
        tfile.write(f.read())
        # get inference results
        res, data = inference_detector(clf, tfile.name)
        results = {}
        # change torch tensors to numpy arrays
        results['boxes_3d'] = res[0]['boxes_3d'].tensor.detach().cpu().numpy()
        results['scores_3d'] = res[0]['scores_3d'].detach().cpu().numpy()
        results['labels_3d'] = res[0]['labels_3d'].detach().cpu().numpy()
        mm_result = {'result': results}
        return mm_result


# The flask app for serving predictions
app = flask.Flask(__name__)


@app.route('/ping', methods=['GET'])
def ping():
    """Determine if the container is working and healthy. In this sample container,
    we declare it healthy if we can load the model successfully."""
    health = PredictService.get_model() is not None  # You can insert a health check here
    status = 200 if health else 404
    return flask.Response(response='\n', status=status, mimetype='application/json')


@app.route('/invocations', methods=['POST'])
def transformation():
    predictions = PredictService.predict(flask.request.data)
    result = pickle.dumps(predictions)
    return flask.Response(response=result, status=200, mimetype='application/octet-stream')
TERRE HAUTE, Ind. (WTHI) - We now know where the Vigo County Capital Improvement Board wants to locate a new convention center.

The board has been looking for a home for the project since the initial idea with Indiana State University fell through. The site will be near 8th Street and Wabash Avenue, stretching from 9th Street to the east and Cherry Street to the north.

The board will get two appraisals for some of the land it will need for the convention center. The rest of the land will be donated by Greg Gibson. News 10 asked Gibson what it means to be in the position to make this kind of donation in Terre Haute.

"I've been thinking about making that donation for a while now, and we're doing some site investigation to make sure the site would work. I think this project is so important to the community...to downtown Terre Haute," Gibson said.

A hotel will also be built, and be a combination of a renovation of the state office building and new construction. Gibson said he will partner with Dora Brothers Hotels again, as he did with the Hilton Garden Inn and Candlewood Suites.

To make the convention center happen, the board will ask the city to vacate 8th Street between Wabash and Cherry Streets.
import unittest
import numpy as np
from hmmlearn.hmm import MultinomialHMM


class TestHMM(unittest.TestCase):
    def setUp(self):
        np.random.seed(0)

    def test_viterbi_case_handcraft(self):
        # init
        startprob = np.array([0.6, 0.4])
        transmat = np.array([[0.7, 0.3], [0.4, 0.6]])
        emissionprob = np.array([[0.1, 0.4, 0.5], [0.6, 0.3, 0.1]])
        X = np.array([1, 0, 2, 0, 2, 1, 0, 1, 1]).reshape(-1, 1)
        # hmmlearn
        model = MultinomialHMM(n_components=2)
        model.startprob_ = startprob
        model.transmat_ = transmat
        model.emissionprob_ = emissionprob
        y = model.predict(X)
        # my hmm
        hmm = HMM()
        pred = hmm.viterbi(startprob, transmat, emissionprob, X)
        self.assertTrue(np.array_equal(y, pred))

    def test_viterbi_case_random(self):
        for i in range(1000):
            # init
            self.n_state = np.random.randint(1, 10)
            self.n_output = np.random.randint(1, 10)
            self.step = np.random.randint(1, 200)
            p = np.random.random(self.n_state)
            startprob = p / p.sum()
            p = np.random.random((self.n_state, self.n_state))
            transmat = p / p.sum(axis=1).reshape(-1, 1)
            p = np.random.random((self.n_state, self.n_output))
            emissionprob = p / p.sum(axis=1).reshape(-1, 1)
            X = np.random.choice(self.n_output, self.step).reshape(-1, 1)
            # hmmlearn
            model = MultinomialHMM(n_components=self.n_state,)
            model.startprob_ = startprob
            model.transmat_ = transmat
            model.emissionprob_ = emissionprob
            y = model.predict(X)
            # my hmm
            hmm = HMM()
            pred = hmm.viterbi(startprob, transmat, emissionprob, X)
            self.assertTrue(np.array_equal(y, pred))


class HMM:
    def __init__(self):
        pass

    def viterbi(self, prob, transmat, emissionprob, X):
        prob = np.log(prob)  # take logs to avoid underflow; hmmlearn's implementation does the same
        transmat = np.log(transmat)
        emissionprob = np.log(emissionprob)
        history = []
        for i in range(len(X)):
            prob = prob + emissionprob[:, X[i]].ravel()
            if (i == len(X) - 1):
                break  # there are len(X) emissions but only len(X)-1 transitions
            history.append(np.argmax(transmat.T + prob, axis=1))  # save the best predecessor, then transition
            prob = np.max(transmat.T + prob, axis=1)
        s = np.argmax(prob)  # start backtracking from the most probable final state
        seq = [s]
        for h in history[::-1]:
            s = h[s]
            seq.append(s)
        return seq[::-1]


if __name__ == '__main__':
    unittest.main()
# include <iostream>
# include <fstream>
# include <sstream>
# include <algorithm>
# include <functional> // greater<int>()
# include <vector>
# include <string>
# include <queue>
# include <stack>
# include <list>
# include <map>
# include <set>
# include <cstdio>
# include <cstdlib>
# include <cmath>
# include <ctime>
# include <cstring>
# include <cctype>
# include <climits> // LLONG_MAX , LLONG_MIN , INT_MAX , INT_MIN

/* MACROS */
# define all(A) (A).begin() , (A).end()
# define rall(A) (A).rbegin() , (A).rend()
# define sz(A) (int)(A).size()
# define pb push_back
# define ppb pop_back
# define mp make_pair
# define bir first
# define iki second
# define rsz(A,X) (A).resize(X)
# define ln(A) (int)(A).length()
# define FILL(X, A) memset((X), (A), sizeof(X))
# define minof(X) min_element(all(X))-X.begin()
# define maxof(X) max_element(all(X))-X.begin()
# define square(X) ((X)*(X))
# define cube(X) ((X)*(X)*(X))
# define FOR(i, a, b) for (int i=a; i<b; i++)
# define FORD(i, a, b) for (int i=a-1; i>=b; i--)
# define REP(i,n) for( int i=0 ; i<n ; i++ )
# define REPD(i,n) for( int i=n-1 ; i>=0 ; i-- )
# define TR(it,container) \
    for(typeof(container.begin()) it = container.begin(); it != container.end(); it++)
# define setmap_found(container, element) (container.find(element) != container.end())
# define vector_found(container, element) (find(all(container),element) != container.end())
# define DBG(vari) cerr<<#vari<<" = "<<(vari)<<endl;
//time_t st=clock();

using namespace std;

/* TYPE DEFINITIONS */
typedef long long i64;
typedef vector<int> vi;
typedef vector<vi> vvi;
typedef pair<int,int> pi;
typedef vector<pi> vpi;
typedef vector<vpi> vvpi;
typedef vector<i64> v64;
typedef vector<v64> vv64;
typedef pair<i64,i64> p64;
typedef vector<p64> vp64;
typedef vector<vp64> vvp64;
typedef vector<string> vs;
typedef vector<vs> vvs;
typedef vector<bool> vb;
typedef vector<vb> vvb;
typedef vector<char> vc;
typedef vector<vc> vvc;

/* TOOLS */
template <class T> inline T max(T a, T b, T c){ return max(a, max(b, c)); }
template <class T> inline T min(T a, T b, T c){ return min(a, min(b, c)); }
template <class T> void debug(T a,T b){ for(;a!=b;++a) cerr<<*a<<' '; cerr<<endl;}

/* ALGEBRA */
template <class T> T gcd (T a, T b){ return ( b==(T)0 ?a :gcd(b,a%b) ); }
template <class T> T lcm (T a , T b){ return a/gcd(a,b)*b; }
template <class T> T mathround (T x){ i64 i64x=(i64)x; if( x<i64x+0.5 ) return (T)i64x; return (T)i64x+1; }
template <class T> bool isprime(T x) {
    int till = (T)sqrt(x+.0);
    if (x <= 1) return 0;
    if (x == 2) return 1;
    if (x%2 == 0) return 0;
    for (int i=3; i<=till; i+=2) if (x%i == 0) return 0;
    return 1;
}

/* CONSTANTS */
//const double Pi = 4.0 * atan(1.0); // 4*atan(1)
//int dir[8][2] = {{+1,+0},{-1,+0},{+0,+1},{+0,-1},{-1,-1},{+1,+1},{+1,-1},{-1,+1}};
//int mon[12] = {31,28,31,30,31,30,31,31,30,31,30,31};

/* {END} */
int n;
int sumx;
int sumy;
bool tt = false;

int main() {
    cin >> n;
    for (int i = 0; i < n; i++) {
        int x, y;
        cin >> x >> y;
        if ( ((y-x)%2+2)%2 == 1 && ((x-y)%2+2)%2 == 1) tt = true;
        sumx += x;
        sumy += y;
    }
    if (sumx%2 == 0 && sumy%2 == 0) cout << 0;
    else if (sumx%2 == 1 && sumy%2 == 1) cout << (tt ?1 :-1);
    else cout << -1;
    // ios_base::sync_with_stdio(false);
    // freopen("input.txt", "r", stdin); // needs disabled ios_base::sync_with_stdio(false);
    // freopen("output.txt", "w", stdout); // needs disabled ios_base::sync_with_stdio(false);
    // cin >> noskipws;
    // cin >> skipws;
    // cout<<clock()-st;
    /* getchar(); getchar(); */
    return EXIT_SUCCESS;
}
The Kickstarter for Questionable Content Vol. 6 went live on April 11th, and was fully funded at $55,000 four days later. Questionable Content, written by professional webcartoonist and indie music aficionado Jeph Jacques, began in 2003. Originally, it was just some silly comics about a depressed, overworked guy and a perverted robot. However, it evolved into a love letter to the hipster culture of Northampton in the early 2000s, including fashion, indie bands and coffee shops. Topatoco carries the first five volumes of Questionable Content. Volume Six covers the story from strip 1500 to 1799, which is still well behind the online daily strips (currently at 3460).

QUESTIONABLE CONTENT VOL. 6 – STRETCH GOALS

The Kickstarter for Vol. 6 is fully funded, but has yet to reach its stretch goals. The stretch goals, set at 69, 80, and 91 thousand respectively, are for reprints of the first three books. The first three were printed in a different format; the reprints mean that they will match the newer prints. Since the Kickstarter doesn't close for another month, it's hard to say yet whether or not it'll reach the stretch goals, but it looks likely. The recent Dumbing of Age Book Six Kickstarter has reached all but one of its stretch goals with 10 days to spare, and its highest stretch goal is more than double its first. These are examples of the power of crowdfunding, especially from artists as popular as Willis and Jacques.

QUESTIONABLE CONTENT – WHAT NEXT?

With the success of the latest Kickstarter and the impending Vol. 6, it's worth taking a moment to realize that Jeph Jacques has been working on this comic for a stunning 14 years. It's not his only project, either. Jacques began drawing Alice Grove in 2014, and his indie post-metal band Deathmøle has been around more or less since 2005. There's been no indication that Jacques has any intention of ending Questionable Content any time soon. The latest story arcs open up new paths to explore, and plot points hang unresolved from past conflicts. It's exciting, then, to think about what QC's fifteenth birthday will bring next year – and if the print books will ever catch up to the online strips. (With five strips a week, it isn't likely, but Jacques is bringing in almost 10k a month from Patreon alone – so who knows?)

Are you sponsoring the Questionable Content Vol. 6 Kickstarter?
declare module '@pm-react-example/shared/collab-socket' {

  // Editor socket action
  export type EditorSocketAction = CollabAction
  export type EditorSocketActionType = ECollabAction

  // Collab actions
  // REMEMBER: when adding enums, update the shared.js file
  export enum ECollabAction {
    COLLAB_USERS_CHANGED = 'COLLAB:USERS_CHANGED',
    COLLAB_CLIENT_EDIT = 'COLLAB:CLIENT_EDIT',
    COLLAB_SERVER_UPDATE = 'COLLAB:SERVER_UPDATE',
  }

  export type CollabAction = ICollabUsersChangedAction | ICollabEditAction | ICollabServerUpdateAction

  export interface ICollabUsersChangedAction {
    type: ECollabAction.COLLAB_USERS_CHANGED
    payload: {
      documentId: string
      userCount: number
      userId: string
    }
  }

  export interface ICollabEditPayload {
    version: number
    steps: { [key: string]: any }[]
    clientIDs: number[]
  }

  export interface ICollabEditAction {
    type: ECollabAction.COLLAB_CLIENT_EDIT
    payload: ICollabEditPayload
  }

  export interface ICollabServerUpdateAction {
    type: ECollabAction.COLLAB_SERVER_UPDATE
    payload: {
      cursors: any
    }
  }
}
Efficacy of FODMAP Elimination and Subsequent Blinded Placebo-Controlled Provocations in a Randomised Controlled Study in Patients with Ulcerative Colitis in Remission and Symptoms of Irritable Bowel Syndrome: A Feasibility Study

Background: Patients with inflammatory bowel disease (IBD) and symptoms of irritable bowel syndrome (IBS) may be intolerant to fermentable carbohydrates (FODMAPs). The aim of this study was to test the feasibility of eliminating and subsequently reintroducing FODMAPs in patients with IBS symptoms as part of the IBD manifestation and to compare the severity of IBS symptoms and pain, bloating and quality of life (QoL). Methods: An eight-week randomised open-label FODMAP elimination with double-blinded, crossover provocations of FODMAP and placebo. Diet patients were on a low-FODMAP diet for eight weeks with blinded two-week provocations after two and six weeks. Questionnaires, blood and stool samples were collected. Results: Patient enrolment was challenging. Nineteen participants were included in the study. Two weeks of FODMAP elimination resulted in significant decreases in pain and bloating scores (p < 0.003), whereas there were no statistical differences in pain scores between diet patients and controls. Pain and bloating scores increased, returning to baseline levels after two weeks of double-blinded provocations with placebo (p > 0.05). Conclusions: The results document the possibility of performing a randomised controlled study following the gold standard for testing food intolerance with blinding of the low-FODMAP diet. Recruitment of participants was challenging.

Introduction

One-third of patients with inflammatory bowel disease (IBD) in remission have symptoms of irritable bowel syndrome (IBS). Patients with IBD typically suffer from bloating, abdominal pain, and diarrhoea or constipation, and the term IBS in IBD has been proposed to describe this clinical manifestation of IBD. Co-existence of IBS symptoms further deteriorates the quality of life (QoL) in patients with IBD, and effective treatment for relief of the symptoms is still lacking [1]. The mechanisms behind IBS in IBD are unknown, but intake of the poorly absorbable fermentable oligo-, di-, and monosaccharides and polyols (FODMAPs) such as fructose, lactose, sorbitol, and mannitol has been found to exacerbate gastrointestinal (GI) symptoms. Furthermore, a diet low in FODMAPs may reduce symptoms in patients with IBS, thus increasing QoL for these patients. However, the efficacy of FODMAP restriction alone in reducing symptoms, compared to a placebo and nocebo effect, has not yet been determined due to the difficulties inherent in diet intervention studies. Previous studies have shown a reduction in symptoms both in patients on placebo and in those on a low-FODMAP diet. The challenges related to study design consist of ensuring blinding, creating comparable study logistics compatible with patients' lives, and prolonging the study period to capture symptom variation over time. Blinding is difficult to achieve in diet intervention studies as patients often guess the blinding status by observing their food and investigating the FODMAP content. The gold standard for evaluating food intolerance is elimination, provocation, elimination, and provocation in a double-blinded and placebo-controlled set-up. The efficacy of FODMAPs has not yet been tested in such a design in patients with IBS in IBD.
The aim of the present study was to test the feasibility of eliminating and subsequently reintroducing FODMAPs in patients with IBS in IBD. Moreover, we aimed to compare the severity of IBS symptoms and subsequent pain, bloating and QoL in patients receiving either a FODMAP diet or placebo.

Materials and Methods

This study was an eight-week randomised open-label FODMAP elimination trial with double-blinded, crossover provocations of FODMAP or placebo, compared to a control group. The efficacy of low-FODMAP elimination and provocation on IBS symptoms was investigated in patients with ulcerative colitis in deep remission with concurrent IBS symptoms. The study complied with the Declaration of Helsinki and was conducted at the North Denmark Regional Hospital, Hjoerring, and Aalborg University Hospital in Denmark. The local data authorities and ethics committee approved the database and study protocol (N-20180005), and the study was registered at ClinicalTrials.gov (accessed on 30 January 2022) under the identifier NCT02469220. Oral and written informed consent was obtained from all participants.

Participants

Patients were recruited from the gastroenterology outpatient clinics at North Denmark Regional Hospital and Aalborg University Hospital, with a combined uptake area of 590,439 people. Inclusion started in July 2018 and ended in August 2020. The inclusion criteria were age between 18 and 70 years, ulcerative colitis (UC) in remission, fulfilment of the Rome IV criteria for the diagnosis of co-morbid IBS, and no or stable medical treatment with 5-aminosalicylic acid or biological therapy. Exclusion criteria were intake of a low-FODMAP diet within six weeks before study inclusion; atypical UC with right-sided inflammation and calprotectin >50 unless a normal colonoscopy documented remission; Clostridium difficile infection; lactose intolerance; comorbid coeliac disease or elevated transglutaminase; pregnancy; antibiotic treatment up to six weeks before inclusion; other treatment for UC than stated above; flare in UC; eating disorder; being unable to follow the low-FODMAP diet for any reason; other disease than IBD explaining the IBS symptoms; medication explaining the symptoms. Patients fulfilling the above-mentioned inclusion criteria underwent sigmoidoscopy or colonoscopy if this had not been performed within the last three months. After screening, written consent was confirmed. A sample size calculation using data from a previous study in patients with IBD was performed. According to this calculation, with a minimum difference in the symptom score on a VAS of 2.5, a standard deviation of 2.3, 80% power and α = 0.05, a total of 45 patients, 15 in each group, was needed.

Measurements

The timeline of the study is illustrated in Figure 1. Included patients were randomised to the study at a 1:1:1 ratio. The randomisation code was computer generated and kept in a locked room, only accessible to the kitchen supervisor. The food supplements were delivered in a box containing information only on the randomisation number and time period.
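As an illustrative check of the sample size reported in the Participants subsection above (the paper does not state which formula or software was used, so the standard two-sample comparison of means is assumed here):

    n per group ≈ 2 (z_{1−α/2} + z_{1−β})² σ² / Δ² = 2 (1.96 + 0.84)² (2.3)² / (2.5)² ≈ 13.3,

which, allowing for the usual small-sample (t-distribution) correction, rounds up to approximately 15 patients per group, in line with the 45 patients (15 per group across the three arms) stated above.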
After randomisation, 1/3 of the patients formed a watchful-waiting control group, and 2/3 of the patients were placed on an open-label low-FODMAP diet and provoked double-blinded with placebo and FODMAPs, respectively, in a cross-over fashion. Between provocations there was a two-week low-FODMAP diet washout. During the study, patients had three identical consultations with questionnaires, blood and stool sampling. A symptom diary was filled in daily throughout the entire study period. As illustrated in Figure 1, two groups received the low-FODMAP diet. The provocation food supplements contained either FODMAP levels typical of a Danish diet or placebo. The control group (Figure 1) underwent the same visits, phone calls, questionnaires, tests, blood tests and faecal samples as the intervention groups, but no dietary intervention. After study completion, the control group was offered instruction in the diet if interested. All patients had three visits in the outpatient clinics after the randomisation (Figure 1). An overview of tests and questionnaires completed in connection with the visits can be seen in Table 1. In the week before visit 1, biopsies for histology were sampled from the rectum and sigmoid colon, and all screening questionnaires were filled in. During visit 1, randomisation of the patients was performed, and all patients completed the remaining questionnaires electronically. Those randomised to dietary intervention were instructed by a certified dietitian on how to adhere to the low-FODMAP diet, and frozen food supplements were handed out for the first period. The lists of foods to be included or excluded were reviewed with the patients. One week after visit 1 and visit 2, respectively, an investigator contacted each participant by telephone to clarify any questions. The faecal samples for visits 1-3 were collected and frozen to −18 °C at home and carried in a thermo bag with frost elements to the hospital.
The recipes for the provocation foods complied with the low-FODMAP principles before the addition of either FODMAPs or placebo. Sucrose was used as placebo, as FODMAPs have a sweet taste. Blinded taste testing of the finalised provocation foods was performed by the study personnel, dietitians and kitchen staff to ensure identical smell, looks, taste and consistency. The provocation foods added to the patients' diets were: a 2 dL breakfast smoothie, 100 g of low-FODMAP dark rye bread for lunch, and 250 g of soup for dinner. Calculations of daily FODMAP intake were performed using the FODMAP calculator from Monash University. The amount of FODMAPs in a normal Danish diet was estimated among 20 randomly selected, healthy Danish volunteers, who registered their food intake in detail over one week (data not shown). The amount was 30 g of FODMAPs/day. For the provocation foods, 5 g was subtracted, as this is the intake on a low-FODMAP diet. The amount of FODMAPs in the provocation foods was 25 g in total (fructose: 3.23 g, sorbitol: 2.28 g, mannitol: 0.40 g, lactose: 14.42 g, fructans: 3.58 g, galacto-oligosaccharides (GOS): 0.85 g).

Questionnaires
The following questionnaires were filled in by the participants to assess disease severity: Mayo Score, Rome IV criteria for IBS, Irritable Bowel Syndrome Severity Scoring System (IBS-SSS), IBS-specific Gastrointestinal Symptom Rating Scale (GSRS-IBS), Irritable Bowel Syndrome Adequate Relief (IBS-AR) and Patient Health Questionnaire (PHQ-15)-Somatisation. Finally, each participant filled in a symptom diary, starting one week before randomisation and running throughout the study for a total of 56 days. The diary consisted of a 100 mm visual analogue scale (VAS) to score average daily pain, maximum pain and bloating, as well as stool frequency and consistency. Two questionnaires were used to identify anxiety and depression: the Hospital Anxiety and Depression Scale (HADS) and the Visceral Sensitivity Index (VSI) for GI-specific anxiety. Health-related quality of life was measured with the Short Form 36 Health Survey (SF-36). Diet registration and FODMAP food frequency registration were performed during the three days before each outpatient visit. Food items and amounts (weight, volume) were registered for two weekdays and one weekend day. The questionnaire consisted of five pages of specified foods and drinks containing FODMAPs. The amount of specific FODMAPs was subsequently calculated using the FODMAP calculator. Patients were asked which provocations they thought they had received during the past two weeks and whether they had experienced that the provocations decreased, increased or did not change their pain. Compliance with the provocation foods was checked by counting the remaining food supplements after each provocation period.

Blood samples from each of the three visits were analysed for C-reactive protein, white blood cell count, total iron, transferrin, ferritin, haemoglobin (whole blood), folate, cobalamin and red blood cell volume. Faecal calprotectin was extracted using BÜHLMANN CALEX caps (Bühlmann Laboratories AG, Schönenbuch/Basel, Switzerland) and measured using the BÜHLMANN fCAL turbo method.
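As a quick arithmetic check of the provocation-food composition reported above, the six FODMAP fractions can be summed and compared with the 25 g target (the estimated 30 g/day in a typical Danish diet minus the roughly 5 g/day consumed on a low-FODMAP diet). The snippet below is only an illustrative sketch; the gram values are copied from the text, and the slight shortfall from exactly 25 g reflects the paper's own rounding.

# Illustrative check of the provocation-food FODMAP composition quoted above.
composition_g = {
    "fructose": 3.23,
    "sorbitol": 2.28,
    "mannitol": 0.40,
    "lactose": 14.42,
    "fructans": 3.58,
    "GOS": 0.85,
}

target_g = 30.0 - 5.0  # typical Danish intake minus low-FODMAP baseline
total_g = sum(composition_g.values())
print(f"total FODMAPs in provocation foods: {total_g:.2f} g (target ~{target_g:.0f} g)")
# -> total FODMAPs in provocation foods: 24.76 g (target ~25 g)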
Gut microbiota analysis was performed on faecal samples as described previously. Briefly, bacterial DNA was extracted using a QIAamp PowerFecal Pro DNA kit (QIAGEN, Copenhagen, Denmark) according to the manufacturer's instructions. The resulting DNA was investigated on the Illumina MiSeq platform (Illumina, San Diego, CA, USA) by 16S rRNA gene sequencing targeting the hypervariable V4 region.

Outcomes
The primary outcome was the numerical change in IBS-SSS score after four and eight weeks of diet and either provocation with low FODMAP (placebo provocation) or FODMAPs (FODMAP provocation), respectively. Secondary outcomes were changes in pain and bloating scores from the daily symptom diaries and changes in QoL.

Statistical Analysis and Bioinformatics
We used numbers and percentages to present categorical variables. Continuous variables were reported as medians and interquartile ranges. Comparing the sum of the primary outcome in the two periods between groups showed no significant carry-over effects. Differences in outcomes were tested by paired Wilcoxon signed-rank tests and chi-squared tests as appropriate. Symptom diaries were averaged across each week, and a graphical representation of the mean difference between each week and the baseline score was presented. All analyses were performed as complete-case intention-to-treat analyses. A p-value below 0.05 was considered statistically significant. For microbiota data, bioinformatics was performed using a USEARCH11 pipeline as previously described; alpha diversity was compared using repeated-measures ANOVA, while beta diversity was investigated using principal coordinate analysis (PCoA) on the Bray-Curtis dissimilarity. All remaining statistical analyses were performed using R version 3.5.3.

Results
Figure 2 shows the inclusion of patients. A total of 34% of the patients invited from the outpatient clinics completed the questionnaires. Although 31% of the patients from the outpatient clinics complied with the Rome IV criteria in the questionnaire, one-third of them did not meet the criteria at the interview due to, e.g., a flare of UC or misunderstanding of the questionnaire. Patients' demographics are shown in Table 2 (combined) and Supplementary Table S1. Figure 2 shows the inclusion process: half of the patients were found via questionnaires sent out electronically, and the other half were recruited at planned outpatient consultations. The questionnaires were not filled in by 67% of patients. Of those who completed the questionnaires, 31% fulfilled the Rome IV criteria for IBS; however, of those accepting contact, 37% did not fulfil the Rome IV criteria at the time of the interview.

Feasibility of Blinding and Adherence to Low-FODMAP Diet
All patients on a low-FODMAP diet self-reported adherence to the low-FODMAP diet and intake of the food supplement treatment (Table 3). There was a significantly decreased intake of FODMAPs in the diet groups, but not in the control group (Table 3). Patients did not guess the blinding status (Table 3).
Primary Endpoint
There was no change in IBS-SSS scores after the low-FODMAP diet and placebo provocation combined (p > 0.99, Figure 3). Patients in the control group did not report a change in their IBS-SSS scores from baseline either. Hence, IBS-SSS scores in all three groups (diet, diet (placebo) and control group) were comparable (Figure 3). The lack of difference was a consequence of the provocation effects, with either placebo or FODMAPs, cancelling the initial dietary effect.

Secondary Endpoints
The symptom diary made it possible to distinguish between the effects of diet and provocations on a weekly basis. After two weeks, the pain score had decreased significantly by 40% (p = 0.002) and the bloating score by 56% (p < 0.001) compared to baseline in the low-FODMAP diet groups (Figure 4A,B). However, when provoked blindly with placebo, symptoms increased and were similar to baseline levels, eliminating the initial diet effect (p > 0.05, Figure 4A,B). Patients in the control group showed a decreasing trend in the median abdominal pain score of almost 50% (p = 0.22, Figure 4C) and in the bloating score by a median of 38% (Figure 4D). These results matched improvements in the diet group while they were on the low-FODMAP diet (p = 0.92).

The results of the symptom diaries showed that watchful waiting in the control group resulted in a 50% pain reduction (A) and a bloating reduction trend (B). In the diet groups, two weeks on the open-label low-FODMAP diet resulted in significant decreases in pain and bloating scores (C,D). However, after the subsequent two-week provocations, pain and bloating scores returned to baseline levels regardless of whether the provocation was placebo (p < 0.05, nocebo effect) or FODMAPs (C,D). * p < 0.05.

Results for a priori defined secondary outcomes in patients on diet and provocations were similar, and no clinically relevant differences were observed. There was no indication of a UC flare during the study period, as calprotectin levels remained normal for all patients. Blood sample analysis did not show clinically relevant changes. In gut microbiota measures, variations were observed across all groups during the study period, although interpersonal variations superseded the effects of treatment. No treatment-specific effect was observed (Supplementary Figure S1).
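As a hedged illustration of the paired, non-parametric comparisons described in the Statistical Analysis section above (Wilcoxon signed-rank tests on weekly averages of the symptom diary), the sketch below uses made-up pain scores. It is not the study's analysis code, which was written in R 3.5.3, and every number in it is hypothetical.

# Illustrative sketch only: paired Wilcoxon signed-rank test on weekly diary
# averages, as described in the Statistical Analysis section. The scores below
# are hypothetical; the study's own analyses were performed in R 3.5.3.
import numpy as np
from scipy.stats import wilcoxon

# Hypothetical per-patient mean daily pain (VAS, 0-100), averaged per week.
baseline_week = np.array([42, 55, 31, 60, 48, 37, 52, 44, 39, 58])
diet_week2    = np.array([25, 30, 20, 41, 28, 22, 35, 26, 24, 40])

# Paired comparison of week 2 on the low-FODMAP diet vs. baseline.
stat, p_value = wilcoxon(baseline_week, diet_week2)
print(f"Wilcoxon signed-rank: W = {stat:.1f}, p = {p_value:.4f}")

# Relative change, analogous to the ~40% pain reduction reported above.
reduction = 1 - diet_week2.mean() / baseline_week.mean()
print(f"mean pain reduction: {reduction:.0%}")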
Discussion
This feasibility study is the first randomised study of low-FODMAP elimination with subsequent placebo-controlled, double-blinded, cross-over provocation related to symptoms in patients with IBS in IBD in deep remission. Recruitment was challenging. The blinding of the provocation foods was effective. This study followed the gold standard of testing for food intolerance. The FODMAP elimination, followed by randomised, double-blinded, cross-over reintroduction of FODMAPs, was additionally compared to a control group to estimate the effect of participation in the study procedures alone, without any intervention. This enabled us to estimate the efficacy of the diet alone with a control group undergoing the same non-diet interventions as the diet groups. The two-week duration of each elimination and provocation of FODMAPs/placebo has, in previous studies, been shown to be an adequate duration for the diet to show efficacy and for provocations to provoke symptoms. The two RCTs previously published on the effect of the low-FODMAP diet in the IBD-IBS overlap were of 1 to 14 days in duration, which is problematic as IBS symptoms vary over time. An optimal study would run over months or even years due to symptom fluctuations in IBS. However, the longer the duration of a strict diet and frequent hospital visits, the higher the risk of dropouts, non-compliance, and poor generalisability, as patients willing to continue participation could be less and less comparable to the average patient with IBD. The choice of two weeks of diet or provocation was a compromise to optimise the proportion of patients completing the study and adhering to the low-FODMAP diet. In the control group, we chose "no treatment"/"watchful waiting" as the alternative. "Open-label placebo" has previously shown some efficacy on symptoms, which we aimed to minimise. Still, the "no treatment/watchful waiting" in the control group was highly efficacious in decreasing symptoms, suggesting a large placebo effect. We followed the gold standard for testing food intolerance by eliminating and subsequently provoking in a blinded fashion as described above. The amount of FODMAPs in the blinded provocation (25 g per day) was chosen to reflect the amount in a typical Danish diet, which is high compared to what has previously been reported from Australian (16 g per day) and American diets (12 g per day). The amount of FODMAPs in provocations fitted well with the amount diet patients had ingested before entering the study (20 g per day). In the study we provoked with a balance of all six types of FODMAPs to better mimic the real-life situation. Previous studies have provoked only with one or two FODMAPs, which rarely resembles a normal diet. The identical taste, look, scent and consistency of the provocation food supplements efficiently prevented study personnel and patients from guessing the blinding status, which has previously been a problem in low-FODMAP diet studies.
Using food supplements instead of supplying all foods for the study allowed patients to live as normally as possible during participation in the study and prevented asocial eating behaviour from affecting the results. In patients on an open-label low-FODMAP diet, abdominal pain scores decreased by 40% and bloating scores by 56%; this was similar to controls on watchful waiting. Provocations for two weeks with either placebo or FODMAPs reverted pain and bloating scores to baseline levels. Patient recruitment problems resulted in a small sample size; 19 patients were included versus the calculated 45. Firstly, many patients chose not to complete the questionnaires, possibly resulting in selection bias, which could impair generalisability. Secondly, many patients with IBS according to their questionnaire answers actually experienced a flare or did not fulfil the Rome IV criteria when interviewed, resulting in a very low percentage of IBS in IBD in this study compared to previous data. Among screened patients whom both doctor and patient assessed to be in remission, 8% had a flare documented at endoscopy. This calls for increased use of endoscopy in patients with UC experiencing abdominal pain or bloating but normal non-bloody stools. Finally, as recruitment proved extremely difficult, the primary focus changed to study feasibility; the final sample size reflected the largest feasible sample size at our two study hospitals, and a crossover design was used to maximise power. Further, the primary outcome was changed to IBS-SSS as this is the gold standard scale for IBS symptoms. Recalculation of adequate sample size was not performed a priori, as the feasibility of simply completing the trial was the main goal. Additionally, within-person standard deviations were not available at the time. The initial power calculation for this RCT was designed for a parallel trial with the symptom diary as the primary outcome, using data from a previous study in patients with IBD. According to this calculation, with a minimum difference in the symptom score on a VAS of 2.5, a standard deviation of 2.3, 80% power and α = 0.05, 45 patients were needed for a parallel study. However, our study had a within-person SD of 48.4. Therefore, to detect a minimally relevant difference of 50% as recommended for IBS-SSS (i.e., 125.3 from a baseline mean IBS-SSS of 247.5), a total of five patients were needed at 80% power and a two-sided alpha level of 5%. The non-significant differences in blood and stool samples found in this study are consistent with previous studies, in which no differences were documented either. A strength of the present study was that the gold standard was used for testing food intolerance, with elimination followed by blinded provocations. However, food provocations may result in a substantial nocebo response, which should be taken into account, and this may have influenced the results of our study. In addition, the number of questionnaires should be reduced, as there is a large overlap between the results of the individual questionnaires. The patient group was well defined and in deep remission, established by endoscopy and histology before the study, and calprotectin levels were monitored during the study, ensuring that the results did not reflect a new flare rather than the effect of the diet.
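The sample-size reasoning above can be reproduced approximately with standard power calculations. The sketch below is only illustrative: it treats both calculations as two-sided t-tests at 80% power and α = 0.05, with effect size equal to the minimally relevant difference divided by the relevant standard deviation. This is an assumption, since the paper does not state the exact software or formula used, so the solver's output may differ slightly from the reported 45 and five patients.

# Rough reproduction of the two sample-size calculations discussed above.
# Assumption: both are approximated as two-sided t-tests at 80% power, alpha = 0.05,
# with effect size = minimally relevant difference / standard deviation.
from statsmodels.stats.power import TTestIndPower, TTestPower

# Original parallel-group design: detect a 2.5-point VAS difference, SD = 2.3.
parallel_n_per_group = TTestIndPower().solve_power(
    effect_size=2.5 / 2.3, alpha=0.05, power=0.80, alternative="two-sided"
)
print(f"parallel design: ~{parallel_n_per_group:.1f} per group "
      f"(about 15 per group, 45 in three groups, as reported)")

# Crossover/within-person comparison: detect 125.3 IBS-SSS points
# (50% of the baseline mean of 247.5) with a within-person SD of 48.4.
paired_n = TTestPower().solve_power(
    effect_size=125.3 / 48.4, alpha=0.05, power=0.80, alternative="two-sided"
)
print(f"within-person design: ~{paired_n:.1f} patients (the paper reports a total of five)")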
Blood samples were analysed for C-reactive protein, white blood cell count, total iron, transferrin, ferritin, haemoglobin (whole blood), folate, cobalamin and red blood cell volume. Faeces was analysed for faecal calprotectin and bacterial DNA. There was no significant difference in any of the blood and microbiota samples except in ferritin (p = 0.02). The paraclinical tests (calprotectin, blood and stool samples) were stable and documented continuing remission, thus not affecting the results. The existing literature on an association between low FODMAPs and microbiota changes is not unequivocal; this study showed no treatment-related variations in the microbiota of the study participants.

Surprisingly, the average pain score in the control group decreased by almost 50%, although the decrease was not statistically significant due to the low number and high variability of study participants. However, as these patients attended the same outpatient visits and completed the same questionnaires as the diet group, a placebo response was to be expected. Moreover, placebo responses of up to 80% have previously been observed in short-lasting IBS trials. This could not be explained by the controls adhering to the low-FODMAP principles (which were monitored during the study), and it constitutes a major challenge when planning future diet studies. The open-label low-FODMAP diet resulted in a symptom decrease, which reflects either a dietary or a placebo effect. The placebo provocation also revealed a large nocebo effect. The nocebo effect resulting from a placebo provocation has previously been observed by Biesiekierski et al. and should be considered in future studies. Provocation with FODMAPs elicited the same increase in symptoms as placebo provocation. The primary endpoint, which was the combined effect of elimination and subsequent provocation with FODMAPs, documented no change in IBS-SSS score, masking the pain decrease caused by the diet alone. In future studies, it will be necessary to measure an IBS-SSS score the day before starting the provocations.

Conclusions
This feasibility study provides insight into how future diet studies can be planned and constructed. The results of the study document the possibility of performing an RCT following the gold standard for blindly testing food intolerance with the low-FODMAP diet. The results of this blinded FODMAP RCT in patients with UC in remission and comorbid symptoms of IBS suggested that placebo and nocebo responses explained the symptom dynamics when eliminating and subsequently provoking with FODMAPs.

Supplementary Materials: The following supporting information can be downloaded at: https://www.mdpi.com/article/10.3390/nu14061296/s1, Figure S1: Gut microbiota of the different patient groups.

Institutional Review Board Statement: The local data authorities and local Ethical Committee approved the database and study protocol (N-20180005), and the study was registered at ClinicalTrials.gov (accessed on 30 January 2022) under the identifier NCT02469220. Oral and written informed consent was obtained from all participants.

Informed Consent Statement: Informed consent was obtained from all subjects involved in the study.

Data Availability Statement: The data presented in this study are available on request from the corresponding author.
A gothic country house in Berkshire that was used as a set for Hammer Horror films has gone up for sale. Oakley Court in Bray was also used as Frank N Furter's castle in The Rocky Horror Picture Show in 1975 and in The Belles of St Trinian's in 1954. The former private residence, now a hotel, was a popular film set because of its proximity to Bray Studios. Sellers Oakley Court Hotel Ltd declined to state the asking price but said the property had gone into administration. The house was built in 1859 for Sir Richard Hall Say, who became the High Sheriff of Berkshire in 1864. Although built in the Victorian period, its gothic architecture made it a popular film set for the Hammer Horror films, which were produced at Bray Studios near Maidenhead. The main entrance became the entrance to Castle Meinster in The Brides of Dracula in 1960; the house became Hamilton Manor in The Plague of the Zombies in 1966 and a school for girls in Nightmare in 1962-63. More recently it was home to several members of Team GB during the London 2012 Olympic Games. Selling agent Julian Troup of Colliers International said he was "anticipating a considerable amount of interest from a variety of different buyers, both in the UK and from abroad".
A Cross-Modal Learning Approach for Recognizing Human Actions
With the rapid growth in the number of videos, human-centric visual understanding tasks, especially human action recognition (HAR), are in great demand. The key issue in HAR is to capture spatial-temporal collaborative representations by mining abstract and discriminative high-level features. In this article, we propose a cross-modal learning framework that mainly includes an alignment net and a fusion net to improve the performance of HAR. First, information extracted from the different modalities is mapped into a common subspace for alignment, which compensates for spatial-temporal discrepancies. Then, the aligned features are further fused to generate complementary, correlated, and consistent representations. Finally, the learnt features are input to the classifier for recognition. The experimental results show that our proposed approach can outperform several state-of-the-art baseline approaches.
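The abstract only names the two components (an alignment net and a fusion net), so the sketch below is a hypothetical, minimal PyTorch-style interpretation rather than the authors' architecture: two modality encoders project RGB and motion features into a shared subspace, and a fusion layer combines the aligned features before classification. All layer sizes, modality choices and names are assumptions.

# Hypothetical sketch of an alignment + fusion pipeline for cross-modal HAR.
# This is NOT the paper's architecture; dimensions, modalities and layer names
# are illustrative assumptions only.
import torch
import torch.nn as nn

class AlignmentNet(nn.Module):
    """Projects each modality into a common subspace so features can be aligned."""
    def __init__(self, rgb_dim=2048, motion_dim=1024, common_dim=512):
        super().__init__()
        self.rgb_proj = nn.Linear(rgb_dim, common_dim)
        self.motion_proj = nn.Linear(motion_dim, common_dim)

    def forward(self, rgb_feat, motion_feat):
        z_rgb = torch.tanh(self.rgb_proj(rgb_feat))
        z_motion = torch.tanh(self.motion_proj(motion_feat))
        # An alignment loss (e.g. mean-squared distance) can pull the two views together.
        align_loss = ((z_rgb - z_motion) ** 2).mean()
        return z_rgb, z_motion, align_loss

class FusionNet(nn.Module):
    """Fuses the aligned features and classifies the action."""
    def __init__(self, common_dim=512, num_classes=60):
        super().__init__()
        self.fuse = nn.Sequential(nn.Linear(2 * common_dim, common_dim), nn.ReLU())
        self.classifier = nn.Linear(common_dim, num_classes)

    def forward(self, z_rgb, z_motion):
        fused = self.fuse(torch.cat([z_rgb, z_motion], dim=-1))
        return self.classifier(fused)

# Example forward pass with random features for a batch of 4 clips.
align, fusion = AlignmentNet(), FusionNet()
rgb = torch.randn(4, 2048)
motion = torch.randn(4, 1024)
z_rgb, z_motion, align_loss = align(rgb, motion)
logits = fusion(z_rgb, z_motion)
print(logits.shape, float(align_loss))  # -> torch.Size([4, 60]) and a scalar loss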
TIER REIT, Inc. (TIER) will release its fourth quarter 2018 financial results on Monday, February 11, 2019, after the market closes. A conference call will be held on Tuesday, February 12, 2019, at 11:00 a.m. Eastern time/10:00 a.m. Central time. Callers in the US or Canada may join the conference call by dialing 877.407.0789. TIER REIT, Inc. is a publicly traded, self-managed, Dallas-based real estate investment trust focused on owning quality, well-managed commercial office properties in dynamic markets throughout the U.S. TIER REIT’s vision is to be the premier owner and operator of best-in-class office properties in TIER1 submarkets, which are primarily higher density and amenity-rich locations within select, high-growth metropolitan areas that offer a walkable experience to various amenities. Our mission is to provide unparalleled, TIER ONE Property Services to our tenants and outsized total return through stock price appreciation and dividend growth to our stockholders.
/* * * Copyright 2014 http://Bither.net * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * / */ package net.bither.viewsystem; import net.bither.Bither; import net.bither.BitherSetting; import net.bither.bitherj.BitherjSettings; import net.bither.bitherj.core.Address; import net.bither.bitherj.core.AddressManager; import net.bither.implbitherj.BlockNotificationCenter; import net.bither.platform.listener.GenericQuitEventListener; import net.bither.platform.listener.GenericQuitResponse; import net.bither.preference.UserPreference; import net.bither.utils.ImageLoader; import net.bither.utils.LocaliserUtils; import net.bither.viewsystem.base.DisplayHint; import net.bither.viewsystem.base.ViewEnum; import net.bither.viewsystem.base.Viewable; import net.bither.viewsystem.components.ScrollBarUIDecorator; import net.bither.viewsystem.froms.MenuBar; import net.bither.viewsystem.froms.SingleWalletForm; import net.bither.viewsystem.panels.WalletListPanel; import net.bither.viewsystem.themes.Themes; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import javax.swing.*; import java.awt.*; public class MainFrameUI { private MainFrame frame; private WalletListPanel walletsView; private ViewFactory viewFactory; private JSplitPane splitPane; // private BitherTabbedPane viewTabbedPane; private JPanel headerPanel; private JPanel devidePanel; private net.bither.viewsystem.froms.MenuBar menuBarFrom; private JScrollPane scrollPane; private static final Logger log = LoggerFactory.getLogger(MainFrameUI.class); final private GenericQuitEventListener quitEventListener; public static final GenericQuitResponse bitherFrameQuitResponse = new GenericQuitResponse() { @Override public void cancelQuit() { log.debug("Quit Canceled"); } @Override public void performQuit() { log.debug("Performed Quit"); } }; public MainFrameUI(MainFrame frame, GenericQuitEventListener quitEventListener) { this.frame = frame; this.quitEventListener = quitEventListener; viewFactory = new ViewFactory(); } public JPanel getDevidePanel() { return devidePanel; } public WalletListPanel getWalletsView() { return walletsView; } public ViewFactory getViewFactory() { return viewFactory; } public void initUI(ViewEnum initialView) { Container contentPane = frame.getContentPane(); contentPane.setLayout(new GridBagLayout()); contentPane.setBackground(Themes.currentTheme.detailPanelBackground()); GridBagConstraints constraints = new GridBagConstraints(); GridBagConstraints constraints2 = new GridBagConstraints(); headerPanel = new JPanel(); headerPanel.setOpaque(true); headerPanel.setBackground(Themes.currentTheme.detailPanelBackground()); headerPanel.setLayout(new GridBagLayout()); headerPanel.applyComponentOrientation(ComponentOrientation.getOrientation(LocaliserUtils.getLocale())); menuBarFrom = new MenuBar(); // Set the application icon. 
ImageIcon imageIcon = ImageLoader.createImageIcon(ImageLoader.BITHER_ICON_FILE); if (imageIcon != null) { frame.setIconImage(imageIcon.getImage()); } constraints2.fill = GridBagConstraints.BOTH; constraints2.gridx = 0; constraints2.gridy = 0; constraints2.gridwidth = 1; constraints2.gridheight = 1; constraints2.weightx = 1000.0; constraints2.weighty = 1.0; constraints2.anchor = GridBagConstraints.LINE_START; headerPanel.add(menuBarFrom.getPanelMain(), constraints2); constraints.fill = GridBagConstraints.BOTH; constraints.gridx = 0; constraints.gridy = 0; constraints.gridwidth = 2; constraints.weightx = 1.0; constraints.weighty = 1.0; constraints.anchor = GridBagConstraints.LINE_START; contentPane.add(headerPanel, constraints); devidePanel = new JPanel(); // fill1.setOpaque(false); Dimension dimension = new Dimension(1000, 1); devidePanel.setPreferredSize(dimension); devidePanel.setMinimumSize(dimension); devidePanel.setMaximumSize(dimension); devidePanel.setBackground(new Color(0xd1d1d1)); constraints.fill = GridBagConstraints.BOTH; constraints.gridx = 0; constraints.gridy = 1; constraints.gridwidth = 0; constraints.weightx = 1.0; constraints.weighty = 1.0; constraints.anchor = GridBagConstraints.CENTER; contentPane.add(devidePanel, constraints); // Create the wallet list panel. walletsView = new WalletListPanel(); BlockNotificationCenter.addBlockChange(walletsView); JPanel viewTabbedPane;//= new JPanel(new BorderLayout()); // Add the transactions tab. if (UserPreference.getInstance().getAppMode() == BitherjSettings.AppMode.COLD) { // JPanel transactionsOutlinePanel = new JPanel(new BorderLayout()); Viewable coldWalletView = viewFactory.getView(ViewEnum.COLD_WALLET_VIEW); viewTabbedPane = coldWalletView.getPanel(); } else { Viewable transactionsView = viewFactory.getView(ViewEnum.TRANSACTIONS_VIEW); viewTabbedPane = transactionsView.getPanel(); } // viewTabbedPane.setBackground(ColorAndFontConstants.BACKGROUND_COLOR); GridBagLayout gridBagLayout = new GridBagLayout(); JPanel rightPanel = new JPanel(gridBagLayout); rightPanel.setOpaque(true); rightPanel.setBackground(Themes.currentTheme.detailPanelBackground()); GridBagConstraints rightContraints = new GridBagConstraints(); rightContraints.fill = GridBagConstraints.BOTH; rightContraints.gridx = 0; rightContraints.gridy = 0; rightContraints.gridwidth = 1; rightContraints.gridheight = 1; rightContraints.weightx = 1.0; rightContraints.weighty = 1.0; rightContraints.anchor = GridBagConstraints.LINE_START; rightContraints.insets = new Insets(0, 5, 0, 0); rightPanel.add(viewTabbedPane, rightContraints); // Create a split pane with the two scroll panes in it. 
scrollPane = new JScrollPane(JScrollPane.VERTICAL_SCROLLBAR_AS_NEEDED, JScrollPane.HORIZONTAL_SCROLLBAR_AS_NEEDED); scrollPane.setViewportView(walletsView); scrollPane.setViewportBorder(BorderFactory.createEmptyBorder()); scrollPane.setBorder(BorderFactory.createEmptyBorder()); scrollPane.getViewport().setBackground(Color.WHITE); scrollPane.getHorizontalScrollBar().setUnitIncrement(BitherSetting.SCROLL_INCREMENT); scrollPane.getVerticalScrollBar().setUnitIncrement(BitherSetting.SCROLL_INCREMENT); scrollPane.getViewport().setOpaque(true); scrollPane.setComponentOrientation(ComponentOrientation.getOrientation(LocaliserUtils.getLocale())); ScrollBarUIDecorator.apply(scrollPane, false); if (ComponentOrientation.LEFT_TO_RIGHT == ComponentOrientation.getOrientation(LocaliserUtils.getLocale())) { splitPane = new JSplitPane(JSplitPane.HORIZONTAL_SPLIT, scrollPane, rightPanel); } else { splitPane = new JSplitPane(JSplitPane.HORIZONTAL_SPLIT, rightPanel, scrollPane); splitPane.setResizeWeight(1.0); } splitPane.setDividerSize(3); splitPane.setBackground(Themes.currentTheme.text()); splitPane.setBorder( BorderFactory.createMatteBorder( 1, 0, 1, 0, Themes.currentTheme.text() )); splitPane.setOneTouchExpandable(false); splitPane.setBorder(BorderFactory.createMatteBorder(0, 0, 1, 0, SystemColor.windowBorder)); // splitPane.setBackground(ColorAndFontConstants.BACKGROUND_COLOR); splitPane.setOpaque(true); constraints.fill = GridBagConstraints.BOTH; constraints.gridx = 0; constraints.gridy = 2; constraints.gridwidth = 2; constraints.gridheight = 1; constraints.weightx = 1.0; constraints.weighty = 1000.0; constraints.gridwidth = 1; constraints.anchor = GridBagConstraints.LINE_START; contentPane.add(splitPane, constraints); calculateDividerPosition(); // Cannot get the RTL wallets drawing nicely so switch off adjustment. splitPane.setEnabled(ComponentOrientation.LEFT_TO_RIGHT.equals(ComponentOrientation.getOrientation(LocaliserUtils.getLocale()))); } public void updateHeaderOnSwingThread(final long estimatedBalance) { String titleText = LocaliserUtils.getString("bitherframe_title"); frame.setTitle(titleText); } public void calculateDividerPosition() { int dividerPosition = SingleWalletForm.calculateNormalWidth(walletsView) + BitherSetting.WALLET_WIDTH_DELTA; // if (walletsView.getScrollPane().getVerticalScrollBar().isVisible()) { // dividerPosition += BitherSetting.SCROLL_BAR_DELTA; // } if (walletsView != null && walletsView.getPreferredSize() != null && walletsView.getPreferredSize().width > dividerPosition) { dividerPosition = walletsView.getPreferredSize().width; } if (ComponentOrientation.RIGHT_TO_LEFT == ComponentOrientation.getOrientation(LocaliserUtils.getLocale())) { int width = frame.getWidth(); if (width == 0) { width = (int) frame.getPreferredSize().getWidth(); } dividerPosition = width - dividerPosition; // - WalletListPanel.LEFT_BORDER - WalletListPanel.RIGHT_BORDER - 2; } splitPane.setEnabled(true); splitPane.setDividerLocation(dividerPosition); splitPane.setEnabled(ComponentOrientation.LEFT_TO_RIGHT.equals(ComponentOrientation.getOrientation(LocaliserUtils.getLocale()))); } public void recreateAllViewsOnSwingThread(final boolean initUI, ViewEnum initialView) { // Close down current view. 
if (Bither.getCoreController().getCurrentView() != ViewEnum.UNKNOWN_VIEW) { frame.navigateAwayFromView(Bither.getCoreController().getCurrentView()); } if (initUI) { Container contentPane = frame.getContentPane(); viewFactory.initialise(); contentPane.removeAll(); initUI(null); try { frame.applyComponentOrientation(ComponentOrientation.getOrientation(LocaliserUtils.getLocale())); } catch (ClassCastException cce) { cce.printStackTrace(); } } //statusBar.refreshOnlineStatusText(); updateHeader(); // Tell the wallets list to display. if (walletsView != null) { walletsView.displayView(DisplayHint.COMPLETE_REDRAW); } } public MenuBar getTickerTablePanel() { return menuBarFrom; } /** * Actually update the UI. * (Called back from the FireDataChangedTimerTask). */ public void fireDataChangedOnSwingThread(DisplayHint displayHint) { updateHeader(); // Update the password related menu items. updateMenuItemsOnWalletChange(); // Tell the wallets list to display. if (walletsView != null) { walletsView.displayView(displayHint); } // Tell the current view to update itself. Viewable currentViewView = viewFactory.getView(Bither.getCoreController().getCurrentView()); if (currentViewView != null) { currentViewView.displayView(displayHint); } } private void updateMenuItemsOnWalletChange() { } public void updateHeader() { if (UserPreference.getInstance().getAppMode() == BitherjSettings.AppMode.COLD) { return; } long finalEstimatedBalance = 0; for (Address address : AddressManager.getInstance().getAllAddresses()) { finalEstimatedBalance = finalEstimatedBalance + address.getBalance(); } if (AddressManager.getInstance().getHdAccount() != null) { finalEstimatedBalance = finalEstimatedBalance + AddressManager.getInstance().getHdAccount().getBalance(); } final long total = finalEstimatedBalance; if (EventQueue.isDispatchThread()) { updateHeaderOnSwingThread(total); } else { SwingUtilities.invokeLater(new Runnable() { public void run() { updateHeaderOnSwingThread(total); } }); } } public void focusableUI() { if (UserPreference.getInstance().getAppMode() == BitherjSettings.AppMode.HOT) { updateHeader(); } calculateDividerPosition(); } //todo Entering for the first time vericalScrollbar errors public void clearScroll() { SwingUtilities.invokeLater(new Runnable() { @Override public void run() { scrollPane.getVerticalScrollBar().setValue(0); } }); } }
The average hourly wage is just $9.70 an hour, according to the Labor Department. For those in the industry who work full-time, this amounts to roughly $20,000 a year. Many health care aides only work part-time though—and they do not receive benefits. Under these conditions, it's no surprise then that about 40% of home aides rely on public assistance, such as Medicaid and food stamps, just to get by. It's the job of the future! And it's terrible! It's low-wage, low-tech, long hours; in most cases it's not covered by minimum wage or overtime protections; and it's projected to grow by 70 percent between 2010 and 2020. That's right: It's home health care work. Oh, and it's common for home health care aides, like other domestic workers, to be paid less than minimum wage. The industry that exists to skim profits off the hard, low-wage labor of this overwhelmingly female, majority minority, heavily immigrant workforce insists that things couldn't be any different, that if home health care workers got overtime, their hours would be cut, leaving them taking home even less than their current poverty income and the disabled and elderly people they care for vulnerable and without adequate care. Which is one of those signs that maybe this is an industry that needs to be radically rethought. Because "this is a needed and important job" and "this job is only worth poverty wages" should be mutually exclusive statements. Besides which, when 40 percent of people in a given job are on Medicaid or food stamps, taxpayers are covering part of the wages those people should be paid. It's just that they should get that money—which they have earned, and then some—in their paychecks and on their benefit statements, not as public assistance subject to the constant attacks of Republican politicians and requiring the work of applying and proving they're eligible. And when this is the fastest-growing job in our economy, and the Obama administration's attempts to make it subject to minimum wage and overtime protections are met with fierce opposition, that should be a concern to everyone who has to work for a living. Because don't think it won't drag other jobs down with it.
Anionic Lipid Interaction Induces Prion Protein Conformational Change
The conversion of the prion protein (PrP) to the pathogenic PrPSc conformation plays a central role in prion disease. However, the precise mechanism underlying this process remains unclear. Here, we report the conformational conversion of PrP upon interaction with anionic lipids. After the discontinuous iodixanol density gradient centrifugation, we found strong binding between PrP and negatively charged phospholipids, involving both electrostatic and hydrophobic interactions. Under physiologically relevant conditions, interactions with lipid were sufficient to convert full-length, α-helix-rich recombinant mouse PrP to a conformation similar to PrPSc, with increased β-sheet content and a PrPSc-like proteinase K (PK)-resistant pattern. Conversion is greatly influenced by lipid headgroup structures and lipid vesicle compositions. When lipid vesicles are disrupted by detergent, aggregation is necessary to maintain the PK-resistant conformation. Our results imply that the strong lipid-PrP interaction is sufficient to overcome the energy barrier between the two conformational states and support the notion that lipid membrane may play a role in PrP conformational change.
It was reported in Isaac Asimov’s Book of Facts that following the Civil War, Jefferson Davis was captured and indicted for treason. Though many Northerners called for Davis’ execution, one group petitioned for his release. The group? The former slaves from Davis’ cotton plantation. Following a two-year imprisonment, Davis was freed, having avoided a trial. In the above anecdote from the Civil War era, we see both grace and mercy at work on Davis’ behalf, being extended by his former slaves. A basic explanation of the concepts of mercy and grace is as follows: Grace is receiving something beneficial that we don’t deserve and mercy is not receiving some negative consequence for what we do deserve. Whether these former slaves were followers of Christ or not, they demonstrated the same kind of mercy and grace that God has displayed toward us through Christ Jesus – and the same kind of grace that we are to extend to others. Through our own sin, we deserve the wrath of God. Through the mercy and grace demonstrated on our behalf through Jesus’ death and resurrection, we have been forgiven and freed from the penalty of sin – and beyond that – we have received the great blessings of being adopted into the family of God. In return, God asks us to extend mercy and grace to others (see Matthew 6:14-15) As Christians, we talk a lot about the concepts of mercy and grace. But the truth of the matter is that we show whether or not we really understand mercy and grace through our lives. The bottom line is this: Do we extend mercy and grace to others? If not, according to Jesus, we really don’t get it. Has someone mistreated you? Have you suffered injustice from someone or some institution in our society? If so, I urge you – as difficult as it may be – extend God’s mercy and grace: forgive even as the Lord has forgiven you. 1. What do you find most difficult about forgiving others? What difference does God’s forgiveness in your own life make in your ability to forgive others? 2. Who can you extend mercy and grace to today?
/**
 * The package contains JidePopup class for JIDE Common Layer.
 */
package com.jidesoft.popup;
[prMac.com] Frankfurt, Germany - Eltima Software really got into the holiday spirit and is now offering a hot deal with the Eltima X-Mas Slot Machine. This slot machine, though, doesn't require you to spend any money at all, so basically everyone's a winner. The promo lets players get huge discounts on any product they've been eyeing. Everyone is welcome to try for as many apps as they want! Everyone can test their luck and gamble with the Eltima X-Mas Slot Machine. Users can spin the slot machine and win various discounts for their favorite Mac and Windows apps without investing anything but a couple of minutes. Not only is it fun, but you may get an unbelievable discount. The Christmas Promo begins this Friday, December 23 and ends January 6. * The Eltima Xmas promo is open to everyone. Every participant is given a chance to get a desirable app with a discount that will vary from 11 to 73%!!! Spin the slot machine by pushing the "Start" button and get a random app discount. You will have 3 attempts (coins) for each application. * To increase your chances of winning the best discount, click the "Need more coins?" link and share the following message: "Can't stop! Gambling for the best discount for the coolest apps out there #eltimaxmas2016" on your Twitter or Facebook page. For each share on each of these platforms you'll be given 3 additional coins. * If you are happy with your discount, you can buy the app at a reduced price right away, or you can play more. After the final attempt you won't be able to go back to a discount you won before. Review the full promo details to find out how you can enter. For more information on the X-Mas Slot Machine promo feel free to visit Eltima Software online. Eltima Software is a global software development company, specializing primarily in serial communication, mobile and flash software for Mac OS and Windows. Eltima Software delivers top-notch solutions with a friendly team of 40 professionals. Copyright (C) 2016 Eltima Software. All Rights Reserved. Apple, the Apple logo and Macintosh are registered trademarks of Apple Inc. in the U.S. and/or other countries.
Service-Dominant Logic in tourism: the way to loyalty
Many tourist destinations have focused heavily on attracting a growing number of new visitors each year. However, recent changes in the tourism market have led to the need for new strategies oriented towards retaining existing visitors. Service-Dominant Logic (S-D Logic) is a new orientation that allows greater competitive advantage by recognising the active role of tourists in the creation of their own experiences. The main purpose of this paper is therefore to develop a theoretical model based on S-D Logic in order to increase levels of tourist loyalty. An in-depth analysis of the foundational premises has allowed us to identify the key aspects of this process. The proposed model was empirically tested, using a structural equation model with the partial least-squares technique, on a sample of 763 tourists visiting Spanish tourism destinations. Research findings show that tourist loyalty is achieved through the development of relationship quality in the co-creation of tourist experiences. These results will help destination managers to achieve a greater competitive advantage through the development of customer-centric strategies. The relevance of this article also lies in its being one of the first attempts to develop practical measures for S-D Logic by applying them to tourism.
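The abstract names PLS-based structural equation modelling as the estimation technique but gives no further detail, so the sketch below is only a loose stand-in: it uses scikit-learn's PLS regression on made-up indicator data to illustrate the flavour of relating a "relationship quality" block of survey items to a "loyalty" block. This is a deliberate simplification of PLS-SEM, not the authors' actual model, and all variable names and data are hypothetical.

# Loose, illustrative stand-in only: the paper uses PLS-based structural equation
# modelling (PLS-SEM); here scikit-learn's PLS regression merely illustrates how a
# latent-variable style relation between "relationship quality" indicators and
# "loyalty" indicators could be estimated. Data and variable names are made up.
import numpy as np
from sklearn.cross_decomposition import PLSRegression

rng = np.random.default_rng(0)
n = 763  # sample size reported in the abstract

# Hypothetical survey indicators (e.g. Likert-style items).
relationship_quality = rng.normal(5, 1, size=(n, 3))   # trust, satisfaction, commitment
noise = rng.normal(0, 0.5, size=(n, 2))
loyalty = relationship_quality[:, :2] * 0.6 + noise    # revisit intention, recommendation

pls = PLSRegression(n_components=1)
pls.fit(relationship_quality, loyalty)
print("explained variance (R^2):", round(pls.score(relationship_quality, loyalty), 3))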
Published: Thursday, July 24, 2014 @ 10:31 AM Updated: Thursday, July 24, 2014 @ 10:31 AM Man's best friend — whether named Fido, Rocket, Rufus, Porkchop, Spike, or whatever — might want to be your only friend. (Via Getty Images) OK, it's not that serious, but a new study shows canine companions aren't too keen on their owners giving attention to others. U.C. San Diego psychologist and professor Christine Harris found that dogs get jealous too, especially when humans give attention to other dogs. Even if they aren't real! Harris and one of her former students recorded the behavior of 36 different dogs in their own homes. The dogs' owners were given a picture book, a bucket for Halloween candy and a toy dog — that barked. They were then instructed to read the book aloud and give attention to the bucket and the toy dog as if they were actual pets. According to the study, the dogs paid very little attention to the book and the bucket, but more than 75 percent of the dogs nudged and snapped at the toy dog when owners interacted with it. Harris told U.C. San Diego's news center, "Our study suggests not only that dogs do engage in what appear to be jealous behaviors but also that they were seeking to break up the connection between the owner and a seeming rival. ... It looks as though they were motivated to protect an important social relationship." Now, the study of dog behaviors similar to those of humans isn't particularly new. Time points to studies by Marc Bekoff, author and professor emeritus of ecology and evolutionary biology at the University of Colorado, Boulder, that show dogs "exhibit altruism, empathy, and a sense of justice." But Harris says the results from this study are particularly fascinating because jealousy was previously thought to require complex cognition — the kind that separates humans from other animals. While one Forbes writer calls the study's methodology "pretty hilarious" and the results "common sense," she adds the experiments "suggest that there may be some biological underpinnings for jealousy among social animals." But not everyone is convinced the findings show dogs actually feel jealousy. Alexandra Horowitz, a cognitive scientist, told The New York Times, "What can be shown is that dogs seem to want an owner's attention when there is attention being given out; this study confirms that."