id | text | subdomain | metadata
---|---|---|---
9387385f9885d8ea479d0314cd1fb5ce632d315e | Stackoverflow Stackexchange
Q: How to check associations feathers-sequelize in service hook I'm using feathersjs.
I have read the documentation.
How do I run this check in a service hook (feathers-hook), or is there another way to do the check?
const { disallow } = require('feathers-hooks-common');
function include() {
return function (hook) {
const productPrice = hook.app.service('product-prices').Model;
const currencies = hook.app.service('currencies').Model;
const edizm = hook.app.service('edizm').Model;
const pricesShema = { model: productPrice,
include: [
{
model: currencies,
attributes: ['title', 'socr']
},
]
};
const edizmShema = { model: edizm,
attributes: ['title', 'detail']
};
let association={include: [edizmShema, pricesShema]};
hook.params.sequelize = Object.assign(association,{ raw: false });
return Promise.resolve(hook);
}
}
module.exports = {
......
};
A: As explained here:
Feathers hooks work with POJOs, not with DB ORM instances, and your raw: false returns an ORM instance.
If you really can't use raw: true then convert the ORM to a POJO:
const dehydrate = require('feathers-sequelize/hooks/dehydrate');
hooks.after.find = [dehydrate(), discard('password')]
You can convert back to an ORM (if you really need to).
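For context, a minimal sketch of how the pieces might be wired together in the service's hooks module (the hook placement and the discard('password') example are illustrative, not from the question):
const dehydrate = require('feathers-sequelize/hooks/dehydrate');
const { discard } = require('feathers-hooks-common');
module.exports = {
  before: {
    find: [include()] // build params.sequelize before the query runs
  },
  after: {
    find: [dehydrate(), discard('password')] // convert ORM instances to POJOs before other after-hooks
  }
};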
| stackoverflow | {
"language": "en",
"length": 162,
"provenance": "stackexchange_0000F.jsonl.gz:881346",
"question_score": "4",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44593261"
} |
6ba92a22b015042df7a8eafcdfc373b5ab0984be | Stackoverflow Stackexchange
Q: Python Pandas Dataframe merge and pick only few columns I have a basic question on dataframe merge. After I merge two dataframes, is there a way to pick only a few columns in the result?
Taking an example from documentation
https://pandas.pydata.org/pandas-docs/stable/merging.html#
left = pd.DataFrame({'key1': ['K0', 'K0', 'K1', 'K2'],
'key2': ['K0', 'K1', 'K0', 'K1'],
'A': ['A0', 'A1', 'A2', 'A3'],
'B': ['B0', 'B1', 'B2', 'B3']})
right = pd.DataFrame({'key1': ['K0', 'K1', 'K1', 'K2'],
'key2': ['K0', 'K0', 'K0', 'K0'],
'C': ['C0', 'C1', 'C2', 'C3'],
'D': ['D0', 'D1', 'D2', 'D3']})
result = pd.merge(left, right, on=['key1', 'key2'])
Result comes as:
A B key1 key2 C D
0 A0 B0 K0 K0 C0 D0
1 A2 B2 K1 K0 C1 D1
2 A2 B2 K1 K0 C2 D2
None
Is there a way I can choose only column 'C' from the 'right' dataframe? For example, I would like my result to be like:
A B key1 key2 C
0 A0 B0 K0 K0 C0
1 A2 B2 K1 K0 C1
2 A2 B2 K1 K0 C2
None
A: result = pd.merge(left, right[['key1','key2','C']], on=['key1', 'key2'])
OR
right.merge(left, on=['key1','key2'])[['A','B','C','key1','key2']]
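A hedged alternative (drop(columns=...) requires a reasonably recent pandas): merge everything first, then drop the unwanted column:
result = pd.merge(left, right, on=['key1', 'key2']).drop(columns=['D'])
Both forms leave 'D' out of the final result.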
| stackoverflow | {
"language": "en",
"length": 181,
"provenance": "stackexchange_0000F.jsonl.gz:881355",
"question_score": "11",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44593284"
} |
5651a903619767033a0e21818c34278c8e9a1f40 | Stackoverflow Stackexchange
Q: Example of bad React dangerouslySetInnerHTML? Is there an example of misuse of dangerouslySetInnerHTML in ReactJS?
Every time I look this up, it's just someone waving their hand and saying "cross site scripting."
I've seen dangerouslySetInnerHTML used to load CSS files with a css loading npm module:
import {stylesheet, classNames} from '../static/css/styles.css'
<Head><style dangerouslySetInnerHTML={{__html: stylesheet}} /></Head>
And I'm contemplating using dangerouslySetInnerHTML for some script tags for social media share buttons that have been causing my team trouble.
Code examples and explanations of how one would go about hacking a page with XSS would be highly appreciated!
A: <span dangerouslySetInnerHTML={{__html: someTextSubmittedByAUser}}></span>
Imagine if you had a comment section on your page and someone submitted a comment with:
<script>while(1){}</script>
and you just passed that as the inner HTML to some node. Now anyone who hits a page which loads that comment will have their tab lock up.
There are far more nefarious things people can do: copying your cookies and sending them to a remote server, for example.
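A hedged mitigation sketch (the DOMPurify library is our illustration, not part of the original answer): sanitize user-supplied HTML before it ever reaches dangerouslySetInnerHTML:
// Illustrative only: sanitize() strips <script> tags and on* event-handler attributes
import DOMPurify from 'dompurify';
function Comment({ userHtml }) {
  const clean = DOMPurify.sanitize(userHtml);
  return <span dangerouslySetInnerHTML={{ __html: clean }} />;
}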
| stackoverflow | {
"language": "en",
"length": 164,
"provenance": "stackexchange_0000F.jsonl.gz:881416",
"question_score": "5",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44593485"
} |
9b944854eb35a3eda0b9b3124cecad8141d6d1ab | Stackoverflow Stackexchange
Q: Visual Studio 2017 web project: can't use C#7 I've created a brand new web project in Visual Studio 2017, but I can't use any C#7 features in this web project. No errors are reported in Visual Studio, but the build fails and this is verified by the output window which shows the CSC task failing.
Here is a project which reproduces the issue.
https://drive.google.com/open?id=0B-mqMIMqm_XHQkxONjRLYzA3SUk
It's an out of the box VS web project with the exception of the addition of the following code in HomeController.cs:
var i = 1_000_000;
This uses _ (the digit separator), which is a C# 7 feature. Remove this line and the build succeeds.
Non-web projects compile fine with C#7 features, and I've tracked down the difference to a different compiler being used for web projects. Non-web projects use the following compiler:
C:\Program Files (x86)\Microsoft Visual Studio\2017\Enterprise\MSBuild\15.0\Bin\Roslyn\csc.exe
Web projects use this compiler:
[Path to solution root]\packages\Microsoft.Net.Compilers.1.3.2\build..\tools\csc.exe
How can I resolve this?
A: I tried it on my machine and I had the same problem.
The solution is to install the newest Microsoft.Net.Compilers package.
For example, via Tools -> NuGet Package Manager -> Package Manager Console, and then type:
Install-Package Microsoft.Net.Compilers
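For reference, a hedged look at where the package takes effect: ASP.NET web projects route compilation through the system.codedom section of web.config, so after updating the package you can check that section (attribute values below are illustrative and vary by package version):
<!-- Illustrative web.config fragment; Version/PublicKeyToken details omitted -->
<system.codedom>
  <compilers>
    <compiler language="c#;cs;csharp" extension=".cs"
              type="Microsoft.CodeDom.Providers.DotNetCompilerPlatform.CSharpCodeProvider, Microsoft.CodeDom.Providers.DotNetCompilerPlatform"
              compilerOptions="/langversion:default /nowarn:1659;1699;1701" />
  </compilers>
</system.codedom>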
| stackoverflow | {
"language": "en",
"length": 189,
"provenance": "stackexchange_0000F.jsonl.gz:881426",
"question_score": "4",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44593512"
} |
c8eee0af3e42a875bbe1a07811faa133e105c3aa | Stackoverflow Stackexchange
Q: Django: How to migrate dynamic models made at runtime In my Django app, a specific user input will result in the creation of a new model. Here is the code I am using to create the model and register it.
model = type(model_name, (ExistingModel,), attrs)
admin.site.register(model, admin_options)
from django.core.urlresolvers import clear_url_caches
from django.utils.module_loading import import_module
reload(import_module(settings.ROOT_URLCONF))
clear_url_caches()
This successfully creates the new model, however, when I click on the model to see the table on the admin page, I get the following error:
relation "ExistingModel_NewModel" does not exist
This usually means that the new model changes have not been migrated. How can I migrate dynamically created models in Django to see their corresponding data tables?
A: A subprocess can run the migrate management command, so try this; it should work:
import subprocess
# Run `manage.py migrate` as a child process and capture its output
proc = subprocess.Popen(['python', 'manage.py', 'migrate'],
                        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
Also read https://code.djangoproject.com/wiki/DynamicModels, which may help with creating dynamic models.
A: A simple solution worked for me. I ended up running the makemigrations and migrate management commands like so, after creating the dynamic model:
from django.core.management import call_command
call_command('makemigrations')
call_command('migrate')
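If this runs inside request-handling code, suppressing prompts and console output may help; a hedged variant (these kwargs map to --noinput and verbosity in recent Django versions):
from django.core.management import call_command
call_command('makemigrations', interactive=False, verbosity=0)
call_command('migrate', interactive=False, verbosity=0)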
| stackoverflow | {
"language": "en",
"length": 189,
"provenance": "stackexchange_0000F.jsonl.gz:881440",
"question_score": "13",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44593541"
} |
3557d5648b0c9926f564d0dc80a68bea24496d3f | Stackoverflow Stackexchange
Q: MobaXterm - reset terminal screen I need to reset my MobaXterm screen after some time working; however, the 'clear' and 'reset' commands won't work as they would on a Linux server.
Anyone have a tip?
A: When 'Paste using right-click' is checked, you still can use Ctrl/Shift + Right Click to pop up the contextual menu.
A: To reset your MobaXterm screen, use one of the following:
* Right click on the selected MobaXterm screen > Clear Scrollback (as described above)
* Or, if the prompt is available, launch the command 'cls'
* Or, in all cases (prompt available or not), press the CTRL+L keyboard shortcut
A: MobaXterm seems to like playing around with the PuTTY configuration, so I found that right clicking on the window > Change terminal settings, then:
* Features > enable everything
* Window > disable "Push erased text into scrollback"
the usual clear control commands
function clear() {
printf '\033[2J' # clear
printf '\033[H' # home
}
will now clear the screen and clear the scrollback
A: Right click MobaXterm screen > Click Clear Scrollback
Note: Your MobaXterm may be set to paste on Right-Click; in this case use Ctrl+Right-Click.
A:
1. Right click on the terminal window in MobaXterm.
2. In the popup menu, click Change terminal settings...
3. In the tree, click Terminal > Features
4. Uncheck Disable remote-controlled clearing of scrollback
5. In the tree, click Window
6. Uncheck Reset scrollback on display activity
7. Click the Apply button
You can now type clear and have the window clear and remove the scrollback.
| stackoverflow | {
"language": "en",
"length": 248,
"provenance": "stackexchange_0000F.jsonl.gz:881451",
"question_score": "13",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44593582"
} |
36b4000118efba7c7edb66c4956eb070d88ccdbb | Stackoverflow Stackexchange
Q: What is the difference between defaults and context options in file.managed salt state? State file.managed has defaults and context options for template rendering. Both provide context for template vars.
What is the difference between them?
A: defaults are the fallback default values that will be passed to the template in case context doesn't have a proper value. If context has a value, it will override the default. E.g.:
/etc/myconfig.cfg:
- file.managed:
- source: salt://myconfig.tmpl
- template: jinja
- defaults:
foo: bar
- context:
foo: baz
In this case value of foo will always be baz. Generally context is used when you need to have conditional values. E.g.:
/etc/myconfig.cfg:
- file.managed:
- source: salt://myconfig.tmpl
- template: jinja
- defaults:
foo: bar
{% if salt['grains.get']("os") == 'Debian' %}
- context:
foo: baz
{% endif %}
In this case every non-Debian system will end up having the value bar, while Debian will have baz in the template.
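For completeness, a minimal sketch of what the template side might look like (file contents assumed for illustration):
{# Illustrative myconfig.tmpl: `foo` renders as the context value when one is set, otherwise the default #}
foo_setting = {{ foo }}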
| stackoverflow | {
"language": "en",
"length": 153,
"provenance": "stackexchange_0000F.jsonl.gz:881466",
"question_score": "6",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44593631"
} |
18a650db86150278e1b116c114044f43264c8e18 | Stackoverflow Stackexchange
Q: Angular 2/4: How to restrict access to Login Route if user is already logged in? I have the following route definition.
export const Routes = RouterModule.forChild([
{
path: 'login',
component: LoginComponent
},
{
path: 'protected',
canActivate: [AuthGuardService],
component: ProtectedComponent
},
{
path: 'home',
component: HomeComponent,
canActivate: [AuthGuardService],
},
]);
I have successfully implemented AuthGuardService, which restricts access to a protected route if the user is not logged in.
What I am trying to achieve: if the user is already logged in and accesses the Login route, I want to redirect to another route, like the homepage.
A: Were I you, I would probably simply implement another guard service that works in exactly the opposite way to your AuthGuardService: only allow the user through if, for example, there is no session token in local storage. Then use it to secure the login component, as sketched after the routes below.
export const Routes = RouterModule.forChild([
{
path: 'login',
canActivate: [AnonymousGuardService],
component: LoginComponent
},
{
path: 'protected',
canActivate: [AuthGuardService],
component: ProtectedComponent
},
{
path: 'home',
component: HomeComponent,
canActivate: [AuthGuardService],
},
]);
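A minimal sketch of what such a guard might look like (the class name AnonymousGuardService and the AuthService dependency are assumptions for illustration, not from the answer):
// Sketch: the mirror image of AuthGuardService
import { Injectable } from '@angular/core';
import { CanActivate, Router } from '@angular/router';
@Injectable()
export class AnonymousGuardService implements CanActivate {
  // AuthService is a hypothetical service exposing isLoggedIn()
  constructor(private auth: AuthService, private router: Router) {}
  canActivate(): boolean {
    if (this.auth.isLoggedIn()) {
      this.router.navigate(['/home']); // already authenticated: bounce to home
      return false;
    }
    return true; // anonymous visitors may reach the login page
  }
}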
| stackoverflow | {
"language": "en",
"length": 169,
"provenance": "stackexchange_0000F.jsonl.gz:881481",
"question_score": "8",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44593673"
} |
84ced22c0d7d8e727cedeeaf68477792d4e2c5ce | Stackoverflow Stackexchange
Q: How to copy over an Excel sheet to another workbook in Python I have a string with a sourcefile path and another string with a destfile path, both pointing to Excel workbooks.
I want to take the first sheet of the sourcefile and copy it as a new tab to the destfile (doesn't matter where in the destfile), then save it.
Couldn't find an easy way in xlrd or xlwt or xlutils to do this. Am I missing something?
A: This might help if you're not opposed to using Pandas
import pandas as pd
#change xxx with the sheet name that includes the data
data = pd.read_excel(sourcefile, sheet_name="xxx")
#save it to the 'new_tab' in destfile
data.to_excel(destfile, sheet_name='new_tab')
Hope it helps
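One caveat worth hedging: to_excel() on a plain path rewrites the whole destination file. To add a tab to an existing workbook instead, open it with ExcelWriter in append mode (requires openpyxl and a reasonably recent pandas):
import pandas as pd
data = pd.read_excel(sourcefile, sheet_name=0)  # first sheet of the source
with pd.ExcelWriter(destfile, engine='openpyxl', mode='a') as writer:
    data.to_excel(writer, sheet_name='new_tab', index=False)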
A: Solution 1
A Python-only solution using the openpyxl package. Only data values will be copied.
import openpyxl as xl
path1 = 'C:\\Users\\Xukrao\\Desktop\\workbook1.xlsx'
path2 = 'C:\\Users\\Xukrao\\Desktop\\workbook2.xlsx'
wb1 = xl.load_workbook(filename=path1)
ws1 = wb1.worksheets[0]
wb2 = xl.load_workbook(filename=path2)
ws2 = wb2.create_sheet(ws1.title)
for row in ws1:
for cell in row:
ws2[cell.coordinate].value = cell.value
wb2.save(path2)
Solution 2
A solution that uses the pywin32 package to delegate the copying operation to an Excel application. Data values, formatting and everything else in the sheet is copied. Note: this solution will work only on a Windows machine that has MS Excel installed.
from win32com.client import Dispatch
path1 = 'C:\\Users\\Xukrao\\Desktop\\workbook1.xlsx'
path2 = 'C:\\Users\\Xukrao\\Desktop\\workbook2.xlsx'
xl = Dispatch("Excel.Application")
xl.Visible = True # You can remove this line if you don't want the Excel application to be visible
wb1 = xl.Workbooks.Open(Filename=path1)
wb2 = xl.Workbooks.Open(Filename=path2)
ws1 = wb1.Worksheets(1)
ws1.Copy(Before=wb2.Worksheets(1))
wb2.Close(SaveChanges=True)
xl.Quit()
Solution 3
A solution that uses the xlwings package to delegate the copying operation to an Excel application. Xlwings is in essence a smart wrapper around (most, though not all) pywin32/appscript excel API functions. Data values, formatting and everything else in the sheet is copied. Note: this solution will work only on a Windows or Mac machine that has MS Excel installed.
import xlwings as xw
path1 = 'C:\\Users\\Xukrao\\Desktop\\workbook1.xlsx'
path2 = 'C:\\Users\\Xukrao\\Desktop\\workbook2.xlsx'
wb1 = xw.Book(path1)
wb2 = xw.Book(path2)
ws1 = wb1.sheets(1)
ws1.api.Copy(Before=wb2.sheets(1).api)
wb2.save()
wb2.app.quit()
A: You could also try xlwings.
import xlwings as xw
wb = xw.Book(r'C:\path\to\file.xlsx')
sht = wb.sheets['Sheet1']
new_wb = xw.Book(r'C:\new_path\to\file.xlsx')
new_wb.sheets['name'] = sht
A: Long battle and finally got the answer.
From xlwings source code: https://github.com/xlwings/xlwings/pull/1216/files
source_sheet.range.copy(destination_sheet.range)
In other words:
wb.sheets['Sheet1'].range('A1:A6').copy(wb.sheets['Sheet2'].range('A1:A6'))
* It also works from one workbook to another workbook.
* A range of cells must be provided. Just ensure the range is big enough to cover the full worksheet.
* The copy function copies/pastes everything within a range of cells (values, cell format, hyperlinks, cell type, ...).
| stackoverflow | {
"language": "en",
"length": 429,
"provenance": "stackexchange_0000F.jsonl.gz:881491",
"question_score": "20",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44593705"
} |
62ed9a9542c90cd36465f7f1562389eda5c8b22c | Stackoverflow Stackexchange
Q: Intersect two arrays with tolerance I have 2 arrays of data, from which I want to extract common elements. Matlab's intersect does this job. But it returns elements that are exactly the same. What if I want to extract elements with some tolerance? Say, for example:
A = [1 2 3.0002 4.125 5.89];
B = [2 3.012 4.126]
I want to extract elements that are the same up to 2 decimal places. So I want the answer to be [2 4.12]. Is there any built-in function to do this? Or how do I achieve this?
A: You can do it manually as follows. This picks the output values from A; if you want to pick from B just swap A and B in the code:
A = [1 2 3.0002 4.125 5.89];
B = [2 3.012 4.126];
tol = .01;
result = A(any(abs(bsxfun(@minus, A(:).', B(:))) < tol, 1));
A: I would just round the input:
C = intersect(round(A,2),round(B,2))
floor and ceil are also options, depending on what you really want to achieve.
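Newer MATLAB releases (R2015a and later) also ship a built-in for exactly this; a hedged sketch:
% ismembertol checks membership within a tolerance; 'DataScale',1 makes tol absolute
tol = 0.01;
result = A(ismembertol(A, B, tol, 'DataScale', 1));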
| stackoverflow | {
"language": "en",
"length": 173,
"provenance": "stackexchange_0000F.jsonl.gz:881494",
"question_score": "3",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44593710"
} |
f382ddb8e7f86e58e630719a8af92c85faaed4b7 | Stackoverflow Stackexchange
Q: Constraints altering View visibility Why does void ConstraintSet.applyToInternal(ConstraintLayout constraintLayout); change the View visibility?
It is not preserving my View visibility. Is there a way to preserve it?
A: I just noticed that the View visibility is treated as part of the constraints. Set it with
ConstraintSet set = new ConstraintSet();
set.setVisibility(checkIcon.getId(), INVISIBLE);
set.applyTo(container);
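A slightly fuller sketch (container and checkIcon are the view names from the answer): cloning the layout's current constraints first means only the visibility changes when you re-apply:
ConstraintSet set = new ConstraintSet();
set.clone(container);                                   // snapshot existing constraints
set.setVisibility(checkIcon.getId(), ConstraintSet.INVISIBLE);
set.applyTo(container);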
| stackoverflow | {
"language": "en",
"length": 54,
"provenance": "stackexchange_0000F.jsonl.gz:881507",
"question_score": "4",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44593738"
} |
b9a14ff75bc2d0b95b677868e0866a78c69b05f4 | Stackoverflow Stackexchange
Q: What is meant by atomicity of write operations in MongoDB? In MongoDB documentation it states that write operations are atomic at the document level. What does this mean?
https://docs.mongodb.com/manual/core/data-modeling-introduction/
A: It means that you can't have atomic operations that span several documents.
Money transfer, for example. You deduct funds from one wallet, add to another. You can't do that atomically in mongodb.
In case you didn't know what "atomically" means, it is "operation either succeeds as a whole or fails as a whole. It can't have only some parts of it succeed and others fail".
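An illustrative mongo-shell sketch (collection and field names assumed): each single-document update below is atomic on its own, but the pair as a whole is not:
db.wallets.updateOne({ _id: "A" }, { $inc: { balance: -10 } })  // atomic by itself
db.wallets.updateOne({ _id: "B" }, { $inc: { balance: 10 } })   // also atomic -- but a crash between the two leaves the transfer half-done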
| stackoverflow | {
"language": "en",
"length": 96,
"provenance": "stackexchange_0000F.jsonl.gz:881517",
"question_score": "5",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44593761"
} |
365c58208804c44eb6f06609f82c40e5d70a8f70 | Stackoverflow Stackexchange
Q: How do I copy a remote file onto my local machine using Ansible? I'm using the command module inside my playbook, and it currently looks like this.
- hosts: all
tasks:
- name: Update tar file
command: sudo scp -r username@hostname:/path/from/destination /path/to/destination
I've omitted the tasks that take place before this one for readability, but when I run this playbook it stops at this task. It simply doesn't move forward. I'm sure this is because of sudo, so it may want the password for that. I'm not sure how to fix that, however.
A: You want to use the fetch module.
- hosts: host.example.com
tasks:
# Copy remote file (host.example.com:/tmp/somefile) into
# /tmp/fetched/host.example.com/tmp/somefile on local machine
- fetch:
src: /tmp/somefile
dest: /tmp/fetched
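A hedged variation (same assumed paths): adding flat: yes makes the file land exactly at dest instead of under a host-named directory tree:
- fetch:
    src: /tmp/somefile
    dest: /tmp/fetched/somefile
    flat: yes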
| stackoverflow | {
"language": "en",
"length": 129,
"provenance": "stackexchange_0000F.jsonl.gz:881572",
"question_score": "10",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44593915"
} |
7eb0ccf35a51e51d9e8844c292caf83828fbabec | Stackoverflow Stackexchange
Q: TypeError: is not a constructor, but different class works fine I'm getting an error where I can instantiate one class but not the other, but as far as I can see, there's no difference between the two. I'm new to nodejs and not sure what I'm doing wrong. All files shown below are siblings in a directory.
/********** exampleClass.js ********/
const classOne = require("./classOne");
const classTwo = require("./classTwo")
module.exports = class exampleClass {
method() {
// works fine
const classOneInstance = new classOne();
const classTwoInstance = new classTwo(); // gives error 'TypeError: classTwo is not a constructor' UNLESS I require classTwo in the method. Doesn't matter if this goes before or after classOneInstance
}
}
/********** classOne.js *****************/
module.exports = class classOne {
}
/************classTwo.js ****************/
const classOne = require("./classOne");
const exampleClass = require("./exampleClass");
module.exports = class classTwo {
method() {
const exampleClassInstance = new exampleClass();
exampleClassInstance.method();
}
}
A: Since I was calling exampleClass from classTwo, and then exampleClass called classTwo, it was a circular reference which gave that error. Thanks to @AndrewLi
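One common workaround, shown as a sketch: move the require into the method so exampleClass is resolved lazily at call time, after all modules have finished loading:
/************ classTwo.js (lazy-require sketch) ****************/
module.exports = class classTwo {
  method() {
    // Deferred require breaks the load-time cycle
    const exampleClass = require("./exampleClass");
    const exampleClassInstance = new exampleClass();
    exampleClassInstance.method();
  }
}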
"language": "en",
"length": 175,
"provenance": "stackexchange_0000F.jsonl.gz:881575",
"question_score": "4",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44593933"
} |
457f89a0a8551d69e5d07f3997be20e43c9ddcf0 | Stackoverflow Stackexchange
Q: Why does abstract class have to implement all methods from interface? interface BaseInter{
name : string;
test();
}
abstract class Abs implements BaseInter{
}
In TypeScript, the compiler complains that the class incorrectly implements the interface:
name is missing in type Abs.
Here Abs is an abstract class, so why do we need to implement the interface there?
A: You need to re-write all of the members/methods in the interface and add the abstract keyword to them, so in your case:
interface baseInter {
name: string;
test();
}
abstract class abs implements baseInter {
abstract name: string;
abstract test();
}
(code in playground)
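A minimal usage sketch of the abstract form above (the subclass name is our illustration): a concrete subclass still has to supply the abstract members before it can be instantiated:
class concrete extends abs {
  name = "example";
  test() { return true; }
}
const c = new concrete(); // OK: all abstract members are implemented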
There was a suggestion for it: Missing property declaration in abstract class implementing interfaces but it was declined for this reason:
Although, the convenience of not writing the declaration would be
nice, the possible confusion/complexity arising from this change would
not warrant it. By examining the declaration, it is not clear which
members appear on the type, is it all properties, methods, or
properties with call signatures; would they be considered abstract?
optional?
A: You can get what you want with a slight trick that defeats the compile-time errors:
interface baseInter {
name : string;
test();
}
interface abs extends baseInter {}
abstract class abs implements baseInter{
}
This trick takes advantage of Typescript's Declaration Merging, and was originally presented here and posted on a related SO question here.
A: Interfaces are used for defining a contract regarding the shape of an object.
Use a constructor to pass properties into the class:
interface BaseInter {
name : string;
test(): boolean;
}
abstract class Abs implements BaseInter {
constructor(public name: string) {}
test(): boolean {
throw new Error('Not implemented')
}
}
| stackoverflow | {
"language": "en",
"length": 284,
"provenance": "stackexchange_0000F.jsonl.gz:881585",
"question_score": "34",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44593961"
} |
c43b466982046b5eb8fb7f5ab764db306f26b0e5 | Stackoverflow Stackexchange
Q: How to get instance attribute name-values in a class with __slots__ in Python When defining two classes, one with the __dict__ implementation (A) and the other with __slots__ implementation (B).
Is there a clever way of getting the instance attribute names and values of the __slots__ class, as I would with using the vars() function on the __dict__ class?
class A(object):
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
class B(object):
__slots__ = ('x', 'y', 'z')
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
vars(A(1, 2, 3)) # {'y': 2, 'x': 1, 'z': 3}
vars(B(1, 2, 3)) # TypeError: vars() argument must have __dict__ attribute
using .__slots__ or dir() with inspection would just return the attribute names, without the value
B(1, 2, 3).__slots__ # ('x', 'y', 'z')
A: Here's a function I've used before:
def vars2(obj):
try:
return vars(obj)
except TypeError:
return {k: getattr(obj, k) for k in obj.__slots__}
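One caveat, sketched below: __slots__ only lists the names declared on that exact class, so for slotted classes that inherit from one another you may want to walk the MRO (and skip slots that were never assigned):
from itertools import chain
def vars3(obj):
    try:
        return vars(obj)
    except TypeError:
        slots = chain.from_iterable(getattr(c, '__slots__', ()) for c in type(obj).__mro__)
        return {k: getattr(obj, k) for k in slots if hasattr(obj, k)}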
A: In [2]: x = B(1,2,3)
In [3]: {a: x.__getattribute__(a) for a in dir(x)}
Out[3]:
{'__class__': __main__.B,
'__delattr__': <method-wrapper '__delattr__' of B object at 0x7f3bb2b48e18>,
'__doc__': None,
'__format__': <function __format__>,
'__getattribute__': <method-wrapper '__getattribute__' of B object at 0x7f3bb2b48e18>,
'__hash__': <method-wrapper '__hash__' of B object at 0x7f3bb2b48e18>,
'__init__': <bound method B.__init__ of <__main__.B object at 0x7f3bb2b48e18>>,
'__module__': '__main__',
'__new__': <function __new__>,
'__reduce__': <function __reduce__>,
'__reduce_ex__': <function __reduce_ex__>,
'__repr__': <method-wrapper '__repr__' of B object at 0x7f3bb2b48e18>,
'__setattr__': <method-wrapper '__setattr__' of B object at 0x7f3bb2b48e18>,
'__sizeof__': <function __sizeof__>,
'__slots__': ('x', 'y', 'z'),
'__str__': <method-wrapper '__str__' of B object at 0x7f3bb2b48e18>,
'__subclasshook__': <function __subclasshook__>,
'x': 1,
'y': 2,
'z': 3}
Or if you don't want to see magic methods:
In [4]: {a: x.__getattribute__(a) for a in dir(x) if not a.startswith('__')}
Out[4]: {'x': 1, 'y': 2, 'z': 3}
| stackoverflow | {
"language": "en",
"length": 295,
"provenance": "stackexchange_0000F.jsonl.gz:881590",
"question_score": "4",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44593980"
} |
ee21bc23deb9a21e0e250754ab9f788e65b80e99 | Stackoverflow Stackexchange
Q: Using Reinforcement Learning for Classification Problems Can I use reinforcement learning on classification? Such as human activity recognition? And how?
A: Short answer: Yes.
Detailed answer: yes, but it's overkill. Reinforcement learning is useful when you don't have a labeled dataset from which to learn the correct policy, so you need to develop the correct strategy based on rewards. It also allows backpropagating through non-differentiable blocks (which I suppose is not your case). The biggest drawback of reinforcement learning methods is that they typically take a VERY long time to converge. So, if you possess labels, it would be a LOT faster and easier to use regular supervised learning.
A: There are two types of feedback. One is evaluative that is used in reinforcement learning method and second is instructive that is used in supervised learning mostly used for classification problems.
When supervised learning is used, the weights of the neural network are adjusted based on the information of the correct labels provided in the training dataset. So, on selecting a wrong class, the loss increases and weights are adjusted, so that for the input of that kind, this wrong class is not chosen again.
However, in reinforcement learning, the system explores all the possible actions (class labels for various inputs, in this case) and, by evaluating the reward, decides what is right and what is wrong. Until it finds the correct class label it may keep giving a wrong class name, as that is the best output it has found so far. So it doesn't make use of the specific knowledge we have about the class labels, which slows the convergence rate significantly compared to supervised learning.
You can use reinforcement learning for classification problems but it won't be giving you any added benefit and instead slow down your convergence rate.
A: You may be able to develop an RL model that chooses which classifier to use, with the ground-truth labels being used to train the classifiers and the change in performance of those classifiers being the reward for the RL model. As others have said, it would probably take a very long time to converge, if it ever does. This idea may also require many tricks and tweaks to make it work. I would recommend searching for research papers on this topic.
| stackoverflow | {
"language": "en",
"length": 396,
"provenance": "stackexchange_0000F.jsonl.gz:881595",
"question_score": "12",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44594007"
} |
cafb668aa2ac2dbd2efa0a96671989788160ecbd | Stackoverflow Stackexchange
Q: TypeScript: Can I import a folder without having to write an index.ts file? If most directories of a project contain no more than 2-3 TypeScript files and all of their exports should be accessible when importing the containing directory somewhere else, this results in a lot of index.ts files with predictable content.
Example
Directory: my-component
my-component-config.ts
my-component.ts
index.ts
What does index.ts contain? Of course, it contains
export * from "./my-component-config"
export * from "./my-component"
Which is obvious.
For 10 component directories, that means: 10 index.ts files containing 100% redundant information.
How can I make TypeScript(/Node) implicitly create index.ts files on the fly, that need not be stored on the hard disk?
A: I don't think there's a way to import a directory in TS without an index file.
Check these questions if you haven't:
How to import all modules from a directory in TypeScript?
Typescript 1.8 modules: import all files from folder
I think the best approach is to write a script to generate index.ts that imports all files in the directory, and run that script every time you add/remove a file.
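A hypothetical generator sketch along those lines (file layout assumed; run it once per directory):
// generate-index.js: writes an index.ts that re-exports every sibling .ts file
const fs = require('fs');
const path = require('path');
const dir = process.argv[2] || '.';
const lines = fs.readdirSync(dir)
  .filter(f => f.endsWith('.ts') && f !== 'index.ts')
  .map(f => `export * from "./${path.basename(f, '.ts')}";`);
fs.writeFileSync(path.join(dir, 'index.ts'), lines.join('\n') + '\n');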
A: Component isn't a well defined concept in TypeScript & node.js, but module and package are.
In general, module is a source file, let's ignore the exceptions. So by creating index.ts files per directory, you are generating façade modules aggregating only a few file/modules each. If all you are looking to do is organize your source files into logical components, you don't need the per-directory façade, you can simply import each file individually rather than a directory at a time.
At a higher level, if you have a package that consists of a number of different directories, it can have a single index.ts façade at package-level. That file would exported each file/module just once, no need for index.ts per directory. So this might look like (assuming each is a .ts file):
export * from './IntStream';
export * from './misc/Interval';
export * from './misc/IntervalSet';
export * from './Lexer';
...
| stackoverflow | {
"language": "en",
"length": 331,
"provenance": "stackexchange_0000F.jsonl.gz:881625",
"question_score": "18",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44594082"
} |
5951c90bb55077d859641fe8065c415858e556ab | Stackoverflow Stackexchange
Q: Finding Strongly Connected Components in Undirected Graphs I want to find strongly connected components in an undirected graph, i.e., if I start from a node A then I will get back to node A and each edge is visited exactly once.
For a directed graph one can use Tarjan's algorithm to find the strongly connected components, but how is this done for an undirected graph?
A: I think you misunderstood the meaning of strongly connected component.
Strongly connected components
A directed graph is strongly connected if there is a path between all pairs of vertices. A strongly connected component (SCC) of a directed graph is a maximal strongly connected subgraph.
But, from your definition of what you're looking for, I'd say you want to find a cycle in an undirected graph, one that:
* enters each node once
* starts from node A and finishes in node A.
If that's what you're looking for, I'd say use a DFS algorithm to find a cycle in the undirected graph (a sketch follows below).
Hope I answered your question
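A minimal sketch of that DFS (graph assumed to be an adjacency-list dict): an edge back to any visited vertex other than the parent means the undirected graph has a cycle:
def has_cycle(graph):
    visited = set()
    def dfs(v, parent):
        visited.add(v)
        for w in graph[v]:
            if w not in visited:
                if dfs(w, v):
                    return True
            elif w != parent:  # back edge => cycle
                return True
        return False
    return any(v not in visited and dfs(v, None) for v in graph)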
| stackoverflow | {
"language": "en",
"length": 168,
"provenance": "stackexchange_0000F.jsonl.gz:881642",
"question_score": "3",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44594132"
} |
0260afdd5b25e75fdf901d81defb18f1a5a1a13c | Stackoverflow Stackexchange
Q: How to create a customized theme similar to theme_bw in ggplot2? I have the following code in ggplot2:
require("ggplot2")
df <- data.frame(x=factor(rep(1:2,5)), y=rnorm(10))
ggplot(df , aes(x,y)) + geom_point(size = 3) +
theme(axis.text.x = element_text(angle = 40, hjust = 1, colour = "black", size=12),
plot.title = element_text(size=16, face="bold", hjust=0.5)) +
labs(title = "Plot")
Now, I want to change the background color to theme_bw, but then the main title and the x-axis options will be changed back to default.
ggplot(df , aes(x,y)) + geom_point(size = 3) +
theme(axis.text.x = element_text(angle = 40, hjust = 1, colour = "black", size=12),
plot.title = element_text(size=16, face="bold", hjust=0.5)) +
labs(title = "Plot") +
theme_bw()
So, how can I change the theme to be the same as theme_bw but without losing other options?
Thanks
A: Here's the solution (make sure that theme_bw() is used before the theme() that you want to apply):
ggplot(df , aes(x,y)) + geom_point(size = 3) +
theme_bw() +
theme(axis.text.x = element_text(angle = 40, hjust = 1, colour = "black", size=12),
plot.title = element_text(size=16, face="bold", hjust=0.5)) +
labs(title = "Plot")
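A reusable variant sketch: theme objects can be added together and stored, so the customization only has to be written once:
my_theme <- theme_bw() +
  theme(axis.text.x = element_text(angle = 40, hjust = 1, colour = "black", size = 12),
        plot.title = element_text(size = 16, face = "bold", hjust = 0.5))
ggplot(df, aes(x, y)) + geom_point(size = 3) + labs(title = "Plot") + my_theme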
A: Another interesting option would be to write a function that only updates the theme if the theme elements are not already defined. Thus it would be similar to +, but themes cannot be overwritten, only added:
`%+safe%` <- function(e1, e2){
if (!is.theme(e1) || !is.theme(e2)) {
stop("%+safe% requires two theme objects", call. = FALSE)
}
not_in_e1 <- names(e2)[!names(e2) %in% names(e1)]
e1[not_in_e1] <- e2[not_in_e1]
e1
}
ggplot(df , aes(x,y)) + geom_point(size = 3) + labs(title = "Plot") +
theme(axis.text.x = element_text(angle = 40, hjust = 1, colour = "black", size=12),
plot.title = element_text(size=16, face="bold", hjust=0.5)) %+safe% theme_bw()
Note, this needs to take two theme objects so you have to move labs() anyway.
| stackoverflow | {
"language": "en",
"length": 289,
"provenance": "stackexchange_0000F.jsonl.gz:881644",
"question_score": "6",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44594136"
} |
2e21ef17100ad0c4d8023e2c9acd2c8aba3baa6f | Stackoverflow Stackexchange
Q: How to change the tab color for Chrome tab - desktop version How to change the tab color for Chrome tab - desktop version?
thanks,
Austin
| stackoverflow | {
"language": "en",
"length": 27,
"provenance": "stackexchange_0000F.jsonl.gz:881646",
"question_score": "4",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44594139"
} |
140d74c62a352a15237b938f8a69ae769ada4091 | Stackoverflow Stackexchange
Q: How to make a route parameter optional in Azure Function How to make a route parameter optional in Azure Function
public static async Task<HttpResponseMessage> Run([HttpTrigger(AuthorizationLevel.Function, "get", "post", Route = "ResolveKey/{key}/{resolver}")]HttpRequestMessage req, TraceWriter log, string key,string resolver= "default")
In the above code I tried to make the resolver parameter optional by setting a default value (string resolver = "default"). The code compiles and runs fine, but the URL always wants the resolver parameter to be present; otherwise I get a 404.
I want to make the resolver parameter optional in the above code. Is there any way?
A: You can express that a parameter is optional in the route template itself.
For the route above, you can just change your template to the following:
ResolveKey/{key}/{resolver?}
You can find more information about optional routes and default values here
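A hedged sketch of the resulting signature (it mirrors the question's code; the default value fills in when the segment is omitted):
public static HttpResponseMessage Run(
    [HttpTrigger(AuthorizationLevel.Function, "get", "post",
        Route = "ResolveKey/{key}/{resolver?}")] HttpRequestMessage req,
    TraceWriter log, string key, string resolver = "default")
{
    // GET .../ResolveKey/abc resolves here with resolver == "default"
    return req.CreateResponse(HttpStatusCode.OK, $"{key}/{resolver}");
}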
A: Azure Functions also supports catch-all (wildcard) route parameters. You can change your routing template to
ResolveKey/{key}/{*resolver}
| stackoverflow | {
"language": "en",
"length": 149,
"provenance": "stackexchange_0000F.jsonl.gz:881650",
"question_score": "17",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44594154"
} |
c181bbe35ea06fcc8c0be6b9493fd26643056729 | Stackoverflow Stackexchange
Q: Can Fabric know to expect a host to disconnect? I'm writing a Fabric script to provision some OpenWRT access points. Part of my script involves assigning a new static IP to the APs, which requires restarting the network interface.
My problem is that when I do this, Fabric hangs because the connection to the host disappears.
Is there a way to tell Fabric to either:
*
*Expect this command to fail to return
*Connect to the new host IP automatically
For reference, this is the output from the task:
[root@192.168.1.1:22] run: uci set network.lan.proto=static
[root@192.168.1.1:22] run: uci set network.lan.ipaddr=<my IP here>
[root@192.168.1.1:22] run: uci commit network
[root@192.168.1.1:22] run: /etc/init.d/network reload
<Fabric hangs indefinitely>
A: You can try setting env.command_timeout and then calling the function that sets static IPs with execute.
command_timeout
Default: None
Remote command timeout, in seconds.
| Q: Can Fabric know to expect a host to disconnect? I'm writing a Fabric script to provision some OpenWRT access points. Part of my script involves assigning a new static IP to the APs, which requires restarting the network interface.
My problem is that when I do this, Fabric hangs because the connection to the host disappears.
Is there a way to tell Fabric to either:
*
*Expect this command to fail to return
*Connect to the new host IP automatically
For reference, this is the output from the task:
[root@192.168.1.1:22] run: uci set network.lan.proto=static
[root@192.168.1.1:22] run: uci set network.lan.ipaddr=<my IP here>
[root@192.168.1.1:22] run: uci commit network
[root@192.168.1.1:22] run: /etc/init.d/network reload
<Fabric hangs indefinitely>
A: You can try setting env.command_timeout and then calling the function that sets static IPs with execute.
command_timeout
Default: None
Remote command timeout, in seconds.
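For illustration, a minimal sketch of that approach (assuming Fabric 1.x; host strings are placeholders):
from fabric.api import env, execute, run, settings

env.command_timeout = 10  # stop waiting on the reload after 10 seconds

def reload_network():
    # the SSH connection is expected to drop here, so don't abort on failure
    with settings(warn_only=True):
        run("/etc/init.d/network reload")

def provision():
    execute(reload_network, hosts=["root@192.168.1.1"])
    env.host_string = "root@<new IP>"  # reconnect to the new static IP afterwards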
| stackoverflow | {
"language": "en",
"length": 139,
"provenance": "stackexchange_0000F.jsonl.gz:881652",
"question_score": "3",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44594160"
} |
427ad433327acfe20157bad6ac4b0a103d404544 | Stackoverflow Stackexchange
Q: Pass Powershell parameters within Task Scheduler function ServiceRestart
{
Param
(
$ErrorLog,
$Services,
$MaxSize
)
$Time = Get-Date -Format 'yyyy:MM:dd HH:mm:ss'
$Result = (Get-Item $ErrorLog).length
if($Result -gt $MaxSize)
{
Clear-Content $ErrorLog
}
Try
{
Foreach($Service in $Services)
{
Restart-Service -DisplayName $Service -ErrorAction Stop
}
} Catch
{
"ERROR: $Service could not be restarted $Time" | Add-Content $ErrorLog
}
}
ServiceRestart -ErrorLog -Services -MaxSize
I need to pass in the following parameters from Task Scheduler
- Errorlog
- Services
- MaxSize
I currently have my Task Scheduler setup like this
Program/script: C:\Windows\System32\WindowsPowerShell\v1.0\powershell.exe
Add arguments(optional): -Command "& \\ServerName\C$\Users\*****\Documents\Scripts\Scheduled-ServiceRestart.ps1 -ErrorLog '\\ServerName\C$\Users\*****\Documents\log\ScriptErrors.txt' -Services 'foo1' , 'foo2' -MaxSize '5MB'"
When I run the scheduled task nothing happens. What could be going wrong?
A: The function needs to be imported first. I would recommend saving the function as a module and placing it in the modules folder in either system32 or program files. This way, when powershell is launched, it will automatically import your function.
After you do that, the task scheduler is very simple.
Program/Script
Powershell
Add arguments(optional):
-Command &{ServiceRestart -ErrorLog 'ServerName\C$\Users*****\Documents\log\ScriptErrors.txt' -Services 'foo1','foo2' -MaxSize '5MB'}
| Q: Pass Powershell parameters within Task Scheduler function ServiceRestart
{
Param
(
$ErrorLog,
$Services,
$MaxSize
)
$Time = Get-Date -Format 'yyyy:MM:dd HH:mm:ss'
$Result = (Get-Item $ErrorLog).length
if($Result -gt $MaxSize)
{
Clear-Content $ErrorLog
}
Try
{
Foreach($Service in $Services)
{
Restart-Service -DisplayName $Service -ErrorAction Stop
}
} Catch
{
"ERROR: $Service could not be restarted $Time" | Add-Content $ErrorLog
}
}
ServiceRestart -ErrorLog -Services -MaxSize
I need to pass in the following parameters from Task Scheduler
- Errorlog
- Services
- MaxSize
I currently have my Task Scheduler setup like this
Program/script: C:\Windows\System32\WindowsPowerShell\v1.0\powershell.exe
Add arguments(optional): -Command "& \\ServerName\C$\Users\*****\Documents\Scripts\Scheduled-ServiceRestart.ps1 -ErrorLog '\\ServerName\C$\Users\*****\Documents\log\ScriptErrors.txt' -Services 'foo1' , 'foo2' -MaxSize '5MB'"
When I run the scheduled task nothing happens. What could be going wrong?
A: The function needs to be imported first. I would recommend saving the function as a module and placing it in the modules folder in either system32 or program files. This way, when powershell is launched, it will automatically import your function.
After you do that, the task scheduler is very simple.
Program/Script
Powershell
Add arguments(optional):
-Command &{ServiceRestart -ErrorLog 'ServerName\C$\Users*****\Documents\log\ScriptErrors.txt' -Services 'foo1','foo2' -MaxSize '5MB'}
A: I recommend scheduling the task to use the -File parameter rather than -Command. Example:
Program/script: C:\Windows\System32\WindowsPowerShell\v1.0\powershell.exe
Add arguments (optional): -NoProfile -ExecutionPolicy Bypass -File "Scheduled-ServiceRestart.ps1" -ErrorLog "ScriptErrors.txt" -Services "foo1","foo2" -MaxSize 5MB
Start in (optional): C:\Users\<username>\Documents\Scripts
You can specify the starting directory for the script in the "Start in" property for the task and avoid the lengthy path names to the script and log files. (Note that I am assuming you are running a copy of the script on the local computer, not over the network, which adds potential complications and possibilities for failure.)
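As a side note, the whole task can also be registered from PowerShell itself; a rough sketch using the ScheduledTasks module (the task name and trigger time are arbitrary choices here):
$action = New-ScheduledTaskAction -Execute "powershell.exe" `
    -Argument '-NoProfile -ExecutionPolicy Bypass -File "Scheduled-ServiceRestart.ps1" -ErrorLog "ScriptErrors.txt" -Services "foo1","foo2" -MaxSize 5MB' `
    -WorkingDirectory "C:\Users\<username>\Documents\Scripts"
$trigger = New-ScheduledTaskTrigger -Daily -At 3am
Register-ScheduledTask -TaskName "ServiceRestart" -Action $action -Trigger $trigger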
A: As far as a literal answer to the question: Likely you need to add single quotes to surround your script location, otherwise it will try to interpret special characters/backslashes as escapes.
Ex:
-Command "& '\ServerName\C$\Users*****\Documents\Scripts\Scheduled-ServiceRestart.ps1' ...
You could also add some basic logging in your powershell script to ensure that it is actually starting.
I use the "-Command &" style in production Powershell scheduled tasks & they work fine provided the strings are formatted properly.
A: Apparently, when using Scheduled Tasks, the choice of quotes makes a huge difference. Try this for your arguments:
-Command "& '\\ServerName\C$\Users\...\Documents\Scripts\Scheduled-ServiceRestart.ps1' -ErrorLog '\\ServerName\C$\Users\...\Documents\log\ScriptErrors.txt' -Services 'foo1','foo2' -MaxSize '5MB'"
A: In addition to Bill's suggestion, another thing to look out for is the "Configure for:" setting in the General tab of Task Scheduler. Depending on your setup it may be defaulting to an earlier version of Windows than the one you're running, and in the case of Server 2008 it looks like it doesn't support adding parameters like that to the .ps1 file you're calling. Took me a while to work out after trying the other suggestions here.
For example :
-file "c:\scripts\myscript.ps1"
Works fine in task scheduler
.\myscript.ps1 -myparameter foo
Works fine from the shell, however :
-file "c:\scripts\myscript.ps1" -myparameter foo
In task scheduler fails to run with a 0xfffd0000 result code.
Spotted that "Configure for:" had defaulted to 2008, so changed that to 2012 R2 and that fixed the issue and it's now running fine.
| stackoverflow | {
"language": "en",
"length": 517,
"provenance": "stackexchange_0000F.jsonl.gz:881657",
"question_score": "11",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44594179"
} |
e2035e70854193c1eef9a4e366ee93af19979b77 | Stackoverflow Stackexchange
Q: Non-fixed length lookbehind I am trying to write a regular expression to match the empty string, preceded either by a vowel and ck, or by a vowel and any other consonant (it's from a challenge from CodeGolf). So far I came up with (?<=[aeiou](?:ck|[^aeiou])). The problem with this is that it just won't match after ck with [^aeiou] there. It always matches after a c in the case of, say, nickel : nic-kel. Why does this happen?
A: I think you need
(?<=[aeiou](?:(?!ck)[a-zA-Z-[aeiou]]|ck))
See regex demo.
Lookbehind is a nonconsuming pattern that - being unanchored - is tried at each location in the string. Since you allow matching the location preceded with a vowel and any char but a vowel you will get the matches between c and k and k and e.
If you want to allow matching the position after a vowel followed by any consonant, but not when there is a ck cluster, temper the consonant pattern with a (?!ck) negative lookahead. The consonant must be matched with [a-zA-Z-[aeiouAEIOU]], which matches any ASCII letter but a, e, i, o, u (case-insensitively).
| Q: Non-fixed length lookbehind I am trying to write a regular expression to match the empty string, preceded either by a vowel and ck, or by a vowel and any other consonant (it's from a challenge from CodeGolf). So far I came up with (?<=[aeiou](?:ck|[^aeiou])). The problem with this is that it just won't match after ck with [^aeiou] there. It always matches after a c in the case of, say, nickel : nic-kel. Why does this happen?
A: I think you need
(?<=[aeiou](?:(?!ck)[a-zA-Z-[aeiou]]|ck))
See regex demo.
Lookbehind is a nonconsuming pattern that - being unanchored - is tried at each location in the string. Since you allow matching the location preceded with a vowel and any char but a vowel you will get the matches between c and k and k and e.
If you want to allow matching the position after a vowel followed by any consonant, but not when there is a ck cluster, temper the consonant pattern with a (?!ck) negative lookahead. The consonant must be matched with [a-zA-Z-[aeiouAEIOU]], which matches any ASCII letter but a, e, i, o, u (case-insensitively).
A: There is nothing wrong with your regex; you just need to add a simple (?!ck)
before the consonants.
(?<=([aeiou](?:ck|(?!ck)[^aeiou])))
(?<=
( # (1 start)
[aeiou]
(?:
ck
| (?! ck ) # <== here
[^aeiou]
)
) # (1 end)
)
But, you may want to know the reason why.
The reason is that with variable length lookbehinds in C#
matching starts at a point between characters.
At any point, and only that point, it looks backwards for a match.
All that is ahead of it is not allowed in the match.
Let's see how they do it :
Using your regex (?<=[aeiou](?:ck|[^aeiou]))
i<=absolute position ck , then looks back
Finds [aeiou].
Fails ck and [^aeiou]
Goes forward (to right) by 1 position, then looks back
ic<=absolute position k
Fails ck
BUT, matches the 'c' with [^aeiou]
The important thing to remember is that it can't defy its own
two primary rules.
Their rules state that it has to take the first match it finds,
and it must find it looking backwards between characters.
So, it's clear that it finds and matches this ic<=absolute position k
first.
Each assertion is in its own relative frame position that is
independent of its surrounding code.
That position is dynamic (changing) and its origin is the current position
of the calling expression (even another assertion).
So when calling an assertion inside an assertion, it just takes the parent's current position and does its checking from that position, internally
maintaining its own current position.
Let's see what the fix does (?<=[aeiou](?:ck|(?!ck)[^aeiou]))
i<=absolute position ck , then looks back
Finds [aeiou].
Fails ck and [^aeiou]
Goes forward (to right) by 1 position, then looks back
ic<=absolute position k
Note that internally, it is matching forward and the
relative position is now here =>ck
because it already matched the i and is checking after it.
Fails ck because the 'k' would extend 1 character beyond its
absolute position
HOWEVER, it could match 'c' with [^aeiou] without going beyond its absolute position
To STOP that, just a simple (?!ck) is needed before [^aeiou]
At this point (?!ck) is passed this relative position, and it
is not restricted by the callers absolute position.
It sees there is a ck looking forward and returns a false condition,
making the outer assertion fail.
Goes forward (to right) by 1 position, then looks back
ick<=absolute position , then looks back
This time it scores a match on the ick
Demo
Target string
nickel : nic-ikel
C#
string Stxt = "nickel : nic-ikel";
var RxR = new Regex(@"(?<=([aeiou](?:ck|(?!ck)[^aeiou])))");
foreach (Match match in RxR.Matches(Stxt))
Console.WriteLine("{0}", match.Groups[1].Value);
Output
ick
el
ic
ik
el
| stackoverflow | {
"language": "en",
"length": 626,
"provenance": "stackexchange_0000F.jsonl.gz:881665",
"question_score": "3",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44594204"
} |
2948281c9626a994fef718e83f25eb28e39b0864 | Stackoverflow Stackexchange
Q: The requested URL returned error 502, corporate? Does anyone know what the problem seems to be with git?
I am getting this error when running a git clone from Team Foundation Server (TFS).
fatal: unable to access 'http://website.com/_git/project': The requested URL returned error: 502
A: For reference: Within a corporate environment behind an authenticating proxy server, I had previously added an override for the proxy server to my git global config. Once I corrected this, I was able to connect.
Useful command:
git config --global --list
[filter "lfs"]
clean = git-lfs clean -- %f
smudge = git-lfs smudge -- %f
process = git-lfs filter-process
required = true
[user]
name = GuyWicks
[user]
email = guy.wicks@iworkhere.com
[http]
proxy = http://public-cache:8080
[https]
proxy = http://public-cache:8080
I commented out the four proxy lines (#)
| Q: The requested URL returned error 502, corporate? Does anyone know what the problem seems to be with git?
I am getting this error when running a git clone from Team Foundation Server (TFS).
fatal: unable to access 'http://website.com/_git/project': The requested URL returned error: 502
A: For reference: Within a corporate environment behind an authenticating proxy server, I had previously added an override for the proxy server to my git global config. Once I corrected this, I was able to connect.
Useful command:
git config --global --list
[filter "lfs"]
clean = git-lfs clean -- %f
smudge = git-lfs smudge -- %f
process = git-lfs filter-process
required = true
[user]
name = GuyWicks
[user]
email = guy.wicks@iworkhere.com
[http]
proxy = http://public-cache:8080
[https]
proxy = http://public-cache:8080
I commented out the four proxy lines (#)
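The same fix can be done without editing the file by hand, using standard git commands:
git config --global --unset http.proxy
git config --global --unset https.proxy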
A: This took me a considerable amount of time to troubleshoot and fix.
In my case I had the http_proxy and https_proxy environment variables set on my PC. (You can find them under Control Panel - System and Security - Advanced system settings - Env variables - System variables section, bottom panel, in Windows 10.)
Once I deleted those 2 environment variables (highlight - delete - ok) and closed and re-opened the Git terminal window, the problem (HTTP 502) went away. Just to confirm, I tested with Git GUI + VS Code + Git Term and did git fetch/clone/push/etc ...
Note: I am in a corporate env behind a strict proxy but so is the Enterprise tfs URL therefore I do not need a proxy interfering between me and my tfs server. I suspect (cannot confirm or rule out) the proxy was playing a MITM. Client machine is Windows 10 latest build running git version 2.22.0.windows.1
A: I was running into the same issue. For some reason I had http and Https proxies set up in my git config list. Removing those allowed me to clone successfully.
A: I have also run into this issue when cloning a project from our private GitLab repo. Initially I was using the following command:
git clone http://remote_gitlab_url/user_name/some_projects
I am seeing the following error:
The requested URL returned error: 502
After adding .git to the project name, everything works smoothly:
git clone http://remote_gitlab_url/user_name/some_projects.git
| stackoverflow | {
"language": "en",
"length": 376,
"provenance": "stackexchange_0000F.jsonl.gz:881714",
"question_score": "14",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44594353"
} |
7068964cfbc12735ce3335a75e3acad344690c6d | Stackoverflow Stackexchange
Q: Can I deploy my ReactJS app on a regular host? I've seen many guides where people teach you how to deploy your react app on services like digital ocean, heroku, GitHub Pages, aws.
But I'm wondering if I can deploy my React app (create-react-app), which consists of only a front end, on a hosting service like 000webhost or iPage? Because a person wants me to design a website, and he says that he already has a domain name and a hosting service on iPage.
A: Run 'npm run build' from the command line in the folder where your project is located. Copy the files from the 'build' folder that is created and upload them to your host's root folder or equivalent. Takes less than a minute.
| Q: Can I deploy my ReactJS app on a regular host? I've seen many guides where people teach you how to deploy your react app on services like digital ocean, heroku, GitHub Pages, aws.
But I'm wondering if I can deploy my React app (create-react-app), which consists of only a front end, on a hosting service like 000webhost or iPage? Because a person wants me to design a website, and he says that he already has a domain name and a hosting service on iPage.
A: Run 'npm run build' from the command line in the folder where your project is located. Copy the files from the 'build' folder that is created and upload them to your host's root folder or equivalent. Takes less than a minute.
A: I use webpack to bundle my react app. So in the end the react app will be:
*
*one bundle (rarely more if you use dynamic bundle loading - probably not), that is just a javascript file
*your index.html that includes this file at the end of body
*and your .css that you can set in principle in one file (or separate folder with several files) and include at the top of your index.html
Regarding .css there are several better ways how to include, but you can likely start with simple setup as mentioned above.
So you just put these 3 things on your server, and your app is available at index.html.
P.S. I don't know what iPage is, and I haven't worked with create-react-app.
A: This may be quite old, but I have accomplished a React deploy on regular web hosting in a subdirectory.
A few things that must be changed or included
1 - Set the "homepage" on package.json - "homepage":"https://example.com"
or in my case "homepage":"https://example.com/myApp"
2 - Set basename on router file - <Router basename={'/myApp'}></Router>
OR <BrowserRouter basename={'/myApp'}></BrowserRouter>
3 - Set the .htaccess file as -
<IfModule mod_rewrite.c>
RewriteEngine On
RewriteBase /myApp/
RewriteCond %{REQUEST_FILENAME} !-f
RewriteCond %{REQUEST_FILENAME} !-d
RewriteCond %{REQUEST_FILENAME} !-l
RewriteRule . /myApp/index.html [L]
</IfModule>
4 - build - npm run build
5 - Now I can access my React app at https://example.com/myApp
source: google and
https://www.fullstacktutorials.com/deploy-react-app-subdirectory-10.html
| stackoverflow | {
"language": "en",
"length": 352,
"provenance": "stackexchange_0000F.jsonl.gz:881753",
"question_score": "15",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44594500"
} |
a2fc7e8cbf36ce48a266bcb12e40a55b40f9f369 | Stackoverflow Stackexchange
Q: Telegram more than 20 bots I have 20 bots in use in Telegram and when I want to create more than 20 the botfather tells me the following:
That I cannot do.
You come to me asking for more than 20 bots. But you don't ask with respect. You don't offer friendship. You don't even think to call me Botfather.
In the Telegram help I didn't find anything; does anyone know something about this?
Thanks, Matías.
A: It already said that you can't create more bots with this account.
There is no way to do that except deleting unused ones.
You might want to create bots via a friend's account; it's the simplest way.
Please refer to another question.
| Q: Telegram more than 20 bots I have 20 bots in use in Telegram and when I want to create more than 20 the botfather tells me the following:
That I cannot do.
You come to me asking for more than 20 bots. But you don't ask with respect. You don't offer friendship. You don't even think to call me Botfather.
In the Telegram help I didn't find anything; does anyone know something about this?
Thanks, Matías.
A: It already said that you can't create more bots with this account.
There is no way to do that except deleting unused ones.
You might want to create bots via a friend's account; it's the simplest way.
Please refer to another question.
| stackoverflow | {
"language": "en",
"length": 118,
"provenance": "stackexchange_0000F.jsonl.gz:881801",
"question_score": "5",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44594649"
} |
8280292d390a98691af6336cd6476bea52ccfadd | Stackoverflow Stackexchange
Q: How can I use cognito to create user accounts, but a different service for verification? I am building an app that will be using Plivo or Twilio for user interaction and input.
I wanted to use AWS Cognito for user management, but for verification, they say a user must verify email or phone number.
I wanted to have the user interaction and verification come from the same phone number, so is there a way to set this up in Node.js somehow, where a text to a Plivo/Twilio number reroutes to AWS Cognito for verification?
A: Looking at the documentation, it seems that it is now possible to send emails and SMS using a third party.
It is unknown when it was released.
Custom Sender Lambda Trigger
https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-lambda-custom-sender-triggers.html
Custom SMS Sender Lambda Trigger
https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-lambda-custom-sms-sender.html
Custom Email Lambda Trigger
https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-lambda-custom-email-sender.html
| Q: How can I use cognito to create user accounts, but a different service for verification? I am building an app that will be using Plivo or Twilio for user interaction and input.
I wanted to use AWS Cognito for user management, but for verification, they say a user must verify email or phone number.
I wanted to have the user interaction and verification come from the same phone number, so is there a way to set this up in Node.js somehow, where a text to a Plivo/Twilio number reroutes to AWS Cognito for verification?
A: Looking at the documentation, it seems that it is now possible to send emails and SMS using a third party.
It is unknown when it was released.
Custom Sender Lambda Trigger
https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-lambda-custom-sender-triggers.html
Custom SMS Sender Lambda Trigger
https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-lambda-custom-sms-sender.html
Custom Email Lambda Trigger
https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-lambda-custom-email-sender.html
A: You can leverage the custom auth flow to achieve this. Take a look on a example article we have here:
https://aws.amazon.com/blogs/mobile/customizing-your-user-pool-authentication-flow/
The idea is that you will explicitly make the calls you want to Twilio/Plivo from the Lambda function.
Cognito User Pools does not integrate out of the box with a third party,
and offers its own notification mechanism through the SNS and SES services.
I would consider this option as well in your position.
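For illustration, a rough sketch of what the CreateAuthChallenge Lambda from such a flow could look like in Node.js (to be paired with the Define/Verify triggers from the linked article); the environment variables and the Twilio usage here are assumptions, not part of the Cognito API:
const twilio = require('twilio')(process.env.TWILIO_SID, process.env.TWILIO_TOKEN);

exports.handler = async (event) => {
  if (event.request.challengeName === 'CUSTOM_CHALLENGE') {
    const code = Math.floor(100000 + Math.random() * 900000).toString();
    // send the code from the same number used for user interaction
    await twilio.messages.create({
      to: event.request.userAttributes.phone_number,
      from: process.env.TWILIO_NUMBER,
      body: 'Your verification code is ' + code,
    });
    event.response.privateChallengeParameters = { answer: code };
  }
  return event;
};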
| stackoverflow | {
"language": "en",
"length": 213,
"provenance": "stackexchange_0000F.jsonl.gz:881816",
"question_score": "4",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44594700"
} |
4718df5011ba09aa3588a3c0f5768e3fda36a482 | Stackoverflow Stackexchange
Q: Wagtail API - show image URL on json output Fairly new to Wagtail - I'm currently creating a Wagtail API for my React app. Have installed successfully and am getting a json output, but not getting a url for images that are uploaded in the Wagtail admin panel. I have searched online, but not having much joy.
This is the basic home page model I have created
class BarsHomePage(Previewable, Themable, Page):
bars_site_homepage_test = models.CharField(max_length=255, blank=True)
feed_image = models.ForeignKey(
'DemoImage',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
api_fields = ['bars_site_homepage_test','feed_image']
class DemoImage(Image):
@property
def fullwidth_url(self):
return generate_image_url(self, 'width-800')
@property
def halfwidth_url(self):
return generate_image_url(self, 'width-400')
api_fields = (
'fullwidth_url',
'halfwidth_url',
)
class Meta:
proxy = True
Json output
{
"id": 504,
"meta": {
"type": "wagtailimages.Image",
"detail_url": "http://www.lv.local/api/v1/images/504/"
},
"title": "Lighthouse.jpg",
"tags": [],
"width": 1365,
"height": 2048
}
Thanks
A: As of Wagtail 1.10, you can use ImageRenditionField in your page's api_fields definition to include the URL for an image, rendered at a size of your choosing:
from wagtail.api import APIField
from wagtail.wagtailimages.api.fields import ImageRenditionField
class BarsHomePage(Previewable, Themable, Page):
# ...
api_fields = [
APIField('bars_site_homepage_test'),
APIField('feed_image_fullwidth', serializer=ImageRenditionField('width-800', source='feed_image')),
]
| Q: Wagtail API - show image URL on json output Fairly new to Wagtail - I'm currently creating a Wagtail API for my React app. Have installed successfully and am getting a json output, but not getting a url for images that are uploaded in the Wagtail admin panel. I have searched online, but not having much joy.
This is the basic home page model I have created
class BarsHomePage(Previewable, Themable, Page):
bars_site_homepage_test = models.CharField(max_length=255, blank=True)
feed_image = models.ForeignKey(
'DemoImage',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
api_fields = ['bars_site_homepage_test','feed_image']
class DemoImage(Image):
@property
def fullwidth_url(self):
return generate_image_url(self, 'width-800')
@property
def halfwidth_url(self):
return generate_image_url(self, 'width-400')
api_fields = (
'fullwidth_url',
'halfwidth_url',
)
class Meta:
proxy = True
Json output
{
"id": 504,
"meta": {
"type": "wagtailimages.Image",
"detail_url": "http://www.lv.local/api/v1/images/504/"
},
"title": "Lighthouse.jpg",
"tags": [],
"width": 1365,
"height": 2048
}
Thanks
A: As of Wagtail 1.10, you can use ImageRenditionField in your page's api_fields definition to include the URL for an image, rendered at a size of your choosing:
from wagtail.api import APIField
from wagtail.wagtailimages.api.fields import ImageRenditionField
class BarsHomePage(Previewable, Themable, Page):
# ...
api_fields = [
APIField('bars_site_homepage_test'),
APIField('feed_image_fullwidth', serializer=ImageRenditionField('width-800', source='feed_image')),
]
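With a definition like that, the serialized page would then carry a rendition object for the image; illustratively (the exact shape and URL depend on the Wagtail version and storage setup):
"feed_image_fullwidth": {
    "url": "/media/images/Lighthouse.width-800.jpg",
    "width": 800,
    "height": 1200
}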
| stackoverflow | {
"language": "en",
"length": 185,
"provenance": "stackexchange_0000F.jsonl.gz:881835",
"question_score": "3",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44594767"
} |
21b71ff17e84e967c33b5e6191bb67473d6fa4cc | Stackoverflow Stackexchange
Q: How to launch and open email client React-native? I do not want to compose an email. I just want to be able to launch the main email app on a user's device (iOS & Android) from a react-native app.
Scenario: I will send a verification email to the user upon signup.
A: Expo.io pure js/typescript solution:
import * as IntentLauncher from 'expo-intent-launcher';
// ...
public openMailClientIOS() {
Linking.canOpenURL('message:0')
.then(supported => {
if (!supported) {
console.log('Cant handle url')
} else {
return Linking.openURL('message:0')
.catch(this.handleOpenMailClientErrors)
}
})
.catch(this.handleOpenMailClientErrors)
}
public openMailClientAndroid() {
const activityAction = 'android.intent.action.MAIN'; // Intent.ACTION_MAIN
const intentParams: IntentLauncher.IntentLauncherParams = {
flags: 268435456, // Intent.FLAG_ACTIVITY_NEW_TASK
category: 'android.intent.category.APP_EMAIL' // Intent.CATEGORY_APP_EMAIL
};
IntentLauncher.startActivityAsync(activityAction, intentParams)
.catch(this.handleOpenMailClientErrors);
}
Works in iOS with Mail, works in Android
Android Intent docs: https://developer.android.com/reference/android/content/Intent#ACTION_MAIN
Expo IntentLauncher doc: https://docs.expo.io/versions/latest/sdk/intent-launcher/
| Q: How to launch and open email client React-native? I do not want to compose an email. I just want to be able to launch the main email app on a user's device (iOS & Android) from a react-native app.
Scenario: I will send a verification email to the user upon signup.
A: Expo.io pure js/typescript solution:
import * as IntentLauncher from 'expo-intent-launcher';
// ...
public openMailClientIOS() {
Linking.canOpenURL('message:0')
.then(supported => {
if (!supported) {
console.log('Cant handle url')
} else {
return Linking.openURL('message:0')
.catch(this.handleOpenMailClientErrors)
}
})
.catch(this.handleOpenMailClientErrors)
}
public openMailClientAndroid() {
const activityAction = 'android.intent.action.MAIN'; // Intent.ACTION_MAIN
const intentParams: IntentLauncher.IntentLauncherParams = {
flags: 268435456, // Intent.FLAG_ACTIVITY_NEW_TASK
category: 'android.intent.category.APP_EMAIL' // Intent.CATEGORY_APP_EMAIL
};
IntentLauncher.startActivityAsync(activityAction, intentParams)
.catch(this.handleOpenMailClientErrors);
}
Works in iOS with Mail, works in Android
Android Intent docs: https://developer.android.com/reference/android/content/Intent#ACTION_MAIN
Expo IntentLauncher doc: https://docs.expo.io/versions/latest/sdk/intent-launcher/
A: To open email app on iOS:
Linking.canOpenURL('message:')
.then(supported => {
if (!supported) {
console.log('Cant handle url')
} else {
return Linking.openURL('message:')
}
})
.catch(err => {
console.error('An error occurred', err)
})
A: Unfortunately, none of the other answers are correct.
I do not want to compose an email. I just want to be able to launch the main email app
I would like to have the same behavior:
*
*Sign-In Screen with a button Open Email App
*The user opens his email app
*He can click on the magic link to get back in the app
More or less the same as the Slack Onboarding with the magic link.
I found a solution with the library react-native-email-link.
You can open an email client from React Native (for 'magic link' type feature).
*
*Works on Android.
*If you want to try on iOS you need to have a real device because there is no mail.app on iOS Simulator.
A: You can use this method to send open any email client and send an email with some data.
export const sendEmailViaEmailApp = (toMailId, subject, body) => {
if (!isUndefined(toMailId)) {
let link = `mailto:${toMailId}`;
if (!isUndefined(subject)) {
link = `${link}?subject=${subject}`;
}
if (isUndefined(subject)) {
link = `${link}?body=${body}`;
} else {
link = `${link}&body=${body}`;
}
Linking.canOpenURL(link)
.then(supported => {
if (supported) {
// 'mailto:support@example.com?subject=Billing Query&body=Description'
Linking.openURL(link);
}
})
.catch(err => console.error('An error occurred', err));
} else {
console.log('sendEmailViaEmailApp -----> ', 'mail link is undefined');
}
};
Place this method inside a utils class and use it wherever you want.
A: React Native Open Mail Function
<Button onPress={() => Linking.openURL('mailto:support@example.com') }
title="support@example.com" />
React Native Open Mail Function With Subject and Body
<Button onPress={() => Linking.openURL('mailto:support@example.com?subject=SendMail&body=Description') }
title="support@example.com" />
React Native Open URL
<Button onPress={() => Linking.openURL('https://www.google.co.in/') }
title="www.google.co.in" />
Don't forget to import
import { Linking } from 'react-native'
Note: Not supported in iOS simulator, so you must test on a device.
A: You can use react natives Linking module for this purpose. Here is a link to the module https://facebook.github.io/react-native/docs/linking.html.
Example: Linking.openURL('mailto:example@gmail.com?subject=example&body=example')
A: import { Linking } from 'react-native'
React Native Open Mail
<TouchableOpacity onPress={() => Linking.openURL('mailto:support@example.com')}>
<Text>support@example.com</Text>
</TouchableOpacity>
React Native Open Mail With Subject & Body
<TouchableOpacity onPress={() => Linking.openURL('mailto:support@example.com?subject=sendmail&body=details')}>
<Text>support@example.com</Text>
</TouchableOpacity>
This will only work on a real device; it does not work in the iOS simulator.
A: To open the mail app, I've used it like this and it's working for me:
const subject = "Mail Subject";
const message = "Message Body";
Linking.openURL(`mailto:support@domain.com?subject=${subject}&body=${message}`)
A: import { View,Linking,Text, Image,TouchableOpacity } from 'react-native';
const emailId= 'care@flipkart.com'
const onPressEmailClick = (email) => {
Linking.openURL('mailto:'+email)
// Linking.openURL('mailto:Care@amazon.com')
}
<View style={{ flexDirection: "row", alignItems: "center", justifyContent: "center" }} >
<Text style={{ textAlign: "center", marginTop: 15, color: "black" }} >
{"For any query mail us "}
</Text>
<TouchableOpacity
onPress={() => onPressEmailClick(emailId)} >
<Text style={{ textAlign: "center", marginTop: 15, color: "black", textDecorationLine: 'underline' }} >
{emailId}
</Text>
</TouchableOpacity>
A: I think the following npm module should have what you're looking for. Unfortunately it uses native libraries so you'll have to run some react-native link commands.
https://www.npmjs.com/package/react-native-mail
A: Use react-native-mail to launch the email client. It will automatically open the email client.
https://www.npmjs.com/package/react-native-mail
A: You can open the Gmail app using googlegmail:// as your URL; it's Gmail's scheme. The first time your app opens Gmail it will show an alert saying "... wants to open gmail"; the user can then tap OK or Cancel. This behaviour only happens once.
This won't open the email composer but goes straight to the user's primary inbox.
A: I have found a way to open the mail account, not compose mail. Since you need a physical iOS device, I have tested only on Android (Samsung S9+) and it works.
import { openInbox } from "react-native-email-link";
import * as IntentLauncher from 'expo-intent-launcher';
const openMail = async () => {
if (Platform.OS === "ios") {
try {
await openInbox({ title: "Open mail app" });
} catch (error) {
console.error(`OpenEmailbox > iOS Error > ${error}`);
}
}
if (Platform.OS === "android") {
const activityAction = "android.intent.action.MAIN";
const intentParams = {
category: "android.intent.category.APP_EMAIL",
};
IntentLauncher.startActivityAsync(activityAction, intentParams);
}
}
A: <TouchableOpacity onPress={()=>{
Linking.openURL('mailto:support@domain.com?subject=mailsubject&body=mailbody');
}}>
<View><Text>Contact Us</Text></View>
</TouchableOpacity>
This works for me!
A: If you want a wrapper that works with Android and iOS: https://www.npmjs.com/package/react-native-email-action
On iOS it works with other email apps.
| stackoverflow | {
"language": "en",
"length": 837,
"provenance": "stackexchange_0000F.jsonl.gz:881847",
"question_score": "120",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44594818"
} |
b00406e3a6f8f69f0264beddc1b75930b6c45f66 | Stackoverflow Stackexchange
Q: How to get week ending next Sunday from date I know lubridate has a function ceiling_date but it provides week ending on the next Saturday from a given date. How can I change it to get the week ending next Sunday instead?
> ceiling_date(as.Date('2017-06-16'), 'week')
[1] "2017-06-17 20:00:00 EDT"
A: I think this might be a bug with ceiling_date as listed here: https://github.com/tidyverse/lubridate/issues/479
One workaround might be to grab the year/week and then round to Sunday:
as.Date(paste0(year('2017-06-16'), week('2017-06-16') + ifelse(wday('2017-06-16')==1,0,1), 7), format = "%Y%U%u")
#[1] "2017-06-18"
From ?strptime:
*
*%Y is the year with century (e.g. 2017)
*%U is week of year as decimal
*%u is day of week from 1-7 (1 is Monday)
So we need to shift our week by 1 when the date is not already a Sunday (hence the conditional +1), and then we set the day to be Sunday (7).
| Q: How to get week ending next Sunday from date I know lubridate has a function ceiling_date but it provides week ending on the next Saturday from a given date. How can I change it to get the week ending next Sunday instead?
> ceiling_date(as.Date('2017-06-16'), 'week')
[1] "2017-06-17 20:00:00 EDT"
A: I think this might be a bug with ceiling_date as listed here: https://github.com/tidyverse/lubridate/issues/479
One workaround might be to grab the year/week and then round to Sunday:
as.Date(paste0(year('2017-06-16'), week('2017-06-16') + ifelse(wday('2017-06-16')==1,0,1), 7), format = "%Y%U%u")
#[1] "2017-06-18"
From ?strptime:
*
*%Y is the year with century (e.g. 2017)
*%U is week of year as decimal
*%u is day of week from 1-7 (1 is Monday)
So we need to shift our week by 1 when the date is not already a Sunday (hence the conditional +1), and then we set the day to be Sunday (7).
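As a side note, a sketch for newer lubridate versions (1.7.0 and later, which expose a week_start argument); with weeks starting on Monday the ceiling lands on the next Monday, so subtract a day to get the Sunday that ends the week:
library(lubridate)
ceiling_date(as.Date("2017-06-16"), "week", week_start = 1) - days(1)
#> [1] "2017-06-18"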
| stackoverflow | {
"language": "en",
"length": 136,
"provenance": "stackexchange_0000F.jsonl.gz:881851",
"question_score": "3",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44594826"
} |
669a750fdbe3312257115754b01db48196564b81 | Stackoverflow Stackexchange
Q: How can I do an UPDATE statement with JOIN in JPQL (Spring JPA Queries)? This is an extension of this question Update Statement with JOIN in SQL but I am trying to use Spring Data JPQL.
I am trying to use Update along with JOIN in JPQL as follows
@Modifying
@Query("UPDATE TotalValue tv JOIN LineItems li WHERE li.totalValue.totalValueId=:totalValuedId SET tv.totalAmount =sum(li.itemTotalValue) ")
void calculateTotalAmount(@Param("totalValuedId") Long totalValuedId);
However, I get an error as follows
org.hibernate.hql.internal.ast.QuerySyntaxException: expecting "set", found 'JOIN'
Is UPDATE and JOIN not possible in JPQL ? What is the alternative. Thanks
A: The JPQL syntax seems to indicate that a JOIN is actually not possible in an UPDATE statement.
| Q: How can I do an UPDATE statement with JOIN in JPQL (Spring JPA Queries)? This is an extension of this question Update Statement with JOIN in SQL but I am trying to use Spring Data JPQL.
I am trying to use Update along with JOIN in JPQL as follows
@Modifying
@Query("UPDATE TotalValue tv JOIN LineItems li WHERE li.totalValue.totalValueId=:totalValuedId SET tv.totalAmount =sum(li.itemTotalValue) ")
void calculateTotalAmount(@Param("totalValuedId") Long totalValuedId);
However, I get an error as follows
org.hibernate.hql.internal.ast.QuerySyntaxException: expecting "set", found 'JOIN'
Is UPDATE and JOIN not possible in JPQL ? What is the alternative. Thanks
A: The JPQL syntax seems to indicate that a JOIN is actually not possible in an UPDATE statement.
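One workaround sketch, assuming a native query is acceptable; the table and column names below are guesses derived from the entity names, and MySQL-style JOIN-in-UPDATE syntax is assumed:
@Modifying
@Query(value = "UPDATE total_value tv " +
               "JOIN (SELECT total_value_id, SUM(item_total_value) AS s " +
               "      FROM line_items GROUP BY total_value_id) li " +
               "ON li.total_value_id = tv.total_value_id " +
               "SET tv.total_amount = li.s " +
               "WHERE tv.total_value_id = :totalValuedId", nativeQuery = true)
void calculateTotalAmount(@Param("totalValuedId") Long totalValuedId);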
| stackoverflow | {
"language": "en",
"length": 111,
"provenance": "stackexchange_0000F.jsonl.gz:881898",
"question_score": "3",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44594983"
} |
dff66cf0383300e78632fc31f26753e1d787b0c0 | Stackoverflow Stackexchange
Q: .NET Core: Remove null fields from API JSON response On a global level in .NET Core 1.0 (all API responses), how can I configure Startup.cs so that null fields are removed/ignored in JSON responses?
Using Newtonsoft.Json, you can apply the following attribute to a property, but I'd like to avoid having to add it to every single one:
[JsonProperty(NullValueHandling = NullValueHandling.Ignore)]
public string FieldName { get; set; }
[JsonProperty(NullValueHandling = NullValueHandling.Ignore)]
public string OtherName { get; set; }
A: In net 5, it's actually DefaultIgnoreCondition:
public void ConfigureServices(IServiceCollection services)
{
services.AddControllers()
.AddJsonOptions(options =>
{
options.JsonSerializerOptions.DefaultIgnoreCondition =
System.Text.Json.Serialization.JsonIgnoreCondition.WhenWritingNull;
});
}
This will prevent both serialization and deserialization of any null value without needing any extra attributes on properties.
| Q: .NET Core: Remove null fields from API JSON response On a global level in .NET Core 1.0 (all API responses), how can I configure Startup.cs so that null fields are removed/ignored in JSON responses?
Using Newtonsoft.Json, you can apply the following attribute to a property, but I'd like to avoid having to add it to every single one:
[JsonProperty(NullValueHandling = NullValueHandling.Ignore)]
public string FieldName { get; set; }
[JsonProperty(NullValueHandling = NullValueHandling.Ignore)]
public string OtherName { get; set; }
A: In net 5, it's actually DefaultIgnoreCondition:
public void ConfigureServices(IServiceCollection services)
{
services.AddControllers()
.AddJsonOptions(options =>
{
options.JsonSerializerOptions.DefaultIgnoreCondition =
System.Text.Json.Serialization.JsonIgnoreCondition.WhenWritingNull;
});
}
This will prevent both serialization and deserialization of any null value without needing any extra attributes on properties.
A: .Net core 6 with Minimal API:
using Microsoft.AspNetCore.Http.Json;
builder.Services.Configure<JsonOptions>(options =>
options.SerializerOptions.DefaultIgnoreCondition
= JsonIgnoreCondition.WhenWritingDefault | JsonIgnoreCondition.WhenWritingNull);
A: If you would like to apply this for specific properties and only use System.Text.Json then you can decorate properties like this
[JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
public string Market { get; set; }
A: This can also be done per controller in case you don't want to modify the global behavior:
public IActionResult GetSomething()
{
var myObject = GetMyObject();
return new JsonResult(myObject, new JsonSerializerSettings()
{
NullValueHandling = NullValueHandling.Ignore
});
};
A: The following works for .NET Core 3.0, in Startup.cs > ConfigureServices():
services.AddMvc()
.AddJsonOptions(options =>
{
options.JsonSerializerOptions.IgnoreNullValues = true;
});
A: In Asp.Net Core you can also do it in the action method, by returning
return new JsonResult(result, new JsonSerializerOptions
{
IgnoreNullValues = true,
});
A: In .NET 5 and greater, if you are using AddNewtonsoftJson instead of AddJsonOptions, the setting is as follows:
services.AddMvc(options =>
{
//any other settings
})
.AddNewtonsoftJson(options =>
{
options.SerializerSettings.NullValueHandling = NullValueHandling.Ignore;
});
A: .NET Core 1.0
In Startup.cs, you can attach JsonOptions to the service collection and set various configurations, including removing null values, there:
public void ConfigureServices(IServiceCollection services)
{
services.AddMvc()
.AddJsonOptions(options => {
options.SerializerSettings
.NullValueHandling = NullValueHandling.Ignore;
});
}
.NET Core 3.1
Instead of this line:
options.SerializerSettings.NullValueHandling = NullValueHandling.Ignore;
Use:
options.JsonSerializerOptions.IgnoreNullValues = true;
.NET 5.0
Instead of both variants above, use:
options.JsonSerializerOptions.DefaultIgnoreCondition
= JsonIgnoreCondition.WhenWritingNull;
The variant from .NET Core 3.1 still works, but it is marked as NonBrowsable (so you never get the IntelliSense hint about this parameter), so it is very likely that it is going to be obsoleted at some point.
A: I found that for dotnet core 3 this solves it -
services.AddControllers().AddJsonOptions(options => {
options.JsonSerializerOptions.IgnoreNullValues = true;
});
A: I used the below in my .net core v3.1 MVC api.
services.AddMvc().AddJsonOptions(options =>
{
options.JsonSerializerOptions.DefaultIgnoreCondition = System.Text.Json.Serialization.JsonIgnoreCondition.WhenWritingNull;
});
A: One more way in .Net 6, for specific ObjectResult:
public class IdentityErrorResult : BadRequestObjectResult
{
public IdentityErrorResult([ActionResultObjectValue] object? error) : base(error)
{
Formatters.Add(new SystemTextJsonOutputFormatter(new JsonSerializerOptions
{
DefaultIgnoreCondition = JsonIgnoreCondition.WhenWritingNull
}));
}
}
in Controller:
public IdentityErrorResult IdentityError(ErrorResponseObject value)
=> new IdentityErrorResult(value);
A: If you are using .NET 6 and want to get rid of the null values in your REST response, on Program.cs just add the following lines:
builder.Services.AddControllers()
.AddJsonOptions(options =>
{
options.JsonSerializerOptions.DefaultIgnoreCondition = JsonIgnoreCondition.WhenWritingNull;
});
A: The code below work for me in .Net core 2.2
services.AddMvc().SetCompatibilityVersion(CompatibilityVersion.Version_2_2);
| stackoverflow | {
"language": "en",
"length": 510,
"provenance": "stackexchange_0000F.jsonl.gz:881910",
"question_score": "81",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44595027"
} |
8e461852d21b288fb23a14d91249518b45e241e4 | Stackoverflow Stackexchange
Q: Create transparent image in opencv python I am trying to make a transparent image and draw on it, and afterwards I will addWeighted it over the base image.
How can I initialize a fully transparent image with a given width and height in OpenCV Python?
EDIT: I want to make an effect like in Photoshop: having a stack of layers where all stacked layers are initially transparent, and drawing is performed on a fully transparent layer. At the end I will merge all layers to get the final image
A: To convert an image's white parts to transparent:
import cv2
import numpy as np
img = cv2.imread("image.png", cv2.IMREAD_UNCHANGED)
img[np.where(np.all(img[..., :3] == 255, -1))] = 0
cv2.imwrite("transparent.png", img)
| Q: Create transparent image in opencv python I am trying to make a transparent image and draw on it, and afterwards I will addWeighted it over the base image.
How can I initialize a fully transparent image with a given width and height in OpenCV Python?
EDIT: I want to make an effect like in Photoshop: having a stack of layers where all stacked layers are initially transparent, and drawing is performed on a fully transparent layer. At the end I will merge all layers to get the final image
A: To convert an image's white parts to transparent:
import cv2
import numpy as np
img = cv2.imread("image.png", cv2.IMREAD_UNCHANGED)
img[np.where(np.all(img[..., :3] == 255, -1))] = 0
cv2.imwrite("transparent.png", img)
A: For creating a transparent image you need a 4 channel matrix, 3 of which represent the RGB colors and the 4th channel represents the Alpha channel. To create a transparent image, you can ignore the RGB values and directly set the alpha channel to 0. In Python, OpenCV uses numpy to manipulate matrices, so a transparent image can be created as
import numpy as np
import cv2
img_height, img_width = 300, 300
n_channels = 4
transparent_img = np.zeros((img_height, img_width, n_channels), dtype=np.uint8)
# Save the image for visualization
cv2.imwrite("./transparent_img.png", transparent_img)
A: If you want to draw on several "layers" and then stack the drawings together, then how about this:
import cv2
import numpy as np
#create 3 separate BGRA images as our "layers"
layer1 = np.zeros((500, 500, 4), dtype=np.uint8)  # uint8 so cv2.imwrite saves as expected
layer2 = np.zeros((500, 500, 4), dtype=np.uint8)
layer3 = np.zeros((500, 500, 4), dtype=np.uint8)
#draw a red circle on the first "layer",
#a green rectangle on the second "layer",
#a blue line on the third "layer"
red_color = (0, 0, 255, 255)
green_color = (0, 255, 0, 255)
blue_color = (255, 0, 0, 255)
cv2.circle(layer1, (255, 255), 100, red_color, 5)
cv2.rectangle(layer2, (175, 175), (335, 335), green_color, 5)
cv2.line(layer3, (170, 170), (340, 340), blue_color, 5)
res = layer1.copy() #copy the first layer into the resulting image
#copy only the pixels we were drawing on from the 2nd and 3rd layers
#(if you don't do this, the black background will also be copied)
cnd = layer2[:, :, 3] > 0
res[cnd] = layer2[cnd]
cnd = layer3[:, :, 3] > 0
res[cnd] = layer3[cnd]
cv2.imwrite("out.png", res)
| stackoverflow | {
"language": "en",
"length": 369,
"provenance": "stackexchange_0000F.jsonl.gz:881948",
"question_score": "8",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44595160"
} |
df24a038bda3cf40be40b541eeefc061d13730f9 | Stackoverflow Stackexchange
Q: Python __repr__ for all member variables Implementing __repr__ for a class Foo with member variables x and y, is there a way to automatically populate the string? Example that does not work:
class Foo(object):
def __init__(self, x, y):
self.x = x
self.y = y
def __repr__(self):
return "Foo({})".format(**self.__dict__)
>>> foo = Foo(42, 66)
>>> print(foo)
IndexError: tuple index out of range
And another:
from pprint import pprint
class Foo(object):
def __init__(self, x, y):
self.x = x
self.y = y
def __repr__(self):
return "Foo({})".format(pprint(self.__dict__))
>>> foo = Foo(42, 66)
>>> print(foo)
{'x': 42, 'y': 66}
Foo(None)
Yes I could define the method as
def __repr__(self):
return "Foo({x={}, y={}})".format(self.x, self.x)
but this gets tedious when there are many member variables.
A: I think you want something like this:
def __repr__(self):
return "Foo({!r})".format(self.__dict__)
This will add repr(self.__dict__) in the string, using !r in a format specifier tells format() to call the item's __repr__().
See the "Conversion field" here: https://docs.python.org/3/library/string.html#format-string-syntax
Based on Ned Batchelder's answer, you can replace the line above by
return "{}({!r})".format(self.__class__.__name__, self.__dict__)
for a more generic approach.
| Q: Python __repr__ for all member variables Implementing __repr__ for a class Foo with member variables x and y, is there a way to automatically populate the string? Example that does not work:
class Foo(object):
def __init__(self, x, y):
self.x = x
self.y = y
def __repr__(self):
return "Foo({})".format(**self.__dict__)
>>> foo = Foo(42, 66)
>>> print(foo)
IndexError: tuple index out of range
And another:
from pprint import pprint
class Foo(object):
def __init__(self, x, y):
self.x = x
self.y = y
def __repr__(self):
return "Foo({})".format(pprint(self.__dict__))
>>> foo = Foo(42, 66)
>>> print(foo)
{'x': 42, 'y': 66}
Foo(None)
Yes I could define the method as
def __repr__(self):
return "Foo({x={}, y={}})".format(self.x, self.x)
but this gets tedious when there are many member variables.
A: I think you want something like this:
def __repr__(self):
return "Foo({!r})".format(self.__dict__)
This will add repr(self.__dict__) in the string, using !r in a format specifier tells format() to call the item's __repr__().
See the "Conversion field" here: https://docs.python.org/3/library/string.html#format-string-syntax
Based on Ned Batchelder's answer, you can replace the line above by
return "{}({!r})".format(self.__class__.__name__, self.__dict__)
for a more generic approach.
A: I use this as a mixin when I want something like that:
class SimpleRepr(object):
"""A mixin implementing a simple __repr__."""
def __repr__(self):
return "<{klass} @{id:x} {attrs}>".format(
klass=self.__class__.__name__,
id=id(self) & 0xFFFFFF,
attrs=" ".join("{}={!r}".format(k, v) for k, v in self.__dict__.items()),
)
It gives the class name, the (shortened) id, and all of the attributes.
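A small usage sketch of the mixin (the hexadecimal id will differ per run):
class Foo(SimpleRepr):
    def __init__(self, x, y):
        self.x = x
        self.y = y

print(Foo(42, 66))   # e.g. <Foo @5d1f20 x=42 y=66>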
A: Nice example!
For prettier output it is better to
place a simple return "\n{!r}".format(self.__dict__)
and at the root print the full form: return "Class name: '{}' \n{!r}".format(self.__class__.__name__, self.__dict__)
| stackoverflow | {
"language": "en",
"length": 253,
"provenance": "stackexchange_0000F.jsonl.gz:881971",
"question_score": "11",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44595218"
} |
7ec87c9f35af8dd576c7fdf4a69b4108b47d9f79 | Stackoverflow Stackexchange
Q: MySQL - ER_TRUNCATED_WRONG_VALUE: Incorrect datetime value, on a Timestamp column I have a MYSQL column that is defined as TIMESTAMP.
Whenever I create a row with javascript new Date() the value is stored ok without issues.
However when I want to update the value, sending the same new Date() on that column, i get an error
Error: ER_TRUNCATED_WRONG_VALUE: Incorrect datetime value: '2017-06-16T17:35:34.377Z' for column 'last_access'.
I can manage the whole thing as Datetime and format the date accordingly to solve the error, however I want to understand why the error is happening.
A: The Date must be formatted like "YYYY-MM-DD HH:MM:SS" for MariaDB.
If you want to insert a Date with JS, try to make your own string.
const newDate: Date = new Date();
const dd = newDate.getDate();
const mm = newDate.getMonth() + 1; // months start at 0, so we have to add 1.
const yyyy = newDate.getUTCFullYear();
const insertString = `${yyyy}-${mm}-${dd}`;
| Q: MySQL - ER_TRUNCATED_WRONG_VALUE: Incorrect datetime value, on a Timestamp column I have a MYSQL column that is defined as TIMESTAMP.
Whenever I create a row with javascript new Date() the value is stored ok without issues.
However when I want to update the value, sending the same new Date() on that column, i get an error
Error: ER_TRUNCATED_WRONG_VALUE: Incorrect datetime value: '2017-06-16T17:35:34.377Z' for column 'last_access'.
I can manage the whole thing as Datetime and format the date accordingly to solve the error, however I want to understand why the error is happening.
A: The Date must be formatted like "YYYY-MM-DD HH:MM:SS" for MariaDB.
If you want to insert a Date with JS, try to make your own string.
const newDate: Date = new Date();
const dd = newDate.getDate();
const mm = newDate.getMonth() + 1; // months start at 0, so we have to add 1.
const yyyy = newDate.getUTCFullYear();
const insertString = `${yyyy}-${mm}-${dd}`;
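A common alternative sketch (UTC-based) that produces the full "YYYY-MM-DD HH:MM:SS" form directly from the ISO string:
const mysqlDatetime = new Date().toISOString().slice(0, 19).replace('T', ' ');
// e.g. "2017-06-16 17:35:34"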
| stackoverflow | {
"language": "en",
"length": 153,
"provenance": "stackexchange_0000F.jsonl.gz:881984",
"question_score": "4",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44595247"
} |
488a08ba6b62099c378729a532a4b3dc120ed213 | Stackoverflow Stackexchange
Q: ValueError at /admin/zinnia/entry/add/ in Zinnia? When adding an entry using the admin site in the Zinnia app in Django I get the following error:
ValueError at /admin/zinnia/entry/add/
too many values to unpack (expected 2)
Does anybody know how to solve this?
A: There is an issue with categories and how they are displayed when you add or edit an entry if you use Django 1.11. The fastest workaround is to downgrade Django to 1.10.
@weather api suggested looking into dependencies, but setup.py doesn't even mention the Django package itself, so by default Django 1.11 will be installed.
| Q: ValueError at /admin/zinnia/entry/add/ in Zinnia? When adding an entry using the admin site in the Zinnia app in Django I get the following error:
ValueError at /admin/zinnia/entry/add/
too many values to unpack (expected 2)
Does anybody know how to solve this?
A: There is an issue with categories and how they are displayed when you add or edit an entry if you use Django 1.11. The fastest workaround is to downgrade Django to 1.10.
@weather api suggested looking into dependencies, but setup.py doesn't even mention the Django package itself, so by default Django 1.11 will be installed.
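For example, the downgrade can be pinned with pip (the version range is the one implied above):
pip install "Django>=1.10,<1.11"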
A: Make sure you're using the correct versions of dependencies for Zinnia.
| stackoverflow | {
"language": "en",
"length": 107,
"provenance": "stackexchange_0000F.jsonl.gz:882014",
"question_score": "3",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44595359"
} |
a39e67f44d0d24ee7c9d13bafa87a76e791e34fd | Stackoverflow Stackexchange
Q: What is the difference between Application.Windows() and Excel.Windows() in Excel VBA? Basically, when should I use Application.Windows() and when should I use Excel.Windows()? Any code example would be specifically preferable.
A: Excel.Windows() lets you write to it [1], whereas Application.Windows() is read-only [2] according to the documentation. [1], [2].
Other than the read-only distinction for the Application version, they are the same. So, if you're just reading properties, use the Application version, if you need to change something, use the Excel version.
| Q: What is the difference between Application.Windows() and Excel.Windows() in Excel VBA? Basically, when should I use Application.Windows() and when should I use Excel.Windows()? Any code example would be specifically preferable.
A: Excel.Windows() lets you write to it [1], whereas Application.Windows() is read-only [2] according to the documentation. [1], [2].
Other than the read-only distinction for the Application version, they are the same. So, if you're just reading properties, use the Application version, if you need to change something, use the Excel version.
A: The Excel library has classes named Windows and Application, so Excel.Windows is the fully qualified name of the Windows class. The Application class has a property named Windows which returns the collection of windows in all workbooks.
So you can use Excel.Windows when you want to refer to a class Windows and you will use Application.Windows property when you want to refer to windows objects. HTH.
We can see this in the Object Browser. There is a problem with the naming: the class Windows and the property Windows share the same name. All the following code examples refer to the same collection of window objects using the Windows property:
Dim eaw As Excel.Windows
Dim aw As Excel.Windows
Dim ew As Excel.Windows
Set eaw = Excel.Application.Windows
Set aw = Application.Windows
Set ew = Excel.Windows
The code Set aw = Application.Windows and Set ew = Excel.Windows are the same, because many of the properties and methods that return the most common objects can be used without the Application object qualifier. Properties and methods that can be used without the Application object qualifier are considered global and that is the case for Windows as well.
In summary:
*
*When declaring a variable of type Excel.Windows you will not make any mistake when you take the fully qualified name e.g. Dim wnds As Excel.Windows.
*When referring to collection of window objects it is up to you which way you choose, all the following are equivalent (return the same collection) Set eaw = Excel.Application.Windows, Set aw = Application.Windows, Set ew = Excel.Windows. Note: According to Object Browser of my Excel 2007 this collection is read-only.
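As a quick illustration of reading from the global collection (a minimal sketch; the output goes to the Immediate window):
Dim w As Excel.Window
For Each w In Application.Windows
    Debug.Print w.Caption
Next w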
| stackoverflow | {
"language": "en",
"length": 351,
"provenance": "stackexchange_0000F.jsonl.gz:882042",
"question_score": "3",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44595434"
} |
0c167cfaaaec5c4492489537a239bdaf3a6defee | Stackoverflow Stackexchange
Q: How can I change the scrollbar separator color in vscode?
I need to change this color; it's not one of the options provided for scrollbar theming. Is it hidden someplace else?
Scroll Bar Control
scrollbar.shadow: Scroll Bar shadow to indicate that the view is scrolled.
scrollbarSlider.activeBackground: Slider background color when active.
scrollbarSlider.background: Slider background color.
scrollbarSlider.hoverBackground: Slider background color when hovering.
A: settings.json:
"editor.overviewRulerBorder": false
| Q: How can I change the scrollbar separator color in vscode?
I need to change this color; it's not one of the options provided for scrollbar theming. Is it hidden someplace else?
Scroll Bar Control
scrollbar.shadow: Scroll Bar shadow to indicate that the view is scrolled.
scrollbarSlider.activeBackground: Slider background color when active.
scrollbarSlider.background: Slider background color.
scrollbarSlider.hoverBackground: Slider background color when hovering.
A: settings.json:
"editor.overviewRulerBorder": false
A: To change the colour of the separator, you can add this to settings.json:
"workbench.colorCustomizations": {
"editorOverviewRuler.border": "#191C22"
}
| stackoverflow | {
"language": "en",
"length": 84,
"provenance": "stackexchange_0000F.jsonl.gz:882045",
"question_score": "3",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44595442"
} |
4936a73bdad67dc3f035a110815ce28468ddbcc6 | Stackoverflow Stackexchange
Q: In React Native, how do you make an interactive tap-and-hold modal like Instagram? Instagram has a really cool interaction where you can tap-and-hold, aka longPress, on an image and an interactive pop-up appears. Once it appears you can drag your finger to one of the action buttons and removing your finger applies the action and closes the pop-up.
Here it is in action.
And here's a link to a video if you need a closer look.
In my app I've got the tap-and-hold popup working, but the finger drag continues to control the underlying View.
How do you get the finger interaction to transfer to the pop-up?
How do you get the buttons responding to finger drag-over?
A: This is something you can achieve with PanResponder. Using Pan Responder you are basically creating your own touchable components. Click here for the docs.
Also, you would need the Animated API for animating the transitions.
| Q: In React Native, how do you make an interactive tap-and-hold modal like Instagram? Instagram has a really cool interaction where you can tap-and-hold, aka longPress, on an image and an interactive pop-up appears. Once it appears you can drag your finger to one of the action buttons and removing your finger applies the action and closes the pop-up.
Here it is in action.
And here's a link to a video if you need a closer look.
In my app I've got the tap-and-hold popup working, but the finger drag continues to control the underlying View.
How do you get the finger interaction to transfer to the pop-up?
How do you get the buttons responding to finger drag-over?
A: This is something you can achieve with PanResponder. Using Pan Responder you are basically creating your own touchable components. Click here for the docs.
Also, you would need the Animated API for animating the transitions.
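A minimal sketch of the idea (the callback names are real PanResponder hooks; the hit-testing logic is only a placeholder):
import { PanResponder } from 'react-native';

const panResponder = PanResponder.create({
  // claim the gesture so finger moves stop controlling the underlying view
  onStartShouldSetPanResponder: () => true,
  onMoveShouldSetPanResponder: () => true,
  onPanResponderMove: (evt, gestureState) => {
    // hit-test gestureState.moveX / gestureState.moveY against the popup's button frames
  },
  onPanResponderRelease: (evt, gestureState) => {
    // apply the action of the button the finger ended on, then close the popup
  },
});
// spread {...panResponder.panHandlers} onto the popup's root View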
| stackoverflow | {
"language": "en",
"length": 154,
"provenance": "stackexchange_0000F.jsonl.gz:882055",
"question_score": "7",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44595477"
} |
154cc1bfeb962f253df7f019a7d7fa23ae90c40e | Stackoverflow Stackexchange
Q: How do I get text onto a newline in bootstrap tooltips? I have my HTML like this:
<i class="fa fa-cubes" data-toggle="tooltip" data-html="true" data-placement="top" title="{% for product in products.category.all %} {{product.description}} x {{product.quantity}} 
{% endfor %}"></i></td>
So, what this does is for every product in the category, it'll display the name of the product and the quantity of the product on hover. For example:
Product 1 x 25
So, what I want to be able to do is, the next Product, should be in a new line like this:
Product 1 x 25
Product 2 X 43
Instead, I'm getting this: Product 1 X 25 Product 2 X43
I looked at numerous other answers on Stackoverflow and I tried these things:
*
*Adding data-html=true and using a <br> to separate the sentences
*Using 
*Adding the following CSS:
.tooltip-inner {
white-space:pre-wrap;
min-width: 100px;
}
But none of these methods worked. What am I missing?
A: In Bootstrap 5.0 and later, to enable HTML inside the tooltip, use:
data-bs-html="true"
and not (make note of the "bs"):
data-html="true"
| Q: How do I get text onto a newline in bootstrap tooltips? I have my HTML like this:
<i class="fa fa-cubes" data-toggle="tooltip" data-html="true" data-placement="top" title="{% for product in products.category.all %} {{product.description}} x {{product.quantity}} 
{% endfor %}"></i></td>
So, what this does is for every product in the category, it'll display the name of the product and the quantity of the product on hover. For example:
Product 1 x 25
So, what I want to be able to do is, the next Product, should be in a new line like this:
Product 1 x 25
Product 2 X 43
Instead, I'm getting this: Product 1 X 25 Product 2 X43
I looked at numerous other answers on Stackoverflow and I tried these things:
*
*Adding data-html=true and using a <br> to separate the sentences
*Using 
*Adding the following CSS:
.tooltip-inner {
white-space:pre-wrap;
min-width: 100px;
}
But none of these methods worked. What am I missing?
A: In Bootstrap 5.0 and later, to enable HTML inside the tooltip, use:
data-bs-html="true"
and not (make note of the "bs"):
data-html="true"
A: Use &#13;&#10; as the sequence to insert a new line. This would help on all the browsers - Chrome, Firefox, IE with some optimizations of their own.
A: I ran into this problem and resolved it by using a line break (<br/>) and enclosing the text in a <pre> tag. Make sure that the data-html attribute is set to true.
My setup is below. I was dynamically updating the title.
My div and the initial title:
<div id="example_div" data-toggle="tooltip" title="Loading..." data-html="true">
A function I called on an onchange event:
function populateToolTips(outcomeData)
{
let Txt = `<pre>Line 1 value: ${outcomeData.first}<br/>Line 2 value: ${outcomeData.second}</pre>`;
$("#example_div").attr('title', Txt).tooltip('fixTitle');
}
The end result looks like this when hovering over the div:
Line 1 value: 123.45
Line 2 value: 678.90
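Note that Bootstrap tooltips are opt-in and must be initialized; a typical Bootstrap 4 init that also enables HTML content (the selector is illustrative):
$(function () {
  $('[data-toggle="tooltip"]').tooltip({ html: true });
});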
| stackoverflow | {
"language": "en",
"length": 301,
"provenance": "stackexchange_0000F.jsonl.gz:882068",
"question_score": "3",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44595511"
} |
4aa632dc9c5907ff7e10a0f63599c53389acde0b | Stackoverflow Stackexchange
Q: Smart cast to 'Type' is impossible, because 'variable' is a mutable property that could have been changed by this time And the Kotlin newbie asks, "why won't the following code compile?":
var left: Node? = null
fun show() {
if (left != null) {
queue.add(left) // ERROR HERE
}
}
Smart cast to 'Node' is impossible, because 'left' is a mutable
property that could have been changed by this time
I get that left is mutable variable, but I'm explicitly checking left != null and left is of type Node so why can't it be smart-casted to that type?
How can I fix this elegantly?
A: 1) You can also use lateinit if you are sure you will do your initialization later, in onCreate() or elsewhere.
Use this
lateinit var left: Node
Instead of this
var left: Node? = null
2) There is another way: use !! at the end of the variable when you use it, like this
queue.add(left!!) // add !!
| Q: Smart cast to 'Type' is impossible, because 'variable' is a mutable property that could have been changed by this time And the Kotlin newbie asks, "why won't the following code compile?":
var left: Node? = null
fun show() {
if (left != null) {
queue.add(left) // ERROR HERE
}
}
Smart cast to 'Node' is impossible, because 'left' is a mutable
property that could have been changed by this time
I get that left is mutable variable, but I'm explicitly checking left != null and left is of type Node so why can't it be smart-casted to that type?
How can I fix this elegantly?
A: 1) You can also use lateinit if you are sure you will do your initialization later, in onCreate() or elsewhere.
Use this
lateinit var left: Node
Instead of this
var left: Node? = null
2) There is another way: use !! at the end of the variable when you use it, like this
queue.add(left!!) // add !!
A: The practical reason why this doesn't work is not related to threads. The point is that node.left is effectively translated into node.getLeft().
This property getter might be defined as:
val left get() = if (Math.random() < 0.5) null else leftPtr
Therefore two calls might not return the same result.
A: Change var left: Node? = null to lateinit var left: Node. Problem solved.
A: Between execution of left != null and queue.add(left) another thread could have changed the value of left to null.
To work around this you have several options. Here are some:
*
*Use a local variable with smart cast:
val node = left
if (node != null) {
queue.add(node)
}
*Use a safe call such as one of the following:
left?.let { node -> queue.add(node) }
left?.let { queue.add(it) }
left?.let(queue::add)
*Use the Elvis operator with return to return early from the enclosing function:
queue.add(left ?: return)
Note that break and continue can be used similarly for checks within loops.
A: There is a fourth option in addition to the ones in mfulton26's answer.
By using the ?. operator it is possible to call methods as well as fields without dealing with let or using local variables.
Some code for context:
var factory: ServerSocketFactory = SSLServerSocketFactory.getDefault();
socket = factory.createServerSocket(port)
socket.close()//smartcast impossible
socket?.close()//Smartcast possible. And works when called
It works with methods, fields and all the other things I tried to get it to work.
So in order to solve the issue, instead of having to use manual casts or using local variables, you can use ?. to call the methods.
For reference, this was tested in Kotlin 1.1.4-3, but also tested in 1.1.51 and 1.1.60. There's no guarantee it works on other versions, it could be a new feature.
The ?. operator can't be used in your case, since it's a passed variable that's the problem. The Elvis operator can be used as an alternative, and it's probably the one that requires the least amount of code. Instead of using continue though, return could also be used.
Using manual casting could also be an option, but this isn't null safe:
queue.add(left as Node);
Meaning if left has changed on a different thread, the program will crash.
A: Your most elegant solution must be:
var left: Node? = null
fun show() {
left?.also {
queue.add( it )
}
}
Then you don't have to define a new and unnecessary local variable, and you don't have any new assertions or casts (which are not DRY). Other scope functions could also work so choose your favourite.
A: Do this:
var left: Node? = null
fun show() {
val left = left
if (left != null) {
queue.add(left) // safe cast succeeds
}
}
Which seems to be the first option provided by the accepted answer, but that's what you're looking for.
A: For a smart cast of a property to work, the declared type of the property must be the class that contains the member you want to access, NOT a superclass of it.
e.g. on Android:
Given:
class MyVM : ViewModel() {
fun onClick() {}
}
Solution:
From: private lateinit var viewModel: ViewModel
To: private lateinit var viewModel: MyVM
Usage:
viewModel = ViewModelProvider(this)[MyVM::class.java]
viewModel.onClick {}
GL
A: Try using the not-null assertion operator...
queue.add(left!!)
A: How I would write it:
var left: Node? = null
fun show() {
val left = left ?: return
queue.add(left) // no error because we return if it is null
}
A: Perform as below :-
var left: Node? = null
Use a null safe call
left?.let { node -> queue.add(node) } // The most preferred one
A: For future viewers, especially Kotlin newcomers with js/ts background
Since this is the most popular question about "Smart cast to 'Type' is impossible" problems, I would like to elaborate on what brought me here.
My case was to simply transform nullable var instance into the another class instance but I was getting errors like this one:
Smart cast to 'TestClass' is impossible, because 'testObject' is a mutable property that could have been changed by this time
data class TestClass(
val x: String,
val y: String
)
data class FooClass(
val x: String,
val y: String
)
var testObject: TestClass? = TestClass("x", "y")
val transformed = testObject?.let { FooClass(x = testObject.x, y = testObject.y) }
and I didn't really understand what was wrong, since I'd used the safe call operator and everything looked fine!
But then I realized that Kotlin, in comparison to JS, is multi-threaded, so in theory the mutable variable could be changed in the meantime; my mistake was that I used the testObject variable directly instead of the lambda parameter it
val transformed = testObject?.let { FooClass(x = testObject.x, y = testObject.y) }
so the correct solution is
val transformed = testObject?.let { FooClass(x = it.x, y = it.y) }
A: This worked for me:
private lateinit var varName: String
| stackoverflow | {
"language": "en",
"length": 990,
"provenance": "stackexchange_0000F.jsonl.gz:882078",
"question_score": "394",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44595529"
} |
c1ed2f8fce4460c4a27f509d759dfec74c06f800 | Stackoverflow Stackexchange
Q: Python for Nuke: Select node before current How can I select a node via python before the one currently selected?
For example, I want to add a "Clamp" node exactly before all "Write" ones.
A: This code snippet allows you to find the nodes upstream of an existing Write node.
import nuke
iNode = nuke.toNode('Write1')
def upstream(iNode, maxDeep=-1, found=None):
    if found is None:
        found = set()
    if maxDeep != 0:
        willFind = set(z for z in iNode.dependencies() if z not in found)
        found.update(willFind)
        for depth in willFind:
            upstream(depth, maxDeep - 1, found)  # decrement remaining depth; -1 means unlimited
    return found
Then call the method upstream(iNode).
And the script snippet you sent me earlier should look like this:
allWrites = nuke.allNodes('Grade')
depNodes = nuke.selectedNode().dependencies()
for depNode in depNodes:
depNode.setSelected(True)
queueElem = len(allWrites)
trigger = -1
for i in range(1,queueElem+1):
trigger += 1
for write in allWrites[(0+trigger):(1+trigger)]:
write.setSelected(True)
nuke.createNode("Clamp")
for all in nuke.allNodes():
all.setSelected(False)
| Q: Python for Nuke: Select node before current How can I select a node via python before the one currently selected?
For example, I want to add a "Clamp" node exactly before all "Write" ones.
A: This code snippet allows you to find the nodes upstream of an existing Write node.
import nuke
iNode = nuke.toNode('Write1')
def upstream(iNode, maxDeep=-1, found=None):
    if found is None:
        found = set()
    if maxDeep != 0:
        willFind = set(z for z in iNode.dependencies() if z not in found)
        found.update(willFind)
        for depth in willFind:
            upstream(depth, maxDeep - 1, found)  # decrement remaining depth; -1 means unlimited
    return found
Then call the method upstream(iNode).
And the script snippet you sent me earlier should look like this:
allWrites = nuke.allNodes('Grade')
depNodes = nuke.selectedNode().dependencies()
for depNode in depNodes:
depNode.setSelected(True)
queueElem = len(allWrites)
trigger = -1
for i in range(1,queueElem+1):
trigger += 1
for write in allWrites[(0+trigger):(1+trigger)]:
write.setSelected(True)
nuke.createNode("Clamp")
for all in nuke.allNodes():
all.setSelected(False)
| stackoverflow | {
"language": "en",
"length": 143,
"provenance": "stackexchange_0000F.jsonl.gz:882082",
"question_score": "3",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44595555"
} |
5505c01cd24087302a2588298b4c86e9753028b1 | Stackoverflow Stackexchange
Q: Is there a way for a Python script to "recognize" which EC2 instance it is running on? Let's say that my script is running on an EC2 instance named ec2-test1 and it communicates with an S3 bucket named: s3-bucket-test1, but when the script is run on ec2-test2 it's able to identify the EC2 instance it is currently running on and change the directory to reference s3-bucket-test2. Is there a way to do that? I know that for internal paths you can use os.path.dirname(os.path.realpath(__file__)) but was wondering if there is a way to do something like that for EC2 instance name in Python?
A: Use the following Boto3 code to get the current instance name. Warning: No exception handling is included.
import boto3
import os
def get_inst_name():
# Get Instance ID from EC2 Instance Metadata
inst_id = os.popen("curl -s http://169.254.169.254/latest/meta-data/instance-id").read()
# Get EC2 instance object
ec2 = boto3.resource('ec2')
inst = ec2.Instance(inst_id)
# Obtain tags associated with the EC2 instance object
for tags in inst.tags:
if tags["Key"] == 'Name':
#print tags["Value"]
return tags["Value"]
*
*Get the instance id from metadata server
*Use the instance id to query Boto3 resource
| Q: Is there a way for a Python script to "recognize" which EC2 instance it is running on? Let's say that my script is running on an EC2 instance named ec2-test1 and it communicates with an S3 bucket named: s3-bucket-test1, but when the script is run on ec2-test2 it's able to identify the EC2 instance it is currently running on and change the directory to reference s3-bucket-test2. Is there a way to do that? I know that for internal paths you can use os.path.dirname(os.path.realpath(__file__)) but was wondering if there is a way to do something like that for EC2 instance name in Python?
A: Use the following Boto3 code to get the current instance name. Warning: No exception handling is included.
import boto3
import os
def get_inst_name():
# Get Instance ID from EC2 Instance Metadata
inst_id = os.popen("curl -s http://169.254.169.254/latest/meta-data/instance-id").read()
# Get EC2 instance object
ec2 = boto3.resource('ec2')
inst = ec2.Instance(inst_id)
# Obtain tags associated with the EC2 instance object
for tags in inst.tags:
if tags["Key"] == 'Name':
#print tags["Value"]
return tags["Value"]
*
*Get the instance id from metadata server
*Use the instance id to query Boto3 resource
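The same flow without shelling out to curl, as a sketch assuming the requests library is installed:
import boto3
import requests

def get_instance_name():
    # Instance ID from the EC2 metadata endpoint
    inst_id = requests.get(
        "http://169.254.169.254/latest/meta-data/instance-id", timeout=2
    ).text
    inst = boto3.resource("ec2").Instance(inst_id)
    # First tag whose Key is 'Name', or None if the instance is untagged
    return next((t["Value"] for t in inst.tags or [] if t["Key"] == "Name"), None)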
| stackoverflow | {
"language": "en",
"length": 187,
"provenance": "stackexchange_0000F.jsonl.gz:882086",
"question_score": "3",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44595564"
} |
a5ab7b9363d746af9b42a5ea7d97dee7a59ccefa | Stackoverflow Stackexchange
Q: How to run script in v-html I get embed codes (Instagram, Twitter and so on) from database. How to bind them to a vue component? Is there any way for executing script tag in v-html?
A: Short answer: You can't. Your browser blocks the execution of script tags once the DOM has loaded.
Long answer: You could try matching the src attribute of the script and fetch + evaluate it, or match the inner content of the div and evaluate it, but this is not recommended.
| Q: How to run script in v-html I get embed codes (Instagram, Twitter and so on) from database. How to bind them to a vue component? Is there any way for executing script tag in v-html?
A: Short answer: You can't. Your browser blocks the execution of script tags once the DOM has loaded.
Long answer: You could try matching the src attribute of the script and fetch + evaluate it, or match the inner content of the div and evaluate it, but this is not recommended.
A: For the purpose of the title, the browser will block you.
However, for the purpose of the question, you can easily predict/list the embed codes you want to support, so this is something you could do:
if (window.twttr) {
// The script is already loaded, so just reload the embeds
window.twttr.widgets.load();
} else if (!document.getElementById('twttr-widgets')) {
const embed = document.createElement('script');
embed.id = 'twttr-widgets'
embed.src = 'https://platform.twitter.com/widgets.js';
document.body.appendChild(embed);
// And when the script loads, the embeds will load too
}
This can easily be replicated for most embed libraries that allow you to "reload" all the widgets on the page:
*
*Facebook FB.XFBML.parse
*Twitter twttr.widgets.load
*Instagram instgrm.Embeds.process
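Tying it together in a component (a sketch; the prop name is illustrative):
export default {
  props: ['embedHtml'],
  template: '<div v-html="embedHtml"></div>',
  mounted() {
    this.$nextTick(() => {
      // re-run whichever embed parsers are present on the page
      if (window.instgrm) window.instgrm.Embeds.process();
      if (window.twttr) window.twttr.widgets.load(this.$el);
      if (window.FB) window.FB.XFBML.parse(this.$el);
    });
  },
};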
| stackoverflow | {
"language": "en",
"length": 194,
"provenance": "stackexchange_0000F.jsonl.gz:882113",
"question_score": "8",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44595657"
} |
51a8efeda953736eaad584780f57cbe1df94f21a | Stackoverflow Stackexchange
Q: Chai test array of objects to "contain something like" an object submatch Ok. I've tried to read other questions here but still didn't find a straightforward answer.
How can I assert a partial object match in an array using chai?
Something like the following:
var expect = require('chai').expect;
var data = [ { name: 'test', value: 'bananas' } ];
expect(data).to.be.an('array').that.contains.something.like({name: 'test'});
Just to clarify, my intention is to get as close to the example provided as possible.
*
*to chain after the .be.an('array') and
*to provide only the partial object as a parameter (unlike chai-subset).
I really thought that expect(data).to.be.an('array').that.deep.contains({name: 'test'}); would work, but it fails on not being a partial match and I'm kinda screwed there.
A: ES6+
Clean, functional and without dependencies, simply use a map to filter the key you want to check
something like:
const data = [ { name: 'test', value: 'bananas' } ];
expect(data.map(e=>e.name)).to.include("test");
and if you want to test multiple keys:
expect(data.map(e=>({name:e.name}))).to.include({name:"test"});
https://www.chaijs.com/api/bdd/
| Q: Chai test array of objects to "contain something like" an object submatch Ok. I've tried to read other questions here but still didn't find a straightforward answer.
How can I assert a partial object match in an array using chai?
Something like the following:
var expect = require('chai').expect;
var data = [ { name: 'test', value: 'bananas' } ];
expect(data).to.be.an('array').that.contains.something.like({name: 'test'});
Just to clarify, my intention is to get as close to the example provided as possible.
*
*to chain after the .be.an('array') and
*to provide only the partial object as a parameter (unlike chai-subset).
I really thought that expect(data).to.be.an('array').that.deep.contains({name: 'test'}); would work, but it fails on not being a partial match and I'm kinda screwed there.
A: ES6+
Clean, functional and without dependencies, simply use a map to filter the key you want to check
something like:
const data = [ { name: 'test', value: 'bananas' } ];
expect(data.map(e=>e.name)).to.include("test");
and if you want to test multiple keys:
expect(data.map(e=>({name:e.name}))).to.include({name:"test"});
https://www.chaijs.com/api/bdd/
A: Not sure why you dismissed chai-subset as this seems to work:
expect(data).to.be.an("array").to.containSubset([{ name: "test" }]);
A: Since chai-like@0.2.14 the following approch will work:
var chai = require('chai'),
expect = chai.expect;
chai.use(require('chai-like'));
chai.use(require('chai-things')); // Don't swap these two
expect(data).to.be.an('array').that.contains.something.like({name: 'test'});
A: A solution without third libraries or plugins:
var data = [ { name: 'test', value: 'bananas' } ];
expect(data.map(({name}) => ({name}))).to.deep.include({name: 'test'});
| stackoverflow | {
"language": "en",
"length": 224,
"provenance": "stackexchange_0000F.jsonl.gz:882114",
"question_score": "5",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44595658"
} |
618fa4ad82d9df1ada8f7a42ee19e013937e28d5 | Stackoverflow Stackexchange
Q: Does Go have a case-insensitive string contains() function? I would like to be able to determine whether stringB is a case-insensitive substring of stringA. Looking through Go's strings pkg, the closest I can get is strings.Contains(strings.ToLower(stringA), strings.ToLower(stringB)). Is there a less wordy alternative that I'm not seeing?
A: Another option:
package main
import "regexp"
func main() {
b := regexp.MustCompile("(?i)we").MatchString("West East")
println(b)
}
https://golang.org/pkg/regexp/syntax
| Q: Does Go have a case-insensitive string contains() function? I would like to be able to determine whether stringB is a case-insensitive substring of stringA. Looking through Go's strings pkg, the closest I can get is strings.Contains(strings.ToLower(stringA), strings.ToLower(stringB)). Is there a less wordy alternative that I'm not seeing?
A: Another option:
package main
import "regexp"
func main() {
b := regexp.MustCompile("(?i)we").MatchString("West East")
println(b)
}
https://golang.org/pkg/regexp/syntax
A: If it's just the wordiness that you dislike, you could try making your code formatting cleaner, e.g.:
strings.Contains(
strings.ToLower(stringA),
strings.ToLower(stringB),
)
Or hiding it in a function in your own utils (or whatever) package:
package utils
import "strings"
func ContainsI(a string, b string) bool {
return strings.Contains(
strings.ToLower(a),
strings.ToLower(b),
)
}
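Usage then reads:
utils.ContainsI("West East", "west") // true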
A: I don't see one in the standard packages. How about this?
package main
import (
"fmt"
"strings"
)
func strcasestr(a, b string) bool {
    d := len(a)
    if d == 0 {
        return true
    }
    xx := strings.ToLower(a[0:1]) + strings.ToUpper(a[0:1])
    for i := 0; i <= len(b)-d; {
        // search from i onward, not from the start of b
        j := strings.IndexAny(b[i:], xx)
        if j == -1 || i+j+d > len(b) {
            break
        }
        i += j
        if d == 1 {
            return true
        }
        if strings.EqualFold(a[1:], b[i+1:i+d]) {
            return true
        }
        i++
    }
    return false
}
func main() {
examples := []struct {
a, b string
}{
{"APP", "apple pie"},
{"Read", "banana bread"},
{"ISP", "cherry crisp"},
{"ago", "dragonfruit tart"},
{"INC", "elderberry wine"},
{"M", "Feijoa jam"},
}
for i, e := range examples {
fmt.Println(i, ":", e.a, " in ", e.b, "? ", strcasestr(e.a, e.b))
}
}
A: Expanding on Zombo's answer with a benchmark comparing the use of strings.Contains(strings.ToLower(s), strings.ToLower(substr)) to the use of regexp.MustCompile("(?i)" + regexp.QuoteMeta(substr)).MatchString(s).
Code
import (
"regexp"
"strings"
"testing"
)
const checkStringLen38 = "Hello RiCHard McCliNTock. How are you?"
const checkStringLen3091 = `What is Lorem Ipsum?
Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged. It was popularised in the 1960s with the release of Letraset sheets containing Lorem Ipsum passages, and more recently with desktop publishing software like Aldus PageMaker including versions of Lorem Ipsum.
Why do we use it?
It is a long established fact that a reader will be distracted by the readable content of a page when looking at its layout. The point of using Lorem Ipsum is that it has a more-or-less normal distribution of letters, as opposed to using 'Content here, content here', making it look like readable English. Many desktop publishing packages and web page editors now use Lorem Ipsum as their default model text, and a search for 'lorem ipsum' will uncover many web sites still in their infancy. Various versions have evolved over the years, sometimes by accident, sometimes on purpose (injected humour and the like).
Where does it come from?
Contrary to popular belief, Lorem Ipsum is not simply random text. It has roots in a piece of classical Latin literature from 45 BC, making it over 2000 years old. RiCHard McCliNTock, a Latin professor at Hampden-Sydney College in Virginia, looked up one of the more obscure Latin words, consectetur, from a Lorem Ipsum passage, and going through the cites of the word in classical literature, discovered the undoubtable source. Lorem Ipsum comes from sections 1.10.32 and 1.10.33 of "de Finibus Bonorum et Malorum" (The Extremes of Good and Evil) by Cicero, written in 45 BC. This book is a treatise on the theory of ethics, very popular during the Renaissance. The first line of Lorem Ipsum, "Lorem ipsum dolor sit amet..", comes from a line in section 1.10.32.
The standard chunk of Lorem Ipsum used since the 1500s is reproduced below for those interested. Sections 1.10.32 and 1.10.33 from "de Finibus Bonorum et Malorum" by Cicero are also reproduced in their exact original form, accompanied by English versions from the 1914 translation by H. Rackham.
Where can I get some?
There are many variations of passages of Lorem Ipsum available, but the majority have suffered alteration in some form, by injected humour, or randomised words which don't look even slightly believable. If you are going to use a passage of Lorem Ipsum, you need to be sure there isn't anything embarrassing hidden in the middle of text. All the Lorem Ipsum generators on the Internet tend to repeat predefined chunks as necessary, making this the first true generator on the Internet. It uses a dictionary of over 200 Latin words, combined with a handful of model sentence structures, to generate Lorem Ipsum which looks reasonable. The generated Lorem Ipsum is therefore always free from repetition, injected humour, or non-characteristic words etc.`
const searchQuery = "richard mcclintock"
func BenchmarkContainsLowerLowerShort(b *testing.B) {
for n := 0; n < b.N; n++ {
strings.Contains(strings.ToLower(checkStringLen38), strings.ToLower(searchQuery))
}
}
func BenchmarkContainsLowerLowerLong(b *testing.B) {
for n := 0; n < b.N; n++ {
strings.Contains(strings.ToLower(checkStringLen3091), strings.ToLower(searchQuery))
}
}
func BenchmarkRegexpShort(b *testing.B) {
for n := 0; n < b.N; n++ {
regexp.MustCompile("(?i)" + regexp.QuoteMeta(searchQuery)).MatchString(checkStringLen38)
}
}
func BenchmarkRegexpLong(b *testing.B) {
for n := 0; n < b.N; n++ {
regexp.MustCompile("(?i)" + regexp.QuoteMeta(searchQuery)).MatchString(checkStringLen3091)
}
}
func BenchmarkRegexpShortPrebuilt(b *testing.B) {
prebuiltRegExp := regexp.MustCompile("(?i)" + regexp.QuoteMeta(searchQuery))
for n := 0; n < b.N; n++ {
prebuiltRegExp.MatchString(checkStringLen38)
}
}
func BenchmarkRegexpLongPrebuilt(b *testing.B) {
prebuiltRegExp := regexp.MustCompile("(?i)" + regexp.QuoteMeta(searchQuery))
for n := 0; n < b.N; n++ {
prebuiltRegExp.MatchString(checkStringLen3091)
}
}
Results
>go test -bench=. ./...
goos: windows
goarch: amd64
cpu: Intel(R) Core(TM) i7-6700K CPU @ 4.00GHz
BenchmarkContainsLowerLowerShort-8 9147040 130.3 ns/op
BenchmarkContainsLowerLowerLong-8 158318 7594 ns/op
BenchmarkRegexpShort-8 364604 3262 ns/op
BenchmarkRegexpLong-8 40394 29851 ns/op
BenchmarkRegexpShortPrebuilt-8 3741936 328.8 ns/op
BenchmarkRegexpLongPrebuilt-8 44394 27264 ns/op
Interpretation of Results
When only searching short strings, use of regexp.MustCompile("(?i)" + regexp.QuoteMeta(substr)).MatchString(s) benefits greatly (one order of magnitude) from building *regexp.Regexp only once. However, even then it takes about three times as long to execute as strings.Contains(strings.ToLower(s), strings.ToLower(substr)) for both long as well as short input strings (and we did not even check how much faster the ToLower()-variant would be if we assumed that the query string already was lower-cased) and even that is only the case under the constraint that substr is always the same, because building the regular expression only once is not an option otherwise.
tl;dr
There is nothing to be gained by using regexp.MustCompile("(?i)" + regexp.QuoteMeta(substr)).MatchString(s) over strings.Contains(strings.ToLower(s), strings.ToLower(substr)).
| stackoverflow | {
"language": "en",
"length": 1073,
"provenance": "stackexchange_0000F.jsonl.gz:882117",
"question_score": "21",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44595669"
} |
11ef3c81b1e7c6247d9d3209671c66d3d37b6f25 | Stackoverflow Stackexchange
Q: How to hide warning and show errors only in Stylelint linting result? Stylelint has this option "severity": "warning" to change errors to warnings. Is it possible to hide warnings from the output temporarily? Because I want to fix some errors, and because of the many warnings it takes a long time to reach the error instances.
A: I believe you are looking for the quiet option, which suppresses "warnings"
https://github.com/stylelint/stylelint/blob/master/lib/cli.js#L257-L260
--quiet, -q
Only register warnings for rules with an "error"-level severity (ignore
"warning"-level).
| Q: How to hide warning and show errors only in Stylelint linting result? Stylelint has this option "severity": "warning" to change errors to warnings. Is it possible to hide warnings from the output temporarily? Because I want to fix some errors, and because of the many warnings it takes a long time to reach the error instances.
A: I believe you are looking for the quiet option, which suppresses "warnings"
https://github.com/stylelint/stylelint/blob/master/lib/cli.js#L257-L260
--quiet, -q
Only register warnings for rules with an "error"-level severity (ignore
"warning"-level).
A: I believe you have a choice:
*
*Turn off the rules using null.
*Write a custom formatter that does not display warnings.
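And for the --quiet route from the earlier answer, a command-line example (the glob is illustrative):
npx stylelint "src/**/*.css" --quiet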
| stackoverflow | {
"language": "en",
"length": 106,
"provenance": "stackexchange_0000F.jsonl.gz:882122",
"question_score": "3",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44595681"
} |
1149921c76c76d1f6eba30d4d83b7e217cb67468 | Stackoverflow Stackexchange
Q: django transaction management with class based view I have a class-based view that has a post method as follows
class Comment(View):
def dispatch(self, request, *args, **kwargs):
    return super(Comment, self).dispatch(request, *args, **kwargs)
@method_decorator(transaction.non_atomic_requests)
def post(self, request, *args, **kwargs):
<Some code>
In the above example transaction.non_atomic_requests does not make the post method non-atomic, but if I use the same decorator on the dispatch method it works. Why?
I am using django 1.9
A: When you use the transaction.non_atomic_requests decorator, it sets an attribute _non_atomic_requests on the decorated object.
Django then checks for the _non_atomic_requests attribute when the view runs.
When Django runs a class based view, it doesn't run the dispatch method directly, it runs the view that is returned by the as_view() method.
There is code in the as_view() method that copies attributes like _non_atomic_requests from the dispatch method to the view that is returned.
However the as_view method does not copy attributes from any other methods like get or post. When you decorate the post method, the _non_atomic_requests attribute is not copied to the view, so the decorator does not work.
Therefore, you must apply decorators like transaction.non_atomic_requests or csrf_exempt to the dispatch method.
| Q: django transaction management with class based view I have a class-based view that has a post method as follows
class Comment(View):
def dispatch(self, request, *args, **kwargs):
    return super(Comment, self).dispatch(request, *args, **kwargs)
@method_decorator(transaction.non_atomic_requests)
def post(self, request, *args, **kwargs):
<Some code>
In the above example transaction.non_atomic_requests does not make the post method non-atomic, but if I use the same decorator on the dispatch method it works. Why?
I am using django 1.9
A: When you use the transaction.non_atomic_requests decorator, it sets an attribute _non_atomic_requests on the decorated object.
Django then checks for the _non_atomic_requests attribute when the view runs.
When Django runs a class based view, it doesn't run the dispatch method directly, it runs the view that is returned by the as_view() method.
There is code in the as_view() method that copies attributes like _non_atomic_requests from the dispatch method to the view that is returned.
However the as_view method does not copy attributes from any other methods like get or post. When you decorate the post method, the _non_atomic_requests attribute is not copied to the view, so the decorator does not work.
Therefore, you must apply decorators like transaction.non_atomic_requests or csrf_exempt to the dispatch method.
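A minimal sketch of the working placement (Django 1.9-era imports):
from django.db import transaction
from django.utils.decorators import method_decorator
from django.views.generic import View

class Comment(View):
    @method_decorator(transaction.non_atomic_requests)
    def dispatch(self, request, *args, **kwargs):
        return super(Comment, self).dispatch(request, *args, **kwargs)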
A: According to the django documentation:
To decorate every instance of a class-based view, you need to decorate the class definition itself. To do this you apply the decorator to the dispatch() method of the class.
The dispatch is the first method that handle the request for the view.
https://docs.djangoproject.com/en/1.11/topics/class-based-views/intro/#decorating-the-class
| stackoverflow | {
"language": "en",
"length": 243,
"provenance": "stackexchange_0000F.jsonl.gz:882160",
"question_score": "4",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44595770"
} |
d0aa2a4b816024e6faf97a8a5cf8971d44bb1134 | Stackoverflow Stackexchange
Q: How do I convert IEnumerable to Collection with linq I am populating a partial view with products from my ProductViewModel. When the model comes back we have...
var viewModel = _productAgent.GetProductsByCatalog(catalogId);
viewModel is a Collection of ProductViewModel
I am using linq to limit the size of the collection to the top 10 products orderby createDate desc like so...
var newList = (from p in viewModel
//from pf in p.DomainObjectFields
select p).Distinct().OrderByDescending(d => d.CreateDate).Take(10);
and I try to load the partial...
return PartialView("_ProductGrid", viewModel);
The problem is newList is IEnumerable. It needs to be a Collection, and I do not know how to convert it, or whether I'm taking the correct approach.
A: You can use the extension methods .ToList(), .ToArray(), etc.
var newList = viewModel
.Distinct()
.OrderByDescending(d => d.CreateDate)
.Take(10)
.ToList();
Update
If you want to convert an IEnumerable<T> to Collection<T> you can use the overload of the constructor of the class Collection<T> like this:
Collection<ProductViewModel> newList = new Collection<ProductViewModel>(viewModel
.Distinct()
.OrderByDescending(d => d.CreateDate)
.Take(10)
.ToList());
| Q: How do I convert IEnumerable to Collection with linq I am populating a partial view with products from my ProductViewModel. When the model comes back we have...
var viewModel = _productAgent.GetProductsByCatalog(catalogId);
viewModel is a Collection of ProductViewModel
I am using linq to limit the size of the collection to the top 10 products orderby createDate desc like so...
var newList = (from p in viewModel
//from pf in p.DomainObjectFields
select p).Distinct().OrderByDescending(d => d.CreateDate).Take(10);
and I try to load the partial...
return PartialView("_ProductGrid", viewModel);
The problem is newList is IEnumerable. It needs to be a Collection, and I do not know how to convert it, or whether I'm taking the correct approach.
A: You can use the extension methods .ToList(), .ToArray(), etc.
var newList = viewModel
.Distinct()
.OrderByDescending(d => d.CreateDate)
.Take(10)
.ToList();
Update
If you want to convert an IEnumerable<T> to Collection<T> you can use the overload of the constructor of the class Collection<T> like this:
Collection<ProductViewModel> newList = new Collection<ProductViewModel>(viewModel
.Distinct()
.OrderByDescending(d => d.CreateDate)
.Take(10)
.ToList());
A: You could try newList.ToList();
| stackoverflow | {
"language": "en",
"length": 172,
"provenance": "stackexchange_0000F.jsonl.gz:882169",
"question_score": "9",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44595802"
} |
62d795b49101f4c26d9423123afc4f949aa760d0 | Stackoverflow Stackexchange
Q: Validate Date FORMAT (not date string) using MomentJS? I've seen that you can use an ".isValid()" function to check that a given string is in a date format:
moment('2007-05-05', 'YYYY-MM-DD', true).isValid()
But is there a way to confirm that the format is correct? For example:
'YYYY-MM-DD' should return true, but
'YYYY-MM-DDsadsadl' should return false since the characters at the end of the string aren't valid DateTime chars.
We're working on a tool that allows a user to input an existing date format, and then a second input to enter the desired format, but we need validation to ensure the string can properly parse and convert, but they aren't entering a specific date.
The application must accept any and all possible date formats.
A: Use the following function to validate your format.
validFormat = function(inputFormat){
var validation = moment(moment('2017-06-17').format(inputFormat), inputFormat).inspect();
if(validation.indexOf('invalid') < 0)
return true;
else
return false;
}
Do spend some time to understand this. This simply does a reverse verification using inspect(). The date 2017-06-17 can be replaced by any valid date.
This Moment Js Docs will help you identify the valid formats.
Just make a call to this function as
validFormat('YYYY MM DD')
| Q: Validate Date FORMAT (not date string) using MomentJS? I've seen that you can use an ".isValid()" function to check that a given string is in a date format:
moment('2007-05-05', 'YYYY-MM-DD', true).isValid()
But is there a way to confirm that the format is correct? For example:
'YYYY-MM-DD' should return true, but
'YYYY-MM-DDsadsadl' should return false since the characters at the end of the string aren't valid DateTime chars.
We're working on a tool that allows a user to input an existing date format, and then a second input to enter the desired format, but we need validation to ensure the string can properly parse and convert, but they aren't entering a specific date.
The application must accept any and all possible date formats.
A: Use the following function to validate your format.
validFormat = function(inputFormat){
var validation = moment(moment('2017-06-17').format(inputFormat), inputFormat).inspect();
if(validation.indexOf('invalid') < 0)
return true;
else
return false;
}
Do spend some time to understand this. This simply does a reverse verification using inspect(). The date 2017-06-17 can be replaced by any valid date.
This Moment Js Docs will help you identify the valid formats.
Just make a call to this function as
validFormat('YYYY MM DD')
A: const getIsValid = inputFormat => moment(moment().format(inputFormat), inputFormat).isValid()
Explanation:
moment().format(inputFormat) - Create a date string from the current time in that format
This is then wrapped with moment() to make that string a moment date object, defining the format to parse it with. Finally we call the isValid() property on that moment date object. This ensures we are able to both create and parse a moment with our custom format.
| stackoverflow | {
"language": "en",
"length": 266,
"provenance": "stackexchange_0000F.jsonl.gz:882177",
"question_score": "4",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44595835"
} |
8db3bfbab06de814b9d2b9b0bbcd1e3227816907 | Stackoverflow Stackexchange
Q: How to copy file from localhost to remote host in Ansible playbook? I have a directory:
/users/rolando/myfile
I want to copy "myfile" to hostname "targetserver" in directory:
/home/rolando/myfile
What is the syntax in the playbook to do this? Examples I found with the copy command look like it's more about copying a file from a source directory on a remote server to a target directory on the same remote server.
The line in my playbook .yml I tried that failed:
- copy:
src='/users/rolando/myfile'
dest='rolando@targetserver:/home/rolando/myfile'
What am I doing wrong?
A: From copy synopsis:
The copy module copies a file on the local box to remote locations.
- hosts: targetserver
tasks:
- copy:
src: /users/rolando/myfile
dest: /users/rolando/myfile
| Q: How to copy file from localhost to remote host in Ansible playbook? I have a directory:
/users/rolando/myfile
I want to copy "myfile" to hostname "targetserver" in directory:
/home/rolando/myfile
What is the syntax in the playbook to do this? Examples I found with the copy command look like it's more about copying a file from a source directory on a remote server to a target directory on the same remote server.
The line in my playbook .yml I tried that failed:
- copy:
src='/users/rolando/myfile'
dest='rolando@targetserver:/home/rolando/myfile'
What am I doing wrong?
A: From copy synopsis:
The copy module copies a file on the local box to remote locations.
- hosts: targetserver
tasks:
- copy:
src: /users/rolando/myfile
dest: /users/rolando/myfile
A: Here is an updated answer. The answer above copies a file from the local machine; if the source file is already on the remote machine, this should be easy using the remote_src parameter available in the copy module
- name: Copy a "sudoers" file on the remote machine for editing
copy:
src: /users/rolando/myfile
dest: /home/rolando/myfile
remote_src: yes
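The same local-to-remote copy can also be done ad hoc, without a playbook (the inventory name is illustrative):
ansible targetserver -m copy -a "src=/users/rolando/myfile dest=/home/rolando/myfile"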
| stackoverflow | {
"language": "en",
"length": 162,
"provenance": "stackexchange_0000F.jsonl.gz:882184",
"question_score": "20",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44595867"
} |
dabadaad9b38658b89ad83cfc24b4b501145b22f | Stackoverflow Stackexchange
Q: Vue2 navigate to external url with location.href I tried to go to 'www.mytargeturl.org' using router.go, router.push, router.replace and window.location.href to redirect my vuejs app but I always get myVueapp.com/www.mytargeturl.org
Here's my route:
routes:[
{path: '/', component: App,
children:[
{
path: 'detail',
component: ItemDetail,
props: true
},
{
path: 'search',
component: Middle
}
]
},
{
path: '/test', component: Test
},
{ path: '/a', redirect: 'www.mytargeturl.org' } // also tried this but didn't work
]
A: No need to use navigation guards in this case, cleaner to use dynamic redirect right there in route config
routes:[
{
path: '/redirect-example',
redirect: (to: Route) => {
window.location.href = 'http://example.com'
return '/redirecting' // not important since redirecting
}
}
]
| Q: Vue2 navigate to external url with location.href I tried to go to 'www.mytargeturl.org' using router.go, router.push, router.replace and window.location.href to redirect my vuejs app but I always get myVueapp.com/www.mytargeturl.org
Here's my route:
routes:[
{path: '/', component: App,
children:[
{
path: 'detail',
component: ItemDetail,
props: true
},
{
path: 'search',
component: Middle
}
]
},
{
path: '/test', component: Test
},
{ path: '/a', redirect: 'www.mytargeturl.org' } // also tried this but didn't work
]
A: No need to use navigation guards in this case, cleaner to use dynamic redirect right there in route config
routes:[
{
path: '/redirect-example',
redirect: (to: Route) => {
window.location.href = 'http://example.com'
return '/redirecting' // not important since redirecting
}
}
]
A: Agreed with the people in other comments. Vue's philosophy is not to solve already solved problems. Same here. Just use an ordinary a tag for the link whenever possible. If you need to go through the router though, use Navigation Guards:
{
path: '/redirect',
beforeEnter(to, from, next) {
// Put the full page URL including the protocol http(s) below
window.location.replace("https://example.com")
}
}
A: In your route, set a meta property with the redirection url
{
name: "Shop",
path: "/shop",
component: {},
meta: { RedirectExternalUrl: "https://mystore.com" },
},
And setup a router guard function to do the work:
router.beforeEach(async (to, from, next) => {
// External redirect
if (to.matched.some((record) => record.meta.RedirectExternalUrl)) {
const url: string = to.meta.RedirectExternalUrl as string;
window.location.replace(url);
return;
}
next();
});
A: Found the solution. By adding http to my target URL, all is well! Like this:
window.location = 'http://mytargeturl.org'
This seems to be a universal JavaScript truth, not just Vue.
| stackoverflow | {
"language": "en",
"length": 270,
"provenance": "stackexchange_0000F.jsonl.gz:882206",
"question_score": "25",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44595929"
} |
a39dd20f8f93c9c777e391250597624ce24ebe1f | Stackoverflow Stackexchange
Q: How to fix build error when running `react-native run-windows`? After running react-native windows in a freshly initialized React Native app, if you immediately run react-native run-windows this error happens:
Build failed with message Error: Command failed: "C:\Program Files (x86)\MSBuild\14.0\bin\msbuild.exe"
"C:/<path_to_project>/windows/<project_name>.sln" /clp:NoSummary;NoItemAndProperty;Verbosity=minimal /nologo
/p:Configuration=debug /p:Platform=x86
. Check your build configuration.
A: This can happen when you have installed Visual Studio, but never started it as "devenv.exe" before trying to use some commandline tools. There is some additional setup that happens on first launch of Visual Studio before the commandline tools can be invoked by the react-native run-windows command.
See the React Native Windows troubleshooting section for more information:
https://github.com/microsoft/react-native-windows/blob/0.60-stable/current/docs/SetupTroubleshooting.md
| Q: How to fix build error when running `react-native run-windows`? After running react-native windows in a freshly initialized React Native app, if you immediately run react-native run-windows this error happens:
Build failed with message Error: Command failed: "C:\Program Files (x86)\MSBuild\14.0\bin\msbuild.exe"
"C:/<path_to_project>/windows/<project_name>.sln" /clp:NoSummary;NoItemAndProperty;Verbosity=minimal /nologo
/p:Configuration=debug /p:Platform=x86
. Check your build configuration.
A: This can happen when you have installed Visual Studio, but never started it as "devenv.exe" before trying to use some commandline tools. There is some additional setup that happens on first launch of Visual Studio before the commandline tools can be invoked by the react-native run-windows command.
See the React Native Windows troubleshooting section for more information:
https://github.com/microsoft/react-native-windows/blob/0.60-stable/current/docs/SetupTroubleshooting.md
| stackoverflow | {
"language": "en",
"length": 109,
"provenance": "stackexchange_0000F.jsonl.gz:882222",
"question_score": "4",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44595974"
} |
977c47dc275d9b36ef7bfff981019096d955a208 | Stackoverflow Stackexchange
Q: How to read docker env variables into Java code I am trying to dockerize my Java application which tries to connect to a RabbitMQ server. I have passed the RabbitMQ URL via a Docker env variable and am reading the same URL using
System.getenv("RABBITMQ_URL")
, but it came out to be null. Is there anything wrong with the way I am reading the Docker env variable? Here is my Docker create command:
docker service create --name xxx --env
RABBITMQ_URL=amqp://rabbitmq:xxxx --network msgq --with-registry-auth
${imageName}
A: This is weird, but restarting the Docker container just worked fine for me. It turns out I have to restart the container whenever I update the network connection using "--network". Thanks
| Q: How to read docker env variables into Java code I am trying to dockerize my Java application which tries to connect to a RabbitMQ server. I have passed the RabbitMQ URL via a Docker env variable and am reading the same URL using
System.getenv("RABBITMQ_URL")
, but it came out to be null. Is there anything wrong with the way I am reading the Docker env variable? Here is my Docker create command:
docker service create --name xxx --env
RABBITMQ_URL=amqp://rabbitmq:xxxx --network msgq --with-registry-auth
${imageName}
A: This is weird, but restarting the Docker container just worked fine for me. It turns out I have to restart the container whenever I update the network connection using "--network". Thanks
A: Seems to work just fine for me. Please see below:
>> cat Dockerfile
FROM java
COPY Test.java /Test.java
RUN javac Test.java
CMD java Test
>> cat Test.java
class Test {
public static void main(String[] j) {
System.out.println(System.getenv("RABBITMQ_URL"));
while (true) {}
}
}
>> docker build -t testj .
Sending build context to Docker daemon 6.656kB
Step 1/4 : FROM java
---> d23bdf5b1b1b
Step 2/4 : COPY Test.java /Test.java
---> Using cache
---> 2333685c6488
Step 3/4 : RUN javac Test.java
---> Using cache
---> 8d1e98d604b9
Step 4/4 : CMD java Test
---> Using cache
---> 6f9625f04966
Successfully built 6f9625f04966
Successfully tagged testj:latest
>> docker service create --name xxx --env RABBITMQ_URL=amqp://rabbitmq:xxxx --detach testj
937rbfctrds0z1mhpk1e7dlja
>> docker service logs xxx
xxx.1.acv6mqqy38pf@moby | amqp://rabbitmq:xxxx
>>
| stackoverflow | {
"language": "en",
"length": 239,
"provenance": "stackexchange_0000F.jsonl.gz:882230",
"question_score": "5",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44596007"
} |
7f17e583375b0064f7113a8ce86fc21aae343669 | Stackoverflow Stackexchange
Q: Datetime strptime in python I've tried the following code :
import datetime
d = datetime.datetime.strptime("01/27/2012", "%m/%d/%Y")
print(d)
and the output is :
2012-01-27 00:00:00
I'm using Linux Mint:
test@testsrv ~/pythonvault $ date
Fri Jun 16 21:40:57 EEST 2017
So, the question is: why does the output of the Python code return a date in "%Y-%m-%d" format (2012-01-27) instead of "%m/%d/%Y" format?
Please note that I'm using Python 2.7
Any help would be appreciated.
A: datetime.strptime(date_string, format) function returns a datetime object corresponding to date_string, parsed according to format.
When you print datetime object, it is formatted as a string in ISO 8601 format, YYYY-MM-DDTHH:MM:SS
References:
*
*https://docs.python.org/2/library/datetime.html#datetime.datetime.strptime
*https://docs.python.org/2/library/datetime.html#datetime.datetime.isoformat
| Q: Datetime strptime in python I've tried the following code :
import datetime
d = datetime.datetime.strptime("01/27/2012", "%m/%d/%Y")
print(d)
and the output is :
2012-01-27 00:00:00
I'm using Linux Mint:
test@testsrv ~/pythonvault $ date
Fri Jun 16 21:40:57 EEST 2017
So, the question is: why does the output of the Python code return a date in "%Y-%m-%d" format (2012-01-27) instead of "%m/%d/%Y" format?
Please note that I'm using Python 2.7
Any help would be appreciated.
A: The datetime.strptime(date_string, format) function returns a datetime object corresponding to date_string, parsed according to format.
When you print a datetime object, it is formatted as a string in ISO 8601 format (with a space separator), YYYY-MM-DD HH:MM:SS
References:
*
*https://docs.python.org/2/library/datetime.html#datetime.datetime.strptime
*https://docs.python.org/2/library/datetime.html#datetime.datetime.isoformat
A: You need to make sure you provide input accordingly
datetime.strptime(date_string,date_string_format).strftime(convert_to_date_string_format)
To print the date in specified format you need to provide format as below.
import datetime
d =datetime.datetime.strptime("01/27/2012","%m/%d/%Y").strftime('%m/%d/%Y')
print d
Output:
01/27/2012
>>Demo<<
A: As astutely noted in the comments, you are parsing to a datetime object using the format you specified.
strptime(...) is String Parse Time. You have specified the format for how the string should be interpreted to initialize a Datetime object, but that format is only utilized for initialization. By default, when you go to print that datetime object, you are getting the representation of str(DatetimeObjectInstance) (in your case, str(d)).
If you want a different format, you should use String Format Time (strftime(...))
A: import datetime
str_time= "2018-06-03 08:00:00"
date_date = datetime.datetime.strptime(str_time, "%Y-%m-%d %H:%M:%S")
print date_date
A: There is a difference between datetime.strp[arse]time() and datetime.strf[ormat]time().
The first one, strptime() allows you to create a date object from a string source, provided you can tell it what format to expect:
strDate = "11-Apr-2019_09:15:42"
dateObj = datetime.strptime(strDate, "%d-%b-%Y_%H:%M:%S")
print(dateObj) # shows: 2019-04-11 09:15:42
The second one, strftime() allows you to export your date object to a string in the format of your choosing:
dateObj = datetime(2019, 4, 11, 9, 19, 25)
strDate = dateObj.strftime("%m/%d/%y %H:%M:%S")
print(strDate) # shows: 04/11/19 09:19:25
What you're seeing is simply the default string format of a datetime object because you didn't explicitly tell it what format to use.
Checkout http://strftime.org/ for a list of all the different string format options that are availble.
A:
The datetime.strptime() class method creates a datetime object from a
string representing a date and time and a corresponding format string.
*
*%a
Weekday as locale’s abbreviated name.
Sun, Mon, …, Sat (en_US);
So, Mo, …, Sa (de_DE)
*%A
Weekday as locale’s full name.
Sunday, Monday, …, Saturday (en_US);
Sonntag, Montag, …, Samstag (de_DE)
*%w
Weekday as a decimal number, where 0 is Sunday and 6 is Saturday.
0, 1, …, 6
*%d
Day of the month as a zero-padded decimal number.
01, 02, …, 31
*%b
Month as locale’s abbreviated name.
Jan, Feb, …, Dec (en_US);
Jan, Feb, …, Dez (de_DE)
*%B
Month as locale’s full name.
January, February, …, December (en_US);
Januar, Februar, …, Dezember (de_DE)
*%m
Month as a zero-padded decimal number.
01, 02, …, 12
*%y
Year without century as a zero-padded decimal number.
00, 01, …, 99
*%Y
Year with century as a decimal number.
0001, 0002, …, 2013, 2014, …, 9998, 9999
*%H
Hour (24-hour clock) as a zero-padded decimal number.
00, 01, …, 23
*%I
Hour (12-hour clock) as a zero-padded decimal number.
01, 02, …, 12
*%p
Locale’s equivalent of either AM or PM.
AM, PM (en_US);
am, pm (de_DE)
*%M
Minute as a zero-padded decimal number.
00, 01, …, 59
*%S
Second as a zero-padded decimal number.
00, 01, …, 59
*%f
Microsecond as a decimal number, zero-padded to 6 digits.
000000, 000001, …, 999999
*%z
UTC offset in the form ±HHMM[SS[.ffffff]] (empty string if the object is naive).
(empty), +0000, -0400, +1030, +063415, -030712.345216
*%Z
Time zone name (empty string if the object is naive).
(empty), UTC, GMT
*%j
Day of the year as a zero-padded decimal number.
001, 002, …, 366
*%U
Week number of the year (Sunday as the first day of the week) as a zero-padded decimal number. All days in a new year preceding the
first Sunday are considered to be in week 0.
00, 01, …, 53
*%W
Week number of the year (Monday as the first day of the week) as a zero-padded decimal number. All days in a new year preceding the
first Monday are considered to be in week 0.
00, 01, …, 53
*%c
Locale’s appropriate date and time representation.
Tue Aug 16 21:30:00 1988 (en_US);
Di 16 Aug 21:30:00 1988 (de_DE)
*%x
Locale’s appropriate date representation.
08/16/88 (None);
08/16/1988 (en_US);
16.08.1988 (de_DE)
*%X
Locale’s appropriate time representation.
21:30:00 (en_US);
21:30:00 (de_DE)
*%%
A literal '%' character.
%
for more information please visit official website for python
Example Programs for better understanding
Program 1:
from datetime import datetime
data = "2022-02-09 12:25:22"
dat = datetime.strptime(data, "%Y-%m-%d %H:%M:%S")
print(dat)
Output:
2022-02-09 12:25:22
Program 2:
from datetime import datetime
data = "Thursday, 10 February 2022 11:28:45 am"
dat = datetime.strptime(data, "%A, %d %B %Y %I:%M:%S %p")
print(dat)
Output:
2022-02-10 11:28:45
| stackoverflow | {
"language": "en",
"length": 827,
"provenance": "stackexchange_0000F.jsonl.gz:882254",
"question_score": "25",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44596077"
} |
fc08ec424c2a5bced59a582ef038cd891cff2be9 | Stackoverflow Stackexchange
Q: Automatically inferring types from overridden interfaces in TypeScript I'm trying to create some TypeScript definitions for modules that already exist. In a particular interface to be implemented, the signature looks like this:
type NextFunction<T> = () => T;
type Response = string[] | Promise<string[]>;
interface IPage {
getBodyClasses(next: NextFunction<Response>): Response;
}
The parameter and return structures are fixed, and I'd really like to be able to have TypeScript infer what the parameter types are of my overridden methods. However, when I create my override, I see that the parameter implicitly has an any type.
class Page implements IPage {
getBodyClasses(next) {
return next();
}
}
Is there any way to mark getBodyClasses as a dedicated override so that the types for parameters are automatically inferred? It would already say that Page was improperly implementing the interface if I typed next as number, so I don't quite understand why it can't also then infer the type of next is the same as the interface's.
A: Contextual typing of implemented properties is not supported.
More
The main issue that tracked this is https://github.com/Microsoft/TypeScript/issues/1373
| Q: Automatically inferring types from overridden interfaces in TypeScript I'm trying to create some TypeScript definitions for modules that already exist. In a particular interface to be implemented, the signature looks like this:
type NextFunction<T> = () => T;
type Response = string[] | Promise<string[]>;
interface IPage {
getBodyClasses(next: NextFunction<Response>): Response;
}
The parameter and return structures are fixed, and I'd really like to be able to have TypeScript infer what the parameter types are of my overridden methods. However, when I create my override, I see that the parameter implicitly has an any type.
class Page implements IPage {
getBodyClasses(next) {
return next();
}
}
Is there any way to mark getBodyClasses as a dedicated override so that the types for parameters are automatically inferred? It would already say that Page was improperly implementing the interface if I typed next as number, so I don't quite understand why it can't also then infer the type of next is the same as the interface's.
A: Contextual typing of implemented properties is not supported.
More
The main issue that tracked this is https://github.com/Microsoft/TypeScript/issues/1373
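A commonly used workaround (a sketch, not contextual typing proper) is to declare the member as a property whose type is taken from the interface, so the arrow function's parameters are inferred from that annotation:
class Page implements IPage {
    // `next` is inferred as NextFunction<Response> from the property type
    getBodyClasses: IPage['getBodyClasses'] = (next) => {
        return next();
    };
}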
| stackoverflow | {
"language": "en",
"length": 182,
"provenance": "stackexchange_0000F.jsonl.gz:882269",
"question_score": "4",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44596117"
} |
aca361f5ca2b14115e0af15d27cdde00ed3a8a39 | Stackoverflow Stackexchange
Q: reload behaves different in python 2 and 3 In python 2:
import math
print(math.pi)
3.14159265359
math.pi = 2
print(math.pi)
2
reload(math)
print(math.pi)
3.14159265359
Now in python 3:
import math
print(math.pi)
3.141592653589793
math.pi=2
print(math.pi)
2
from importlib import reload
reload(math)
print(math.pi)
2
I was expecting the reload to reset the value of pi in the math library.
What am I doing wrong with the reload in python 3?
| Q: reload behaves different in python 2 and 3 In python 2:
import math
print(math.pi)
3.14159265359
math.pi = 2
print(math.pi)
2
reload(math)
print(math.pi)
3.14159265359
Now in python 3:
import math
print(math.pi)
3.141592653589793
math.pi=2
print(math.pi)
2
from importlib import reload
reload(math)
print(math.pi)
2
I was expecting the reload to reset the value of pi in the math library.
What am I doing wrong with the reload in python 3?
| stackoverflow | {
"language": "en",
"length": 67,
"provenance": "stackexchange_0000F.jsonl.gz:882271",
"question_score": "4",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44596126"
} |
1e612b2c3378420aae73fea0601fe5311c5cc9f5 | Stackoverflow Stackexchange
Q: Firebase cloud function deploy error irregularly my firebase deployment gets stuck at this log:
i functions: updating function [FUNCTION NAME]...
After canceling the deploy and retrying it throws the following error message:
⚠ functions: failed to update function resetBadgeCount
⚠ functions: HTTP Error: 400, An operation on function [FUNCTION NAME]
in region us-central1 in project [PROJECT NAME] is already in progress.
Please try again later.
So it seems like the deploy got stuck in the pipeline, blocking further deploys. After a while it let me deploy the functions normally again.
But is there an explanation for this? Or maybe even a workaround?
A: Go to the Google Cloud Functions console and see if there is a red exclamation mark against your function. Then select that particular function and try to delete it. Once it gets deleted from there, you can deploy again successfully. If it is showing a spinner, then wait till it shows the red mark.
 | Q: Firebase cloud function deploy error irregularly my firebase deployment gets stuck at this log:
i functions: updating function [FUNCTION NAME]...
After canceling the deploy and retrying it throws the following error message:
⚠ functions: failed to update function resetBadgeCount
⚠ functions: HTTP Error: 400, An operation on function [FUNCTION NAME]
in region us-central1 in project [PROJECT NAME] is already in progress.
Please try again later.
So it seems like the deploy got stuck in the pipeline, blocking further deploys. After a while it let me deploy the functions normally again.
But is there an explanation for this? Or maybe even a workaround?
A: Go to the Google Cloud Functions console and see if there is a red exclamation mark against your function. Then select that particular function and try to delete it. Once it gets deleted from there, you can deploy again successfully. If it is showing a spinner, then wait till it shows the red mark.
A: You can temporarily rename your function:
$ firebase deploy --only functions
...
i functions: deleting function onSameDataChanged...
i functions: creating function onSameDataChanged1...
...
✔ functions: all functions deployed successfully!
✔ Deploy complete!
A: *
*Comment or cut your function
*Deploy
*Uncomment or paste back the function
*Rename the function
*Deploy
*Rename the function back
*Deploy
A: Try this
You can fix the issue much more easily by examining the actual logs, using this command to open the log
firebase functions:log
The specific issue will be visible there. I sometimes even had errors as simple as a missing package in package.json
A: Also, you can wait a few minutes and you will get an error with {"code":10,"message":"ABORTED"}; then you can deploy again.
A: Just copy your index.js somewhere else and delete the function from the Firebase functions console
*
*firebase init - and overwrite all files again
*paste the index.js text again
*deploy...
A: For me it was the node version. Turns out I had 15.x on my machine and 12.x on the server. Just updating it solved my upload issue.
A: Make sure you've installed dependencies in the functions directory.
For more information about your function you can go to this page.
A: Set your directory to your project directory \functions then run this command:
npm install -g firebase-tools
| stackoverflow | {
"language": "en",
"length": 377,
"provenance": "stackexchange_0000F.jsonl.gz:882297",
"question_score": "48",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44596181"
} |
75335183e129bf973c8610fbebb81f5926625d7d | Stackoverflow Stackexchange
Q: swagger-ui How do you hide rest methods not implemented Maybe I'm just missing it: I would like to hide some rest methods from controllers that do not implement them, like options, delete, head
Is there an annotation for this? I could not find it in the documentation
using https://github.com/nelmio/NelmioApiDocBundle v3
Currently when I view /api/doc, any controllers I add list all rest methods even if I only have a GET method implemented.
<?php
namespace ApiBundle\Controller;
use Symfony\Bundle\FrameworkBundle\Controller\Controller;
use Sensio\Bundle\FrameworkExtraBundle\Configuration\Security;
use Symfony\Component\Routing\Annotation\Route;
use Symfony\Component\HttpFoundation\JsonResponse;
use Swagger\Annotations as SWG;
class UserController extends Controller
{
/**
* @Security("is_granted('IS_AUTHENTICATED_FULLY')")
* @Route("/api/users", name="get_users", methods={"GET"})
*
* @SWG\Response(
* response=200,
* description="Returns all users"
* )
* @SWG\Tag(name="users")
*
*
* @return \Symfony\Component\HttpFoundation\JsonResponse
*/
public function getUsersAction()
{
$repo = $this->getDoctrine()
->getRepository('AccountBundle:User');
$users = $repo->createQueryBuilder('q')
->getQuery()
->getArrayResult();
return new JsonResponse($users);
}
}
A: Just figured it out: if you do not specify the methods in the @Route() annotation in the controller, then it will show all of them, but if you add methods={} to the Route annotation, then it will only list the defined methods
* @Route("/api/users", name="get_users", methods={"GET"})
 | Q: swagger-ui How do you hide rest methods not implemented Maybe I'm just missing it: I would like to hide some rest methods from controllers that do not implement them, like options, delete, head
Is there an annotation for this? I could not find it in the documentation
using https://github.com/nelmio/NelmioApiDocBundle v3
Currently when I view /api/doc, any controllers I add list all rest methods even if I only have a GET method implemented.
<?php
namespace ApiBundle\Controller;
use Symfony\Bundle\FrameworkBundle\Controller\Controller;
use Sensio\Bundle\FrameworkExtraBundle\Configuration\Security;
use Symfony\Component\Routing\Annotation\Route;
use Symfony\Component\HttpFoundation\JsonResponse;
use Swagger\Annotations as SWG;
class UserController extends Controller
{
/**
* @Security("is_granted('IS_AUTHENTICATED_FULLY')")
* @Route("/api/users", name="get_users", methods={"GET"})
*
* @SWG\Response(
* response=200,
* description="Returns all users"
* )
* @SWG\Tag(name="users")
*
*
* @return \Symfony\Component\HttpFoundation\JsonResponse
*/
public function getUsersAction()
{
$repo = $this->getDoctrine()
->getRepository('AccountBundle:User');
$users = $repo->createQueryBuilder('q')
->getQuery()
->getArrayResult();
return new JsonResponse($users);
}
}
A: Just figured it out: if you do not specify the methods in the @Route() annotation in the controller, then it will show all of them, but if you add methods={} to the Route annotation, then it will only list the defined methods
* @Route("/api/users", name="get_users", methods={"GET"})
A: Specify the value and method type in the @RequestMapping
@RequestMapping(value="/instances/all",method=RequestMethod.GET)
@JsonFormat
public String showInstances(){
return "instances";
}
| stackoverflow | {
"language": "en",
"length": 202,
"provenance": "stackexchange_0000F.jsonl.gz:882325",
"question_score": "3",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44596251"
} |
07b516b595a2c9d5b4b974c18b6cb8beb4f9e284 | Stackoverflow Stackexchange
Q: Caret position is way off in contenteditable div Chrome (and maybe other browsers) positions the caret in a strange way within a contenteditable div.
Please consider the following snippet:
<div contenteditable="true" style="width: 100%; height: 200px; border: 1px solid black; overflow: auto;">
<p>
<span contenteditable="false" style="width: 75%; height: 80px; display: inline-block; border: 1px solid red;"> </span>.
</p>
</div>
Also available in this JSFiddle.
If you click on the right-side of the period next to the red span and press backspace to delete the period, the caret suddenly shifts to the extreme right of the paragraph. I would expect the caret to be positioned where the period used to be, next to the red span.
Why is it not positioned the way I expect, and is there a way to get the behavior I'm looking for?
A: This strange behavior is happening because of the p tag; the cause is possibly some conflict between widths. You can edit the CSS of the tag: instead of using display:block (the default), use display:inline.
I created this fiddle: JsFiddle, with display:inline, was the closest I could get from the display:block.
I tried the align attribute but I did not succeed.
 | Q: Caret position is way off in contenteditable div Chrome (and maybe other browsers) positions the caret in a strange way within a contenteditable div.
Please consider the following snippet:
<div contenteditable="true" style="width: 100%; height: 200px; border: 1px solid black; overflow: auto;">
<p>
<span contenteditable="false" style="width: 75%; height: 80px; display: inline-block; border: 1px solid red;"> </span>.
</p>
</div>
Also available in this JSFiddle.
If you click on the right-side of the period next to the red span and press backspace to delete the period, the caret suddenly shifts to the extreme right of the paragraph. I would expect the caret to be positioned where the period used to be, next to the red span.
Why is it not positioned the way I expect, and is there a way to get the behavior I'm looking for?
A: This strange behavior is happening because of the p tag; the cause is possibly some conflict between widths. You can edit the CSS of the tag: instead of using display:block (the default), use display:inline.
I created this fiddle: JsFiddle, with display:inline, was the closest I could get from the display:block.
I tried the align attribute but I did not succeed.
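Roughly, the only change from the original snippet is the display on the p tag (a sketch of what the linked fiddle does):
<div contenteditable="true" style="width: 100%; height: 200px; border: 1px solid black; overflow: auto;">
  <p style="display: inline;">
    <span contenteditable="false" style="width: 75%; height: 80px; display: inline-block; border: 1px solid red;"> </span>.
  </p>
</div>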
A: I'm fairly confident that it's the span causing it, because the moment you remove it or even display: none it, the problem goes away. I got really curious about this myself and did some searching, this person seems to have the same problem as you.
Why Is My Contenteditable Cursor Jumping to the End in Chrome?
A: Non-breaking spaces are just what they sound like — spaces where a line break will not occur. You should not use them to add space between words, sentences, or elements. Especially not elements.
Remove the & nbsp; entity (written with a space here, or it would actually render as a space :D) and everything's good.
| stackoverflow | {
"language": "en",
"length": 298,
"provenance": "stackexchange_0000F.jsonl.gz:882383",
"question_score": "3",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44596440"
} |
7fd1eaf93881ca95a3f8262915072d578810a312 | Stackoverflow Stackexchange
Q: strange query in server logs REMOTE HI_SRDK_DEV_GetHddInfo I have this http request in my server logs:
REMOTE HI_SRDK_DEV_GetHddInfo
Has anyone seen this message before?
A: Looks like somebody using exploit to find and hack Kguard Digital Video Recorders in the internet. Ignore if you don't have one.
https://dl.packetstormsecurity.net/1506-exploits/kdvr-authorization.txt
| Q: strange query in server logs REMOTE HI_SRDK_DEV_GetHddInfo I have this http request in my server logs:
REMOTE HI_SRDK_DEV_GetHddInfo
Has anyone seen this message before?
A: Looks like somebody using exploit to find and hack Kguard Digital Video Recorders in the internet. Ignore if you don't have one.
https://dl.packetstormsecurity.net/1506-exploits/kdvr-authorization.txt
A: HiSilicon provides an SDK for developing digital video recorder and web camera software. The SDK contains a protocol for remote device configuration named "MCTP". Commands transmitted by this protocol have names similar to HI_SRDK_SYS_USERMNG_GetUserList, HI_SRDK_NET_GetEmailAttr, etc. Someone wanted to find a HiSilicon based video recorder on your IP address, possibly for hacking.
| stackoverflow | {
"language": "en",
"length": 99,
"provenance": "stackexchange_0000F.jsonl.gz:882396",
"question_score": "7",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44596493"
} |
098169adf306783214b620422d95054c3f943d0f | Stackoverflow Stackexchange
Q: How get total amount in MS Access SQL? I use the following query to get OrderID:
SELECT OrderItem.ID
, ProductID
, OrderID
, Quantity
, P.Title
, P.CurrentPrice
, P.ID
, (P.CurrentPrice* OrderItem.Quantity) AS Total
FROM OrderItem
INNER JOIN Product AS P
ON OrderItem.ProductID = P.ID
How can I get the total amount (Add all Total with same OrderID ) for each OrderID?
A: You could use a select from your select and group by
select OrderID, sum(Total)
from (
SELECT
OrderItem.ID
, ProductID
, OrderID
, Quantity
, P.Title
,P.CurrentPrice
, P.ID
, (P.CurrentPrice* OrderItem.Quantity) AS Total
FROM OrderItem
INNER JOIN Product AS P ON OrderItem.ProductID = P.ID
) t
group by OrderId
 | Q: How get total amount in MS Access SQL? I use the following query to get OrderID:
SELECT OrderItem.ID
, ProductID
, OrderID
, Quantity
, P.Title
, P.CurrentPrice
, P.ID
, (P.CurrentPrice* OrderItem.Quantity) AS Total
FROM OrderItem
INNER JOIN Product AS P
ON OrderItem.ProductID = P.ID
How can I get the total amount (Add all Total with same OrderID ) for each OrderID?
A: You could use a select from your select and group by
select OrderID, sum(Total)
from (
SELECT
OrderItem.ID
, ProductID
, OrderID
, Quantity
, P.Title
,P.CurrentPrice
, P.ID
, (P.CurrentPrice* OrderItem.Quantity) AS Total
FROM OrderItem
INNER JOIN Product AS P ON OrderItem.ProductID = P.ID
) t
group by OrderId
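If you only need the per-order totals rather than the line items, a more direct sketch (untested, assuming the same table and column names) aggregates without the subquery:
SELECT OrderID, Sum(P.CurrentPrice * OrderItem.Quantity) AS TotalAmount
FROM OrderItem
INNER JOIN Product AS P ON OrderItem.ProductID = P.ID
GROUP BY OrderID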
A: I'm only new to SQL but I think this is the solution.
SELECT OrderID, Sum(Quantity) AS [Sum of Quantity], Sum(P.CurrentPrice * OrderItem.Quantity) AS Total
FROM OrderItem INNER JOIN Product AS P ON OrderItem.ProductID = P.ID GROUP BY OrderID
Hope this Helps.
| stackoverflow | {
"language": "en",
"length": 160,
"provenance": "stackexchange_0000F.jsonl.gz:882401",
"question_score": "4",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44596501"
} |
043b41d5b3fd4ae5bdbea340ef92dbec02d68314 | Stackoverflow Stackexchange
Q: How to interpret output of config show from numpy? I am having trouble understanding the output of __config__.show() from the numpy module in Python 2.7.
Following is the output from executing the above command:
>>> numpy.__config__.show()
lapack_opt_info:
libraries = ['mkl_intel_lp64', 'mkl_intel_thread', 'mkl_core', 'iomp5', 'pthread']
library_dirs = ['/r/ge.unx.xxx.com/vol/vol110/u11/jack/anaconda2/lib']
define_macros = [('SCIPY_MKL_H', None), ('HAVE_CBLAS', None)]
include_dirs = ['/r/ge.unx.xxx.com/vol/vol110/u11/jack/anaconda2/include']
blas_opt_info:
libraries = ['mkl_intel_lp64', 'mkl_intel_thread', 'mkl_core', 'iomp5', 'pthread']
library_dirs = ['/r/ge.unx.xxx.com/vol/vol110/u11/jack/anaconda2/lib']
define_macros = [('SCIPY_MKL_H', None), ('HAVE_CBLAS', None)]
include_dirs = ['/r/ge.unx.xxx.com/vol/vol110/u11/jack/anaconda2/include']
lapack_mkl_info:
libraries = ['mkl_intel_lp64', 'mkl_intel_thread', 'mkl_core', 'iomp5', 'pthread']
library_dirs = ['/r/ge.unx.xxx.com/vol/vol110/u11/jack/anaconda2/lib']
define_macros = [('SCIPY_MKL_H', None), ('HAVE_CBLAS', None)]
include_dirs = ['/r/ge.unx.xxx.com/vol/vol110/u11/jack/anaconda2/include']
blas_mkl_info:
libraries = ['mkl_intel_lp64', 'mkl_intel_thread', 'mkl_core', 'iomp5', 'pthread']
library_dirs = ['/r/ge.unx.xxx.com/vol/vol110/u11/jack/anaconda2/lib']
define_macros = [('SCIPY_MKL_H', None), ('HAVE_CBLAS', None)]
include_dirs = ['/r/ge.unx.xxx.com/vol/vol110/u11/jack/anaconda2/include']
>>>
From the above output I wanted to infer whether numpy is using Intel MKL library or not. If yes, what is the location for the library?
 | Q: How to interpret output of config show from numpy? I am having trouble understanding the output of __config__.show() from the numpy module in Python 2.7.
Following is the output from executing the above command:
>>> numpy.__config__.show()
lapack_opt_info:
libraries = ['mkl_intel_lp64', 'mkl_intel_thread', 'mkl_core', 'iomp5', 'pthread']
library_dirs = ['/r/ge.unx.xxx.com/vol/vol110/u11/jack/anaconda2/lib']
define_macros = [('SCIPY_MKL_H', None), ('HAVE_CBLAS', None)]
include_dirs = ['/r/ge.unx.xxx.com/vol/vol110/u11/jack/anaconda2/include']
blas_opt_info:
libraries = ['mkl_intel_lp64', 'mkl_intel_thread', 'mkl_core', 'iomp5', 'pthread']
library_dirs = ['/r/ge.unx.xxx.com/vol/vol110/u11/jack/anaconda2/lib']
define_macros = [('SCIPY_MKL_H', None), ('HAVE_CBLAS', None)]
include_dirs = ['/r/ge.unx.xxx.com/vol/vol110/u11/jack/anaconda2/include']
lapack_mkl_info:
libraries = ['mkl_intel_lp64', 'mkl_intel_thread', 'mkl_core', 'iomp5', 'pthread']
library_dirs = ['/r/ge.unx.xxx.com/vol/vol110/u11/jack/anaconda2/lib']
define_macros = [('SCIPY_MKL_H', None), ('HAVE_CBLAS', None)]
include_dirs = ['/r/ge.unx.xxx.com/vol/vol110/u11/jack/anaconda2/include']
blas_mkl_info:
libraries = ['mkl_intel_lp64', 'mkl_intel_thread', 'mkl_core', 'iomp5', 'pthread']
library_dirs = ['/r/ge.unx.xxx.com/vol/vol110/u11/jack/anaconda2/lib']
define_macros = [('SCIPY_MKL_H', None), ('HAVE_CBLAS', None)]
include_dirs = ['/r/ge.unx.xxx.com/vol/vol110/u11/jack/anaconda2/include']
>>>
From the above output I wanted to infer whether numpy is using Intel MKL library or not. If yes, what is the location for the library?
| stackoverflow | {
"language": "en",
"length": 141,
"provenance": "stackexchange_0000F.jsonl.gz:882493",
"question_score": "3",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44596820"
} |
2d0258ff0a63f5b0f47ea73e403b41cf428d344d | Stackoverflow Stackexchange
Q: How to check if a (possibly obfuscated) class exists without showing in logs I'm trying to check if a class exists in runtime.
Using Class.forName( "com.class.Name" ); is not an option since the class I want to check may be obfuscated using proguard.
Another option is using a try-catch(NoClassDefFoundError) block and trying to access the actual class, but this results in the following logs:
*
*art: Rejecting re-init on previously-failed class on newer os versions with ART - which I was able to avoid by doing Class cls = CheckedClass.class surrounded with a try-catch from a different method.
*dalvikvm: Could not find class... on older os versions using dalvik - which I haven't yet figured out a way to avoid. Any thoughts?
| Q: How to check if a (possibly obfuscated) class exists without showing in logs I'm trying to check if a class exists in runtime.
Using Class.forName( "com.class.Name" ); is not an option since the class I want to check may be obfuscated using proguard.
Another option is using a try-catch(NoClassDefFoundError) block and trying to access the actual class, but this results in the following logs:
*
*art: Rejecting re-init on previously-failed class on newer os versions with ART - which I was able to avoid by doing Class cls = CheckedClass.class surrounded with a try-catch from a different method.
*dalvikvm: Could not find class... on older os versions using dalvik - which I haven't yet figured out a way to avoid. Any thoughts?
| stackoverflow | {
"language": "en",
"length": 123,
"provenance": "stackexchange_0000F.jsonl.gz:882539",
"question_score": "3",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44596939"
} |
a55ad34e3d13d6bd10347ef721fbb5b8c0101642 | Stackoverflow Stackexchange
Q: Visual Studio 2017 - Design View/Split view I'm using Visual Studio 2017, designing an HTML page with a linked JavaScript page for functionality. Up until today, beneath my main pane where my code is, there was a bar that had 3 options, Design, Split and Source. I'm not sure why or how the bar has vanished, but it has and I'm not sure what it is called or how to display it again. I have tried Microsoft support to no avail, I've googled and not been successful, and of course tried a variety of things within Visual Studio.
A: Maybe useful for someone coming later:
Go to Tools --> Options --> Web Forms Designer, select the Split View Vertically option. Restart Visual Studio.
| Q: Visual Studio 2017 - Design View/Split view I'm using Visual Studio 2017, designing an HTML page with a linked JavaScript page for functionality. Up until today, beneath my main pane where my code is, there was a bar that had 3 options, Design, Split and Source. I'm not sure why or how the bar has vanished, but it has and I'm not sure what it is called or how to display it again. I have tried Microsoft support to no avail, I've googled and not been successful, and of course tried a variety of things within Visual Studio.
A: Maybe useful for someone coming later:
Go to Tools --> Options --> Web Forms Designer, select the Split View Vertically option. Restart Visual Studio.
| stackoverflow | {
"language": "en",
"length": 123,
"provenance": "stackexchange_0000F.jsonl.gz:882553",
"question_score": "3",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44596969"
} |
10c50fcdadf53edf9c440deca9080b29ac30207f | Stackoverflow Stackexchange
Q: How does google chrome launch desktop apps? I don't really know the terminology so I'm going to start with an example.
If I click a magnet link, Google Chrome asks if I want to launch a torrent client. I click OK and Chrome launches that app, and the app does some stuff based on the link.
Now, is there any way to see how the app gets the info from Chrome, and how Chrome starts the app?
A: It depends exactly on the OS, but in general, another desktop program can register a specific protocol, or URI scheme, to open up a program. Then, when Chrome doesn't know how to deal with a protocol, it'll just hand it over to the OS to deal with.
In Windows for example, they're configured by putting something into the system registry under a specific key (https://msdn.microsoft.com/en-us/library/aa767914(v=vs.85).aspx).
Most applications will set themselves up as the default for the particular protocol when installed.
 | Q: How does google chrome launch desktop apps? I don't really know the terminology so I'm going to start with an example.
If I click a magnet link, Google Chrome asks if I want to launch a torrent client. I click OK and Chrome launches that app, and the app does some stuff based on the link.
Now, is there any way to see how the app gets the info from Chrome, and how Chrome starts the app?
A: It depends exactly on the OS, but in general, another desktop program can register a specific protocol, or URI scheme, to open up a program. Then, when Chrome doesn't know how to deal with a protocol, it'll just hand it over to the OS to deal with.
In Windows for example, they're configured by putting something into the system registry under a specific key (https://msdn.microsoft.com/en-us/library/aa767914(v=vs.85).aspx).
Most applications will set themselves up as the default for the particular protocol when installed.
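As an illustration (a hypothetical sketch: the vendor path and executable name are made up), a Windows registration for the magnet: scheme looks roughly like this .reg file:
Windows Registry Editor Version 5.00

[HKEY_CLASSES_ROOT\magnet]
@="URL:Magnet Protocol"
"URL Protocol"=""

[HKEY_CLASSES_ROOT\magnet\shell\open\command]
@="\"C:\\Program Files\\TorrentClient\\client.exe\" \"%1\""
The "%1" placeholder is how the clicked link's full URL reaches the program as a command-line argument.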
A: Chrome is a "desktop" program. It can open any program exposed from the operating system.
A link can contain a specific protocol instead of http://; the OS can have a map that ties protocols directly to installed programs. Chrome is not communicating with the app at any point. It only tells the OS to open a resource at a given url with a given program.
| stackoverflow | {
"language": "en",
"length": 223,
"provenance": "stackexchange_0000F.jsonl.gz:882581",
"question_score": "5",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44597071"
} |
0e25e176b0a772a094ce30fc73710f39e27533a6 | Stackoverflow Stackexchange
Q: Apply a directive conditionally I am using Material 2 to add md-raised-button. I want to apply this directive only if a certain condition becomes true.
For example:
<button md-raised-button="true"></button>
Another example:
I created a basic dynamic reactive form in plunker.
I am using formArrayName directive of reactive form for array of controls.
I want to apply formArrayName directive only if specific condition becomes true, otherwise don't add formArrayName directive.
Here is a plunker link.
A: I don't know if you can apply directives based on a condition, but a workaround would be having 2 buttons and display them based on a condition.
<button *ngIf="!condition"></button>
<button *ngIf="condition" md-raised-button></button>
Edit: maybe this will be helpful.
 | Q: Apply a directive conditionally I am using Material 2 to add md-raised-button. I want to apply this directive only if a certain condition becomes true.
For example:
<button md-raised-button="true"></button>
Another example:
I created a basic dynamic reactive form in plunker.
I am using formArrayName directive of reactive form for array of controls.
I want to apply formArrayName directive only if specific condition becomes true, otherwise don't add formArrayName directive.
Here is a plunker link.
A: I don't know if you can apply directives based on a condition, but a workaround would be having 2 buttons and display them based on a condition.
<button *ngIf="!condition"></button>
<button *ngIf="condition" md-raised-button></button>
Edit: maybe this will be helpful.
A: Currently, there is NO way to conditionally apply a directive to a component. This is not supported. The components which you have created can be added or removed conditionally.
There is already an issue created for the same with angular2, so it should be the case with angular4 as well.
Alternatively you can go for the option with *ngIf
<button *ngIf="!condition"></button>
<button *ngIf="condition" md-raised-button></button>
A: Maybe it will help someone.
In the example below I have the my-button.component.html and I want to apply the *appHasPermission directive to the <button> only if the role attribute is set.
<ng-container *ngIf="role; else buttonNoRole" >
<ng-container *appHasPermission="role">
<!-- button with *appHasPermission -->
<ng-template *ngTemplateOutlet="buttonNoRole;"></ng-template>
</ng-container>
</ng-container>
<ng-template #buttonNoRole>
<!-- button without *appHasPermission -->
<button
mat-raised-button type="button"
[color]="color"
[disabled]="disabled"
[(appClickProgress)]="onClick"
[key]="progressKey">
<mat-icon *ngIf="icon">{{ icon }}</mat-icon> {{ label }}
</button>
</ng-template>
That way you don't duplicate the <button> code.
A: As already noted this does not appear to be possible. One thing that can be used to at least prevent some duplication is ng-template. This allows you to extract the content of the element affected by the ngIf branching.
If you for example want to create a hierarchical menu component using Angular Material:
<!-- Button contents -->
<ng-template #contentTemplate>
<mat-icon *ngIf="item.icon != null">{{ item.icon }}</mat-icon>
{{ item.label }}
</ng-template>
<!-- Leaf button -->
<button *ngIf="item.children == null" mat-menu-item
(click)="executeCommand()"
[disabled]="enabled == false">
<ng-container *ngTemplateOutlet="contentTemplate"></ng-container>
</button>
<!-- Node button -->
<ng-container *ngIf="item.children != null">
<button mat-menu-item
[matMenuTriggerFor]="subMenu">
<ng-container *ngTemplateOutlet="contentTemplate"></ng-container>
</button>
<mat-menu #subMenu="matMenu">
<menu-item *ngFor="let child of item.children" [item]="child"></menu-item>
</mat-menu>
</ng-container>
Here the conditionally applied directive is matMenuTriggerFor, which should only be applied to menu items with children. The contents of the button are inserted in both places via ngTemplateOutlet.
A: This may come late, but it is a viable and elegant method for applying a directive conditionally.
In the directive class create the input variable:
@Input('myDirective') options: any;
When applying the directive, set the apply property of the input variable:
<div [myDirective]="{ apply: someCondition }"></div>
In the method of the directive check for the variable this.options.apply and apply the directive logic based on the condition:
ngAfterViewInit(): void {
if (!this.options.apply) {
return;
}
// directive logic
}
A: This could be a solution too:
[md-raised-button]="condition ? 'true' : ''"
It's working for angular 4, ionic 3 like this:
[color]="condition ? 'primary' : ''" where condition is a function that decides if this is an active page or not. The whole code look like this:
<button *ngFor="let page of ..." [color]="isActivePage(page) ? 'primary' : ''">{{ page.title }}</button>
A: Passing null to the directive removes it!
<button [md-raised-button]="condition ? true : null"></button>
A: As others have also stated, directives can't be dynamically applied.
However, if you just want to toggle md-button's style from flat to raised, then this
<button md-button [class.mat-raised-button]="isRaised">Toggle Raised Button</button>
would do the trick. Plunker
A: If you just need to add an attribute in order to trigger CSS rules, you can use the below method: (this does not dynamically create/destroy a directive)
<button [attr.md-raised-button]="condition ? '' : null"></button>
Applied the same to your plunker: fork
Update:
How condition ? '' : null works as the value:
When its the empty string ('') it becomes attr.md-raised-button="", when its null the attribute will not exist.
Update: plunker update: fork (version issues fixed, please note the question was originally based on angular 4)
A: I couldn't find a nice existing solution, so I built my own directive which does this.
import { Directive, ElementRef, Input } from '@angular/core';
@Directive({
selector: '[dynamic-attr]'
})
export class DynamicAttrDirective {
@Input('dynamic-attr') attr: string;
private _el: ElementRef;
constructor(el: ElementRef) {
this._el = el;
}
ngOnInit() {
    if (!this.attr) return; // guard against missing or empty attribute names
const node = document.createAttribute(this.attr);
this._el.nativeElement.setAttributeNode(node);
}
}
Then your html:
<div dynamic-attr="{{hasMargin ? 'margin-left' : ''}}"></div>
A: I am working with Angular Material, adding an element on *ngIf didn't work properly for me (the element would disappear inside many newly generated material HTML tags lol).
I don't know if it's a good practice, but I used OnChanges and I had a sort of conditional directive - and it worked! :)
So this is how I solved it:
import { Directive, Renderer2, ElementRef, Input, OnChanges, SimpleChanges, AfterViewInit } from '@angular/core';
@Directive({
selector: '[appDirtyInputIndicator]'
})
export class DirtyInputIndicatorDirective implements OnChanges, AfterViewInit {
@Input('appDirtyInputIndicator') dirtyInputIndicator: boolean;
span = this.renderer.createElement('span');
constructor(private renderer: Renderer2, private el: ElementRef) {}
ngOnChanges(changes: SimpleChanges): void {
if (changes.dirtyInputIndicator && this.dirtyInputIndicator) {
this.renderer.appendChild(this.el.nativeElement, this.span);
} else {
this.renderer.removeChild(this.el.nativeElement, this.span);
}
}
ngAfterViewInit() {
this.renderer.addClass(this.span, 'dirty_input_badge');
}
}
A: I encountered the same issue, was able to do it this way:
component.html
<tr *ngFor="let Qus of finalModel | paginate:{itemsPerPage: 10, currentPage:p}">
<td appHighlightUsa [isUsa]="Qus.country=='United States' ? true :false">{{Qus.id | flags}}</td>
<td appHighlightUsa [isUsa]="Qus.country=='United States' ? true :false">{{Qus.accociateId}}</td>
<td appHighlightUsa [isUsa]="Qus.country=='United States' ? true :false" >{{Qus.country}}</td>
<td appHighlightUsa [isUsa]="Qus.country=='United States' ? true :false" >{{Qus.country}}</td>
</tr>
A: Use NgClass
[ngClass]="{ 'mat-raised-button': trueCondition }"
example of true condition:
this.element === 'Today'
or a boolean function
getTruth()
full example:
<button [ngClass]="{ 'mat-raised-button': trueCondition }">TEXT</button>
If you want a default class:
<button [ngClass]="{ 'mat-raised-button': trueCondition, 'default-class': !trueCondition }">TEXT</button>
A: As at 18th Jan 2019,
This is how I added a directive conditionally in Angular 5 and above. I needed to change the color of the <app-nav> component based on darkMode, i.e. whether the page was in dark mode or not.
This worked for me:
<app-nav [color]="darkMode ? 'orange':'green'"></app-nav>
I hope this helps someone.
EDIT
This changes the value of an attribute (color) based on a condition. It just happens that the color is defined using a directive. So anyone reading this, please do not get confused: this is not applying a directive conditionally (i.e. adding or removing a directive to the DOM based on a condition).
A: Yes, it is possible.
html page with appActiveAhover directive :)
<li routerLinkActive="active" #link1="routerLinkActive">
<a [appActiveAhover]='link1.isActive?false:true' routerLink="administration" [ngStyle]="{'background':link1.isActive?domaindata.get_color3():none}">
<i class="fa fa-users fa-lg" aria-hidden="true"></i> Administration</a>
</li>
<li routerLinkActive="active" #link2="routerLinkActive">
<a [appActiveAhover]='link2.isActive?false:true' routerLink="verkaufsburo" [ngStyle]="{'background':link2.isActive?domaindata.get_color3():none,'color':link2.isActive?color2:none}">
<i class="fa fa-truck fa-lg" aria-hidden="true"></i> Verkaufsbüro</a>
</li>
<li routerLinkActive="active" #link3="routerLinkActive">
<a [appActiveAhover]='link3.isActive?false:true' routerLink="preisrechner" [ngStyle]="{'background':link3.isActive?domaindata.get_color3():none}">
<i class="fa fa-calculator fa-lg" aria-hidden="true" *ngIf="routerLinkActive"></i> Preisrechner</a>
</li>
directive
@Directive({
selector: '[appActiveAhover]'
})
export class ActiveAhoverDirective implements OnInit {
@Input() appActiveAhover:boolean;
constructor(public el: ElementRef, public renderer: Renderer, public domaindata: DomainnameDataService) {
}
ngOnInit() {
}
@HostListener('mouseover') onMouseOver() {
if(this.appActiveAhover){
this.renderer.setElementStyle(this.el.nativeElement, 'color', this.domaindata.domaindata.color2);
}
}
@HostListener('mouseout') onMouseOut() {
if(this.appActiveAhover){
this.renderer.setElementStyle(this.el.nativeElement, 'color', 'white');
}
}
}
A: I got another idea about what you could do.
You could store the html you want replaced in a variable as a string and then add / remove the directive from it as you wish, using the bypassSecurityTrustHtml method of the DomSanitizer.
It doesn't result in a clean solution, but at least you don't need to repeat the code.
| stackoverflow | {
"language": "en",
"length": 1221,
"provenance": "stackexchange_0000F.jsonl.gz:882582",
"question_score": "152",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44597077"
} |
933f535a2b778fd334bf59176e1b73f1aae90751 | Stackoverflow Stackexchange
Q: How can Spring RestController accept both JSON and XML? I have a Spring controller that works great:
@RestController
@RequestMapping(value = "/widgets")
class WidgetController {
@RequestMapping(method = RequestMethod.POST)
WidgetResponse createWidget(@Valid @RequestBody Widget widget) {
// ...
}
}
Here I can POST a JSON message and my widget instance gets created:
{
"name" : "Widget1",
"type" : "spinning",
"isFizz" : true
}
I would like this endpoint to also accept and deserialize XML widgets like so:
<widget name="Widget1">
<type>spinning</type>
<isFizz>false</isFizz>
</widget>
I'm trying to figure out:
*
*How to allow the endpoint to accept both JSON and XML data, and deserialize them properly; and
*How to validate any XML against a Schema, such as widgets.xsd
Any ideas?
A: With the parameter consumes of annotation @RequestMapping
@RequestMapping(value = "/widgets",consumes={MediaType.APPLICATION_JSON_VALUE,MediaType.APPLICATION_XML_VALUE})
WidgetResponse createWidget(@Valid @RequestBody Widget widget){
    // ...
}
The parameter consumes takes an array of MediaType
| Q: How can Spring RestController accept both JSON and XML? I have a Spring controller that works great:
@RestController
@RequestMapping(value = "/widgets")
class WidgetController {
@RequestMapping(method = RequestMethod.POST)
WidgetResponse createWidget(@Valid @RequestBody Widget widget) {
// ...
}
}
Here I can POST a JSON message and my widget instance gets created:
{
"name" : "Widget1",
"type" : "spinning",
"isFizz" : true
}
I would like this endpoint to also accept and deserialize XML widgets like so:
<widget name="Widget1">
<type>spinning</type>
<isFizz>false</isFizz>
</widget>
I'm trying to figure out:
*
*How to allow the endpoint to accept both JSON and XML data, and deserialize them properly; and
*How to validate any XML against a Schema, such as widgets.xsd
Any ideas?
A: With the parameter consumes of annotation @RequestMapping
@RequestMapping(value = "/widgets",consumes={MediaType.APPLICATION_JSON_VALUE,MediaType.APPLICATION_XML_VALUE})
WidgetResponse createWidget(@Valid @RequestBody Widget widget){
    // ...
}
The parameter consumes takes an array of MediaType
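For the XML side, one approach (a sketch, assuming the jackson-dataformat-xml dependency is on the classpath, in which case Spring registers an XML message converter automatically) is to annotate Widget so it matches the sample document, with name mapped to an attribute:
import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty;
import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement;

@JacksonXmlRootElement(localName = "widget")
public class Widget {
    @JacksonXmlProperty(isAttribute = true)
    private String name;    // <widget name="...">
    private String type;    // <type>...</type>
    private boolean isFizz; // <isFizz>...</isFizz>
    // getters and setters omitted
}
Note that this converter does not validate against an XSD for you; a widgets.xsd check would need a separate javax.xml.validation step or a custom deserializer.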
| stackoverflow | {
"language": "en",
"length": 143,
"provenance": "stackexchange_0000F.jsonl.gz:882603",
"question_score": "3",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44597140"
} |
fee74dfabf70935a4edf0a411b60a4e366a296ca | Stackoverflow Stackexchange
Q: Creating a JSON string, PowerShell object I am unable to create a variable for this request so I can later convert the variable to JSON using ConvertTo-Json
{
"update": {
"comment": [
{
"add": {
"body": "Comment added when resolving issue"
}
}
]
},
"transition": {
"id": "21"
}
}
Tried the below
$jsonRequest = @{
update= @{
comment =@{
add =@{
body = "$Description"
}
}
}
transition =@{
id = $TransactionID
}
}
But get an output as below
{
"transition": {
"id": 1
},
"update": {
"comment": {
"add": "System.Collections.Hashtable"
}
}
}
A: "Comment" in your JSON is a list containing a hashtable; in your code it's a hashtable containing a hashtable.
This looks right by making it an array of one item:
$jsonRequest = [ordered]@{
update= @{
comment = @(
@{
add =@{
body = "$Description"
}
}
)
}
transition = @{
id = 21
}
}
$jsonRequest | ConvertTo-Json -Depth 10
And I've made it '[ordered]' so the update and transition come out in the same order as your code, although that shouldn't really matter.
 | Q: Creating a JSON string, PowerShell object I am unable to create a variable for this request so I can later convert the variable to JSON using ConvertTo-Json
{
"update": {
"comment": [
{
"add": {
"body": "Comment added when resolving issue"
}
}
]
},
"transition": {
"id": "21"
}
}
Tried the below
$jsonRequest = @{
update= @{
comment =@{
add =@{
body = "$Description"
}
}
}
transition =@{
id = $TransactionID
}
}
But get an output as below
{
"transition": {
"id": 1
},
"update": {
"comment": {
"add": "System.Collections.Hashtable"
}
}
}
A: "Comment" in your JSON is a list containing a hashtable; in your code it's a hashtable containing a hashtable.
This looks right by making it an array of one item:
$jsonRequest = [ordered]@{
update= @{
comment = @(
@{
add =@{
body = "$Description"
}
}
)
}
transition = @{
id = 21
}
}
$jsonRequest | ConvertTo-Json -Depth 10
And I've made it '[ordered]' so the update and transition come out in the same order as your code, although that shouldn't really matter.
| stackoverflow | {
"language": "en",
"length": 184,
"provenance": "stackexchange_0000F.jsonl.gz:882614",
"question_score": "11",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44597175"
} |
e5ff1f88e9c9443a68335fc353f83d9cf829bba7 | Stackoverflow Stackexchange
Q: Javascript ES6 console.log object using template literal I have a simple object that I want to display in the console
var obj = { name: 'John', age: 22 }
If I type:
console.log(obj)
Object { name: "John", age: 22 }
If I type:
console.log('my object is: ' + obj)
my object is: [object Object]
console.log('my object is: %o', obj)
my object is: Object { name: "John", age: 22 }
How can I achieve this using a template literal?
If I type:
console.log(`my object is: ${obj}`)
my object is: [object Object]
A: You could serialize the object with JSON.stringify.
var obj = { name: 'John', age: 22 };
console.log(`my object is: ${JSON.stringify(obj)}`);
 | Q: Javascript ES6 console.log object using template literal I have a simple object that I want to display in the console
var obj = { name: 'John', age: 22 }
If I type:
console.log(obj)
Object { name: "John", age: 22 }
If I type:
console.log('my object is: ' + obj)
my object is: [object Object]
console.log('my object is: %o', obj)
my object is: Object { name: "John", age: 22 }
How can I achieve this using a template literal?
If I type:
console.log(`my object is: ${obj}`)
my object is: [object Object]
A: You could serialize the object with JSON.stringify.
var obj = { name: 'John', age: 22 };
console.log(`my object is: ${JSON.stringify(obj)}`);
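Worth noting (not a template literal, but often what is actually wanted): passing the object as a separate argument keeps the console's expandable object view instead of a flattened string.
var obj = { name: 'John', age: 22 };
console.log('my object is:', obj);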
A:
var obj = { name: 'John', age: 22 }
log`This is obj: ${obj}`
function log(strings, ...values) {
console.log(strings.reduce((p, c, i) =>
`${p}${c}${values[i]?JSON.stringify(values[i]):''}`, ''));
}
A: console.log(`My object is: ${obj}`);
(There are backticks at both ends of the template literal; the original formatting was removing them.)
| stackoverflow | {
"language": "en",
"length": 156,
"provenance": "stackexchange_0000F.jsonl.gz:882622",
"question_score": "18",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44597208"
} |
ef72a691cdf3b4ecde551106bef6b227d4d942e5 | Stackoverflow Stackexchange
Q: get notified when http.Server starts listening When I look at the net/http server interface, I don't see an obvious way to get notified and react when the http.Server comes up and starts listening:
ListenAndServe(":8080", nil)
The function doesn't return until the server actually shuts down. I also looked at the Server type, but there doesn't appear to be anything that lets me tap into that timing. Some function or a channel would have been great but I don't see any.
Is there any way that will let me detect that event, or am I left to just sleeping "enough" to fake it?
A: ListenAndServe is a helper function that opens a listening socket and then serves connections on that socket. Write the code directly in your application to signal when the socket is open:
l, err := net.Listen("tcp", ":8080")
if err != nil {
// handle error
}
// Signal that server is open for business.
if err := http.Serve(l, rootHandler); err != nil {
// handle error
}
If the signalling step does not block, then http.Serve will easily consume any backlog on the listening socket.
Related question: https://stackoverflow.com/a/32742904/5728991
| Q: get notified when http.Server starts listening When I look at the net/http server interface, I don't see an obvious way to get notified and react when the http.Server comes up and starts listening:
ListenAndServe(":8080", nil)
The function doesn't return until the server actually shuts down. I also looked at the Server type, but there doesn't appear to be anything that lets me tap into that timing. Some function or a channel would have been great but I don't see any.
Is there any way that will let me detect that event, or am I left to just sleeping "enough" to fake it?
A: ListenAndServe is a helper function that opens a listening socket and then serves connections on that socket. Write the code directly in your application to signal when the socket is open:
l, err := net.Listen("tcp", ":8080")
if err != nil {
// handle error
}
// Signal that server is open for business.
if err := http.Serve(l, rootHandler); err != nil {
// handle error
}
If the signalling step does not block, then http.Serve will easily consume any backlog on the listening socket.
Related question: https://stackoverflow.com/a/32742904/5728991
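As a concrete sketch of that signalling step (rootHandler is assumed to be defined elsewhere; imports: log, net, net/http), a channel closed after Listen succeeds works well:
ready := make(chan struct{})
go func() {
    <-ready
    // the socket is open here; e.g. run smoke tests or register
    // the server with service discovery
}()

l, err := net.Listen("tcp", ":8080")
if err != nil {
    log.Fatal(err)
}
close(ready) // signal: the server is accepting connections
if err := http.Serve(l, rootHandler); err != nil {
    log.Fatal(err)
}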
| stackoverflow | {
"language": "en",
"length": 191,
"provenance": "stackexchange_0000F.jsonl.gz:882630",
"question_score": "8",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44597248"
} |
0efe5cbc544fd09e65b8f94610082a734bb0cb58 | Stackoverflow Stackexchange
Q: Add new contact in api telegram python telethon How do I save a number in my contacts in telethon python?
from telethon import TelegramClient
from telethon.tl.functions.contacts import GetContactsRequest
from telethon.tl.types import InputPeerUser
client = TelegramClient('arta0', api_id, api_hash)
client.connect()
#number=+19133704541
#name='ali karimi'
What module do I need to add contact?
A: You can create a contact like this:
contact = InputPhoneContact(client_id = 0, phone = "+12345678", first_name="ABC", last_name="abc")
result = client.invoke(ImportContactsRequest([contact], replace=True))
To create a new contact you need to pass 0 for the client_id.
| Q: Add new contact in api telegram python telethon How do I save a number in my contacts in telethon python?
from telethon import TelegramClient
from telethon.tl.functions.contacts import GetContactsRequest
from telethon.tl.types import InputPeerUser
client = TelegramClient('arta0', api_id, api_hash)
client.connect()
#number=+19133704541
#name='ali karimi'
What module do I need to add contact?
A: You can create a contact like this:
contact = InputPhoneContact(client_id = 0, phone = "+12345678", first_name="ABC", last_name="abc")
result = client.invoke(ImportContactsRequest([contact], replace=True))
To create a new contact you need to pass 0 for the client_id.
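Note that on recent Telethon versions (as later answers here show) the replace argument was removed and invoke gave way to calling the client directly, so the equivalent is:
# recent Telethon: no `replace` kwarg, call the client directly
result = client(ImportContactsRequest([contact]))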
A: You can create a new contact like this:
from telethon.sync import TelegramClient
from telethon import TelegramClient
from telethon.tl.functions.messages import AddChatUserRequest
from telethon.tl.types import InputPhoneContact
from telethon.tl.functions.contacts import ImportContactsRequest
from telethon import functions, types
# Create Client Object
api_id = xxxxxxx
api_hash = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
phone = '+xxxxxxxxxxxx'
# Login
client = TelegramClient(phone, api_id, api_hash)
client.connect()
if not client.is_user_authorized():
client.send_code_request(phone)
client.sign_in(phone, input('Enter the code: '))
# add user to contact
phoneNum= "+98xxxxxxxxxx"
contact = InputPhoneContact(client_id=0, phone=phoneNum, first_name="", last_name="")
result = client(ImportContactsRequest([contact]))
To create a new contact you need to pass 0 for the client_id.
A: contact = InputPhoneContact(client_id=0, phone='+918962141530', first_name='<First Name, a required field>', last_name='<Last Name, an optional field>')
client.invoke(ImportContactsRequest([contact], replace=True))
*** TypeError: __init__() got an unexpected keyword argument 'replace'
You can use
result = client.invoke(ImportContactsRequest([contact]))
After adding the contact to the list, you can show the whole contact list
contacts = client(GetContactsRequest(0))
Iterate contacts and show all users' info
A: Here's how you'd do it using daniil.it/MadelineProto:
try {
$MadelineProto = \danog\MadelineProto\Serialization::unserialize('session.madeline'); // Unserialize a stored session, if you haven't saved it yet, login first, see below
} catch (\danog\MadelineProto\Exception $e) { // If there is no saved session yet, log in
$MadelineProto = new \danog\MadelineProto\API();
// Login as a user
$sentCode = $MadelineProto->phone_login($number);
echo 'Enter the code you received: ';
$code = '';
for ($x = 0; $x < $sentCode['type']['length']; $x++) {
$code .= fgetc(STDIN);
}
$MadelineProto->complete_phone_login($code);
}
$inputContacts = [];
$inputContacts[0] = ['_' => 'inputPhoneContact', 'client_id' => 0, 'phone' => '+172737', 'first_name' => 'first', 'last_name' => 'last', ];
$inputContacts[1] = ['_' => 'inputPhoneContact', 'client_id' => 0, 'phone' => '+172737', 'first_name' => 'first', 'last_name' => 'last', ];
// You can add maximum 4000000000 contacts
$contacts_ImportedContacts = $MadelineProto->contacts->importContacts(['contacts' => $InputContacts, 'replace' => false, ]);
$MadelineProto->serialize('session.madeline');
| stackoverflow | {
"language": "en",
"length": 359,
"provenance": "stackexchange_0000F.jsonl.gz:882673",
"question_score": "7",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44597384"
} |
3592f273f8c426e2c7f446b183ec7b3f8f965168 | Stackoverflow Stackexchange
Q: angular 4 unit testing error `TypeError: ctor is not a constructor` I am trying to test my route resolver, and while testing I got TypeError: ctor is not a constructor with no idea why it happens, since the TypeScript compilation shows no errors.
TypeError: ctor is not a constructor
TypeError: ctor is not a constructor
at _createClass (http://localhost:9877/_karma_webpack_/vendor.bundle.js:42355:26)
at _createProviderInstance$1 (http://localhost:9877/_karma_webpack_/vendor.bundle.js:42330:26)
at resolveNgModuleDep (http://localhost:9877/_karma_webpack_/vendor.bundle.js:42315:17)
at _createClass (http://localhost:9877/_karma_webpack_/vendor.bundle.js:42362:26)
at _createProviderInstance$1 (http://localhost:9877/_karma_webpack_/vendor.bundle.js:42330:26)
at resolveNgModuleDep (http://localhost:9877/_karma_webpack_/vendor.bundle.js:42315:17)
at NgModuleRef_.webpackJsonp../node_modules/@angular/core/@angular/core.es5.js.NgModuleRef_.get (http://localhost:9877/_karma_webpack_/vendor.bundle.js:43401:16)
at TestBed.webpackJsonp../node_modules/@angular/core/@angular/core/testing.es5.js.TestBed.get (http://localhost:9877/_karma_webpack_/vendor.bundle.js:48412:47)
at http://localhost:9877/_karma_webpack_/vendor.bundle.js:48418:61
at Array.map (native)
A: This can be an error in the providers declarations.
When you try to mock a provider and use useClass instead of useValue the error "TypeError: ctor is not a constructor" is fired.
Here is an example that fires the error :
providers: [{provide: OrderService, useClass: new OrderServiceMock()}]
The correct declaration is :
providers: [{provide: OrderService, useValue: new OrderServiceMock()}]
| Q: angular 4 unit testing error `TypeError: ctor is not a constructor` I am trying to test my route resolver, and while testing I got TypeError: ctor is not a constructor with no idea why it happens, since the TypeScript compilation shows no errors.
TypeError: ctor is not a constructor
TypeError: ctor is not a constructor
at _createClass (http://localhost:9877/_karma_webpack_/vendor.bundle.js:42355:26)
at _createProviderInstance$1 (http://localhost:9877/_karma_webpack_/vendor.bundle.js:42330:26)
at resolveNgModuleDep (http://localhost:9877/_karma_webpack_/vendor.bundle.js:42315:17)
at _createClass (http://localhost:9877/_karma_webpack_/vendor.bundle.js:42362:26)
at _createProviderInstance$1 (http://localhost:9877/_karma_webpack_/vendor.bundle.js:42330:26)
at resolveNgModuleDep (http://localhost:9877/_karma_webpack_/vendor.bundle.js:42315:17)
at NgModuleRef_.webpackJsonp../node_modules/@angular/core/@angular/core.es5.js.NgModuleRef_.get (http://localhost:9877/_karma_webpack_/vendor.bundle.js:43401:16)
at TestBed.webpackJsonp../node_modules/@angular/core/@angular/core/testing.es5.js.TestBed.get (http://localhost:9877/_karma_webpack_/vendor.bundle.js:48412:47)
at http://localhost:9877/_karma_webpack_/vendor.bundle.js:48418:61
at Array.map (native)
A: This can be an error in the providers declarations.
When you try to mock a provider and use useClass instead of useValue the error "TypeError: ctor is not a constructor" is fired.
Here is an example that fires the error :
providers: [{provide: OrderService, useClass: new OrderServiceMock()}]
The correct declaration is :
providers: [{provide: OrderService, useValue: new OrderServiceMock()}]
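For completeness, a hypothetical minimal mock that fits the declaration above — the method name and its canned return value are illustrative:
class OrderServiceMock {
  // Return canned data instead of calling the real backend
  getOrders() {
    return [];
  }
}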
A: I had the exact same message when building my app with AOT.
My problem was not related to providers as @abahet suggested.
It was because I set up a new library which was not AOT compliant (and didn't have any provider either). The library in question had to export (I'm talking about the TypeScript export, not the one from an Angular module) what was imported in the module (in this case, a component and a pipe).
A: I had this problem with Angular Universal in combination with Firebase in a Firebase Universal Starter project. I had almost lost hope as all the potential fixes on Stack Overflow didn't help. So I did the following:
*
*Updated all npm packages with https://www.npmjs.com/package/npm-check-updates
*Removed node_modules and package-lock.json and reinstalled them
*Fixed all errors due to changed APIs
*Now it was working :-)
I never found out what package caused error, but one approach to find out is to create a MockAppModule where you remove modules one by one. Eventually you will find the one with the problem. But in my case I got lucky I guess due to one of the bugged packages got updated or something.
A: A third possibility for you: I had a module containing other modules and didn't export (TypeScript speaking) the other modules.
A: I too had this problem with AOT enabled. I added a new service file. I restarted the compiler and the issue was resolved.
| stackoverflow | {
"language": "en",
"length": 379,
"provenance": "stackexchange_0000F.jsonl.gz:882716",
"question_score": "97",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44597532"
} |
5e5e2a2c19bf1c0234003a2508b3a4f545687dc8 | Stackoverflow Stackexchange
Q: How to use model.fit_generator in keras When and how should I use fit_generator?
What is the difference between fit and fit_generator?
A: If you have prepared your data and labels in all necessary aspects and simply can assign these to an array x and y, then use model.fit(x, y).
If you need to preprocess and/or augment your data while training, then you can take advantage of the generators that Keras provides.
You could for example augment images by applying random transforms (very helpful if you only have little data to train with), pad sequences, tokenize text, let Keras automagically read your data from a folder and assign appropriate classes (flow_from_directory) and much more.
See here for examples and boilerplate code for image preprocessing: https://keras.io/preprocessing/image/
or here for text preprocessing:
https://keras.io/preprocessing/text/
fit_generator also will help you to train in a more memory efficient way since you load data only when needed. The generator function yields (aka "delivers") data to your model batch by batch on demand, so to say.
| Q: How to use model.fit_generator in keras When and how should I use fit_generator?
What is the difference between fit and fit_generator?
A: If you have prepared your data and labels in all necessary aspects and simply can assign these to an array x and y, then use model.fit(x, y).
If you need to preprocess and/or augment your data while training, then you can take advantage of the generators that Keras provides.
You could for example augment images by applying random transforms (very helpful if you only have little data to train with), pad sequences, tokenize text, let Keras automagically read your data from a folder and assign appropriate classes (flow_from_directory) and much more.
See here for examples and boilerplate code for image preprocessing: https://keras.io/preprocessing/image/
or here for text preprocessing:
https://keras.io/preprocessing/text/
fit_generator also will help you to train in a more memory efficient way since you load data only when needed. The generator function yields (aka "delivers") data to your model batch by batch on demand, so to say.
A: They are useful for on-the-fly augmentations, which the previous poster mentioned. This however is not necessarily restricted to generators, because you can fit for one epoch and then augment your data and fit again.
What does not work with fit is using too much data per epoch though. This means that if you have a dataset of 1 TB and only 8 GB of RAM you can use the generator to load the data on the fly and only hold a couple of batches in memory. This helps tremendously on scaling to huge datasets.
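As a minimal sketch of that pattern — assuming model is an already-compiled Keras model and x_train/y_train are NumPy arrays:
import numpy as np

def batch_generator(x, y, batch_size=32):
    # Yield (inputs, targets) batches forever, as fit_generator expects
    while True:
        idx = np.random.randint(0, len(x), batch_size)
        yield x[idx], y[idx]

model.fit_generator(batch_generator(x_train, y_train),
                    steps_per_epoch=len(x_train) // 32,
                    epochs=10)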
| stackoverflow | {
"language": "en",
"length": 264,
"provenance": "stackexchange_0000F.jsonl.gz:882724",
"question_score": "4",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44597555"
} |
4183db33dd8a0037fc373c2bab65315242581779 | Stackoverflow Stackexchange
Q: Deploy NextJS with Dokku in Production I have set up Dokku and want to deploy my basic NextJs to it. Everything works fine, except that the application is running in development mode.
When I output the NODE_ENV variable in my JSX, it is first production but changes to development.
const Index = () => (
<div>
<Link href="/about">
<a>About Page</a>
</Link>
{process.env.NODE_ENV}
</div>
);
That's what I am seeing. The NODE_ENV variable changes during the page load.
package.json:
"scripts": {
"start": "next src",
"build": "next build src"
},
App.json:
{
"scripts": {
"dokku": {
"predeploy": "npm run build"
}
}
}
Procfile:
web: npm start -- --port $PORT
In addition I set two configs for my dokku application:
dokku config:set my-app NPM_CONFIG_PRODUCTION=false
dokku config:set my-app HOST=0.0.0.0 NODE_ENV=production
What am I missing to get it into production mode?
A: Solved it by setting up my own Express server.
package.json
"scripts": {
"dev": "node server.js",
"build": "next build",
"start": "NODE_ENV=production node server.js"
},
app.json
{
"scripts": {
"dokku": {
"predeploy": "npm run build"
}
}
}
Procfile
web: npm start -- --port $PORT
| Q: Deploy NextJS with Dokku in Production I have set up Dokku and want to deploy my basic NextJs to it. Everything works fine, except that the application is running in development mode.
When I output the NODE_ENV variable in my JSX, it is first production but changes to development.
const Index = () => (
<div>
<Link href="/about">
<a>About Page</a>
</Link>
{process.env.NODE_ENV}
</div>
);
That's what I am seeing. The NODE_ENV variable changes during the page load.
package.json:
"scripts": {
"start": "next src",
"build": "next build src"
},
App.json:
{
"scripts": {
"dokku": {
"predeploy": "npm run build"
}
}
}
Procfile:
web: npm start -- --port $PORT
In addition I set two configs for my dokku application:
dokku config:set my-app NPM_CONFIG_PRODUCTION=false
dokku config:set my-app HOST=0.0.0.0 NODE_ENV=production
What am I missing to get it into production mode?
A: Solved it by setting up my own Express server.
package.json
"scripts": {
"dev": "node server.js",
"build": "next build",
"start": "NODE_ENV=production node server.js"
},
app.json
{
"scripts": {
"dokku": {
"predeploy": "npm run build"
}
}
}
Procfile
web: npm start -- --port $PORT
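For reference, a minimal server.js sketch that fits the scripts above — it assumes express and next are installed and reads the port from the environment:
const express = require('express');
const next = require('next');

const port = parseInt(process.env.PORT, 10) || 3000;
const dev = process.env.NODE_ENV !== 'production';
const app = next({ dev });
const handle = app.getRequestHandler();

app.prepare().then(() => {
  const server = express();
  // Hand every route to Next.js
  server.all('*', (req, res) => handle(req, res));
  server.listen(port, () => console.log(`> Ready on port ${port}`));
});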
A: According to this github issue comment you need to have the application listen to the PORT environment variable.
I could not get this to work though. There are examples of how you can get a npm-script to consume environment variables, but I didn't want to go down that road just now. (see this question for more info on that.)
However, I did notice that Next.js listen to port 3000 by default, and dokku uses port 5000 internally, so I got it to work by simply changing the default npm start script to next -p 5000, that is I hardcoded the Next.js app to use port 5000.
This works for now, but I've only tested it with a clean, minimal project, so not sure if there are other blockers down the road.
Also, it seems like Next.js does in fact pick up on env variables from .env files, but that isn't reflected in the port the app is served on for some reason:
| stackoverflow | {
"language": "en",
"length": 345,
"provenance": "stackexchange_0000F.jsonl.gz:882745",
"question_score": "3",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44597608"
} |
e9ba46829bb39a7a8658c4341b0bdf186c771420 | Stackoverflow Stackexchange
Q: Filter nested object java 8 I have these classes
class LivingOrganism
{
List<Domain> listOfDomain;
}
class Domain
{
List<Species> listOfSpecies;
}
class Species
{
List<Color> listOfColor;
}
class Color
{
String color;
}
From top to bottom, there won't be any duplicated entries until I reach color. So some species, even if they are in another domain, can have the same color. And one single species can have different colors.
Given a parent LivingOrganism, I want to filter a listOfDomain with a certain color.
I did it with classic nested for loops, but with four levels of nesting the code doesn't look pretty. I was trying to use Java 8 flatMap and filter to get more elegant code, but I spent hours without success.
I even made a badly drawn graph in MSPaint
Let's say I want to get List<Species> that can be blue or List<Domain> with all the Species that can be blue. How do I proceed?
Any help would be appreciated
A: Try this.
List<Domain> result = livingOrganism.listOfDomain.stream()
.filter(d -> d.listOfSpecies.stream()
.flatMap(s -> s.listOfColor.stream())
.anyMatch(c -> c.equals("blue")))
.collect(Collectors.toList());
| Q: Filter nested object java 8 I have these classes
class LivingOrganism
{
List<Domain> listOfDomain;
}
class Domain
{
List<Species> listOfSpecies;
}
class Species
{
List<Color> listOfColor;
}
class Color
{
String color;
}
From top to bottom, there won't be any duplicated entries until I reach color. So some species, even if they are in another domain, can have the same color. And one single species can have different colors.
Given a parent LivingOrganism, I want to filter a listOfDomain with a certain color.
I did it with classic nested for loops, but with four levels of nesting the code doesn't look pretty. I was trying to use Java 8 flatMap and filter to get more elegant code, but I spent hours without success.
I even made a badly drawn graph in MSPaint
Let's say I want to get List<Species> that can be blue or List<Domain> with all the Species that can be blue. How do I proceed?
Any help would be appreciated
A: Try this.
List<Domain> result = livingOrganism.listOfDomain.stream()
.filter(d -> d.listOfSpecies.stream()
.flatMap(s -> s.listOfColor.stream())
.anyMatch(c -> c.equals("blue")))
.collect(Collectors.toList());
A: Your Color, btw, looks a lot like an enum; you should probably make it one. Also the names of the methods should be listOfDomains, listOfSpecies and listOfColors (notice the s at the end).
String colorOfInterest = "red";
LivingOrganism one = new LivingOrganism...
List<Domain> domains = one.getListOfDomain()
.stream()
.filter(d -> {
return d.getListOfSpecies()
.stream()
.flatMap(s -> s.getListOfColor().stream())
.anyMatch(c -> c.getColor().equals(colorOfInterest));
})
.collect(Collectors.toList());
| stackoverflow | {
"language": "en",
"length": 246,
"provenance": "stackexchange_0000F.jsonl.gz:882749",
"question_score": "7",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44597617"
} |
0718030fd03d301bcdd47c4fe23d611af2f63cd6 | Stackoverflow Stackexchange
Q: Using both INSERTED and DELETED from an OUTPUT In my UPDATE query, I would like to store the deleted and inserted values into separate database tables.
So my query needs to look something like this:
UPDATE A_TABLE
SET table_column = 'something'
OUTPUT deleted.*
INTO audit.deleted_content
OUTPUT inserted.*
INTO audit.inserted_content
WHERE blah = 'something else'
However I am getting an error on my second INTO keyword (incorrect syntax near 'into'). Is there a correct way to accomplish the above?
A: You can use a temporary table to hold the results and do your inserts after like so:
create table #audit (
deleted_table_column varchar(32)
, inserted_table_column varchar(32)
)
update A_table
set table_column = 'something'
output deleted.table_column, inserted.table_column
into #audit
where blah = 'something else'
insert into audit.deleted_content
select deleted_table_column from #audit;
insert into audit.inserted_content
select inserted_table_column from #audit;
rextester demo: http://rextester.com/BVSFRJ92976
| Q: Using both INSERTED and DELETED from an OUTPUT In my UPDATE query, I would like to store the deleted and inserted values into separate database tables.
So my query needs to look something like this:
UPDATE A_TABLE
SET table_column = 'something'
OUTPUT deleted.*
INTO audit.deleted_content
OUTPUT inserted.*
INTO audit.inserted_content
WHERE blah = 'something else'
However I am getting an error on my second INTO keyword (incorrect syntax near 'into'). Is there a correct way to accomplish the above?
A: You can use a temporary table to hold the results and do your inserts after like so:
create table #audit (
deleted_table_column varchar(32)
, inserted_table_column varchar(32)
)
update A_table
set table_column = 'something'
output deleted.table_column, inserted.table_column
into #audit
where blah = 'something else'
insert into audit.deleted_content
select deleted_table_column from #audit;
insert into audit.inserted_content
select inserted_table_column from #audit;
rextester demo: http://rextester.com/BVSFRJ92976
| stackoverflow | {
"language": "en",
"length": 140,
"provenance": "stackexchange_0000F.jsonl.gz:882763",
"question_score": "3",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44597666"
} |
f72de1df80ac9692836097d178b6680618f34e67 | Stackoverflow Stackexchange
Q: How to clear react native webview cookies? How to clear react native webview cookies?
When I re-open the page, it remembers my account if I logged in on a website. But I don't want that.
Can I do this by injecting javascript?
A: This is the solution without using any third party libraries.
var RCTNetworking = require("RCTNetworking");
RCTNetworking.clearCookies(() => {});
| Q: How to clear react native webview cookies? How to clear react native webview cookies?
When I re-open the page, it remembers my account if I logged in on a website. But I don't want that.
Can I do this by injecting javascript?
A: This is the solution without using any third party libraries.
var RCTNetworking = require("RCTNetworking");
RCTNetworking.clearCookies(() => {});
A: Solved by using this: https://github.com/react-native-community/react-native-cookies
CookieManager.clearAll();
A: I used incognito mode as a workaround and it worked perfectly for me.
<WebView
incognito={true}
userAgent="Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; AS; rv:11.0) like Gecko"
source={{
uri:
'your url',
}}
style={{ flex: 1 }}
/>
A: At the time I'm posting, CookieManager has poor support for Expo; I had problems clearing cookies after changing a password.
In 2020, for those using Expo, this works:
const RCTNetworking = require('react-native/Libraries/Network/RCTNetworking')
RCTNetworking.clearCookies(() => { })
| stackoverflow | {
"language": "en",
"length": 141,
"provenance": "stackexchange_0000F.jsonl.gz:882786",
"question_score": "15",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44597732"
} |
05207d17c8c47bf25f597ee785f1e2bf44fc4709 | Stackoverflow Stackexchange
Q: "git blame" a file access control modifier I noticed that a couple .txt files in my git repository have execute permissions. I also noticed that when I did chmod a-x *.txt the repo actually showed changes. Here is the output of the git diff after updating the files.
diff --git a/requirements.txt b/requirements.txt
old mode 100755
new mode 100644
Is there a way to blame permissions of a file? (Specifically, I'd like to find out who added the a+x permissions to these files.)
A: You have probably used git diff command with some commits specified to get the results shown in your question. Let's assume the command was:
git diff goodcommit..badcommit requirements.txt
In case you omitted the ..badcommit part, assume badcommit is HEAD. You can easily find the offending commit (and the culprit) by running the following sequence of commands:
git bisect start badcommit goodcommit
git bisect run test ! -x requirements.txt
and wait for finish. At the end you will get a message like:
running test ! -x requirements.txt
8088473809f905bd8f3d5825983e8c9fe82b10c6 is the first bad commit
commit 8088473809f905bd8f3d5825983e8c9fe82b10c6
Author: author
Date: Fri Jun 16 23:05:49 2017 +0100
commit message
To get back to normal work, just run:
git bisect reset
| Q: "git blame" a file access control modifier I noticed that a couple .txt files in my git repository have execute permissions. I also noticed that when I did chmod a-x *.txt the repo actually showed changes. Here is the output of the git diff after updating the files.
diff --git a/requirements.txt b/requirements.txt
old mode 100755
new mode 100644
Is there a way to blame permissions of a file? (Specifically, I'd like to find out who added the a+x permissions to these files.)
A: You have probably used git diff command with some commits specified to get the results shown in your question. Let's assume the command was:
git diff goodcommit..badcommit requirements.txt
In case you omitted the ..badcommit part, assume badcommit is HEAD. You can easily find the offending commit (and the culprit) by running the following sequence of commands:
git bisect start badcommit goodcommit
git bisect run test ! -x requirements.txt
and wait for finish. At the end you will get a message like:
running test ! -x requirements.txt
8088473809f905bd8f3d5825983e8c9fe82b10c6 is the first bad commit
commit 8088473809f905bd8f3d5825983e8c9fe82b10c6
Author: author
Date: Fri Jun 16 23:05:49 2017 +0100
commit message
To get back to normal work, just run:
git bisect reset
A: Git only stores the file contents, and execute bit value. See this answer for further info. So it will not reflect any other permission changes.
You can use:
git log --follow -p -- a/requirements.txt
to view the history of a file.
| stackoverflow | {
"language": "en",
"length": 241,
"provenance": "stackexchange_0000F.jsonl.gz:882881",
"question_score": "4",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44598046"
} |
6fad6ab056a402d88695c95074c4ca6d5cff0954 | Stackoverflow Stackexchange
Q: How to remove xticks and yticks from all axes? I have three axes in a figure and I want to remove the xticks and yticks from all of them.
I wrote the code below, but it works only on the current axes, not all of them:
set(gca,'xtick',[],'ytick',[]);
How to remove xticks and yticks from all axes?
A: As a more general solution inspired by @Luis Mendo's answer, use findobj to get the axes. This will avoid getting all children of the parent figure which could include "non-axes" elements:
set( findobj( gcf, 'Type', 'axes' ), 'XTick', [], 'YTick', [] );
| Q: How to remove xticks and yticks from all axes? I have three axes in a figure and I want to remove the xticks and yticks from all of them.
I wrote the code below, but it works only on the current axes, not all of them:
set(gca,'xtick',[],'ytick',[]);
How to remove xticks and yticks from all axes?
A: As a more general solution inspired by @Luis Mendo's answer, use findobj to get the axes. This will avoid getting all children of the parent figure which could include "non-axes" elements:
set( findobj( gcf, 'Type', 'axes' ), 'XTick', [], 'YTick', [] );
A: This should work:
set(get(gcf,'Children'),'Xtick',[],'Ytick',[]);
| stackoverflow | {
"language": "en",
"length": 101,
"provenance": "stackexchange_0000F.jsonl.gz:882887",
"question_score": "4",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44598075"
} |
f4f93911e1da40f0b107478f1f5b82b3eae4c926 | Stackoverflow Stackexchange
Q: Firebase Functions: Unclear "connection error" I am getting this error every so many runs with my HTTP Firebase Cloud Function:
Function execution took ****ms, finished with status: 'connection error'
It happens inconsistently but I can't quite narrow down what the problem is. I don't believe the error is in my app as it's not showing an error printout. And my own connection with firebase while running this cloud function isn't cutting out.
Any ideas why Firebase randomly fails cloud function executions with "connection error"?
A: Even though this question has an approved answer, you may have followed the steps in that answer and still reached a point where the error was still occurring.
In that case, we were informed by GCP that there's a known issue with Node 8 CFs and this connection error, for which the workaround is to update the node version to 10.
Related github issue: https://github.com/firebase/firebase-functions/issues/429
Specific comment: https://github.com/firebase/firebase-functions/issues/429#issuecomment-577324193
| Q: Firebase Functions: Unclear "connection error" I am getting this error every so many runs with my HTTP Firebase Cloud Function:
Function execution took ****ms, finished with status: 'connection error'
It happens inconsistently but I can't quite narrow down what the problem is. I don't believe the error is in my app as it's not showing an error printout. And my own connection with firebase while running this cloud function isn't cutting out.
Any ideas why Firebase randomly fails cloud function executions with "connection error"?
A: Even though this question has an approved answer, you may have followed the steps in that answer and still reached a point where the error was still occurring.
In that case, we were informed by GCP that there's a known issue with Node 8 CFs and this connection error, for which the workaround is to update the node version to 10.
Related github issue: https://github.com/firebase/firebase-functions/issues/429
Specific comment: https://github.com/firebase/firebase-functions/issues/429#issuecomment-577324193
A: Function execution took ****ms, finished with status: 'connection error' or ECONNRESET usually happens when a function doesn’t know whether a promise resolved or not.
Every promise must be returned, as mentioned in the docs here. There is also a blog post (with helpful video!) about this.
A couple of examples of unreturned promises:
exports.someFunc = functions.database.ref('/some/path').onCreate(event => {
let db = admin.database();
// UNRETURNED PROMISE
db.ref('/some/path').remove();
return db.ref('/some/other/path').set(event.data.val());
});
exports.makeUppercase = functions.database.ref('/hello/{pushId}').onWrite(event => {
return event.data.ref.set('world').then(snap => {
// UNRETURNED PROMISE
admin.database().ref('lastwrite').set(admin.database.ServerValue.TIMESTAMP);
});
});
exports.makeUppercase = functions.database.ref('/hello/{pushId}').onWrite(event => {
// UNRETURNED PROMISE
event.data.ref.set('world').then(snap => {
return admin.database().ref('lastwrite').set(admin.database.ServerValue.TIMESTAMP);
});
});
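For contrast, one way to fix the first example is to combine both writes with Promise.all and return the result:
exports.someFunc = functions.database.ref('/some/path').onCreate(event => {
  let db = admin.database();
  // Both promises are returned, so the function knows when all work is done
  return Promise.all([
    db.ref('/some/path').remove(),
    db.ref('/some/other/path').set(event.data.val())
  ]);
});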
To help catch this mistake before deploying code, check out this eslint rule.
For an in-depth look at promises, here are some helpful resources:
*
*Mozilla docs
*Ponyfoo promises deep dive
*Links to the ECMA standard
*Egghead.io course
A: I think it might be too many simultaneous firebase database connections :/ https://groups.google.com/forum/#!topic/firebase-talk/4RjyYIDqMVQ
A: I faced the same issue while deploying an uninstallTracking event to Firebase for an Android device.
It turns out that the property I was trying to access was available for only some users,
so when it couldn't find the property for those other users it gave this error.
So first just check whether the property you are trying to access is there or not.
A: I've been getting this on an HTTP trigger that immediately calls response.end() with no other code!
I had a very complex function that was working great, then it stopped working due to this error.
I found that by deleting the trigger (deploying my triggers with the offending trigger commented out), then deploying again with the trigger uncommented seems to have fixed it.
Perhaps there is a bug that works in that gets reset when you delete the trigger in the cloud.
Hope this saves somebody some frustration.
A: It could be outdated libraries.
*
*Go to the terminal
*Inside the functions folder run the command
npm outdated
This will show all libraries that need to be updated.
*To update the libraries run the command
npm update
*Deploy the cloud functions with
firebase deploy --only functions
A: For debugging purposes, I did the following:
response.send(someArray.length)
...which resulted in the following:
response.send(218)
...which resulted in a bodyless response, just a "status code" (namely 218) being sent. To fix this, I did:
response.send("count: " + someArray.length)
| stackoverflow | {
"language": "en",
"length": 557,
"provenance": "stackexchange_0000F.jsonl.gz:882894",
"question_score": "32",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44598089"
} |
c457db54d613a312b84ce4db9c186f9e89b4aa2b | Stackoverflow Stackexchange
Q: How to check block is called using rspec I want to check whether the block is called in my function using rspec. Below is my code:
class SP
def speak(options={},&block)
puts "speak called"
block.call()
rescue ZeroDivisionError => e
end
end
describe SP do
it "testing speak functionality can receive a block" do
sp = SP.new
def test_func
a = 1
end
sp_mock = double(sp)
expect(sp_mock).to receive(:speak).with(test_func)
sp.speak(test_func)
end
end
Below is my error:
SP testing speak functionality can receive a block
Failure/Error: block.call()
NoMethodError:
undefined method `call' for nil:NilClass
# ./test.rb:9:in `speak'
# ./test.rb:25:in `block (2 levels) in <top (required)>'
Could you please help? I've spent lots of time on this.
A: I think Stefan provided the best answer. However I wanted to point out that you should be testing the behaviour of the code instead of implementation details.
describe SP do
it "testing speak functionality can receive a block" do
sp = SP.new
called = false
test_func = -> () { called = true }
sp.speak(&test_func)
expect(called).to eql(true)
end
end
| Q: How to check block is called using rspec I want to check whether the block is called in my function using rspec. Below is my code:
class SP
def speak(options={},&block)
puts "speak called"
block.call()
rescue ZeroDivisionError => e
end
end
describe SP do
it "testing speak functionality can receive a block" do
sp = SP.new
def test_func
a = 1
end
sp_mock = double(sp)
expect(sp_mock).to receive(:speak).with(test_func)
sp.speak(test_func)
end
end
Below is my error:
SP testing speak functionality can receive a block
Failure/Error: block.call()
NoMethodError:
undefined method `call' for nil:NilClass
# ./test.rb:9:in `speak'
# ./test.rb:25:in `block (2 levels) in <top (required)>'
Could you please help? I've spent lots of time on this.
A: I think Stefan provided the best answer. However I wanted to point out that you should be testing the behaviour of the code instead of implementation details.
describe SP do
it "testing speak functionality can receive a block" do
sp = SP.new
called = false
test_func = -> () { called = true }
sp.speak(&test_func)
expect(called).to eql(true)
end
end
A: You have to use one of RSpec's yield matcher:
describe SP do
it "testing speak functionality can receive a block" do
sp = SP.new
expect { |b| sp.speak(&b) }.to yield_control
end
end
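Since speak invokes the block without arguments, you can also assert that explicitly with the related matcher:
expect { |b| sp.speak({}, &b) }.to yield_with_no_args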
| stackoverflow | {
"language": "en",
"length": 205,
"provenance": "stackexchange_0000F.jsonl.gz:882895",
"question_score": "6",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44598091"
} |
8ba1e93f4bc88a78adab6a6ae9aabe9b7446d6d2 | Stackoverflow Stackexchange
Q: Valid ways to interrupt an OSGi component from being activated I have an OSGi component:
@Component(immediate=true)
public class SomeComponent implements SomeInterface {...}
Note that the component is immediate. Also, since the component class is implementing an interface, I understand that a service will be registered automatically upon (as part of the) activation of the component.
Now, I would like to be able to dynamically interrupt the component activation and service registration if certain conditions are met. Throwing a 'ComponentException' in a component activator doesn't seem to do the job:
@Activate
public void activate() {
if (notReady)
throw new ComponentException("Component not ready");
}
Any suggestions? Thanks!
A: You cannot preempt the service registration which happens before activation. You would be better off with two components. One which is immediate with no service which decides whether to enable or disable the second component which has the service. This second component can be disabled by default.
| Q: Valid ways to interrupt an OSGi component from being activated I have an OSGi component:
@Component(immediate=true)
public class SomeComponent implements SomeInterface {...}
Note that the component is immediate. Also, since the component class is implementing an interface, I understand that a service will be registered automatically upon (as part of the) activation of the component.
Now, I would like to be able to dynamically interrupt the component activation and service registration if certain conditions are met. Throwing a 'ComponentException' in a component activator doesn't seem to do the job:
@Activate
public void activate() {
if (notReady)
throw new ComponentException("Component not ready");
}
Any suggestions? Thanks!
A: You cannot preempt the service registration which happens before activation. You would be better off with two components. One which is immediate with no service which decides whether to enable or disable the second component which has the service. This second component can be disabled by default.
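A sketch of that two-component pattern — the component names and the readiness check below are illustrative:
import org.osgi.service.component.ComponentContext;
import org.osgi.service.component.annotations.Activate;
import org.osgi.service.component.annotations.Component;

@Component(immediate = true)
public class SomeComponentGate {
    @Activate
    void activate(ComponentContext context) {
        if (isReady()) {
            // Enable the service component, which is disabled by default
            context.enableComponent("com.example.SomeComponent");
        }
    }

    private boolean isReady() {
        return true; // replace with the real readiness check
    }
}

// The service itself stays disabled until the gate enables it:
@Component(enabled = false)
public class SomeComponent implements SomeInterface {...}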
| stackoverflow | {
"language": "en",
"length": 155,
"provenance": "stackexchange_0000F.jsonl.gz:882896",
"question_score": "3",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44598099"
} |
aeb419b4e959be37c80f782b110b1182ad2efc2f | Stackoverflow Stackexchange
Q: Detect AWS host environment from within a Docker Container From within a Docker container, how can I detect that I am running inside an AWS environment? I want the same container to optionally execute some AWS commands on startup IF running from within AWS, but skip those if running in a local environment.
Currently, I am thinking that the simple way is to set an environment variable when running in AWS.
Is there another way?
A: If your Docker container lacks curl or wget, you may use this trick in Bash:
if ( exec 2>/dev/null ; echo > /dev/tcp/169.254.169.254/80 ) ; then
echo "AWS"
fi
| Q: Detect AWS host environment from within a Docker Container From within a Docker container, how can I detect that I am running inside an AWS environment? I want the same container to optionally execute some AWS commands on startup IF running from within AWS, but skip those if running in a local environment.
Currently, I am thinking that the simple way is to set an environment variable when running in AWS.
Is there another way?
A: If your Docker container lacks curl or wget, you may use this trick in Bash:
if ( exec 2>/dev/null ; echo > /dev/tcp/169.254.169.254/80 ) ; then
echo "AWS"
fi
A: For instance:
curl -i http://169.254.169.254/ | grep "200 OK"
Docs: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/identify_ec2_instances.html
A: Crude way of checking if you are running on AWS. All instances that are running on AWS have access to an internal metadata server with IP: 169.254.169.254. If you can connect to it, then you are on AWS. Otherwise you are not.
$ curl -s --connect-timeout 2 169.254.169.254 > /dev/null
$ echo $?
0
On non AWS machine:
$ curl -s --connect-timeout 2 169.254.169.254 > /dev/null
$ echo $?
28
| stackoverflow | {
"language": "en",
"length": 189,
"provenance": "stackexchange_0000F.jsonl.gz:882926",
"question_score": "3",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44598176"
} |
91a2c474aa39b04ab085eab39d8dcb5eb358e0b7 | Stackoverflow Stackexchange
Q: ScrollView not show content React-native I'm building an application using react-native was all right until I needed to add ScrollView, no matter what screen I add and the content stops being rendered, no error is displayed, whatever is occurring is silent, I've tried Several things I saw on the internet but nothing solved my problem, I put an example code below...
My code:
export default () => (
<View style={{flex: 1}}>
<ScrollView style={{flex: 1}}>
// My page content here
</ScrollView>
</View>
);
Apparently it's something simple but I did not find the solution, if anyone can help me thank you.
A: Try adding a height and width to the scroll view's style, and then work your way from there.
Or
make it absolute position and set the top, left, bottom and right position to 0 as such
position: 'absolute',
top: 0,
left: 0,
right: 0,
bottom: 0
| Q: ScrollView not show content React-native I'm building an application using react-native was all right until I needed to add ScrollView, no matter what screen I add and the content stops being rendered, no error is displayed, whatever is occurring is silent, I've tried Several things I saw on the internet but nothing solved my problem, I put an example code below...
My code:
export default () => (
<View style={{flex: 1}}>
<ScrollView style={{flex: 1}}>
// My page content here
</ScrollView>
</View>
);
Apparently it's something simple but I did not find the solution, if anyone can help me thank you.
A: Try adding a height and width to the scroll view's style, and then work your way from there.
Or
make it absolute position and set the top, left, bottom and right position to 0 as such
position: 'absolute',
top: 0,
left: 0,
right: 0,
bottom: 0
A: I think it's because you used the flex style on both the root and the ScrollView. That affects your display, making the data not fit the screen. Try removing flex from all of them and run it without the flex style first.
A: If you add some space to the end of the ScrollView, you will probably be able to see all the content:
export default () => (
<ScrollView>
<View style={{flex: 1,paddingBottom: 300}}>
// Page content here
</View>
</ScrollView>
);
A: The code that fixed this for me was to use contentContainerStyle={{flex: 1}} rather than just style. This sets the flex on the outer container of the scrollview, rather than the inner container.
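In other words, the style moves from the ScrollView itself to its inner content wrapper:
<ScrollView contentContainerStyle={{flex: 1}}>
  // Page content here
</ScrollView>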
A: This should fix the problem:
export default () => (
<View style={{flex: 1}}>
<ScrollView>
// Page content
</ScrollView>
</View>
);
A: The 'flex: 1' is causing the issue, because ScrollView is used whenever the content overflows, so obviously you shouldn't be specifying its width. Try this instead:
export default () => (
<ScrollView>
<View style={{flex: 1}}>
// Page content here
</View>
</ScrollView>
);
A: The ScrollView component inherits the View height.
You can try this
<View style={{flex: 1, height: 100}}>
<ScrollView style={{flex: 1}}>
// My page content here
</ScrollView>
</View>
It's late, but maybe it's still helpful.
A: Well, my problem was that I was using a percentage height for each View; it seems that ScrollView doesn't work that way.
For example: height: '30%' won't work
height: 300 will do
A: The ScrollView gets its height (flex) from the view that wraps it; you might want to wrap it between (<> </>).
A: Gonna throw my 2 cents in here.
I had this issue with a ScrollList that was part of a child component.
The child component was being consumed by a parent component.
The parent component was imported and displayed in a Screen.
Screen top View styles: must include flex:1
Parent Component top View style: must include flex:1
Child component that contains ScrollList: requires only cosmetic styles no flex.
| stackoverflow | {
"language": "en",
"length": 480,
"provenance": "stackexchange_0000F.jsonl.gz:882940",
"question_score": "4",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44598217"
} |
8ad86b89f8c6bf6733e4111a8ce156b8214befff | Stackoverflow Stackexchange
Q: Is it possible to get the URL of the current pipeline execution in Bitbucket Pipelines? Every pipeline execution has its own URL (I think it is a kind of UUID?).
Is it possible to get that URL, or the UUID to compose the URL?
I have a command inside my tasks triggering messages using a REST/API call, but I want to include a link to the pipeline that triggered the message.
A: You can try doing it with default variables in pipelines. The ones you need are:
*
*$BITBUCKET_GIT_HTTP_ORIGIN - the URL for the origin (e.g. http://bitbucket.org/<account>/<repo>).
*$BITBUCKET_BUILD_NUMBER - the unique identifier for your build.
For the time of writing this answer, the following combination works:
$BITBUCKET_GIT_HTTP_ORIGIN/addon/pipelines/home#!/results/$BITBUCKET_BUILD_NUMBER
| Q: Is it possible to get the URL of the current pipeline execution in Bitbucket Pipelines? Every pipeline execution has its own URL (I think it is a kind of UUID?).
Is it possible to get that URL, or the UUID to compose the URL?
I have a command inside my tasks triggering messages using a REST/API call, but I want to include a link to the pipeline that triggered the message.
A: You can try doing it with default variables in pipelines. The ones you need are:
*
*$BITBUCKET_GIT_HTTP_ORIGIN - the URL for the origin (e.g. http://bitbucket.org/<account>/<repo>).
*$BITBUCKET_BUILD_NUMBER - the unique identifier for your build.
For the time of writing this answer, the following combination works:
$BITBUCKET_GIT_HTTP_ORIGIN/addon/pipelines/home#!/results/$BITBUCKET_BUILD_NUMBER
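For example, a step in bitbucket-pipelines.yml could post that link to a chat webhook (the $WEBHOOK_URL variable below is hypothetical):
pipelines:
  default:
    - step:
        script:
          - export PIPELINE_URL="$BITBUCKET_GIT_HTTP_ORIGIN/addon/pipelines/home#!/results/$BITBUCKET_BUILD_NUMBER"
          - curl -X POST -H 'Content-Type: application/json' -d "{\"text\":\"Pipeline: $PIPELINE_URL\"}" "$WEBHOOK_URL"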
| stackoverflow | {
"language": "en",
"length": 113,
"provenance": "stackexchange_0000F.jsonl.gz:882963",
"question_score": "6",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44598273"
} |
7f2a8811f132a46d72eec056d08dc40ee54ed507 | Stackoverflow Stackexchange
Q: ERR Client sent AUTH, but no password is set I am using jedis 2.8.0 and getting following exception:
Caused by: redis.clients.jedis.exceptions.JedisDataException: ERR Client sent AUTH, but no password is set
at redis.clients.jedis.Protocol.processError(Protocol.java:123)
at redis.clients.jedis.Protocol.process(Protocol.java:157)
at redis.clients.jedis.Protocol.read(Protocol.java:211)
at redis.clients.jedis.Connection.readProtocolWithCheckingBroken(Connection.java:297)
at redis.clients.jedis.Connection.getStatusCodeReply(Connection.java:196)
at redis.clients.jedis.BinaryJedis.auth(BinaryJedis.java:2049)
at redis.clients.jedis.JedisFactory.makeObject(JedisFactory.java:89)
at org.apache.commons.pool2.impl.GenericObjectPool.create(GenericObjectPool.java:868)
at org.apache.commons.pool2.impl.GenericObjectPool.borrowObject(GenericObjectPool.java:458)
at org.apache.commons.pool2.impl.GenericObjectPool.borrowObject(GenericObjectPool.java:363)
at redis.clients.util.Pool.getResource(Pool.java:49)
... 4 more
Following is the api I am using:
jedisPool = new JedisPool(jedisPoolConfig, host, port, IDLE_CONNECTION_TIMEOUT, authSecret);
Notes:
*
*I am able to successfully send commands to redis using the exact
same password using the redis-cli -a option.
*I also printed the password in the logs to see if I am sending wrong
password, but the password passed to JedisPool is proper.
*I also tried to "redis-cli monitor" calls on redis but I dont see any AUTH
request.
A: This might work if you don't want to set the password.
spring.redis.host=127.0.0.1
spring.redis.port=6379
spring.redis.password=
Leave the password field as blank.
OR
Use the requirepass option to set a password:
redis 127.0.0.1:6379> AUTH PASSWORD
(error) ERR Client sent AUTH, but no password is set.
redis 127.0.0.1:6379> CONFIG SET requirepass "mypass"
OK
redis 127.0.0.1:6379> AUTH mypass
OK
| Q: ERR Client sent AUTH, but no password is set I am using jedis 2.8.0 and getting following exception:
Caused by: redis.clients.jedis.exceptions.JedisDataException: ERR Client sent AUTH, but no password is set
at redis.clients.jedis.Protocol.processError(Protocol.java:123)
at redis.clients.jedis.Protocol.process(Protocol.java:157)
at redis.clients.jedis.Protocol.read(Protocol.java:211)
at redis.clients.jedis.Connection.readProtocolWithCheckingBroken(Connection.java:297)
at redis.clients.jedis.Connection.getStatusCodeReply(Connection.java:196)
at redis.clients.jedis.BinaryJedis.auth(BinaryJedis.java:2049)
at redis.clients.jedis.JedisFactory.makeObject(JedisFactory.java:89)
at org.apache.commons.pool2.impl.GenericObjectPool.create(GenericObjectPool.java:868)
at org.apache.commons.pool2.impl.GenericObjectPool.borrowObject(GenericObjectPool.java:458)
at org.apache.commons.pool2.impl.GenericObjectPool.borrowObject(GenericObjectPool.java:363)
at redis.clients.util.Pool.getResource(Pool.java:49)
... 4 more
Following is the api I am using:
jedisPool = new JedisPool(jedisPoolConfig, host, port, IDLE_CONNECTION_TIMEOUT, authSecret);
Notes:
*
*I am able to successfully send commands to redis using the exact
same password using the redis-cli -a option.
*I also printed the password in the logs to see if I am sending wrong
password, but the password passed to JedisPool is proper.
*I also tried to "redis-cli monitor" calls on redis but I dont see any AUTH
request.
A: This might work if you don't want to set the password.
spring.redis.host=127.0.0.1
spring.redis.port=6379
spring.redis.password=
Leave the password field as blank.
OR
Use the requirepass option to set a password:
redis 127.0.0.1:6379> AUTH PASSWORD
(error) ERR Client sent AUTH, but no password is set.
redis 127.0.0.1:6379> CONFIG SET requirepass "mypass"
OK
redis 127.0.0.1:6379> AUTH mypass
OK
A: There is no password set in your Redis instance. Password sent with redis-cli -a is just being ignored.
Now, you may set the password in redis instance. To do so, edit redis configuration file, set requirepass option and restart redis instance.
Or, just ignore password option while communicating with redis. E.g.
jedisPool = new JedisPool(jedisPoolConfig, host, port, IDLE_CONNECTION_TIMEOUT);
| stackoverflow | {
"language": "en",
"length": 246,
"provenance": "stackexchange_0000F.jsonl.gz:882973",
"question_score": "18",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44598321"
} |
1e94e0a9534839d2c1678ddb95e773b913a01296 | Stackoverflow Stackexchange
Q: GDB show R/W Permissions in specific memory address I have a simulation in Matlab Simulink which uses an SO library file with my algo code. I want to know the read/write permissions of a specific memory address using GDB. For example I care about knowing the permissions on the memory of this variable:
(gdb) p &CalValid
$3 = (const WORD *) 0x91f6005c <CalValid>
If I use (gdb) maintenance info sections it shows only the memory space of the matlab process and not the loaded library.
Exec file:
`/appl/matlab2010a32/bin/glnx86/MATLAB', file type elf32-i386.
0x8048134->0x8048147 at 0x00000134: .interp ALLOC LOAD READONLY DATA HAS_CONTENTS
0x8048148->0x8048168 at 0x00000148: .note.ABI-tag ALLOC LOAD READONLY DATA HAS_CONTENTS
...
0x0000->0x00c4 at 0x00001fa0: .comment READONLY HAS_CONTENTS
0x0000->0x0010 at 0x00002064: .gnu_debuglink READONLY HAS_CONTENTS
The reason I need this is because even though the variable is const, I have mprotect() elsewhere in the code changing the memory permissions of const variables.
| Q: GDB show R/W Permissions in specific memory address I have a simulation in Matlab Simulink which uses an SO library file with my algo code. I want to know the read/write permissions of a specific memory address using GDB. For example I care about knowing the permissions on the memory of this variable:
(gdb) p &CalValid
$3 = (const WORD *) 0x91f6005c <CalValid>
If I use (gdb) maintenance info sections it shows only the memory space of the matlab process and not the loaded library.
Exec file:
`/appl/matlab2010a32/bin/glnx86/MATLAB', file type elf32-i386.
0x8048134->0x8048147 at 0x00000134: .interp ALLOC LOAD READONLY DATA HAS_CONTENTS
0x8048148->0x8048168 at 0x00000148: .note.ABI-tag ALLOC LOAD READONLY DATA HAS_CONTENTS
...
0x0000->0x00c4 at 0x00001fa0: .comment READONLY HAS_CONTENTS
0x0000->0x0010 at 0x00002064: .gnu_debuglink READONLY HAS_CONTENTS
The reason I need this is because even though the variable is const, I have mprotect() elsewhere in the code changing the memory permissions of const variables.
| stackoverflow | {
"language": "en",
"length": 149,
"provenance": "stackexchange_0000F.jsonl.gz:882992",
"question_score": "3",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44598378"
} |
76434b237036aa89dfcae94df8d40df07bed9e42 | Stackoverflow Stackexchange
Q: Sync data from Amazon Aurora to Redshift I am trying to set up a sync between AWS Aurora and Redshift. What is the best way to achieve this sync?
Possible ways to sync can be: -
*
*Query table to find changes in a table(since I am only doing inserts, updates don't matter), export these changes to a flat file in S3 bucket and use Redshift copy command to insert into Redshift.
*Use python publisher and Boto3 to publish changes into a Kinesis stream and then consume this stream in Firehose from where I can copy directly into Redshift.
*Use Kinesis Agent to detect changes in the binlog (is it possible to detect changes in the binlog using Kinesis Agent?) and publish them to Firehose and from there copy into Redshift.
I haven't explored AWS Datapipeline yet.
A: As pointed out by @Mark B, the AWS Database Migration Service can migrate data between databases. This can be done as a one-off exercise, or it can run continuously, keeping two databases in sync.
The documentation shows that Amazon Aurora can be a source and Amazon Redshift can be a target.
| Q: Sync data from Amazon Aurora to Redshift I am trying to set up a sync between AWS Aurora and Redshift. What is the best way to achieve this sync?
Possible ways to sync can be: -
*
*Query table to find changes in a table(since I am only doing inserts, updates don't matter), export these changes to a flat file in S3 bucket and use Redshift copy command to insert into Redshift.
*Use python publisher and Boto3 to publish changes into a Kinesis stream and then consume this stream in Firehose from where I can copy directly into Redshift.
*Use Kinesis Agent to detect changes in the binlog (is it possible to detect changes in the binlog using Kinesis Agent?) and publish them to Firehose and from there copy into Redshift.
I haven't explored AWS Datapipeline yet.
A: As pointed out by @Mark B, the AWS Database Migration Service can migrate data between databases. This can be done as a one-off exercise, or it can run continuously, keeping two databases in sync.
The documentation shows that Amazon Aurora can be a source and Amazon Redshift can be a target.
A: AWS has just announced this new feature: Amazon Aurora zero-ETL integration with Amazon Redshift
This natively provides near real-time (second) synchronization from Aurora to Redshift.
A: You can also use federated queries: https://docs.aws.amazon.com/redshift/latest/dg/federated-overview.html
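For illustration, the federated-query setup looks roughly like this (every identifier and ARN below is a placeholder):
CREATE EXTERNAL SCHEMA aurora_schema
FROM MYSQL
DATABASE 'mydb'
URI 'my-aurora-cluster.cluster-abc123.us-east-1.rds.amazonaws.com'
IAM_ROLE 'arn:aws:iam::123456789012:role/RedshiftFederatedRole'
SECRET_ARN 'arn:aws:secretsmanager:us-east-1:123456789012:secret:aurora-secret';

-- Aurora tables can then be queried directly from Redshift
SELECT * FROM aurora_schema.orders LIMIT 10;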
| stackoverflow | {
"language": "en",
"length": 221,
"provenance": "stackexchange_0000F.jsonl.gz:883031",
"question_score": "6",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44598515"
} |
6199946448e0d766ff7f48fdd896da8fd72eb19c | Stackoverflow Stackexchange
Q: Static polymorphism: function overloads vs template specializations Suppose I want to create a generic Serialize function that allows me to serialize built-in types, composed types and user-defined types. Then I see two ways to do it:
*
*Overload based:
*
*Serialize() is overloaded for all (supported) builtin-types
*function template overloads are added for vector/map/tuple which delegate to the Serialize overloads for the element types
*user-defined overloads are found with ADL
*Class template specialization based:
*
*one single Serialize function that delegates to SerializeHelper<T>()::serialize(...)
*SerializeHelper is specialized for all built-in types
*partial template specialization can be used to implement specialization for vector/map/tuple generically
*users can provide specializations for their own types
What would be the pros and cons of each approach?
Also how do they compare concerning compile-time performance?
Note: despite the similar title, this question is not a duplicate of Template Specialization VS Function Overloading because that question is about ADL vs function template specialization (and not class template specialization).
| Q: Static polymorphism: function overloads vs template specializations Suppose I want to create a generic Serialize function that allows me to serialize built-in types, composed types and user-defined types. Then I see two ways to do it:
*
*Overload based:
*
*Serialize() is overloaded for all (supported) builtin-types
*function template overloads are added for vector/map/tuple which delegate to the Serialize overloads for the element types
*user-defined overloads are found with ADL
*Class template specialization based:
*
*one single Serialize function that delegates to SerializeHelper<T>()::serialize(...)
*SerializeHelper is specialized for all built-in types
*partial template specialization can be used to implement specialization for vector/map/tuple generically
*users can provide specializations for their own types
What would be the pros and cons of each approach?
Also how do they compare concerning compile-time performance?
Note: despite the similar title, this question is not a duplicate of Template Specialization VS Function Overloading because that question is about ADL vs function template specialization (and not class template specialization).
| stackoverflow | {
"language": "en",
"length": 161,
"provenance": "stackexchange_0000F.jsonl.gz:883057",
"question_score": "3",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44598597"
} |
d00d7be050d1b47b77ce43232779df0d2cb03357 | Stackoverflow Stackexchange
Q: Netty multiple channels? I want to implement Slack API client with Netty. Slack has dozens of methods, each with different URL and response format.
Should I create a channel for each of this methods and reuse single bootstrap or should I use single channel for all the communications? I am doing this project to learn Netty, actually, and I cannot understand the strategy from the docs and examples (which are pretty basic).
With channel per API method I can use different handlers to process different datatypes, but with single channel I cannot do that, do I?
Can I store channel as an instance field (of a "client" class) or is it a short-lived object?
Also, what are the memory and CPU impacts of both ways?
A: For different slack API methods, they have different business logic, but the underlying transport is same. So, from my point of view, you should separate your http logic from business logic.
For example, you can use one handler to process http request/response, and the other handler to process slack method request/response
| Q: Netty multiple channels? I want to implement Slack API client with Netty. Slack has dozens of methods, each with different URL and response format.
Should I create a channel for each of this methods and reuse single bootstrap or should I use single channel for all the communications? I am doing this project to learn Netty, actually, and I cannot understand the strategy from the docs and examples (which are pretty basic).
With channel per API method I can use different handlers to process different datatypes, but with single channel I cannot do that, do I?
Can I store channel as an instance field (of a "client" class) or is it a short-lived object?
Also, what are the memory and CPU impacts of both ways?
A: For different slack API methods, they have different business logic, but the underlying transport is same. So, from my point of view, you should separate your http logic from business logic.
For example, you can use one handler to process http request/response, and the other handler to process slack method request/response
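A sketch of that separation in a single client pipeline — SlackMethodHandler stands in for a hypothetical business-logic handler:
import io.netty.bootstrap.Bootstrap;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioSocketChannel;
import io.netty.handler.codec.http.HttpClientCodec;
import io.netty.handler.codec.http.HttpObjectAggregator;

EventLoopGroup group = new NioEventLoopGroup();
Bootstrap b = new Bootstrap();
b.group(group)
 .channel(NioSocketChannel.class)
 .handler(new ChannelInitializer<SocketChannel>() {
     @Override
     protected void initChannel(SocketChannel ch) {
         ch.pipeline().addLast(new HttpClientCodec());             // HTTP transport
         ch.pipeline().addLast(new HttpObjectAggregator(1048576)); // aggregate full responses
         ch.pipeline().addLast(new SlackMethodHandler());          // Slack business logic
     }
 });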
| stackoverflow | {
"language": "en",
"length": 178,
"provenance": "stackexchange_0000F.jsonl.gz:883066",
"question_score": "3",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44598615"
} |
2b396f2bd5f43e9f217f7f4d196cf33625fba05a | Stackoverflow Stackexchange
Q: Regular Expression for any non-empty consecutive sequence of 0's enclosed between 1 I need to find all the groups in a String having consecutive sequence of 0's enclosed between 1
100001abc101 // 2 groups `100001` and `101`
1001ab010abc01001 // 2 groups `1001' and `1001`
1001010001 // 3 groups `1001`, `101` and `10001` --> This is a special case
My regex for this is 1(0+)1; it works well for the 1st and 2nd cases, but in the 3rd test case it's only matching 1001 and 10001, not 101.
Please suggest what I am missing.
The problem is that the next match starts from the character after the last matched group; it should be able to start from the matched character itself.
A: Try a look-behind and look-ahead instead, since you don't actually want to match the 1s:
/(?<=1)0+(?=1)/
https://regex101.com/r/IGygJj/3
| Q: Regular Expression for any non-empty consecutive sequence of 0's enclosed between 1 I need to find all the groups in a string that have a consecutive sequence of 0's enclosed between 1's
100001abc101 // 2 groups `100001` and `101`
1001ab010abc01001 // 2 groups `1001` and `1001`
1001010001 // 3 groups `1001`, `101` and `10001` --> This is a special case
My regex for this is 1(0+)1. It works well for the 1st and 2nd cases, but in the 3rd test case it only matches 1001 and 10001, not 101.
Please suggest what I am missing.
The problem is that the next match starts from the character after the last matched group; it should be able to start from the matched character itself.
A: Try a look-behind and look-ahead instead, since you don't actually want to match the 1s:
/(?<=1)0+(?=1)/
https://regex101.com/r/IGygJj/3
A: To find overlapping matches, you should use a capturing group inside a lookahead, like this:
(?=(10+1))
RegEx Demo
Since we are only asserting matches instead of consuming them, the regex engine is able to return all occurrences of 10+1 even if they overlap.
A: Use something like (?=(10+1))10+, where group 1 contains the sequence while not matching the last 1.
https://regex101.com/r/uH5OrS/1
In general, you'd want to move the position along with the last 10+. In this case it's not necessary, but I wouldn't get into the habit of not including it; it will bite you some day.
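A quick sanity check in Python (a sketch; the test strings are the ones from the question):
import re

pattern = re.compile(r'(?=(10+1))')  # zero-width assertion; group 1 captures the run
for s in ['100001abc101', '1001ab010abc01001', '1001010001']:
    print(s, '->', [m.group(1) for m in pattern.finditer(s)])
# '1001010001' -> ['1001', '101', '10001'], including the overlapping '101'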
| stackoverflow | {
"language": "en",
"length": 233,
"provenance": "stackexchange_0000F.jsonl.gz:883093",
"question_score": "3",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44598694"
} |
c81d7e2ff70397fe23cf2efaea084dda9c6765f3 | Stackoverflow Stackexchange
Q: python odo sql AssertionError: datashape must be Record type, got 0 * {...} I'm trying to import a CSV into MySQL using odo but am getting a datashape error.
My understanding is that datashape takes the format:
var * {
column: type
...
}
where var means a variable number of rows. I'm getting the following error:
AssertionError: datashape must be Record type, got 0 * {
tod: ?string,
interval: ?string,
iops: float64,
mb_per_sec: float64
}
I'm not sure where that row count of 0 is coming from. I've tried explicitly setting the datashape using dshape(), but I continue to get the same error.
Here's a stripped down version of the code that recreates the error:
from odo import odo
odo('test.csv', mysql_database_uri)
I'm running Ubuntu 16.04 and Python 3.6.1 using Conda.
Thanks for any input.
A: I had this error; I needed to specify the table:
# error
odo('data.csv', 'postgresql://usr:pwd@ip/db')
# works
odo('data.csv', 'postgresql://usr:pwd@ip/db::table')
| Q: python odo sql AssertionError: datashape must be Record type, got 0 * {...} I'm trying to import a CSV into MySQL using odo but am getting a datashape error.
My understanding is that datashape takes the format:
var * {
column: type
...
}
where var means a variable number of rows. I'm getting the following error:
AssertionError: datashape must be Record type, got 0 * {
tod: ?string,
interval: ?string,
iops: float64,
mb_per_sec: float64
}
I'm not sure where that row count of 0 is coming from. I've tried explicitly setting the datashape using dshape(), but I continue to get the same error.
Here's a stripped down version of the code that recreates the error:
from odo import odo
odo('test.csv', mysql_database_uri)
I'm running Ubuntu 16.04 and Python 3.6.1 using Conda.
Thanks for any input.
A: I had this error; I needed to specify the table:
# error
odo('data.csv', 'postgresql://usr:pwd@ip/db')
# works
odo('data.csv', 'postgresql://usr:pwd@ip/db::table')
A: Try replacing
odo('test.csv', mysql_database_uri)
with
odo(pandas.read_csv('test.csv'), mysql_database_uri)
A: Odo seems to be buggy and discontinued. As an alternative you can use d6tstack, which has fast pandas-to-SQL functionality because it uses native DB import commands. It supports Postgres, MySQL and MS SQL:
import glob
import d6tstack.combine_csv

cfg_uri_mysql = 'mysql+mysqlconnector://testusr:testpwd@localhost/testdb'
# optionally pass apply_after_read=<func> to preprocess each dataframe with pandas
d6tstack.combine_csv.CombinerCSV(glob.glob('*.csv')).to_mysql_combine(cfg_uri_mysql, 'table')
It is also particularly useful for importing multiple CSVs with data schema changes and/or preprocessing with pandas before writing to the db; see further down in the examples notebook.
| stackoverflow | {
"language": "en",
"length": 228,
"provenance": "stackexchange_0000F.jsonl.gz:883126",
"question_score": "5",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44598799"
} |
a8b12c724cd0e46af7cc7dceb45fe1fc6a60b91c | Stackoverflow Stackexchange
Q: What does request.GET.get mean? What does request.GET.get mean? I see something like this in Django
page = request.GET.get('page', 1)
which I think is connected to something like
<li><a href="?page={{ users.previous_page_number }}">«</a></li>
How do they work?
A: The request object contains information about the user's request: what data they've sent to the page, where they are coming from, etc.
request.GET contains the GET variables. These are what you see in your browser's address bar. The .get() method is a method used for dictionaries. What your snippet of code is doing is saying, "Get the value of a GET variable with name 'page', and if it doesn't exist, return 1".
Likewise, you will see request.POST used when a user submits a form.
You can read more about GET vs. POST here.
| Q: What does request.GET.get mean? What does request.GET.get mean? I see something like this in Django
page = request.GET.get('page', 1)
which I think is connected to something like
<li><a href="?page={{ users.previous_page_number }}">«</a></li>
How do they work?
A: The request object contains information about the user's request: what data they've sent to the page, where they are coming from, etc.
request.GET contains the GET variables. These are what you see in your browser's address bar. The .get() method is a method used for dictionaries. What your snippet of code is doing is saying, "Get the value of a GET variable with name 'page', and if it doesn't exist, return 1".
Likewise, you will see request.POST used when a user submits a form.
You can read more about GET vs. POST here.
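For context, here is a minimal sketch of a paginated view using this pattern (the model, page size, and template name are assumptions, not part of the original question):
from django.contrib.auth.models import User
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from django.shortcuts import render

def user_list(request):
    page = request.GET.get('page', 1)  # read ?page=... from the query string, default to 1
    paginator = Paginator(User.objects.all(), 25)
    try:
        users = paginator.page(page)
    except PageNotAnInteger:
        users = paginator.page(1)  # page wasn't a number: fall back to the first page
    except EmptyPage:
        users = paginator.page(paginator.num_pages)  # page out of range: serve the last page
    return render(request, 'users/list.html', {'users': users})
The users object handed to the template is what powers links like ?page={{ users.previous_page_number }}.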
A: request.GET is the dictionary of the GET variables in the HTTP request made to your server. For example:
www.google.com?thisIsAGetVarKey=3&thisIsAnotherOne=hello
request.GET would be: {"thisIsAGetVarKey": "3", "thisIsAnotherOne": "hello"} (note that query-string values arrive as strings)
Because request.GET is a dictionary, it has the .get() method, which retrieves the value for a key in the dictionary:
dict_a = {'number': 8, 'alphabet': 'A'}
print dict_a['number']       # prints 8
print dict_a.get('alphabet') # prints A
print dict_a['bob']          # throws KeyError
print dict_a.get('bob')      # prints None
print dict_a.get('bob', 8)   # prints 8 (the second argument is the default)
A: request.GET is the dictionary of the 'GET' variables in the HTTP request made to your server. For example:
www.google.com?thisIsAGetVarKey=3&thisIsAnotherOne=hello
request.GET would be: {"thisIsAGetVarKey": "3", "thisIsAnotherOne": "hello"} (note that query-string values arrive as strings)
Because request.GET is a dictionary, it has the .get() method, which retrieves the value for a key in the dictionary:
dict_a = {'age': 3}
print dict_a['age']          # prints 3
print dict_a.get('age')      # also prints 3
print dict_a['hello']        # throws KeyError
print dict_a.get('hello')    # prints None
print dict_a.get('hello', 3) # prints 3 (the second argument is the default)
| stackoverflow | {
"language": "en",
"length": 275,
"provenance": "stackexchange_0000F.jsonl.gz:883173",
"question_score": "14",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44598962"
} |
e14d02a245ca522bfd1fd298c6484a0f36276fe1 | Stackoverflow Stackexchange
Q: Instant messaging with kafka I'm trying to design an instant-messaging system, or more precisely a turn-based game, whose mechanism is the following:
Player "A" does something and sends information about his steps to player "B".
Player "B" receives this information, also does something, and responds back to player "A".
I'm wondering about the Kafka sink connector: can it be hosted outside of Kafka, just as a client? It seems a good way to avoid writing a lot of code and to get a solid mechanism for delivering messages to clients, with partitioning, schemas, etc.
I want to do something like the following, for instance:
Two clients are subscribed to one topic and exchange messages, with some information, between each other.
Is something like that possible?
Or, if it's not possible, I'm just thinking: what if I set up two listeners, one in the source connector and another in the sink, for sending and receiving messages, all managed by Kafka? Does that make sense? What do you think?
I'm just gathering ideas on how best to implement this, as fast as possible, with good performance, scalability, and minimal effort. :)
| Q: Instant messaging with kafka I'm trying to design an instant-messaging system, or more precisely a turn-based game, whose mechanism is the following:
Player "A" does something and sends information about his steps to player "B".
Player "B" receives this information, also does something, and responds back to player "A".
I'm wondering about the Kafka sink connector: can it be hosted outside of Kafka, just as a client? It seems a good way to avoid writing a lot of code and to get a solid mechanism for delivering messages to clients, with partitioning, schemas, etc.
I want to do something like the following, for instance:
Two clients are subscribed to one topic and exchange messages, with some information, between each other.
Is something like that possible?
Or, if it's not possible, I'm just thinking: what if I set up two listeners, one in the source connector and another in the sink, for sending and receiving messages, all managed by Kafka? Does that make sense? What do you think?
I'm just gathering ideas on how best to implement this, as fast as possible, with good performance, scalability, and minimal effort. :)
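To make the idea concrete, here is a minimal sketch of the pattern I have in mind (using the kafka-python client; the topic name, broker address, and group ids are assumptions):
from kafka import KafkaConsumer, KafkaProducer

TOPIC = 'game-moves'  # hypothetical topic name

# Player "A" publishes a move, keyed by player id
producer = KafkaProducer(bootstrap_servers='localhost:9092')
producer.send(TOPIC, key=b'player-a', value=b'{"move": "e2e4"}')
producer.flush()

# Player "B" consumes from the same topic; a distinct group_id per player
# ensures each player receives every message
consumer = KafkaConsumer(TOPIC,
                         bootstrap_servers='localhost:9092',
                         group_id='player-b')
for msg in consumer:
    print(msg.key, msg.value)  # react to the opponent's move, then respond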
| stackoverflow | {
"language": "en",
"length": 196,
"provenance": "stackexchange_0000F.jsonl.gz:883191",
"question_score": "3",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44599016"
} |
d9939871cf533e63cf5910b1c7ce1a942c8b1ae7 | Stackoverflow Stackexchange
Q: What benefits come from using Progressive Web Apps (PWA) guidelines? I was reading about the advantages of PWAs, but I really can't understand why we have to use this technology. Is this similar to a SPA, or is it something else entirely?
A: A SPA is something different from a PWA. You can view a PWA as an enhancement of your website. By adding a service worker and a manifest you can bring your website (SPA or not) out of the browser, make it work like a mobile app, and make it perform better.
| Q: What benefits come from using Progressive Web Apps (PWA) guidelines? I was reading about the advantages of PWAs, but I really can't understand why we have to use this technology. Is this similar to a SPA, or is it something else entirely?
A: A SPA is something different from a PWA. You can view a PWA as an enhancement of your website. By adding a service worker and a manifest you can bring your website (SPA or not) out of the browser, make it work like a mobile app, and make it perform better.
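As a rough sketch, registering a service worker takes only a few lines (the /sw.js path is an assumption; the worker script itself has to be served from your site):
// Feature-detect first, then register the service worker script
if ('serviceWorker' in navigator) {
  navigator.serviceWorker.register('/sw.js')
    .then(function (reg) { console.log('Service worker registered, scope:', reg.scope); })
    .catch(function (err) { console.error('Service worker registration failed:', err); });
}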
A: A Single Page Application (SPA) can be a Progressive Web App (PWA), but a PWA doesn't need to be a SPA. They are two different things. A PWA, as defined by Google, is:
Reliable - Load instantly and never show the downasaur, even in uncertain network conditions.
Fast - Respond quickly to user interactions with silky smooth animations and no janky scrolling.
Engaging - Feel like a natural app on the device, with an immersive user experience.
The Google PWA site is a good place to start learning about PWA and why and how you would build one.
https://developers.google.com/web/progressive-web-apps/
A SPA is simply a web app where a single page is loaded in the browser and all subsequent routing and page view rendering is handled on the client (browser) using JavaScript.
Properly combining the principles of PWA and SPA can result in a much improved user experience, especially on mobile phones with unreliable internet connections.
Probably the best example today of a well implemented PWA is Twitter Lite. Check it out here:
https://mobile.twitter.com/home
...and read about it here:
https://blog.twitter.com/en_us/topics/product/2017/introducing-twitter-lite.html
| stackoverflow | {
"language": "en",
"length": 270,
"provenance": "stackexchange_0000F.jsonl.gz:883194",
"question_score": "8",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44599026"
} |
5c13822a82c01679dda4507c79b3fc5a40c3622c | Stackoverflow Stackexchange
Q: TypeScript compile-time constants? I'm writing a library in TypeScript and I want to target both Node and Browser. Right now I'm running tsc twice with two different targets. That part is working OK.
However, a tiny portion of my code is target-specific. I need to be able to do something like:
if(BUILD_TARGET === 'node') {
// do something
} else {
// do something else
}
Is there any way I can inject these constants at compile-time so that they can either be optimized away by tsc itself, or via a pass through UglifyJS (or some such tool)?
A: https://github.com/Morglod/tsts
Currently working on a compile-time transformer:
const sum = (a: number, b: number) => a + b; // assumed helper, not shown in the original
const numbers = [ 1, 2, 3, 4, 5, 6, 7, 8 ];
const result = comptime(() => {
return numbers.reduce((total, x) => sum(total, x), 0);
});
to
const result = (() => { return (36); })();
| Q: TypeScript compile-time constants? I'm writing a library in TypeScript and I want to target both Node and Browser. Right now I'm running tsc twice with two different targets. That part is working OK.
However, a tiny portion of my code is target-specific. I need to be able to do something like:
if(BUILD_TARGET === 'node') {
// do something
} else {
// do something else
}
Is there any way I can inject these constants at compile-time so that they can either be optimized away by tsc itself, or via a pass through UglifyJS (or some such tool)?
A: https://github.com/Morglod/tsts
Currently working on a compile-time transformer:
const sum = (a: number, b: number) => a + b; // assumed helper, not shown in the original
const numbers = [ 1, 2, 3, 4, 5, 6, 7, 8 ];
const result = comptime(() => {
return numbers.reduce((total, x) => sum(total, x), 0);
});
to
const result = (() => { return (36); })();
A: The closest thing to compile-time constants in TypeScript is const enums - according to the documentation, "unlike regular enums they are completely removed during compilation".
This comes at a price, however - the biggest caveat is that const enums are incompatible with isolatedModules = true mode, which is often used by bundlers.
Also, TypeScript won't do dead code elimination; you will have to use some other tool to remove the if (0 === 1) branch afterwards.
You will need two different tsconfig.json files; each will include a file with a different definition of the same enum type.
tsconfig.browser.json
{
"files": [
"t.ts",
"target-enum-browser.d.ts"
]
}
target-enum-browser.d.ts
declare module 'target-enum' {
export const enum Target { Node, Browser, Current = Browser }
}
tsconfig.node.json
{
"files": [
"t.ts",
"target-enum-node.d.ts"
]
}
target-enum-node.d.ts
declare module 'target-enum' {
export const enum Target { Node, Browser, Current = Node }
}
t.ts
import {Target} from 'target-enum';
if (Target.Current === Target.Browser) {
console.log('browser');
} else if (Target.Current === Target.Node) {
console.log('node');
} else {
console.log('?');
}
compiled with tsc --project tsconfig.browser.json
"use strict";
exports.__esModule = true;
if (1 /* Current */ === 1 /* Browser */) {
console.log('browser');
}
else if (1 /* Current */ === 0 /* Node */) {
console.log('node');
}
else {
console.log('?');
}
compiled with tsc --project tsconfig.node.json
"use strict";
exports.__esModule = true;
if (0 /* Current */ === 1 /* Browser */) {
console.log('browser');
}
else if (0 /* Current */ === 0 /* Node */) {
console.log('node');
}
else {
console.log('?');
}
| stackoverflow | {
"language": "en",
"length": 399,
"provenance": "stackexchange_0000F.jsonl.gz:883229",
"question_score": "11",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44599160"
} |
a130f18d07c7895b68bc0c7106b6d9f6a622c2a8 | Stackoverflow Stackexchange
Q: ruby 2.4.1 ERROR: Failed to build gem native extension I upgraded Ruby from 2.1.. to 2.4.1 on Windows and installed the MSYS2 toolkit.
However, each time I install a gem that invokes a Makefile, I get "ERROR: Failed to build gem native extension":
c:\>gem install oj
Temporarily enhancing PATH for MSYS/MINGW...
Building native extensions. This could take a while...
ERROR: Error installing oj
ERROR: Failed to build gem native extension.
current directory: C:/tools/ruby24/lib/ruby/gems/2.4.0/gems/oj-3.1.3/ext/oj
C:/tools/ruby24/bin/ruby.exe -r ./siteconf20170616-1724-1fw22px.rb extconf.rb
Creating Makefile for ruby version 2.4.1 on x64-mingw32
creating Makefile
extconf.rb:68:in ``': No such file or directory - make clean
(Errno::ENOENT)
from extconf.rb:68:in `<main>'
extconf failed, exit code 1
I am also getting similar errors when installing some other gems, like json.
| Q: ruby 2.4.1 ERROR: Failed to build gem native extension I upgraded Ruby from 2.1.. to 2.4.1 on Windows and installed the MSYS2 toolkit.
However, each time I install a gem that invokes a Makefile, I get "ERROR: Failed to build gem native extension":
c:\>gem install oj
Temporarily enhancing PATH for MSYS/MINGW...
Building native extensions. This could take a while...
ERROR: Error installing oj
ERROR: Failed to build gem native extension.
current directory: C:/tools/ruby24/lib/ruby/gems/2.4.0/gems/oj-3.1.3/ext/oj
C:/tools/ruby24/bin/ruby.exe -r ./siteconf20170616-1724-1fw22px.rb extconf.rb
Creating Makefile for ruby version 2.4.1 on x64-mingw32
creating Makefile
extconf.rb:68:in ``': No such file or directory - make clean
(Errno::ENOENT)
from extconf.rb:68:in `<main>'
extconf failed, exit code 1
I am also getting similar errors when installing some other gems, like json.
| stackoverflow | {
"language": "en",
"length": 119,
"provenance": "stackexchange_0000F.jsonl.gz:883304",
"question_score": "4",
"source": "stackexchange",
"timestamp": "2023-03-29T00:00:00",
"url": "https://stackoverflow.com/questions/44599394"
} |