Columns: query (string, lengths 9 to 9.05k) | document (string, lengths 10 to 222k) | negatives (sequence of strings, lengths 19 to 20) | metadata (dict)
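Each row pairs a query (a SWIG signature or docstring) with the matching generated Python source as the document, plus 19 to 20 hard-negative code snippets. A minimal sketch of consuming one row as (anchor, positive, negative) triplets, assuming the dump is loadable with the Hugging Face datasets library; the dataset path below is a hypothetical placeholder:

from datasets import load_dataset

ds = load_dataset("user/itk-wrapper-triplets", split="train")  # hypothetical path
row = ds[0]
# metadata["objective"]["triplet"] lists the column roles: each entry of
# row["negatives"] forms one (query, document, negative) training triplet.
triplets = [(row["query"], row["document"], neg) for neg in row["negatives"]]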
itkTernaryAddImageFilterIF3IF3IF3IF3_Superclass_cast(itkLightObject obj) -> itkTernaryAddImageFilterIF3IF3IF3IF3_Superclass
def itkTernaryAddImageFilterIF3IF3IF3IF3_Superclass_cast(*args):
    return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIF3IF3IF3IF3_Superclass_cast(*args)
[ "def itkNotImageFilterIF3IF3_Superclass_cast(obj: 'itkLightObject') -> \"itkNotImageFilterIF3IF3_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterIF3IF3_Superclass_cast(obj)", "def itkTernaryAddImageFilterID3ID3ID3ID3_Superclass_cast(*args):\n return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterID3ID3ID3ID3_Superclass_cast(*args)", "def itkTernaryAddImageFilterIUL3IUL3IUL3IUL3_Superclass_cast(*args):\n return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIUL3IUL3IUL3IUL3_Superclass_cast(*args)", "def itkTernaryAddImageFilterIUC3IUC3IUC3IUC3_Superclass_cast(*args):\n return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIUC3IUC3IUC3IUC3_Superclass_cast(*args)", "def itkNotImageFilterISS3ISS3_Superclass_cast(obj: 'itkLightObject') -> \"itkNotImageFilterISS3ISS3_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterISS3ISS3_Superclass_cast(obj)", "def itkSLICImageFilterIF3IULL3_Superclass_cast(obj: 'itkLightObject') -> \"itkSLICImageFilterIF3IULL3_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterIF3IULL3_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkNotImageFilterIF3IF3_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterIF3IF3_Superclass_cast(obj)", "def itkNotImageFilterIUC3IUC3_Superclass_cast(obj: 'itkLightObject') -> \"itkNotImageFilterIUC3IUC3_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterIUC3IUC3_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkSLICImageFilterIF3IULL3_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterIF3IULL3_Superclass_cast(obj)", "def itkSLICImageFilterVIF3IULL3_Superclass_cast(obj: 'itkLightObject') -> \"itkSLICImageFilterVIF3IULL3_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterVIF3IULL3_Superclass_cast(obj)", "def itkSLICImageFilterVIUS3IULL3_Superclass_cast(obj: 'itkLightObject') -> \"itkSLICImageFilterVIUS3IULL3_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterVIUS3IULL3_Superclass_cast(obj)", "def itkSLICImageFilterIF3IUS3_Superclass_cast(obj: 'itkLightObject') -> \"itkSLICImageFilterIF3IUS3_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterIF3IUS3_Superclass_cast(obj)", "def itkProjectedLandweberDeconvolutionImageFilterIF3IF3_Superclass_cast(obj: 'itkLightObject') -> \"itkProjectedLandweberDeconvolutionImageFilterIF3IF3_Superclass *\":\n return _itkProjectedLandweberDeconvolutionImageFilterPython.itkProjectedLandweberDeconvolutionImageFilterIF3IF3_Superclass_cast(obj)", "def itkSLICImageFilterVISS3IULL3_Superclass_cast(obj: 'itkLightObject') -> \"itkSLICImageFilterVISS3IULL3_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterVISS3IULL3_Superclass_cast(obj)", "def itkNotImageFilterIUS3IUS3_Superclass_cast(obj: 'itkLightObject') -> \"itkNotImageFilterIUS3IUS3_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterIUS3IUS3_Superclass_cast(obj)", "def itkSLICImageFilterVIUC3IULL3_Superclass_cast(obj: 'itkLightObject') -> \"itkSLICImageFilterVIUC3IULL3_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterVIUC3IULL3_Superclass_cast(obj)", "def itkSLICImageFilterVIF3IUS3_Superclass_cast(obj: 'itkLightObject') -> \"itkSLICImageFilterVIF3IUS3_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterVIF3IUS3_Superclass_cast(obj)", "def itkSLICImageFilterVIUS3IUS3_Superclass_cast(obj: 'itkLightObject') -> \"itkSLICImageFilterVIUS3IUS3_Superclass *\":\n return 
_itkSLICImageFilterPython.itkSLICImageFilterVIUS3IUS3_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkSLICImageFilterVIUS3IULL3_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterVIUS3IULL3_Superclass_cast(obj)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
New() -> itkTernaryAddImageFilterIUC2IUC2IUC2IUC2_Superclass Create a new object of the class itkTernaryAddImageFilterIUC2IUC2IUC2IUC2_Superclass and set the input and the parameters if some named or non-named arguments are passed to that method. New() tries to assign all the non-named parameters to the inputs of the new object in order: the first non-named parameter to the first input, etc. The named parameters are used by calling the method with the same name prefixed by 'Set'.
def New(*args, **kargs):
    obj = itkTernaryAddImageFilterIUC2IUC2IUC2IUC2_Superclass.__New_orig__()
    import itkTemplate
    itkTemplate.New(obj, *args, **kargs)
    return obj
[ "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUC3IUC3IUC3IUC3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUL2IUL2IUL2IUL2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterID2ID2ID2ID2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIF2IF2IF2IF2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUL3IUL3IUL3IUL3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUS2IUS2IUS2IUS2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkAddImageFilterIUC2IUC2IUC2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterID3ID3ID3ID3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkCosImageFilterIF2IF2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkAddImageFilterIUL2IUL2IUL2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkAddImageFilterIUC3IUC3IUC3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkScalarChanAndVeseSparseLevelSetImageFilterIF2IF2IF2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkNotImageFilterIUC2IUC2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUS3IUS3IUS3IUS3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkBoundedReciprocalImageFilterIUC2IUC2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkSubtractImageFilterIUC2IUC2IUC2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUC2IUC2IUC2IUC2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIF3IF3IF3IF3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkAddImageFilterIF2IF2IF2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
itkTernaryAddImageFilterIUC2IUC2IUC2IUC2_Superclass_cast(itkLightObject obj) -> itkTernaryAddImageFilterIUC2IUC2IUC2IUC2_Superclass
def itkTernaryAddImageFilterIUC2IUC2IUC2IUC2_Superclass_cast(*args):
    return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIUC2IUC2IUC2IUC2_Superclass_cast(*args)
[ "def itkNotImageFilterIUC2IUC2_Superclass_cast(obj: 'itkLightObject') -> \"itkNotImageFilterIUC2IUC2_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterIUC2IUC2_Superclass_cast(obj)", "def itkNotImageFilterIF2IF2_Superclass_cast(obj: 'itkLightObject') -> \"itkNotImageFilterIF2IF2_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterIF2IF2_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkNotImageFilterIUC2IUC2_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterIUC2IUC2_Superclass_cast(obj)", "def itkTernaryAddImageFilterIUL2IUL2IUL2IUL2_Superclass_cast(*args):\n return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIUL2IUL2IUL2IUL2_Superclass_cast(*args)", "def itkTernaryAddImageFilterIUC3IUC3IUC3IUC3_Superclass_cast(*args):\n return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIUC3IUC3IUC3IUC3_Superclass_cast(*args)", "def itkNotImageFilterIUS2IUS2_Superclass_cast(obj: 'itkLightObject') -> \"itkNotImageFilterIUS2IUS2_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterIUS2IUS2_Superclass_cast(obj)", "def itkSLICImageFilterVIUC2IULL2_Superclass_cast(obj: 'itkLightObject') -> \"itkSLICImageFilterVIUC2IULL2_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterVIUC2IULL2_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkSLICImageFilterVIUC2IULL2_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterVIUC2IULL2_Superclass_cast(obj)", "def itkProjectedLandweberDeconvolutionImageFilterIUC2IUC2_Superclass_cast(obj: 'itkLightObject') -> \"itkProjectedLandweberDeconvolutionImageFilterIUC2IUC2_Superclass *\":\n return _itkProjectedLandweberDeconvolutionImageFilterPython.itkProjectedLandweberDeconvolutionImageFilterIUC2IUC2_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkNotImageFilterIF2IF2_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterIF2IF2_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkSLICImageFilterIUC2IULL2_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterIUC2IULL2_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkSLICImageFilterVIUS2IULL2_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterVIUS2IULL2_Superclass_cast(obj)", "def itkSLICImageFilterVIUS2IULL2_Superclass_cast(obj: 'itkLightObject') -> \"itkSLICImageFilterVIUS2IULL2_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterVIUS2IULL2_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkNotImageFilterIUC3IUC3_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterIUC3IUC3_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkProjectedLandweberDeconvolutionImageFilterIUC2IUC2_Superclass *\":\n return _itkProjectedLandweberDeconvolutionImageFilterPython.itkProjectedLandweberDeconvolutionImageFilterIUC2IUC2_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkSLICImageFilterIF2IULL2_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterIF2IULL2_Superclass_cast(obj)", "def itkSLICImageFilterIF2IULL2_Superclass_cast(obj: 'itkLightObject') -> \"itkSLICImageFilterIF2IULL2_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterIF2IULL2_Superclass_cast(obj)", "def itkSLICImageFilterIUC2IULL2_Superclass_cast(obj: 'itkLightObject') -> \"itkSLICImageFilterIUC2IULL2_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterIUC2IULL2_Superclass_cast(obj)", "def itkNotImageFilterISS2ISS2_Superclass_cast(obj: 
'itkLightObject') -> \"itkNotImageFilterISS2ISS2_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterISS2ISS2_Superclass_cast(obj)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
New() -> itkTernaryAddImageFilterIUC3IUC3IUC3IUC3_Superclass Create a new object of the class itkTernaryAddImageFilterIUC3IUC3IUC3IUC3_Superclass and set the input and the parameters if some named or non-named arguments are passed to that method. New() tries to assign all the non-named parameters to the inputs of the new object in order: the first non-named parameter to the first input, etc. The named parameters are used by calling the method with the same name prefixed by 'Set'.
def New(*args, **kargs):
    obj = itkTernaryAddImageFilterIUC3IUC3IUC3IUC3_Superclass.__New_orig__()
    import itkTemplate
    itkTemplate.New(obj, *args, **kargs)
    return obj
[ "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterID3ID3ID3ID3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUL3IUL3IUL3IUL3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIF3IF3IF3IF3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUC2IUC2IUC2IUC2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUS3IUS3IUS3IUS3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUL2IUL2IUL2IUL2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkAddImageFilterIUC3IUC3IUC3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkAddImageFilterIF3IF3IF3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkAddImageFilterIUL3IUL3IUL3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkCosImageFilterIF3IF3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkAddImageFilterID3ID3ID3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkScalarChanAndVeseSparseLevelSetImageFilterIF3IF3IF3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkScalarChanAndVeseSparseLevelSetImageFilterID3ID3ID3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUS2IUS2IUS2IUS2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterID2ID2ID2ID2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkNotImageFilterIUC3IUC3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkSLICImageFilterIF3IULL3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkCosImageFilterID3ID3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkNotImageFilterISS3ISS3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
itkTernaryAddImageFilterIUC3IUC3IUC3IUC3_Superclass_cast(itkLightObject obj) -> itkTernaryAddImageFilterIUC3IUC3IUC3IUC3_Superclass
def itkTernaryAddImageFilterIUC3IUC3IUC3IUC3_Superclass_cast(*args):
    return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIUC3IUC3IUC3IUC3_Superclass_cast(*args)
[ "def itkNotImageFilterIUC3IUC3_Superclass_cast(obj: 'itkLightObject') -> \"itkNotImageFilterIUC3IUC3_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterIUC3IUC3_Superclass_cast(obj)", "def itkTernaryAddImageFilterIUL3IUL3IUL3IUL3_Superclass_cast(*args):\n return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIUL3IUL3IUL3IUL3_Superclass_cast(*args)", "def itkNotImageFilterIF3IF3_Superclass_cast(obj: 'itkLightObject') -> \"itkNotImageFilterIF3IF3_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterIF3IF3_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkNotImageFilterIUC3IUC3_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterIUC3IUC3_Superclass_cast(obj)", "def itkNotImageFilterISS3ISS3_Superclass_cast(obj: 'itkLightObject') -> \"itkNotImageFilterISS3ISS3_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterISS3ISS3_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkSLICImageFilterVIUC3IULL3_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterVIUC3IULL3_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkNotImageFilterIF3IF3_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterIF3IF3_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkSLICImageFilterVIUS3IULL3_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterVIUS3IULL3_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkSLICImageFilterIUC3IULL3_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterIUC3IULL3_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkSLICImageFilterIF3IULL3_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterIF3IULL3_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkNotImageFilterISS3ISS3_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterISS3ISS3_Superclass_cast(obj)", "def itkNotImageFilterIUC2IUC2_Superclass_cast(obj: 'itkLightObject') -> \"itkNotImageFilterIUC2IUC2_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterIUC2IUC2_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkSLICImageFilterVIUC3IUS3_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterVIUC3IUS3_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkNotImageFilterIUS3IUS3_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterIUS3IUS3_Superclass_cast(obj)", "def itkNotImageFilterIUS3IUS3_Superclass_cast(obj: 'itkLightObject') -> \"itkNotImageFilterIUS3IUS3_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterIUS3IUS3_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkNotImageFilterIUC2IUC2_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterIUC2IUC2_Superclass_cast(obj)", "def itkSLICImageFilterVIUS3IULL3_Superclass_cast(obj: 'itkLightObject') -> \"itkSLICImageFilterVIUS3IULL3_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterVIUS3IULL3_Superclass_cast(obj)", "def itkTernaryAddImageFilterID3ID3ID3ID3_Superclass_cast(*args):\n return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterID3ID3ID3ID3_Superclass_cast(*args)", "def itkNotImageFilterIF2IF2_Superclass_cast(obj: 'itkLightObject') -> \"itkNotImageFilterIF2IF2_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterIF2IF2_Superclass_cast(obj)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
New() -> itkTernaryAddImageFilterIUL2IUL2IUL2IUL2_Superclass Create a new object of the class itkTernaryAddImageFilterIUL2IUL2IUL2IUL2_Superclass and set the input and the parameters if some named or non-named arguments are passed to that method. New() tries to assign all the non-named parameters to the inputs of the new object in order: the first non-named parameter to the first input, etc. The named parameters are used by calling the method with the same name prefixed by 'Set'.
def New(*args, **kargs):
    obj = itkTernaryAddImageFilterIUL2IUL2IUL2IUL2_Superclass.__New_orig__()
    import itkTemplate
    itkTemplate.New(obj, *args, **kargs)
    return obj
[ "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUL3IUL3IUL3IUL3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUC2IUC2IUC2IUC2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterID2ID2ID2ID2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUC3IUC3IUC3IUC3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIF2IF2IF2IF2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUS2IUS2IUS2IUS2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterID3ID3ID3ID3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUS3IUS3IUS3IUS3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIF3IF3IF3IF3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkAddImageFilterIUL2IUL2IUL2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkCosImageFilterIF2IF2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkAddImageFilterIUL3IUL3IUL3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkAddImageFilterIF2IF2IF2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkRegistrationParameterScalesFromPhysicalShiftEBPSTPSMPSF3_Superclass_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkAddImageFilterIUS2IUS2IUS2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkAddImageFilterIUC2IUC2IUC2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkRegistrationParameterScalesFromPhysicalShiftEDPSTPSMPSF3_Superclass_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkNotImageFilterISS2ISS2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkScalarChanAndVeseSparseLevelSetImageFilterIF2IF2IF2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
itkTernaryAddImageFilterIUL2IUL2IUL2IUL2_Superclass_cast(itkLightObject obj) -> itkTernaryAddImageFilterIUL2IUL2IUL2IUL2_Superclass
def itkTernaryAddImageFilterIUL2IUL2IUL2IUL2_Superclass_cast(*args):
    return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIUL2IUL2IUL2IUL2_Superclass_cast(*args)
[ "def itkNotImageFilterIF2IF2_Superclass_cast(obj: 'itkLightObject') -> \"itkNotImageFilterIF2IF2_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterIF2IF2_Superclass_cast(obj)", "def itkNotImageFilterIUS2IUS2_Superclass_cast(obj: 'itkLightObject') -> \"itkNotImageFilterIUS2IUS2_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterIUS2IUS2_Superclass_cast(obj)", "def itkNotImageFilterIUC2IUC2_Superclass_cast(obj: 'itkLightObject') -> \"itkNotImageFilterIUC2IUC2_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterIUC2IUC2_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkNotImageFilterIF2IF2_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterIF2IF2_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkSLICImageFilterVIUS2IULL2_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterVIUS2IULL2_Superclass_cast(obj)", "def itkTernaryAddImageFilterIUL3IUL3IUL3IUL3_Superclass_cast(*args):\n return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIUL3IUL3IUL3IUL3_Superclass_cast(*args)", "def cast(obj: 'itkLightObject') -> \"itkSLICImageFilterIF2IULL2_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterIF2IULL2_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkNotImageFilterIUC2IUC2_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterIUC2IUC2_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkSLICImageFilterVIUC2IULL2_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterVIUC2IULL2_Superclass_cast(obj)", "def itkNotImageFilterISS2ISS2_Superclass_cast(obj: 'itkLightObject') -> \"itkNotImageFilterISS2ISS2_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterISS2ISS2_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkNotImageFilterIUS2IUS2_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterIUS2IUS2_Superclass_cast(obj)", "def itkSLICImageFilterVIUS2IULL2_Superclass_cast(obj: 'itkLightObject') -> \"itkSLICImageFilterVIUS2IULL2_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterVIUS2IULL2_Superclass_cast(obj)", "def itkSLICImageFilterIF2IULL2_Superclass_cast(obj: 'itkLightObject') -> \"itkSLICImageFilterIF2IULL2_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterIF2IULL2_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkNotImageFilterIUC3IUC3_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterIUC3IUC3_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkNotImageFilterISS2ISS2_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterISS2ISS2_Superclass_cast(obj)", "def itkTernaryAddImageFilterIUC3IUC3IUC3IUC3_Superclass_cast(*args):\n return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIUC3IUC3IUC3IUC3_Superclass_cast(*args)", "def itkSLICImageFilterVIUC2IULL2_Superclass_cast(obj: 'itkLightObject') -> \"itkSLICImageFilterVIUC2IULL2_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterVIUC2IULL2_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkSLICImageFilterIUC2IULL2_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterIUC2IULL2_Superclass_cast(obj)", "def itkTernaryAddImageFilterIUC2IUC2IUC2IUC2_Superclass_cast(*args):\n return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIUC2IUC2IUC2IUC2_Superclass_cast(*args)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
New() -> itkTernaryAddImageFilterIUL3IUL3IUL3IUL3_Superclass Create a new object of the class itkTernaryAddImageFilterIUL3IUL3IUL3IUL3_Superclass and set the input and the parameters if some named or non-named arguments are passed to that method. New() tries to assign all the non-named parameters to the inputs of the new object in order: the first non-named parameter to the first input, etc. The named parameters are used by calling the method with the same name prefixed by 'Set'.
def New(*args, **kargs):
    obj = itkTernaryAddImageFilterIUL3IUL3IUL3IUL3_Superclass.__New_orig__()
    import itkTemplate
    itkTemplate.New(obj, *args, **kargs)
    return obj
[ "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterID3ID3ID3ID3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUC3IUC3IUC3IUC3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIF3IF3IF3IF3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUL2IUL2IUL2IUL2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUS3IUS3IUS3IUS3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUC2IUC2IUC2IUC2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkAddImageFilterIUL3IUL3IUL3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkAddImageFilterIF3IF3IF3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkAddImageFilterID3ID3ID3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkAddImageFilterIUC3IUC3IUC3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterID2ID2ID2ID2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUS2IUS2IUS2IUS2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkSLICImageFilterIF3IULL3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkCosImageFilterIF3IF3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkScalarChanAndVeseSparseLevelSetImageFilterIF3IF3IF3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkScalarChanAndVeseSparseLevelSetImageFilterID3ID3ID3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkNotImageFilterISS3ISS3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkSLICImageFilterVIUS3IULL3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIF2IF2IF2IF2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
itkTernaryAddImageFilterIUL3IUL3IUL3IUL3_Superclass_cast(itkLightObject obj) -> itkTernaryAddImageFilterIUL3IUL3IUL3IUL3_Superclass
def itkTernaryAddImageFilterIUL3IUL3IUL3IUL3_Superclass_cast(*args):
    return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIUL3IUL3IUL3IUL3_Superclass_cast(*args)
[ "def itkNotImageFilterIF3IF3_Superclass_cast(obj: 'itkLightObject') -> \"itkNotImageFilterIF3IF3_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterIF3IF3_Superclass_cast(obj)", "def itkNotImageFilterISS3ISS3_Superclass_cast(obj: 'itkLightObject') -> \"itkNotImageFilterISS3ISS3_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterISS3ISS3_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkNotImageFilterIUC3IUC3_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterIUC3IUC3_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkSLICImageFilterVIUS3IULL3_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterVIUS3IULL3_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkNotImageFilterIF3IF3_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterIF3IF3_Superclass_cast(obj)", "def itkNotImageFilterIUC3IUC3_Superclass_cast(obj: 'itkLightObject') -> \"itkNotImageFilterIUC3IUC3_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterIUC3IUC3_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkSLICImageFilterIF3IULL3_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterIF3IULL3_Superclass_cast(obj)", "def itkTernaryAddImageFilterIUC3IUC3IUC3IUC3_Superclass_cast(*args):\n return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIUC3IUC3IUC3IUC3_Superclass_cast(*args)", "def cast(obj: 'itkLightObject') -> \"itkNotImageFilterISS3ISS3_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterISS3ISS3_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkSLICImageFilterVIUC3IULL3_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterVIUC3IULL3_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkSLICImageFilterIUC3IULL3_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterIUC3IULL3_Superclass_cast(obj)", "def itkSLICImageFilterVIUS3IULL3_Superclass_cast(obj: 'itkLightObject') -> \"itkSLICImageFilterVIUS3IULL3_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterVIUS3IULL3_Superclass_cast(obj)", "def itkSLICImageFilterIF3IULL3_Superclass_cast(obj: 'itkLightObject') -> \"itkSLICImageFilterIF3IULL3_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterIF3IULL3_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkNotImageFilterIUS3IUS3_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterIUS3IUS3_Superclass_cast(obj)", "def itkNotImageFilterIUS3IUS3_Superclass_cast(obj: 'itkLightObject') -> \"itkNotImageFilterIUS3IUS3_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterIUS3IUS3_Superclass_cast(obj)", "def itkNotImageFilterIF2IF2_Superclass_cast(obj: 'itkLightObject') -> \"itkNotImageFilterIF2IF2_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterIF2IF2_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkSLICImageFilterISS3IULL3_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterISS3IULL3_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkSLICImageFilterIF3IUS3_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterIF3IUS3_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkSLICImageFilterVIUC3IUS3_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterVIUC3IUS3_Superclass_cast(obj)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
New() -> itkTernaryAddImageFilterIUS2IUS2IUS2IUS2_Superclass Create a new object of the class itkTernaryAddImageFilterIUS2IUS2IUS2IUS2_Superclass and set the input and the parameters if some named or non-named arguments are passed to that method. New() tries to assign all the non-named parameters to the inputs of the new object in order: the first non-named parameter to the first input, etc. The named parameters are used by calling the method with the same name prefixed by 'Set'.
def New(*args, **kargs):
    obj = itkTernaryAddImageFilterIUS2IUS2IUS2IUS2_Superclass.__New_orig__()
    import itkTemplate
    itkTemplate.New(obj, *args, **kargs)
    return obj
[ "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterID2ID2ID2ID2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkAddImageFilterIUS2IUS2IUS2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUS3IUS3IUS3IUS3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUL2IUL2IUL2IUL2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUC2IUC2IUC2IUC2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkSLICImageFilterIF2IUS2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkSLICImageFilterIUS2IUS2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIF2IF2IF2IF2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkNotImageFilterIUS2IUS2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkSLICImageFilterISS2IUS2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUL3IUL3IUL3IUL3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUC3IUC3IUC3IUC3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkSLICImageFilterVIUS2IUS2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkSLICImageFilterIUC2IUS2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkSubtractImageFilterIUS2IUS2IUS2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkSLICImageFilterVISS2IUS2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterID3ID3ID3ID3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkBoundedReciprocalImageFilterIUS2IUS2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkSLICImageFilterVIUC2IUS2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
itkTernaryAddImageFilterIUS2IUS2IUS2IUS2_Superclass_cast(itkLightObject obj) -> itkTernaryAddImageFilterIUS2IUS2IUS2IUS2_Superclass
def itkTernaryAddImageFilterIUS2IUS2IUS2IUS2_Superclass_cast(*args):
    return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIUS2IUS2IUS2IUS2_Superclass_cast(*args)
[ "def itkNotImageFilterIUS2IUS2_Superclass_cast(obj: 'itkLightObject') -> \"itkNotImageFilterIUS2IUS2_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterIUS2IUS2_Superclass_cast(obj)", "def itkSLICImageFilterIF2IUS2_Superclass_cast(obj: 'itkLightObject') -> \"itkSLICImageFilterIF2IUS2_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterIF2IUS2_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkNotImageFilterIUS2IUS2_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterIUS2IUS2_Superclass_cast(obj)", "def itkSLICImageFilterVIUC2IUS2_Superclass_cast(obj: 'itkLightObject') -> \"itkSLICImageFilterVIUC2IUS2_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterVIUC2IUS2_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkSLICImageFilterIF2IUS2_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterIF2IUS2_Superclass_cast(obj)", "def itkSLICImageFilterVIUS2IUS2_Superclass_cast(obj: 'itkLightObject') -> \"itkSLICImageFilterVIUS2IUS2_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterVIUS2IUS2_Superclass_cast(obj)", "def itkProjectedLandweberDeconvolutionImageFilterIUS2IUS2_Superclass_cast(obj: 'itkLightObject') -> \"itkProjectedLandweberDeconvolutionImageFilterIUS2IUS2_Superclass *\":\n return _itkProjectedLandweberDeconvolutionImageFilterPython.itkProjectedLandweberDeconvolutionImageFilterIUS2IUS2_Superclass_cast(obj)", "def itkSLICImageFilterIUS2IUS2_Superclass_cast(obj: 'itkLightObject') -> \"itkSLICImageFilterIUS2IUS2_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterIUS2IUS2_Superclass_cast(obj)", "def itkSLICImageFilterVIF2IUS2_Superclass_cast(obj: 'itkLightObject') -> \"itkSLICImageFilterVIF2IUS2_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterVIF2IUS2_Superclass_cast(obj)", "def itkNotImageFilterIF2IF2_Superclass_cast(obj: 'itkLightObject') -> \"itkNotImageFilterIF2IF2_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterIF2IF2_Superclass_cast(obj)", "def itkNotImageFilterIUS3IUS3_Superclass_cast(obj: 'itkLightObject') -> \"itkNotImageFilterIUS3IUS3_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterIUS3IUS3_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkSLICImageFilterIUS2IULL2_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterIUS2IULL2_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkNotImageFilterIUS3IUS3_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterIUS3IUS3_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkSLICImageFilterVIUC2IUS2_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterVIUC2IUS2_Superclass_cast(obj)", "def itkTernaryAddImageFilterIUL2IUL2IUL2IUL2_Superclass_cast(*args):\n return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIUL2IUL2IUL2IUL2_Superclass_cast(*args)", "def itkSLICImageFilterVISS2IUS2_Superclass_cast(obj: 'itkLightObject') -> \"itkSLICImageFilterVISS2IUS2_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterVISS2IUS2_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkSLICImageFilterVIUS2IUS2_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterVIUS2IUS2_Superclass_cast(obj)", "def itkNotImageFilterIUC2IUC2_Superclass_cast(obj: 'itkLightObject') -> \"itkNotImageFilterIUC2IUC2_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterIUC2IUC2_Superclass_cast(obj)", "def 
itkSLICImageFilterIUC2IUS2_Superclass_cast(obj: 'itkLightObject') -> \"itkSLICImageFilterIUC2IUS2_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterIUC2IUS2_Superclass_cast(obj)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
New() -> itkTernaryAddImageFilterIUS3IUS3IUS3IUS3_Superclass Create a new object of the class itkTernaryAddImageFilterIUS3IUS3IUS3IUS3_Superclass and set the input and the parameters if some named or non-named arguments are passed to that method. New() tries to assign all the non-named parameters to the inputs of the new object in order: the first non-named parameter to the first input, etc. The named parameters are used by calling the method with the same name prefixed by 'Set'.
def New(*args, **kargs):
    obj = itkTernaryAddImageFilterIUS3IUS3IUS3IUS3_Superclass.__New_orig__()
    import itkTemplate
    itkTemplate.New(obj, *args, **kargs)
    return obj
[ "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterID3ID3ID3ID3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUS2IUS2IUS2IUS2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkAddImageFilterIUS3IUS3IUS3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUL3IUL3IUL3IUL3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkSLICImageFilterIF3IUS3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIF3IF3IF3IF3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUC3IUC3IUC3IUC3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkSLICImageFilterISS3IUS3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkSLICImageFilterIUS3IUS3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUL2IUL2IUL2IUL2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkNotImageFilterIUS3IUS3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkSLICImageFilterVIUS3IUS3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkSLICImageFilterIUC3IUS3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkSLICImageFilterVISS3IUS3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUC2IUC2IUC2IUC2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkSubtractImageFilterIUS3IUS3IUS3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkSLICImageFilterVIF3IUS3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkSLICImageFilterVIUC3IUS3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkBoundedReciprocalImageFilterIUS3IUS3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
itkTernaryAddImageFilterIUS3IUS3IUS3IUS3_Superclass_cast(itkLightObject obj) -> itkTernaryAddImageFilterIUS3IUS3IUS3IUS3_Superclass
def itkTernaryAddImageFilterIUS3IUS3IUS3IUS3_Superclass_cast(*args):
    return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIUS3IUS3IUS3IUS3_Superclass_cast(*args)
[ "def itkNotImageFilterIUS3IUS3_Superclass_cast(obj: 'itkLightObject') -> \"itkNotImageFilterIUS3IUS3_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterIUS3IUS3_Superclass_cast(obj)", "def itkNotImageFilterIUS2IUS2_Superclass_cast(obj: 'itkLightObject') -> \"itkNotImageFilterIUS2IUS2_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterIUS2IUS2_Superclass_cast(obj)", "def itkSLICImageFilterIF3IUS3_Superclass_cast(obj: 'itkLightObject') -> \"itkSLICImageFilterIF3IUS3_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterIF3IUS3_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkNotImageFilterIUS3IUS3_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterIUS3IUS3_Superclass_cast(obj)", "def itkSLICImageFilterVIUS3IUS3_Superclass_cast(obj: 'itkLightObject') -> \"itkSLICImageFilterVIUS3IUS3_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterVIUS3IUS3_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkSLICImageFilterIF3IUS3_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterIF3IUS3_Superclass_cast(obj)", "def itkSLICImageFilterVISS3IUS3_Superclass_cast(obj: 'itkLightObject') -> \"itkSLICImageFilterVISS3IUS3_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterVISS3IUS3_Superclass_cast(obj)", "def itkTernaryAddImageFilterIUL3IUL3IUL3IUL3_Superclass_cast(*args):\n return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIUL3IUL3IUL3IUL3_Superclass_cast(*args)", "def itkNotImageFilterIF3IF3_Superclass_cast(obj: 'itkLightObject') -> \"itkNotImageFilterIF3IF3_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterIF3IF3_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkNotImageFilterIUS2IUS2_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterIUS2IUS2_Superclass_cast(obj)", "def itkNotImageFilterISS3ISS3_Superclass_cast(obj: 'itkLightObject') -> \"itkNotImageFilterISS3ISS3_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterISS3ISS3_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkSLICImageFilterVIUS3IUS3_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterVIUS3IUS3_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkSLICImageFilterVIUC3IUS3_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterVIUC3IUS3_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkSLICImageFilterIUS3IULL3_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterIUS3IULL3_Superclass_cast(obj)", "def itkSLICImageFilterVIF3IUS3_Superclass_cast(obj: 'itkLightObject') -> \"itkSLICImageFilterVIF3IUS3_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterVIF3IUS3_Superclass_cast(obj)", "def itkTernaryAddImageFilterIUC3IUC3IUC3IUC3_Superclass_cast(*args):\n return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIUC3IUC3IUC3IUC3_Superclass_cast(*args)", "def itkSLICImageFilterVIUC3IUS3_Superclass_cast(obj: 'itkLightObject') -> \"itkSLICImageFilterVIUC3IUS3_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterVIUC3IUS3_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkSLICImageFilterIUS3IUS3_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterIUS3IUS3_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkSLICImageFilterISS3IUS3_Superclass *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterISS3IUS3_Superclass_cast(obj)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
New() -> itkTernaryAddImageFilterID2ID2ID2ID2 Create a new object of the class itkTernaryAddImageFilterID2ID2ID2ID2 and set the input and the parameters if some named or non-named arguments are passed to that method. New() tries to assign all the non-named parameters to the inputs of the new object in order: the first non-named parameter to the first input, etc. The named parameters are used by calling the method with the same name prefixed by 'Set'.
def New(*args, **kargs):
    obj = itkTernaryAddImageFilterID2ID2ID2ID2.__New_orig__()
    import itkTemplate
    itkTemplate.New(obj, *args, **kargs)
    return obj
[ "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterID2ID2ID2ID2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIF2IF2IF2IF2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIF2IF2IF2IF2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUL2IUL2IUL2IUL2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkBilateralImageFilterID2ID2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkAddImageFilterID2ID2ID2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkAddImageFilterIF2IF2IF2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkAddImageFilterID2ID2ID2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkAddImageFilterIF2IF2IF2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkHConcaveImageFilterID2ID2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkFiniteDifferenceImageFilterID2ID2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUL2IUL2IUL2IUL2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkSLICImageFilterIF2IULL2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkSLICImageFilterIF2IULL2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUS2IUS2IUS2IUS2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUC2IUC2IUC2IUC2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkConstrainedValueAdditionImageFilterIF2IF2IF2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUS2IUS2IUS2IUS2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkBilateralImageFilterIF2IF2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
itkTernaryAddImageFilterID2ID2ID2ID2_cast(itkLightObject obj) -> itkTernaryAddImageFilterID2ID2ID2ID2
def itkTernaryAddImageFilterID2ID2ID2ID2_cast(*args):
    return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterID2ID2ID2ID2_cast(*args)
[ "def itkConstrainedValueAdditionImageFilterIF2IF2IF2_cast(obj: 'itkLightObject') -> \"itkConstrainedValueAdditionImageFilterIF2IF2IF2 *\":\n return _itkConstrainedValueAdditionImageFilterPython.itkConstrainedValueAdditionImageFilterIF2IF2IF2_cast(obj)", "def itkAbsoluteValueDifferenceImageFilterIF2IF2IF2_cast(obj: 'itkLightObject') -> \"itkAbsoluteValueDifferenceImageFilterIF2IF2IF2 *\":\n return _itkAbsoluteValueDifferenceImageFilterPython.itkAbsoluteValueDifferenceImageFilterIF2IF2IF2_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkConstrainedValueAdditionImageFilterIF2IF2IF2 *\":\n return _itkConstrainedValueAdditionImageFilterPython.itkConstrainedValueAdditionImageFilterIF2IF2IF2_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkAbsoluteValueDifferenceImageFilterIF2IF2IF2 *\":\n return _itkAbsoluteValueDifferenceImageFilterPython.itkAbsoluteValueDifferenceImageFilterIF2IF2IF2_cast(obj)", "def itkNotImageFilterIF2IF2_cast(obj: 'itkLightObject') -> \"itkNotImageFilterIF2IF2 *\":\n return _itkNotImageFilterPython.itkNotImageFilterIF2IF2_cast(obj)", "def itkTernaryAddImageFilterIF2IF2IF2IF2_cast(*args):\n return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIF2IF2IF2IF2_cast(*args)", "def cast(obj: 'itkLightObject') -> \"itkNotImageFilterIF2IF2 *\":\n return _itkNotImageFilterPython.itkNotImageFilterIF2IF2_cast(obj)", "def itkExpImageFilterIF2IF2_cast(obj: 'itkLightObject') -> \"itkExpImageFilterIF2IF2 *\":\n return _itkExpImageFilterPython.itkExpImageFilterIF2IF2_cast(obj)", "def itkSLICImageFilterIF2IULL2_cast(obj: 'itkLightObject') -> \"itkSLICImageFilterIF2IULL2 *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterIF2IULL2_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkSLICImageFilterIF2IULL2 *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterIF2IULL2_cast(obj)", "def itkTernaryAddImageFilterIUL2IUL2IUL2IUL2_cast(*args):\n return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIUL2IUL2IUL2IUL2_cast(*args)", "def cast(obj: 'itkLightObject') -> \"itkExpImageFilterIF2IF2 *\":\n return _itkExpImageFilterPython.itkExpImageFilterIF2IF2_cast(obj)", "def itkNoiseBaseImageFilterIF2IF2_cast(obj: 'itkLightObject') -> \"itkNoiseBaseImageFilterIF2IF2 *\":\n return _itkNoiseBaseImageFilterPython.itkNoiseBaseImageFilterIF2IF2_cast(obj)", "def itkSquaredDifferenceImageFilterIF2IF2IF2_cast(obj: 'itkLightObject') -> \"itkSquaredDifferenceImageFilterIF2IF2IF2 *\":\n return _itkSquaredDifferenceImageFilterPython.itkSquaredDifferenceImageFilterIF2IF2IF2_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkHuangThresholdImageFilterIF2IUS2 *\":\n return _itkHuangThresholdImageFilterPython.itkHuangThresholdImageFilterIF2IUS2_cast(obj)", "def itkHuangThresholdImageFilterIF2IUS2_cast(obj: 'itkLightObject') -> \"itkHuangThresholdImageFilterIF2IUS2 *\":\n return _itkHuangThresholdImageFilterPython.itkHuangThresholdImageFilterIF2IUS2_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkSquaredDifferenceImageFilterIF2IF2IF2 *\":\n return _itkSquaredDifferenceImageFilterPython.itkSquaredDifferenceImageFilterIF2IF2IF2_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkAbsoluteValueDifferenceImageFilterIUC2IUC2IUC2 *\":\n return _itkAbsoluteValueDifferenceImageFilterPython.itkAbsoluteValueDifferenceImageFilterIUC2IUC2IUC2_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkHuangThresholdImageFilterIF2ISS2 *\":\n return _itkHuangThresholdImageFilterPython.itkHuangThresholdImageFilterIF2ISS2_cast(obj)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
New() > itkTernaryAddImageFilterID3ID3ID3ID3 Create a new object of the class itkTernaryAddImageFilterID3ID3ID3ID3 and set its inputs and parameters if any named or non-named arguments are passed to that method. New() assigns the non-named parameters to the inputs of the new object in order: the first non-named parameter becomes the first input, and so on. The named parameters are applied by calling the method of the same name prefixed by 'Set'.
def New(*args, **kargs):
    obj = itkTernaryAddImageFilterID3ID3ID3ID3.__New_orig__()
    import itkTemplate
    itkTemplate.New(obj, *args, **kargs)
    return obj
[ "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIF3IF3IF3IF3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIF3IF3IF3IF3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterID3ID3ID3ID3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUL3IUL3IUL3IUL3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkAddImageFilterIF3IF3IF3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkAddImageFilterIF3IF3IF3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkConstrainedValueAdditionImageFilterIF3IF3IF3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkAddImageFilterID3ID3ID3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUS3IUS3IUS3IUS3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUL3IUL3IUL3IUL3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUC3IUC3IUC3IUC3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkScalarChanAndVeseSparseLevelSetImageFilterID3ID3ID3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUS3IUS3IUS3IUS3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkBilateralImageFilterID3ID3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkScalarChanAndVeseSparseLevelSetImageFilterID3ID3ID3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkFiniteDifferenceImageFilterID3ID3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkAddImageFilterID3ID3ID3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkBilateralImageFilterIF3IF3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkHConcaveImageFilterID3ID3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
itkTernaryAddImageFilterID3ID3ID3ID3_cast(itkLightObject obj) > itkTernaryAddImageFilterID3ID3ID3ID3
def itkTernaryAddImageFilterID3ID3ID3ID3_cast(*args):
    return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterID3ID3ID3ID3_cast(*args)
[ "def itkTernaryAddImageFilterIF3IF3IF3IF3_cast(*args):\n return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIF3IF3IF3IF3_cast(*args)", "def itkNotImageFilterIF3IF3_cast(obj: 'itkLightObject') -> \"itkNotImageFilterIF3IF3 *\":\n return _itkNotImageFilterPython.itkNotImageFilterIF3IF3_cast(obj)", "def itkConstrainedValueAdditionImageFilterIF3IF3IF3_cast(obj: 'itkLightObject') -> \"itkConstrainedValueAdditionImageFilterIF3IF3IF3 *\":\n return _itkConstrainedValueAdditionImageFilterPython.itkConstrainedValueAdditionImageFilterIF3IF3IF3_cast(obj)", "def itkExpImageFilterIF3IF3_cast(obj: 'itkLightObject') -> \"itkExpImageFilterIF3IF3 *\":\n return _itkExpImageFilterPython.itkExpImageFilterIF3IF3_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkConstrainedValueAdditionImageFilterIF3IF3IF3 *\":\n return _itkConstrainedValueAdditionImageFilterPython.itkConstrainedValueAdditionImageFilterIF3IF3IF3_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkAbsoluteValueDifferenceImageFilterIF3IF3IF3 *\":\n return _itkAbsoluteValueDifferenceImageFilterPython.itkAbsoluteValueDifferenceImageFilterIF3IF3IF3_cast(obj)", "def itkAbsoluteValueDifferenceImageFilterIF3IF3IF3_cast(obj: 'itkLightObject') -> \"itkAbsoluteValueDifferenceImageFilterIF3IF3IF3 *\":\n return _itkAbsoluteValueDifferenceImageFilterPython.itkAbsoluteValueDifferenceImageFilterIF3IF3IF3_cast(obj)", "def itkTernaryAddImageFilterIUL3IUL3IUL3IUL3_cast(*args):\n return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIUL3IUL3IUL3IUL3_cast(*args)", "def itkNoiseBaseImageFilterIF3IF3_cast(obj: 'itkLightObject') -> \"itkNoiseBaseImageFilterIF3IF3 *\":\n return _itkNoiseBaseImageFilterPython.itkNoiseBaseImageFilterIF3IF3_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkExpImageFilterIF3IF3 *\":\n return _itkExpImageFilterPython.itkExpImageFilterIF3IF3_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkNotImageFilterIF3IF3 *\":\n return _itkNotImageFilterPython.itkNotImageFilterIF3IF3_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkSLICImageFilterIF3IULL3 *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterIF3IULL3_cast(obj)", "def itkSLICImageFilterIF3IULL3_cast(obj: 'itkLightObject') -> \"itkSLICImageFilterIF3IULL3 *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterIF3IULL3_cast(obj)", "def itkTernaryAddImageFilterIUC3IUC3IUC3IUC3_cast(*args):\n return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIUC3IUC3IUC3IUC3_cast(*args)", "def cast(obj: 'itkLightObject') -> \"itkHuangThresholdImageFilterIF3IUS3 *\":\n return _itkHuangThresholdImageFilterPython.itkHuangThresholdImageFilterIF3IUS3_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkHuangThresholdImageFilterIF3ISS3 *\":\n return _itkHuangThresholdImageFilterPython.itkHuangThresholdImageFilterIF3ISS3_cast(obj)", "def itkHuangThresholdImageFilterIF3ISS3_cast(obj: 'itkLightObject') -> \"itkHuangThresholdImageFilterIF3ISS3 *\":\n return _itkHuangThresholdImageFilterPython.itkHuangThresholdImageFilterIF3ISS3_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkNoiseBaseImageFilterIF3IF3 *\":\n return _itkNoiseBaseImageFilterPython.itkNoiseBaseImageFilterIF3IF3_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkProjectedLandweberDeconvolutionImageFilterIF3IF3 *\":\n return _itkProjectedLandweberDeconvolutionImageFilterPython.itkProjectedLandweberDeconvolutionImageFilterIF3IF3_cast(obj)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
New() > itkTernaryAddImageFilterIF2IF2IF2IF2 Create a new object of the class itkTernaryAddImageFilterIF2IF2IF2IF2 and set its inputs and parameters if any named or non-named arguments are passed to that method. New() assigns the non-named parameters to the inputs of the new object in order: the first non-named parameter becomes the first input, and so on. The named parameters are applied by calling the method of the same name prefixed by 'Set'.
def New(*args, **kargs):
    obj = itkTernaryAddImageFilterIF2IF2IF2IF2.__New_orig__()
    import itkTemplate
    itkTemplate.New(obj, *args, **kargs)
    return obj
[ "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIF2IF2IF2IF2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterID2ID2ID2ID2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkAddImageFilterIF2IF2IF2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterID2ID2ID2ID2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkAddImageFilterIF2IF2IF2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkBilateralImageFilterIF2IF2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkConstrainedValueAdditionImageFilterIF2IF2IF2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUL2IUL2IUL2IUL2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkHConcaveImageFilterIF2IF2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkExpImageFilterIF2IF2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkFiniteDifferenceImageFilterIF2IF2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkSLICImageFilterIF2IULL2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkNotImageFilterIF2IF2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkCosImageFilterIF2IF2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkSquaredDifferenceImageFilterIF2IF2IF2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUC2IUC2IUC2IUC2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkSLICImageFilterIF2IULL2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkGrayscaleMorphologicalOpeningImageFilterIF2IF2SE2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkBilateralImageFilterID2ID2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
itkTernaryAddImageFilterIF2IF2IF2IF2_cast(itkLightObject obj) > itkTernaryAddImageFilterIF2IF2IF2IF2
def itkTernaryAddImageFilterIF2IF2IF2IF2_cast(*args):
    return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIF2IF2IF2IF2_cast(*args)
[ "def itkNotImageFilterIF2IF2_cast(obj: 'itkLightObject') -> \"itkNotImageFilterIF2IF2 *\":\n return _itkNotImageFilterPython.itkNotImageFilterIF2IF2_cast(obj)", "def itkConstrainedValueAdditionImageFilterIF2IF2IF2_cast(obj: 'itkLightObject') -> \"itkConstrainedValueAdditionImageFilterIF2IF2IF2 *\":\n return _itkConstrainedValueAdditionImageFilterPython.itkConstrainedValueAdditionImageFilterIF2IF2IF2_cast(obj)", "def itkExpImageFilterIF2IF2_cast(obj: 'itkLightObject') -> \"itkExpImageFilterIF2IF2 *\":\n return _itkExpImageFilterPython.itkExpImageFilterIF2IF2_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkConstrainedValueAdditionImageFilterIF2IF2IF2 *\":\n return _itkConstrainedValueAdditionImageFilterPython.itkConstrainedValueAdditionImageFilterIF2IF2IF2_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkNotImageFilterIF2IF2 *\":\n return _itkNotImageFilterPython.itkNotImageFilterIF2IF2_cast(obj)", "def itkNoiseBaseImageFilterIF2IF2_cast(obj: 'itkLightObject') -> \"itkNoiseBaseImageFilterIF2IF2 *\":\n return _itkNoiseBaseImageFilterPython.itkNoiseBaseImageFilterIF2IF2_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkExpImageFilterIF2IF2 *\":\n return _itkExpImageFilterPython.itkExpImageFilterIF2IF2_cast(obj)", "def itkAbsoluteValueDifferenceImageFilterIF2IF2IF2_cast(obj: 'itkLightObject') -> \"itkAbsoluteValueDifferenceImageFilterIF2IF2IF2 *\":\n return _itkAbsoluteValueDifferenceImageFilterPython.itkAbsoluteValueDifferenceImageFilterIF2IF2IF2_cast(obj)", "def itkBinaryContourImageFilterIF2IF2_cast(obj: 'itkLightObject') -> \"itkBinaryContourImageFilterIF2IF2 *\":\n return _itkBinaryContourImageFilterPython.itkBinaryContourImageFilterIF2IF2_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkNoiseBaseImageFilterIF2IF2 *\":\n return _itkNoiseBaseImageFilterPython.itkNoiseBaseImageFilterIF2IF2_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkAbsoluteValueDifferenceImageFilterIF2IF2IF2 *\":\n return _itkAbsoluteValueDifferenceImageFilterPython.itkAbsoluteValueDifferenceImageFilterIF2IF2IF2_cast(obj)", "def itkIsolatedConnectedImageFilterIF2IF2_cast(obj: 'itkLightObject') -> \"itkIsolatedConnectedImageFilterIF2IF2 *\":\n return _itkIsolatedConnectedImageFilterPython.itkIsolatedConnectedImageFilterIF2IF2_cast(obj)", "def itkShotNoiseImageFilterIF2IF2_cast(obj: 'itkLightObject') -> \"itkShotNoiseImageFilterIF2IF2 *\":\n return _itkShotNoiseImageFilterPython.itkShotNoiseImageFilterIF2IF2_cast(obj)", "def itkResampleImageFilterIF2IF2_cast(obj: 'itkLightObject') -> \"itkResampleImageFilterIF2IF2 *\":\n return _itkResampleImageFilterPython.itkResampleImageFilterIF2IF2_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkSLICImageFilterIF2IULL2 *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterIF2IULL2_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkHuangThresholdImageFilterIF2IUS2 *\":\n return _itkHuangThresholdImageFilterPython.itkHuangThresholdImageFilterIF2IUS2_cast(obj)", "def itkSquareImageFilterIF2IF2_cast(obj: 'itkLightObject') -> \"itkSquareImageFilterIF2IF2 *\":\n return _itkSquareImageFilterPython.itkSquareImageFilterIF2IF2_cast(obj)", "def itkSLICImageFilterIF2IULL2_cast(obj: 'itkLightObject') -> \"itkSLICImageFilterIF2IULL2 *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterIF2IULL2_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkHuangThresholdImageFilterIF2ISS2 *\":\n return _itkHuangThresholdImageFilterPython.itkHuangThresholdImageFilterIF2ISS2_cast(obj)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
New() > itkTernaryAddImageFilterIF3IF3IF3IF3 Create a new object of the class itkTernaryAddImageFilterIF3IF3IF3IF3 and set its inputs and parameters if any named or non-named arguments are passed to that method. New() assigns the non-named parameters to the inputs of the new object in order: the first non-named parameter becomes the first input, and so on. The named parameters are applied by calling the method of the same name prefixed by 'Set'.
def New(*args, **kargs):
    obj = itkTernaryAddImageFilterIF3IF3IF3IF3.__New_orig__()
    import itkTemplate
    itkTemplate.New(obj, *args, **kargs)
    return obj
[ "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIF3IF3IF3IF3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterID3ID3ID3ID3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkAddImageFilterIF3IF3IF3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkConstrainedValueAdditionImageFilterIF3IF3IF3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterID3ID3ID3ID3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkAddImageFilterIF3IF3IF3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkBilateralImageFilterIF3IF3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUL3IUL3IUL3IUL3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkHConcaveImageFilterIF3IF3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkCosImageFilterIF3IF3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkFiniteDifferenceImageFilterIF3IF3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkScalarChanAndVeseSparseLevelSetImageFilterIF3IF3IF3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkNotImageFilterIF3IF3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkExpImageFilterIF3IF3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkNoiseBaseImageFilterIF3IF3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUS3IUS3IUS3IUS3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUC3IUC3IUC3IUC3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkCosImageFilterIF3IF3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkScalarChanAndVeseSparseLevelSetImageFilterIF3IF3IF3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
itkTernaryAddImageFilterIF3IF3IF3IF3_cast(itkLightObject obj) > itkTernaryAddImageFilterIF3IF3IF3IF3
def itkTernaryAddImageFilterIF3IF3IF3IF3_cast(*args):
    return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIF3IF3IF3IF3_cast(*args)
[ "def itkNotImageFilterIF3IF3_cast(obj: 'itkLightObject') -> \"itkNotImageFilterIF3IF3 *\":\n return _itkNotImageFilterPython.itkNotImageFilterIF3IF3_cast(obj)", "def itkExpImageFilterIF3IF3_cast(obj: 'itkLightObject') -> \"itkExpImageFilterIF3IF3 *\":\n return _itkExpImageFilterPython.itkExpImageFilterIF3IF3_cast(obj)", "def itkNoiseBaseImageFilterIF3IF3_cast(obj: 'itkLightObject') -> \"itkNoiseBaseImageFilterIF3IF3 *\":\n return _itkNoiseBaseImageFilterPython.itkNoiseBaseImageFilterIF3IF3_cast(obj)", "def itkConstrainedValueAdditionImageFilterIF3IF3IF3_cast(obj: 'itkLightObject') -> \"itkConstrainedValueAdditionImageFilterIF3IF3IF3 *\":\n return _itkConstrainedValueAdditionImageFilterPython.itkConstrainedValueAdditionImageFilterIF3IF3IF3_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkNotImageFilterIF3IF3 *\":\n return _itkNotImageFilterPython.itkNotImageFilterIF3IF3_cast(obj)", "def itkTernaryAddImageFilterID3ID3ID3ID3_cast(*args):\n return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterID3ID3ID3ID3_cast(*args)", "def cast(obj: 'itkLightObject') -> \"itkConstrainedValueAdditionImageFilterIF3IF3IF3 *\":\n return _itkConstrainedValueAdditionImageFilterPython.itkConstrainedValueAdditionImageFilterIF3IF3IF3_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkExpImageFilterIF3IF3 *\":\n return _itkExpImageFilterPython.itkExpImageFilterIF3IF3_cast(obj)", "def itkTernaryAddImageFilterIUL3IUL3IUL3IUL3_cast(*args):\n return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIUL3IUL3IUL3IUL3_cast(*args)", "def itkHuangThresholdImageFilterIF3ISS3_cast(obj: 'itkLightObject') -> \"itkHuangThresholdImageFilterIF3ISS3 *\":\n return _itkHuangThresholdImageFilterPython.itkHuangThresholdImageFilterIF3ISS3_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkHuangThresholdImageFilterIF3IUS3 *\":\n return _itkHuangThresholdImageFilterPython.itkHuangThresholdImageFilterIF3IUS3_cast(obj)", "def itkTernaryAddImageFilterIUC3IUC3IUC3IUC3_cast(*args):\n return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIUC3IUC3IUC3IUC3_cast(*args)", "def itkHuangThresholdImageFilterIF3IUS3_cast(obj: 'itkLightObject') -> \"itkHuangThresholdImageFilterIF3IUS3 *\":\n return _itkHuangThresholdImageFilterPython.itkHuangThresholdImageFilterIF3IUS3_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkHuangThresholdImageFilterIF3ISS3 *\":\n return _itkHuangThresholdImageFilterPython.itkHuangThresholdImageFilterIF3ISS3_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkNoiseBaseImageFilterIF3IF3 *\":\n return _itkNoiseBaseImageFilterPython.itkNoiseBaseImageFilterIF3IF3_cast(obj)", "def itkShotNoiseImageFilterIF3IF3_cast(obj: 'itkLightObject') -> \"itkShotNoiseImageFilterIF3IF3 *\":\n return _itkShotNoiseImageFilterPython.itkShotNoiseImageFilterIF3IF3_cast(obj)", "def itkSLICImageFilterIF3IULL3_cast(obj: 'itkLightObject') -> \"itkSLICImageFilterIF3IULL3 *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterIF3IULL3_cast(obj)", "def itkBinaryContourImageFilterIF3IF3_cast(obj: 'itkLightObject') -> \"itkBinaryContourImageFilterIF3IF3 *\":\n return _itkBinaryContourImageFilterPython.itkBinaryContourImageFilterIF3IF3_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkSLICImageFilterIF3IULL3 *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterIF3IULL3_cast(obj)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
New() > itkTernaryAddImageFilterIUC2IUC2IUC2IUC2 Create a new object of the class itkTernaryAddImageFilterIUC2IUC2IUC2IUC2 and set its inputs and parameters if any named or non-named arguments are passed to that method. New() assigns the non-named parameters to the inputs of the new object in order: the first non-named parameter becomes the first input, and so on. The named parameters are applied by calling the method of the same name prefixed by 'Set'.
def New(*args, **kargs):
    obj = itkTernaryAddImageFilterIUC2IUC2IUC2IUC2.__New_orig__()
    import itkTemplate
    itkTemplate.New(obj, *args, **kargs)
    return obj
[ "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUC2IUC2IUC2IUC2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUL2IUL2IUL2IUL2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIF2IF2IF2IF2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIF2IF2IF2IF2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUL2IUL2IUL2IUL2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkAddImageFilterIUC2IUC2IUC2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkConstrainedValueAdditionImageFilterIUC2IUC2IUC2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUC3IUC3IUC3IUC3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterID2ID2ID2ID2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkAddImageFilterIUC2IUC2IUC2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkHConcaveImageFilterIUC2IUC2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterID2ID2ID2ID2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkBilateralImageFilterIUC2IUC2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUC3IUC3IUC3IUC3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkHuangThresholdImageFilterIUC2IUC2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUS2IUS2IUS2IUS2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUS2IUS2IUS2IUS2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkExpImageFilterIUC2IUC2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkHConcaveImageFilterIUL2IUL2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
itkTernaryAddImageFilterIUC2IUC2IUC2IUC2_cast(itkLightObject obj) > itkTernaryAddImageFilterIUC2IUC2IUC2IUC2
def itkTernaryAddImageFilterIUC2IUC2IUC2IUC2_cast(*args):
    return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIUC2IUC2IUC2IUC2_cast(*args)
[ "def itkAbsoluteValueDifferenceImageFilterIUC2IUC2IUC2_cast(obj: 'itkLightObject') -> \"itkAbsoluteValueDifferenceImageFilterIUC2IUC2IUC2 *\":\n return _itkAbsoluteValueDifferenceImageFilterPython.itkAbsoluteValueDifferenceImageFilterIUC2IUC2IUC2_cast(obj)", "def itkConstrainedValueAdditionImageFilterIUC2IUC2IUC2_cast(obj: 'itkLightObject') -> \"itkConstrainedValueAdditionImageFilterIUC2IUC2IUC2 *\":\n return _itkConstrainedValueAdditionImageFilterPython.itkConstrainedValueAdditionImageFilterIUC2IUC2IUC2_cast(obj)", "def itkNotImageFilterIUC2IUC2_cast(obj: 'itkLightObject') -> \"itkNotImageFilterIUC2IUC2 *\":\n return _itkNotImageFilterPython.itkNotImageFilterIUC2IUC2_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkConstrainedValueAdditionImageFilterIUC2IUC2IUC2 *\":\n return _itkConstrainedValueAdditionImageFilterPython.itkConstrainedValueAdditionImageFilterIUC2IUC2IUC2_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkAbsoluteValueDifferenceImageFilterIUC2IUC2IUC2 *\":\n return _itkAbsoluteValueDifferenceImageFilterPython.itkAbsoluteValueDifferenceImageFilterIUC2IUC2IUC2_cast(obj)", "def itkHuangThresholdImageFilterIUC2IUC2_cast(obj: 'itkLightObject') -> \"itkHuangThresholdImageFilterIUC2IUC2 *\":\n return _itkHuangThresholdImageFilterPython.itkHuangThresholdImageFilterIUC2IUC2_cast(obj)", "def itkHuangThresholdImageFilterIF2IUC2_cast(obj: 'itkLightObject') -> \"itkHuangThresholdImageFilterIF2IUC2 *\":\n return _itkHuangThresholdImageFilterPython.itkHuangThresholdImageFilterIF2IUC2_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkHuangThresholdImageFilterIF2IUC2 *\":\n return _itkHuangThresholdImageFilterPython.itkHuangThresholdImageFilterIF2IUC2_cast(obj)", "def itkNoiseBaseImageFilterIUC2IUC2_cast(obj: 'itkLightObject') -> \"itkNoiseBaseImageFilterIUC2IUC2 *\":\n return _itkNoiseBaseImageFilterPython.itkNoiseBaseImageFilterIUC2IUC2_cast(obj)", "def itkExpImageFilterIUC2IUC2_cast(obj: 'itkLightObject') -> \"itkExpImageFilterIUC2IUC2 *\":\n return _itkExpImageFilterPython.itkExpImageFilterIUC2IUC2_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkNotImageFilterIUC2IUC2 *\":\n return _itkNotImageFilterPython.itkNotImageFilterIUC2IUC2_cast(obj)", "def itkHuangThresholdImageFilterIF2IUS2_cast(obj: 'itkLightObject') -> \"itkHuangThresholdImageFilterIF2IUS2 *\":\n return _itkHuangThresholdImageFilterPython.itkHuangThresholdImageFilterIF2IUS2_cast(obj)", "def itkTernaryAddImageFilterIUL2IUL2IUL2IUL2_cast(*args):\n return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIUL2IUL2IUL2IUL2_cast(*args)", "def cast(obj: 'itkLightObject') -> \"itkConstrainedValueAdditionImageFilterIF2IF2IF2 *\":\n return _itkConstrainedValueAdditionImageFilterPython.itkConstrainedValueAdditionImageFilterIF2IF2IF2_cast(obj)", "def itkConstrainedValueAdditionImageFilterIF2IF2IF2_cast(obj: 'itkLightObject') -> \"itkConstrainedValueAdditionImageFilterIF2IF2IF2 *\":\n return _itkConstrainedValueAdditionImageFilterPython.itkConstrainedValueAdditionImageFilterIF2IF2IF2_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkHuangThresholdImageFilterIF2IUS2 *\":\n return _itkHuangThresholdImageFilterPython.itkHuangThresholdImageFilterIF2IUS2_cast(obj)", "def itkLabelVotingImageFilterIUC2IUC2_cast(obj: 'itkLightObject') -> \"itkLabelVotingImageFilterIUC2IUC2 *\":\n return _itkLabelVotingImageFilterPython.itkLabelVotingImageFilterIUC2IUC2_cast(obj)", "def itkBinaryContourImageFilterIUC2IUC2_cast(obj: 'itkLightObject') -> \"itkBinaryContourImageFilterIUC2IUC2 *\":\n return 
_itkBinaryContourImageFilterPython.itkBinaryContourImageFilterIUC2IUC2_cast(obj)", "def itkLabelStatisticsImageFilterIUC2IUC2_cast(obj: 'itkLightObject') -> \"itkLabelStatisticsImageFilterIUC2IUC2 *\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUC2IUC2_cast(obj)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
New() > itkTernaryAddImageFilterIUC3IUC3IUC3IUC3 Create a new object of the class itkTernaryAddImageFilterIUC3IUC3IUC3IUC3 and set its inputs and parameters if any named or non-named arguments are passed to that method. New() assigns the non-named parameters to the inputs of the new object in order: the first non-named parameter becomes the first input, and so on. The named parameters are applied by calling the method of the same name prefixed by 'Set'.
def New(*args, **kargs):
    obj = itkTernaryAddImageFilterIUC3IUC3IUC3IUC3.__New_orig__()
    import itkTemplate
    itkTemplate.New(obj, *args, **kargs)
    return obj
[ "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIF3IF3IF3IF3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIF3IF3IF3IF3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUL3IUL3IUL3IUL3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUC3IUC3IUC3IUC3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterID3ID3ID3ID3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterID3ID3ID3ID3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUL3IUL3IUL3IUL3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUS3IUS3IUS3IUS3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUS3IUS3IUS3IUS3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkAddImageFilterIUC3IUC3IUC3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkConstrainedValueAdditionImageFilterIF3IF3IF3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkConstrainedValueAdditionImageFilterIUC3IUC3IUC3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkHConcaveImageFilterIUC3IUC3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkBilateralImageFilterIUC3IUC3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkScalarChanAndVeseSparseLevelSetImageFilterIF3IF3IF3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkHuangThresholdImageFilterIUC3IUC3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkScalarChanAndVeseSparseLevelSetImageFilterIF3IF3IF3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkHConcaveImageFilterIUL3IUL3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkHuangThresholdImageFilterIF3IUC3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
itkTernaryAddImageFilterIUC3IUC3IUC3IUC3_cast(itkLightObject obj) > itkTernaryAddImageFilterIUC3IUC3IUC3IUC3
def itkTernaryAddImageFilterIUC3IUC3IUC3IUC3_cast(*args):
    return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIUC3IUC3IUC3IUC3_cast(*args)
[ "def itkTernaryAddImageFilterIUL3IUL3IUL3IUL3_cast(*args):\n return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIUL3IUL3IUL3IUL3_cast(*args)", "def itkNotImageFilterIUC3IUC3_cast(obj: 'itkLightObject') -> \"itkNotImageFilterIUC3IUC3 *\":\n return _itkNotImageFilterPython.itkNotImageFilterIUC3IUC3_cast(obj)", "def itkConstrainedValueAdditionImageFilterIUC3IUC3IUC3_cast(obj: 'itkLightObject') -> \"itkConstrainedValueAdditionImageFilterIUC3IUC3IUC3 *\":\n return _itkConstrainedValueAdditionImageFilterPython.itkConstrainedValueAdditionImageFilterIUC3IUC3IUC3_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkConstrainedValueAdditionImageFilterIUC3IUC3IUC3 *\":\n return _itkConstrainedValueAdditionImageFilterPython.itkConstrainedValueAdditionImageFilterIUC3IUC3IUC3_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkHuangThresholdImageFilterIUC3IUC3 *\":\n return _itkHuangThresholdImageFilterPython.itkHuangThresholdImageFilterIUC3IUC3_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkHuangThresholdImageFilterIF3IUC3 *\":\n return _itkHuangThresholdImageFilterPython.itkHuangThresholdImageFilterIF3IUC3_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkConstrainedValueAdditionImageFilterIF3IF3IF3 *\":\n return _itkConstrainedValueAdditionImageFilterPython.itkConstrainedValueAdditionImageFilterIF3IF3IF3_cast(obj)", "def itkAbsoluteValueDifferenceImageFilterIUC3IUC3IUC3_cast(obj: 'itkLightObject') -> \"itkAbsoluteValueDifferenceImageFilterIUC3IUC3IUC3 *\":\n return _itkAbsoluteValueDifferenceImageFilterPython.itkAbsoluteValueDifferenceImageFilterIUC3IUC3IUC3_cast(obj)", "def itkHuangThresholdImageFilterIUC3IUC3_cast(obj: 'itkLightObject') -> \"itkHuangThresholdImageFilterIUC3IUC3 *\":\n return _itkHuangThresholdImageFilterPython.itkHuangThresholdImageFilterIUC3IUC3_cast(obj)", "def itkHuangThresholdImageFilterIF3IUC3_cast(obj: 'itkLightObject') -> \"itkHuangThresholdImageFilterIF3IUC3 *\":\n return _itkHuangThresholdImageFilterPython.itkHuangThresholdImageFilterIF3IUC3_cast(obj)", "def itkTernaryAddImageFilterIF3IF3IF3IF3_cast(*args):\n return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIF3IF3IF3IF3_cast(*args)", "def itkConstrainedValueAdditionImageFilterIF3IF3IF3_cast(obj: 'itkLightObject') -> \"itkConstrainedValueAdditionImageFilterIF3IF3IF3 *\":\n return _itkConstrainedValueAdditionImageFilterPython.itkConstrainedValueAdditionImageFilterIF3IF3IF3_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkHuangThresholdImageFilterIF3IUS3 *\":\n return _itkHuangThresholdImageFilterPython.itkHuangThresholdImageFilterIF3IUS3_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkAbsoluteValueDifferenceImageFilterIUC3IUC3IUC3 *\":\n return _itkAbsoluteValueDifferenceImageFilterPython.itkAbsoluteValueDifferenceImageFilterIUC3IUC3IUC3_cast(obj)", "def itkNoiseBaseImageFilterIUC3IUC3_cast(obj: 'itkLightObject') -> \"itkNoiseBaseImageFilterIUC3IUC3 *\":\n return _itkNoiseBaseImageFilterPython.itkNoiseBaseImageFilterIUC3IUC3_cast(obj)", "def itkHuangThresholdImageFilterIF3IUS3_cast(obj: 'itkLightObject') -> \"itkHuangThresholdImageFilterIF3IUS3 *\":\n return _itkHuangThresholdImageFilterPython.itkHuangThresholdImageFilterIF3IUS3_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkHuangThresholdImageFilterIUC3IUS3 *\":\n return _itkHuangThresholdImageFilterPython.itkHuangThresholdImageFilterIUC3IUS3_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkNotImageFilterIUC3IUC3 *\":\n return _itkNotImageFilterPython.itkNotImageFilterIUC3IUC3_cast(obj)", "def 
itkTernaryAddImageFilterID3ID3ID3ID3_cast(*args):\n return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterID3ID3ID3ID3_cast(*args)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
New() > itkTernaryAddImageFilterIUL2IUL2IUL2IUL2 Create a new object of the class itkTernaryAddImageFilterIUL2IUL2IUL2IUL2 and set its inputs and parameters if any named or non-named arguments are passed to that method. New() assigns the non-named parameters to the inputs of the new object in order: the first non-named parameter becomes the first input, and so on. The named parameters are applied by calling the method of the same name prefixed by 'Set'.
def New(*args, **kargs):
    obj = itkTernaryAddImageFilterIUL2IUL2IUL2IUL2.__New_orig__()
    import itkTemplate
    itkTemplate.New(obj, *args, **kargs)
    return obj
[ "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUL2IUL2IUL2IUL2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIF2IF2IF2IF2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIF2IF2IF2IF2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterID2ID2ID2ID2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterID2ID2ID2ID2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUC2IUC2IUC2IUC2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUC2IUC2IUC2IUC2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUS2IUS2IUS2IUS2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUL3IUL3IUL3IUL3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUS2IUS2IUS2IUS2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUL3IUL3IUL3IUL3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUC3IUC3IUC3IUC3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkAddImageFilterIUL2IUL2IUL2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkAddImageFilterIUL2IUL2IUL2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkBilateralImageFilterIUL2IUL2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkSLICImageFilterIF2IULL2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkHConcaveImageFilterIUL2IUL2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUC3IUC3IUC3IUC3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkAddImageFilterIF2IF2IF2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
itkTernaryAddImageFilterIUL2IUL2IUL2IUL2_cast(itkLightObject obj) > itkTernaryAddImageFilterIUL2IUL2IUL2IUL2
def itkTernaryAddImageFilterIUL2IUL2IUL2IUL2_cast(*args):
    return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIUL2IUL2IUL2IUL2_cast(*args)
[ "def cast(obj: 'itkLightObject') -> \"itkAbsoluteValueDifferenceImageFilterIF2IF2IF2 *\":\n return _itkAbsoluteValueDifferenceImageFilterPython.itkAbsoluteValueDifferenceImageFilterIF2IF2IF2_cast(obj)", "def itkAbsoluteValueDifferenceImageFilterIF2IF2IF2_cast(obj: 'itkLightObject') -> \"itkAbsoluteValueDifferenceImageFilterIF2IF2IF2 *\":\n return _itkAbsoluteValueDifferenceImageFilterPython.itkAbsoluteValueDifferenceImageFilterIF2IF2IF2_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkAbsoluteValueDifferenceImageFilterIUC2IUC2IUC2 *\":\n return _itkAbsoluteValueDifferenceImageFilterPython.itkAbsoluteValueDifferenceImageFilterIUC2IUC2IUC2_cast(obj)", "def itkAbsoluteValueDifferenceImageFilterIUC2IUC2IUC2_cast(obj: 'itkLightObject') -> \"itkAbsoluteValueDifferenceImageFilterIUC2IUC2IUC2 *\":\n return _itkAbsoluteValueDifferenceImageFilterPython.itkAbsoluteValueDifferenceImageFilterIUC2IUC2IUC2_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkConstrainedValueAdditionImageFilterIF2IF2IF2 *\":\n return _itkConstrainedValueAdditionImageFilterPython.itkConstrainedValueAdditionImageFilterIF2IF2IF2_cast(obj)", "def itkConstrainedValueAdditionImageFilterIF2IF2IF2_cast(obj: 'itkLightObject') -> \"itkConstrainedValueAdditionImageFilterIF2IF2IF2 *\":\n return _itkConstrainedValueAdditionImageFilterPython.itkConstrainedValueAdditionImageFilterIF2IF2IF2_cast(obj)", "def itkTernaryAddImageFilterIUC2IUC2IUC2IUC2_cast(*args):\n return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIUC2IUC2IUC2IUC2_cast(*args)", "def itkTernaryAddImageFilterIF2IF2IF2IF2_cast(*args):\n return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIF2IF2IF2IF2_cast(*args)", "def cast(obj: 'itkLightObject') -> \"itkHuangThresholdImageFilterIF2IUS2 *\":\n return _itkHuangThresholdImageFilterPython.itkHuangThresholdImageFilterIF2IUS2_cast(obj)", "def itkTernaryAddImageFilterIUL3IUL3IUL3IUL3_cast(*args):\n return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIUL3IUL3IUL3IUL3_cast(*args)", "def itkHuangThresholdImageFilterIF2IUS2_cast(obj: 'itkLightObject') -> \"itkHuangThresholdImageFilterIF2IUS2 *\":\n return _itkHuangThresholdImageFilterPython.itkHuangThresholdImageFilterIF2IUS2_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkConstrainedValueAdditionImageFilterIUC2IUC2IUC2 *\":\n return _itkConstrainedValueAdditionImageFilterPython.itkConstrainedValueAdditionImageFilterIUC2IUC2IUC2_cast(obj)", "def itkNotImageFilterIF2IF2_cast(obj: 'itkLightObject') -> \"itkNotImageFilterIF2IF2 *\":\n return _itkNotImageFilterPython.itkNotImageFilterIF2IF2_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkAbsoluteValueDifferenceImageFilterIUC3IUC3IUC3 *\":\n return _itkAbsoluteValueDifferenceImageFilterPython.itkAbsoluteValueDifferenceImageFilterIUC3IUC3IUC3_cast(obj)", "def itkConstrainedValueAdditionImageFilterIUC2IUC2IUC2_cast(obj: 'itkLightObject') -> \"itkConstrainedValueAdditionImageFilterIUC2IUC2IUC2 *\":\n return _itkConstrainedValueAdditionImageFilterPython.itkConstrainedValueAdditionImageFilterIUC2IUC2IUC2_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkNotImageFilterIF2IF2 *\":\n return _itkNotImageFilterPython.itkNotImageFilterIF2IF2_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkHuangThresholdImageFilterIF2IUC2 *\":\n return _itkHuangThresholdImageFilterPython.itkHuangThresholdImageFilterIF2IUC2_cast(obj)", "def itkTernaryAddImageFilterIUC3IUC3IUC3IUC3_cast(*args):\n return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIUC3IUC3IUC3IUC3_cast(*args)", "def 
itkNotImageFilterIUC2IUC2_cast(obj: 'itkLightObject') -> \"itkNotImageFilterIUC2IUC2 *\":\n return _itkNotImageFilterPython.itkNotImageFilterIUC2IUC2_cast(obj)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
New() > itkTernaryAddImageFilterIUL3IUL3IUL3IUL3 Create a new object of the class itkTernaryAddImageFilterIUL3IUL3IUL3IUL3 and set its inputs and parameters if any named or non-named arguments are passed to that method. New() assigns the non-named parameters to the inputs of the new object in order: the first non-named parameter becomes the first input, and so on. The named parameters are applied by calling the method of the same name prefixed by 'Set'.
def New(*args, **kargs):
    obj = itkTernaryAddImageFilterIUL3IUL3IUL3IUL3.__New_orig__()
    import itkTemplate
    itkTemplate.New(obj, *args, **kargs)
    return obj
[ "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIF3IF3IF3IF3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIF3IF3IF3IF3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterID3ID3ID3ID3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUL3IUL3IUL3IUL3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterID3ID3ID3ID3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUC3IUC3IUC3IUC3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUS3IUS3IUS3IUS3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUS3IUS3IUS3IUS3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUC3IUC3IUC3IUC3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkConstrainedValueAdditionImageFilterIF3IF3IF3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkAddImageFilterIF3IF3IF3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkAddImageFilterIF3IF3IF3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkAddImageFilterIUL3IUL3IUL3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUL2IUL2IUL2IUL2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUL2IUL2IUL2IUL2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkScalarChanAndVeseSparseLevelSetImageFilterIF3IF3IF3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkScalarChanAndVeseSparseLevelSetImageFilterIF3IF3IF3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkBilateralImageFilterIF3IF3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkBilateralImageFilterIUL3IUL3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
itkTernaryAddImageFilterIUL3IUL3IUL3IUL3_cast(itkLightObject obj) > itkTernaryAddImageFilterIUL3IUL3IUL3IUL3
def itkTernaryAddImageFilterIUL3IUL3IUL3IUL3_cast(*args):
    return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIUL3IUL3IUL3IUL3_cast(*args)
[ "def itkTernaryAddImageFilterIUC3IUC3IUC3IUC3_cast(*args):\n return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIUC3IUC3IUC3IUC3_cast(*args)", "def cast(obj: 'itkLightObject') -> \"itkAbsoluteValueDifferenceImageFilterIF3IF3IF3 *\":\n return _itkAbsoluteValueDifferenceImageFilterPython.itkAbsoluteValueDifferenceImageFilterIF3IF3IF3_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkConstrainedValueAdditionImageFilterIF3IF3IF3 *\":\n return _itkConstrainedValueAdditionImageFilterPython.itkConstrainedValueAdditionImageFilterIF3IF3IF3_cast(obj)", "def itkTernaryAddImageFilterIF3IF3IF3IF3_cast(*args):\n return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIF3IF3IF3IF3_cast(*args)", "def itkTernaryAddImageFilterID3ID3ID3ID3_cast(*args):\n return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterID3ID3ID3ID3_cast(*args)", "def cast(obj: 'itkLightObject') -> \"itkAbsoluteValueDifferenceImageFilterIUC3IUC3IUC3 *\":\n return _itkAbsoluteValueDifferenceImageFilterPython.itkAbsoluteValueDifferenceImageFilterIUC3IUC3IUC3_cast(obj)", "def itkAbsoluteValueDifferenceImageFilterIF3IF3IF3_cast(obj: 'itkLightObject') -> \"itkAbsoluteValueDifferenceImageFilterIF3IF3IF3 *\":\n return _itkAbsoluteValueDifferenceImageFilterPython.itkAbsoluteValueDifferenceImageFilterIF3IF3IF3_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkHuangThresholdImageFilterIF3IUS3 *\":\n return _itkHuangThresholdImageFilterPython.itkHuangThresholdImageFilterIF3IUS3_cast(obj)", "def itkConstrainedValueAdditionImageFilterIF3IF3IF3_cast(obj: 'itkLightObject') -> \"itkConstrainedValueAdditionImageFilterIF3IF3IF3 *\":\n return _itkConstrainedValueAdditionImageFilterPython.itkConstrainedValueAdditionImageFilterIF3IF3IF3_cast(obj)", "def itkNotImageFilterIF3IF3_cast(obj: 'itkLightObject') -> \"itkNotImageFilterIF3IF3 *\":\n return _itkNotImageFilterPython.itkNotImageFilterIF3IF3_cast(obj)", "def itkAbsoluteValueDifferenceImageFilterIUC3IUC3IUC3_cast(obj: 'itkLightObject') -> \"itkAbsoluteValueDifferenceImageFilterIUC3IUC3IUC3 *\":\n return _itkAbsoluteValueDifferenceImageFilterPython.itkAbsoluteValueDifferenceImageFilterIUC3IUC3IUC3_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkHuangThresholdImageFilterIF3ISS3 *\":\n return _itkHuangThresholdImageFilterPython.itkHuangThresholdImageFilterIF3ISS3_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkNotImageFilterIF3IF3 *\":\n return _itkNotImageFilterPython.itkNotImageFilterIF3IF3_cast(obj)", "def itkHuangThresholdImageFilterIF3IUS3_cast(obj: 'itkLightObject') -> \"itkHuangThresholdImageFilterIF3IUS3 *\":\n return _itkHuangThresholdImageFilterPython.itkHuangThresholdImageFilterIF3IUS3_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkAbsoluteValueDifferenceImageFilterISS3ISS3ISS3 *\":\n return _itkAbsoluteValueDifferenceImageFilterPython.itkAbsoluteValueDifferenceImageFilterISS3ISS3ISS3_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkConstrainedValueAdditionImageFilterIUC3IUC3IUC3 *\":\n return _itkConstrainedValueAdditionImageFilterPython.itkConstrainedValueAdditionImageFilterIUC3IUC3IUC3_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkHuangThresholdImageFilterIF3IUC3 *\":\n return _itkHuangThresholdImageFilterPython.itkHuangThresholdImageFilterIF3IUC3_cast(obj)", "def itkHuangThresholdImageFilterIF3ISS3_cast(obj: 'itkLightObject') -> \"itkHuangThresholdImageFilterIF3ISS3 *\":\n return _itkHuangThresholdImageFilterPython.itkHuangThresholdImageFilterIF3ISS3_cast(obj)", "def itkAbsoluteValueDifferenceImageFilterISS3ISS3ISS3_cast(obj: 
'itkLightObject') -> \"itkAbsoluteValueDifferenceImageFilterISS3ISS3ISS3 *\":\n return _itkAbsoluteValueDifferenceImageFilterPython.itkAbsoluteValueDifferenceImageFilterISS3ISS3ISS3_cast(obj)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
New() > itkTernaryAddImageFilterIUS2IUS2IUS2IUS2 Create a new object of the class itkTernaryAddImageFilterIUS2IUS2IUS2IUS2 and set its inputs and parameters if any named or non-named arguments are passed to that method. New() assigns the non-named parameters to the inputs of the new object in order: the first non-named parameter becomes the first input, and so on. The named parameters are applied by calling the method of the same name prefixed by 'Set'.
def New(*args, **kargs):
    obj = itkTernaryAddImageFilterIUS2IUS2IUS2IUS2.__New_orig__()
    import itkTemplate
    itkTemplate.New(obj, *args, **kargs)
    return obj
[ "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUS2IUS2IUS2IUS2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkAddImageFilterIUS2IUS2IUS2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkConstrainedValueAdditionImageFilterIUS2IUS2IUS2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkSLICImageFilterIF2IUS2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkBilateralImageFilterIUS2IUS2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkSLICImageFilterIUS2IUS2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkHConcaveImageFilterIUS2IUS2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkAddImageFilterIUS2IUS2IUS2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkSLICImageFilterIUS2IUS2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkSLICImageFilterIUS2IULL2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkSquaredDifferenceImageFilterIUS2IUS2IUS2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkSLICImageFilterIUS2IULL2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkSLICImageFilterIF2IUS2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIF2IF2IF2IF2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkExpImageFilterIUS2IUS2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUL2IUL2IUL2IUL2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIF2IF2IF2IF2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterID2ID2ID2ID2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUS3IUS3IUS3IUS3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
New() -> itkTernaryAddImageFilterIUS3IUS3IUS3IUS3 Create a new object of the class itkTernaryAddImageFilterIUS3IUS3IUS3IUS3 and set the inputs and parameters if any named or non-named arguments are passed to the method. New() assigns the non-named parameters to the inputs of the new object in order: the first non-named parameter to the first input, the second to the second input, and so on. Each named parameter is applied by calling the method of the same name prefixed by 'Set'.
def New(*args, **kargs):
    obj = itkTernaryAddImageFilterIUS3IUS3IUS3IUS3.__New_orig__()
    import itkTemplate
    itkTemplate.New(obj, *args, **kargs)
    return obj
[ "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUS3IUS3IUS3IUS3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkAddImageFilterIUS3IUS3IUS3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkConstrainedValueAdditionImageFilterIUS3IUS3IUS3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkHConcaveImageFilterIUS3IUS3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkAddImageFilterIUS3IUS3IUS3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkSLICImageFilterIF3IUS3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIF3IF3IF3IF3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkBilateralImageFilterIUS3IUS3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkHuangThresholdImageFilterIUS3IUS3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIF3IF3IF3IF3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkNoiseBaseImageFilterIUS3IUS3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkSLICImageFilterIUS3IUS3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUL3IUL3IUL3IUL3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkExpImageFilterIUS3IUS3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterID3ID3ID3ID3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkSLICImageFilterIUS3IUS3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkNotImageFilterIUS3IUS3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkSLICImageFilterIF3IUS3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkIsolatedConnectedImageFilterIUS3IUS3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
itkTernaryAddImageFilterIUS3IUS3IUS3IUS3_cast(itkLightObject obj) -> itkTernaryAddImageFilterIUS3IUS3IUS3IUS3
def itkTernaryAddImageFilterIUS3IUS3IUS3IUS3_cast(*args):
    return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIUS3IUS3IUS3IUS3_cast(*args)
[ "def itkHuangThresholdImageFilterIUS3IUS3_cast(obj: 'itkLightObject') -> \"itkHuangThresholdImageFilterIUS3IUS3 *\":\n return _itkHuangThresholdImageFilterPython.itkHuangThresholdImageFilterIUS3IUS3_cast(obj)", "def itkHuangThresholdImageFilterIF3IUS3_cast(obj: 'itkLightObject') -> \"itkHuangThresholdImageFilterIF3IUS3 *\":\n return _itkHuangThresholdImageFilterPython.itkHuangThresholdImageFilterIF3IUS3_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkHuangThresholdImageFilterIUS3IUS3 *\":\n return _itkHuangThresholdImageFilterPython.itkHuangThresholdImageFilterIUS3IUS3_cast(obj)", "def itkNoiseBaseImageFilterIUS3IUS3_cast(obj: 'itkLightObject') -> \"itkNoiseBaseImageFilterIUS3IUS3 *\":\n return _itkNoiseBaseImageFilterPython.itkNoiseBaseImageFilterIUS3IUS3_cast(obj)", "def itkConstrainedValueAdditionImageFilterIUS3IUS3IUS3_cast(obj: 'itkLightObject') -> \"itkConstrainedValueAdditionImageFilterIUS3IUS3IUS3 *\":\n return _itkConstrainedValueAdditionImageFilterPython.itkConstrainedValueAdditionImageFilterIUS3IUS3IUS3_cast(obj)", "def itkNotImageFilterIUS3IUS3_cast(obj: 'itkLightObject') -> \"itkNotImageFilterIUS3IUS3 *\":\n return _itkNotImageFilterPython.itkNotImageFilterIUS3IUS3_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkHuangThresholdImageFilterIF3IUS3 *\":\n return _itkHuangThresholdImageFilterPython.itkHuangThresholdImageFilterIF3IUS3_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkConstrainedValueAdditionImageFilterIUS3IUS3IUS3 *\":\n return _itkConstrainedValueAdditionImageFilterPython.itkConstrainedValueAdditionImageFilterIUS3IUS3IUS3_cast(obj)", "def itkTernaryAddImageFilterIUL3IUL3IUL3IUL3_cast(*args):\n return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIUL3IUL3IUL3IUL3_cast(*args)", "def itkSLICImageFilterIF3IUS3_cast(obj: 'itkLightObject') -> \"itkSLICImageFilterIF3IUS3 *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterIF3IUS3_cast(obj)", "def itkAbsoluteValueDifferenceImageFilterIUS3IUS3IUS3_cast(obj: 'itkLightObject') -> \"itkAbsoluteValueDifferenceImageFilterIUS3IUS3IUS3 *\":\n return _itkAbsoluteValueDifferenceImageFilterPython.itkAbsoluteValueDifferenceImageFilterIUS3IUS3IUS3_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkNotImageFilterIUS3IUS3 *\":\n return _itkNotImageFilterPython.itkNotImageFilterIUS3IUS3_cast(obj)", "def itkTernaryAddImageFilterIUC3IUC3IUC3IUC3_cast(*args):\n return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIUC3IUC3IUC3IUC3_cast(*args)", "def itkTernaryAddImageFilterID3ID3ID3ID3_cast(*args):\n return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterID3ID3ID3ID3_cast(*args)", "def itkShotNoiseImageFilterIUS3IUS3_cast(obj: 'itkLightObject') -> \"itkShotNoiseImageFilterIUS3IUS3 *\":\n return _itkShotNoiseImageFilterPython.itkShotNoiseImageFilterIUS3IUS3_cast(obj)", "def itkTernaryAddImageFilterIF3IF3IF3IF3_cast(*args):\n return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIF3IF3IF3IF3_cast(*args)", "def cast(obj: 'itkLightObject') -> \"itkNoiseBaseImageFilterIUS3IUS3 *\":\n return _itkNoiseBaseImageFilterPython.itkNoiseBaseImageFilterIUS3IUS3_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkSLICImageFilterIF3IUS3 *\":\n return _itkSLICImageFilterPython.itkSLICImageFilterIF3IUS3_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkHuangThresholdImageFilterIUC3IUS3 *\":\n return _itkHuangThresholdImageFilterPython.itkHuangThresholdImageFilterIUC3IUS3_cast(obj)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the aperture radius necessary to have a certain SPAXEL SCALE [in mas] at a certain WAVELENGTH [in microns]. That would be the aperture radius in an array ranging from [-1, 1] in physical length. For example, if rho = 0.5, then the necessary aperture is a circle of half the size of the array. We can use the inverse of that to get the "oversize" in physical units in our arrays to match a given scale.
def rho_spaxel_scale(spaxel_scale=4.0, wavelength=1.0):
    scale_rad = spaxel_scale / MILIARCSECS_IN_A_RAD
    rho = scale_rad * ELT_DIAM / (wavelength * 1e-6)
    return rho
[ "def rho_spaxel_scale(spaxel_scale=4.0, wavelength=1.5):\n\n scale_rad = spaxel_scale / MILIARCSECS_IN_A_RAD\n rho = scale_rad * ELT_DIAM / (wavelength * 1e-6)\n return rho", "def bound_spectral_radius(arr, bound=1.2):\n spectral_radius = abs(np.linalg.eigvals(ma.assert_numpy(arr))).max()\n arr[...] *= bound / spectral_radius", "def sphere_volume(r) :\n return (4 / 3) * np.pi * r ** 3", "def check_spaxel_scale(rho_aper, wavelength):\n\n SPAXEL_RAD = rho_aper * wavelength / ELT_DIAM * 1e-6\n SPAXEL_MAS = SPAXEL_RAD * MILIARCSECS_IN_A_RAD\n print('%.2f mas spaxels at %.2f microns' %(SPAXEL_MAS, wavelength))", "def _compute_mass(box_size, evo_config):\n\n # ensure format\n standard_volume = evo_config['individuals']['standard_volume']\n if isinstance(box_size, list):\n if len(box_size) == 1: # sphere\n box_size = box_size[0]\n box_size = np.asarray(box_size)\n\n if np.prod(box_size.shape) < 2: # sphere\n return 4 / 3 * np.pi * box_size**3 / standard_volume\n else: # box\n if np.ndim(box_size) == 1:\n return np.prod(box_size * 2) / standard_volume\n else:\n return np.prod(box_size * 2, axis=1) / standard_volume", "def sphere_volume(sphere_radius):\n return (4 / 3 * np.pi * sphere_radius**3)", "def soma_radius(morph):\n return morph.soma.radius", "def totalMass(self, trunc=None):\n if trunc is None:\n trunc = self.trunc\n rVir = self.U.rVir(m, z)\n rS, rhoS, c = self.rS_rhoS_c(m, z)\n # truncation radius over scale radius\n xMax = trunc * rVir/rS\n result = 4./3. * np.pi * rS**3 * rhoS\n result = xMax - np.log(1 + xMax)\n return result", "def getSphereRadius(self):\n return 1.5", "def radius_to_mass(R, mean_dens):\n return 4*c.pi*R**3*mean_dens/3", "def sphere_volume(r):\n return (4/3) * 3.14159 * r**3", "def sphere_volume(r):\n\treturn 4/3. * math.pi * r ** 3", "def sphere_volume(r):\n return 4*3.14159*(r**3)/3", "def scale_mag(mag,threshold=0.001):\n mmag = mag.transpose()\n mm = np.array([im.dot(im) for im in mmag]) # norms square\n mm = np.sqrt(mm) # vector with norms\n if max(mm)<threshold: # if small do nothing\n return mag\n else:\n return mag/max(mm)", "def internal_mass(r,rho,autoDebug=True):\n\tmass = (4.0 / 3.0) * sam.CONSTANT_pi * r**3.0 * rho\n\treturn mass", "def polarizability_sphere(radius, eps, wavelength, eps_b=1, dynamic_correction=True):\n radius = np.atleast_1d(radius)\n alpha_0 = 4*np.pi*eps_b*constants.epsilon_0*radius**3 * (eps - eps_b)/(eps + 2*eps_b)\n k = 2*np.pi*eps_b**0.5/wavelength\n\n correction = 1\n if dynamic_correction:\n correction -= 1j*k**3*alpha_0/(6*np.pi*eps_b*constants.epsilon_0)\n correction -= k**2*alpha_0/(6*np.pi*eps_b*constants.epsilon_0*radius)\n\n alpha = alpha_0/correction\n\n return alpha_0, alpha", "def sphrad(vol):\n return (3.*vol/(4.*np.pi))**(1./3.)", "def sphere_area(rad):\n \n #Compute area of a sphere in squae units compatible with input\n \n A = 4. * pi * rad**2\n \n return (A)", "def calcSphereSimilarity(self):\n # Get the theoretical radius a sphere should have for the hotspot volume\n # Remember the sphere volume formula: V = (4/3)*pi*r^3\n expectedRadius = ((3*self.volume)/(4*npy.pi))**(1/3.)\n expectedDiameter = expectedRadius*2 + self.spacing\n\n # Calculate relation extension/expectedRadius\n self.sphereRatios = self.extension/expectedDiameter # times 2 because extension would be a diameter\n self.sphereindex = npy.linalg.norm(self.sphereRatios-1) + 1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks the spaxel scale at a certain wavelength, for a given aperture radius defined for a [-1, 1] physical array.
def check_spaxel_scale(rho_aper, wavelength):
    SPAXEL_RAD = rho_aper * wavelength / ELT_DIAM * 1e-6
    SPAXEL_MAS = SPAXEL_RAD * MILIARCSECS_IN_A_RAD
    print('%.2f mas spaxels at %.2f microns' % (SPAXEL_MAS, wavelength))
[ "def bound_spectral_radius(arr, bound=1.2):\n spectral_radius = abs(np.linalg.eigvals(ma.assert_numpy(arr))).max()\n arr[...] *= bound / spectral_radius", "def rho_spaxel_scale(spaxel_scale=4.0, wavelength=1.5):\n\n scale_rad = spaxel_scale / MILIARCSECS_IN_A_RAD\n rho = scale_rad * ELT_DIAM / (wavelength * 1e-6)\n return rho", "def rho_spaxel_scale(spaxel_scale=4.0, wavelength=1.0):\n\n scale_rad = spaxel_scale / MILIARCSECS_IN_A_RAD\n rho = scale_rad * ELT_DIAM / (wavelength * 1e-6)\n return rho", "def guess_scaling(name, spectrum):\n spectra = '%s/disp/%s.1d.fits' % (name, zerocount(spectrum))\n skyname = '%s/sky.1d.fits' % name\n spectrafits = pyfits.open(spectra)\n skyfits = pyfits.open(skyname)\n scalings = []\n for line in LINES:\n spec_peak, spec_cont = get_peak_cont(spectrafits, line, 5)\n sky_peak, sky_cont = get_peak_cont(skyfits, line, 5)\n scale = ((spec_peak - spec_cont) / (sky_peak - sky_cont))\n scalings.append(scale)\n return avg(*scalings)", "def __call__(self, freq, amp, pdur):\n min_f_size = self.min_rho**2 / self.rho**2\n F_size = self.a5 * amp * self.scale_threshold(pdur) + self.a6\n if self.engine == 'jax':\n return jnp.maximum(F_size, min_f_size)\n else:\n return np.maximum(F_size, min_f_size)", "def validate(self, size):\n msg = 'scale and array size must match, ' \\\n 'but were scale: {self.scale.n_bands}, array size: {size}'\n\n if size != len(self.scale):\n raise ValueError(msg.format(**locals()))", "def _setSimplexWithinRangeBoundary(self, radius=None):\n x0 = self.population[0]\n #code modified from park-1.2/park/simplex.py (version 1257)\n if self._useStrictRange:\n x0 = self._clipGuessWithinRangeBoundary(x0)\n\n if radius is None: radius = 0.05 # nonzdelt=0.05 from scipy-0.9\n val = x0*(1+radius)\n val[val==0] = (radius**2) * 0.1 # zdelt=0.00025 update from scipy-0.9\n if not self._useStrictRange:\n self.population[0] = x0\n return val\n\n lo = self._strictMin\n hi = self._strictMax\n radius = clip(radius,0,0.5)\n # rescale val by bounded range...\n # (increases fit for tight bounds; makes worse[?] for large bounds)\n bounded = ~numpy.isinf(lo) & ~numpy.isinf(hi)\n val[bounded] = x0[bounded] + (hi[bounded]-lo[bounded])*radius\n # crop val at bounds\n settings = numpy.seterr(all='ignore')\n val[val<lo] = lo[val<lo]\n val[val>hi] = hi[val>hi]\n numpy.seterr(**settings)\n # handle collisions (when val[i] == x0[i])\n collision = val==x0\n if numpy.any(collision):\n rval = x0*(1-radius)\n rval[rval==0] = -radius\n rval[bounded] = x0[bounded] - (hi[bounded]-lo[bounded])*radius\n val[collision] = rval[collision]\n # make tolerance relative for bounded parameters\n # tol = numpy.ones(x0.shape)*xtol\n # tol[bounded] = (hi[bounded]-lo[bounded])*xtol\n # xtol = tol\n self.population[0] = x0\n return val", "def analysis_function_rms_wfe(system, wavelength_idx, config, spaxels_per_slice, surface, pupil_sampling,\n remove_slicer=False):\n if config % 20 == 0:\n print(config)\n\n # Set Current Configuration\n system.MCE.SetCurrentConfiguration(config)\n\n # [WARNING]: for the 4x4 spaxel scale we noticed that a significant fraction of the rays get vignetted at the slicer\n # this introduces a bias in the RMS WFE calculation. To avoid this, we modify the Image Slicer aperture definition\n # so that all rays get through. 
Consequently, enough pupil rays are traced to get an unbiased estimation of RMS WFE\n if remove_slicer is True:\n\n expand_slicer_aperture(system)\n\n # [1] Some housekeeping and pre-processing operations\n # Get the Field Points for that configuration\n sysField = system.SystemData.Fields\n # Problem with the MC files. Before, all the E2E files had only 3 fields, now there's more, some spurious ones\n # So N_fields is no longer 3. Let's just hardcode the value to 3 temporarily\n # N_fields = sysField.NumberOfFields\n N_fields = 3\n N_waves = len(wavelength_idx)\n N_rays = N_waves * spaxels_per_slice\n\n # The only valid Field Points we should care about are 1-3 as defined by Matthias\n # The default Field Point definition of the E2E files is 1 & 3 are the edges of the slice, 2 is the centre\n fx_min, fy_min = sysField.GetField(1).X, sysField.GetField(1).Y\n fx_max, fy_max = sysField.GetField(3).X, sysField.GetField(3).Y\n\n # Note that this assumes Rectangular Normalization, the default in the E2E files.\n X_MAX = np.max([np.abs(sysField.GetField(i + 1).X) for i in range(N_fields)])\n Y_MAX = np.max([np.abs(sysField.GetField(i + 1).Y) for i in range(N_fields)])\n\n # Normalized field coordinates (hx, hy)\n hx_min, hx_max = fx_min / X_MAX, fx_max / X_MAX\n hy_min, hy_max = fy_min / Y_MAX, fy_max / Y_MAX\n\n # Sample between the edges of the slice as given by \"spaxels_per_slice\" to include as many points as we want\n hx = np.linspace(hx_min, hx_max, spaxels_per_slice)\n hy = np.linspace(hy_min, hy_max, spaxels_per_slice)\n\n # The useful data that we'll store\n obj_xy = np.array([X_MAX * hx, Y_MAX * hy]).T # The Field coordinates for the Object plane\n RMS_WFE = np.empty((N_waves, spaxels_per_slice)) # The RMS WFE results\n foc_xy = np.empty((N_waves, spaxels_per_slice, 2)) # The Chief Ray coordinates at the Detector\n\n # [2] This is where the core of the RMS WFE calculation takes place\n # First, we begin by defining the Raytrace\n raytrace = system.Tools.OpenBatchRayTrace()\n normUnPolData = raytrace.CreateNormUnpol(N_rays, constants.RaysType_Real, surface)\n\n # Start creating the Merit Function\n theMFE = system.MFE\n\n # Clear any operands that could be left from the E2E files\n nops = theMFE.NumberOfOperands\n theMFE.RemoveOperandsAt(1, nops)\n\n # Build the Merit Function\n # Set first operand to current configuration\n op = theMFE.GetOperandAt(1)\n op.ChangeType(constants.MeritOperandType_CONF)\n op.GetOperandCell(constants.MeritColumn_Param1).Value = config\n wfe_op = constants.MeritOperandType_RWRE # The Type of RMS WFE Operand: RWRE rectangular\n\n # Populate the Merit Function with RMS WFE Operands\n # Loop over the wavelengths\n for i_wave, wave_idx in enumerate(wavelength_idx):\n\n # Loop over all Spaxels in the Slice\n for j_field, (h_x, h_y) in enumerate(zip(hx, hy)):\n\n op = theMFE.AddOperand()\n op.ChangeType(wfe_op)\n op.GetOperandCell(constants.MeritColumn_Param1).Value = int(pupil_sampling)\n op.GetOperandCell(constants.MeritColumn_Param2).Value = int(wave_idx)\n op.GetOperandCell(constants.MeritColumn_Param3).Value = float(h_x)\n op.GetOperandCell(constants.MeritColumn_Param4).Value = float(h_y)\n op.GetOperandCell(constants.MeritColumn_Weight).Value = 0\n\n # Take advantage of the loop to simultaneously add the ray to the RayTrace\n normUnPolData.AddRay(wave_idx, h_x, h_y, 0, 0, constants.OPDMode_None)\n\n # time_1 = time() - start0\n # print(\"\\nTime spent setting up MF and Raytrace: %.3f sec\" % time_1)\n # start = time()\n\n # update the Merit Function\n 
theMFE.CalculateMeritFunction()\n # time_mf = time() - start\n # print(\"Time spent updating MF: %.3f sec\" % time_mf)\n\n # start = time()\n # Run the RayTrace for the whole Slice\n CastTo(raytrace, 'ISystemTool').RunAndWaitForCompletion()\n # time_ray = time() - start\n # print(\"Time spent running Raytrace: %.3f sec\" % time_ray)\n\n # start = time()\n # [3] Time to start reading the results of the RMS WFE Operands + Raytrace coordinates\n normUnPolData.StartReadingResults()\n # Loop over the wavelengths\n for i_wave, wave_idx in enumerate(wavelength_idx):\n # Loop over all Spaxels in the Slice\n for j_field, (h_x, h_y) in enumerate(zip(hx, hy)):\n\n # Calculate the Row index we need to get the Operand\n irow = 2 + i_wave * spaxels_per_slice + j_field\n # print(irow)\n\n op = theMFE.GetOperandAt(irow)\n\n # print(op.GetOperandCell(constants.MeritColumn_Param1).Value)\n # print(op.GetOperandCell(constants.MeritColumn_Param2).Value)\n # print(op.GetOperandCell(constants.MeritColumn_Param3).Value)\n # print(op.GetOperandCell(constants.MeritColumn_Param4).Value)\n rms = op.Value\n\n wavelength = system.SystemData.Wavelengths.GetWavelength(wave_idx).Wavelength\n\n RMS_WFE[i_wave, j_field] = wavelength * 1e3 * rms # We assume the Wavelength comes in Microns\n\n # If we get an RMS value of 0.0, print the data so we can double check the Zemax file\n # This is bad news and it mean the Rays are being vignetted somewhere\n if RMS_WFE[i_wave, j_field] == 0.0:\n print(\"\\nConfig #%d | Wave #%d | Field #%d\" % (config, wave_idx, j_field + 1))\n # raise ValueError\n\n output = normUnPolData.ReadNextResult()\n if output[2] == 0:\n x, y = output[4], output[5]\n foc_xy[i_wave, j_field, 0] = x\n foc_xy[i_wave, j_field, 1] = y\n\n vignetting_code = output[3]\n if vignetting_code != 0:\n vignetting_surface = system.LDE.GetSurfaceAt(vignetting_code).Comment\n # print(\"\\nConfig #%d\" % (config))\n # print(\"Vignetting at surface #%d: %s\" % (vignetting_code, vignetting_surface))\n # if config == 1:\n # raise ValueError\n\n normUnPolData.ClearData()\n CastTo(raytrace, 'ISystemTool').Close()\n # time_res = time() - start\n # print(\"Time spent reading results: %.3f sec\" % time_res)\n\n # time_total = time() - start0\n # print(\"TOTAL Time: %.3f sec\" % time_total)\n # sec_per_wave = time_total / N_waves * 1000\n # print(\"%3.f millisec per Wavelength\" % sec_per_wave)\n\n return [RMS_WFE, obj_xy, foc_xy]", "def apply_spectral_radius(w,spectral_radius):\n assert len(w.shape)==2 and w.shape[0]==w.shape[1],\\\n \"Error: apply_spectral_radius must receive 'w' as a square matrix.\"\n\n new_w = np.array(w)\n spectral_radius_w = calc_spectral_radius(w)\n if spectral_radius_w > 0.0:\n new_w = (w / spectral_radius_w) * spectral_radius\n else:\n print(\"Warning: Spectral radius of 'w' is zero (because of small size). 
Therefore, spectral radius does not changed.\")\n\n return new_w", "def test_dist_sma_radius(self):\n\n for mod in self.allmods:\n if \"dist_sma_radius\" in mod.__dict__:\n with RedirectStreams(stdout=self.dev_null):\n pp = mod(**self.spec)\n\n a = np.logspace(\n np.log10(pp.arange[0].value / 10.0),\n np.log10(pp.arange[1].value * 100),\n 100,\n )\n Rp = np.logspace(\n np.log10(pp.Rprange[0].value / 10.0),\n np.log10(pp.Rprange[1].value * 100),\n 100,\n )\n\n aa, RR = np.meshgrid(a, Rp)\n\n fr = pp.dist_sma_radius(aa, RR)\n self.assertTrue(\n np.all(fr[aa < pp.arange[0].value] == 0),\n \"dist_sma_radius low bound failed on sma for %s\" % mod.__name__,\n )\n self.assertTrue(\n np.all(fr[aa > pp.arange[1].value] == 0),\n \"dist_sma_radius high bound failed on sma for %s\" % mod.__name__,\n )\n self.assertTrue(\n np.all(fr[RR < pp.Rprange[0].value] == 0),\n \"dist_sma_radius low bound failed on radius for %s\" % mod.__name__,\n )\n self.assertTrue(\n np.all(fr[RR > pp.Rprange[1].value] == 0),\n \"dist_sma_radius high bound failed on radius for %s\" % mod.__name__,\n )\n self.assertTrue(\n np.all(\n fr[\n (aa > pp.arange[0].value)\n & (aa < pp.arange[1].value)\n & (RR > pp.Rprange[0].value)\n & (RR < pp.Rprange[1].value)\n ]\n > 0\n ),\n \"dist_sma_radius is improper pdf for %s\" % mod.__name__,\n )", "def any_scale(scale):\n return scale", "def sfilter(spectral,center, width, exponent=6, taupass=1.0, \\\n taustop=0.0, filtertype = 'bandpass' ):\n\n maxexp = np.log(sys.float_info.max)/np.log(np.max(2*np.abs(spectral-center)/width))\n # minexp = np.log(sys.float_info.min)/np.log(np.min(2*(spectral-center)/width))\n exponent = maxexp if exponent > maxexp else exponent\n # exponent = minexp if exponent < minexp else exponent\n tau = taustop+(taupass-taustop)*np.exp(-(2*np.abs(spectral-center)/width)**exponent)\n maxtau=np.max(tau)\n if filtertype == 'bandpass':\n pass\n elif filtertype == 'lowpass':\n tau = tau * np.greater(spectral,center) + \\\n maxtau * np.ones(spectral.shape) * np.less(spectral,center)\n elif filtertype == 'highpass':\n tau = tau * np.less(spectral,center) + \\\n maxtau * np.ones(spectral.shape) * np.greater(spectral,center)\n else:\n return None\n\n return tau", "def scale_spectrum(self, idx=0, mag=20, filter_name=\"Ks\"):\n\n self.lam, self.spectra[idx] = scale_spectrum(lam=self.lam,\n spec=self.spectra[idx],\n mag=mag,\n filter_name=filter_name,\n return_ec=False)", "def scale_mag(mag,threshold=0.001):\n mmag = mag.transpose()\n mm = np.array([im.dot(im) for im in mmag]) # norms square\n mm = np.sqrt(mm) # vector with norms\n if max(mm)<threshold: # if small do nothing\n return mag\n else:\n return mag/max(mm)", "def sFLD(e_spectra, l_spectra, wavelengths, fwhm, band = 'A', plot=True):\n buffer_in = 5 # range to search within absorption feature\n buffer_out = 1 # range to search outside of the absorption feature\n \n if band == 'A':\n out_in = 0.7535*fwhm+2.8937 # define amount to skip to left shoulder from minimum\n wl_in = 760 # standard location of O2A absorption feature defines start of search location\n if band == 'B':\n out_in = 0.697*fwhm + 1.245 # define amount to skip to left shoulder from minimum\n wl_in = 687 # standard location of the O2B aboorption band defines start of search location\n \n # find the points in given ranges using the stats_on_spectra function\n # find the minimum inside of the band for E_in and L_in starting at the standard locations for each band\n e_in_index, e_in = stats_on_spectra(wavelengths, wl_in - buffer_in, wl_in + buffer_in, 
e_spectra, 'min')\n l_in_index, l_in = stats_on_spectra(wavelengths, wl_in - buffer_in, wl_in + buffer_in, l_spectra, 'min')\n # locate the left shoulder of the band using the average of values within a given range\n e_out_index, e_out = stats_on_spectra(wavelengths, wl_in - buffer_out - out_in, wl_in - out_in, e_spectra, 'mean')\n l_out_index, l_out = stats_on_spectra(wavelengths, wl_in - buffer_out - out_in, wl_in - out_in, l_spectra, 'mean')\n \n if plot == True: # plot spectra and points at absorption feature\n plt.plot(wavelengths, e_spectra, color = 'orange')\n plt.plot(wavelengths, l_spectra, color = 'blue')\n plt.scatter(wavelengths[e_in_index], e_in, label = 'e_in')\n plt.scatter(wavelengths[l_in_index], l_in, label = 'l_in')\n plt.scatter(wavelengths[e_out_index], e_out, label = 'e_out')\n plt.scatter(wavelengths[l_out_index], l_out, label = 'l_out')\n #plt.legend()\n plt.xlabel('Wavelength (nm)')\n plt.ylabel('Radiance (mW m−2 sr−1 nm−1)')\n \n # zoom to absorption band\n \n if band == 'A':\n plt.xlim(750, 775)\n plt.title('O$_2$A Absorption Band')\n \n if band == 'B':\n plt.xlim(680, 700)\n plt.title('O$_2$B Absorption Band')\n \n plt.show() # show plot\n \n fluorescence = (e_out*l_in - l_out*e_in) / (e_out - e_in) # calculate fluorescence using sFLD method\n \n return(fluorescence)", "def bounds_check(self, radii, scale_factors):\n # TODO: refactor to have a parameter object that knows how to check its own bounds\n if (np.min(radii) < min_r) or (np.max(radii) > max_r) or \\\n (np.min(scale_factors) < min_scale) or (np.max(scale_factors) > max_scale):\n return False\n else:\n return True", "def scale(self, spectrum, dimension, **kwargs):\n prescale_sum = spectrum.sum()\n interpolation = spectrum.interpolate1d(dimension, **kwargs)\n sf = self.get_scale_factor()\n scaled_spec = copy.copy(spectrum)\n scaled_spec._name = spectrum._name + \"_sf\" + str(sf)\n scaled_spec._data = numpy.zeros(spectrum._data.shape)\n n_dim = len(spectrum._data.shape)\n axis = spectrum.get_config().get_index(dimension)\n par = spectrum.get_config().get_par(dimension)\n low = par._low\n high = par._high\n n_bins = par._bins\n step = par.get_width()\n for bin in range(n_bins):\n x = par.get_bin_centre(bin)\n ratio = x/sf\n if ratio < low or ratio >= high:\n continue # Trying to scale values outside range (Unknown)\n elif ratio < low + 0.5*step:\n ratio = low + 0.5*step\n elif ratio > high - 0.5*step:\n ratio = high - 0.5*step - 1e-6 # Floating point issue\n y = interpolation(ratio)\n if y <= 0.:\n continue\n old_bin1 = par.get_bin(ratio)\n old_bin_centre1 = par.get_bin_centre(old_bin1)\n if par.get_bin_centre(old_bin1) > ratio:\n old_bin2 = old_bin1 - 1\n if old_bin2 >= 0:\n x_low1 = old_bin_centre1 - 0.5*step # Equals x_high2\n x_high1 = ratio + 0.5*step\n if x_high1 > high - 0.5*step:\n x_high1 = high - 0.5*step - 1e-6\n area1 = numpy.fabs(0.5 * (x_high1 - x_low1) *\n (interpolation(x_high1) +\n interpolation(x_low1)))\n x_low2 = ratio - 0.5*step\n area2 = numpy.fabs(0.5 * (x_low1 - x_low2) *\n (interpolation(x_low1) +\n interpolation(x_low2)))\n else:\n old_bin2 = 0\n area2 = 0. 
# This will set scale2 == 0\n area1 = 1.\n else:\n old_bin2 = old_bin1 + 1\n if old_bin2 < n_bins:\n x_low1 = ratio - 0.5*step\n if x_low1 < low + 0.5*step:\n x_low1 = low + 0.5*step\n x_high1 = old_bin_centre1 + 0.5*step # = x_low2\n area1 = numpy.fabs(0.5 * (x_high1 - x_low1) *\n (interpolation(x_high1) +\n interpolation(x_low1)))\n x_high2 = ratio + 0.5*step\n area2 = numpy.fabs(0.5 * (x_high2 - x_high1) *\n (interpolation(x_high2) +\n interpolation(x_high1)))\n else:\n old_bin2 = n_bins - 1\n area2 = 0. # This will set scale2 == 0\n area1 = 1.\n if area1 == 0. and area2 == 0.:\n continue\n scale1 = area1 / (area1 + area2)\n scale2 = area2 / (area1 + area2)\n # Prepare array split. Is there a better way to do this not using\n # eval and exec?\n cur_slice = \"[\"\n old_slice1 = \"[\"\n old_slice2 = \"[\"\n for dim in range(n_dim):\n if dim == axis:\n if bin < n_bins - 1:\n cur_slice += str(bin) + \":\" + str(bin + 1) + \",\"\n else:\n cur_slice += str(bin) + \":,\"\n if old_bin1 < n_bins - 1:\n old_slice1 += (str(old_bin1) + \":\" +\n str(old_bin1 + 1) + \",\")\n else:\n old_slice1 += str(old_bin1) + \":,\"\n if old_bin2 < n_bins - 1:\n old_slice2 += (str(old_bin2) + \":\" +\n str(old_bin2 + 1) + \",\")\n else:\n old_slice2 += str(old_bin2) + \":,\"\n else:\n cur_slice += \":,\"\n old_slice1 += \":,\"\n old_slice2 += \":,\"\n cur_slice = cur_slice[:-1] + \"]\"\n old_slice1 = old_slice1[:-1] + \"]\"\n old_slice2 = old_slice2[:-1] + \"]\"\n old_data1 = eval(\"spectrum._data\"+old_slice1)\n unscaled_sum1 = float(old_data1.sum())\n old_data2 = eval(\"spectrum._data\"+old_slice2)\n unscaled_sum2 = float(old_data2.sum())\n # Check to see if there is data to scale and counts is positive\n if unscaled_sum1 <= 0. and unscaled_sum2 <= 0.:\n continue\n elif unscaled_sum1 <= 0.:\n fill_cmd = (\"scaled_spec._data\" + cur_slice + \"+= old_data2 * \"\n \"(y / unscaled_sum2)\")\n exec(fill_cmd)\n elif unscaled_sum2 <= 0.:\n fill_cmd = (\"scaled_spec._data\" + cur_slice + \"+= old_data1 * \"\n \"(y / unscaled_sum1)\")\n exec(fill_cmd)\n\n else:\n fill_cmd = (\"scaled_spec._data\" + cur_slice + \"+= old_data1 * \"\n \"scale1 * (y / unscaled_sum1) + old_data2 * \"\n \"scale2 * (y / unscaled_sum2)\")\n exec(fill_cmd)\n # renormalise to prescale number of counts\n scaled_spec._num_decays = scaled_spec.sum()\n scaled_spec.scale(prescale_sum)\n scaled_spec._num_decays = spectrum._num_decays\n return scaled_spec", "def normalize_scale(scale):\r\n return scale >= 1.0 and (1.0 / scale) or scale", "def quality(\n wavelength: Union[Quantity, ndarray],\n flux: Union[Quantity, ndarray],\n mask: Optional[ndarray] = None,\n **kwargs,\n) -> float:\n flux = flux * u.dimensionless_unscaled # Turn into Quantity if not already\n flux = flux / flux.unit # Remove units from flux (sqrt(N_e) is unitless)\n\n wis = sqrt_sum_wis(wavelength, flux, mask=mask, **kwargs)\n q = wis / np.sqrt(np.nansum(flux))\n return q.value" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a dictionary of the triangular numbers associated with the Zernike pyramid: for each row, the cumulative number of Zernike polynomials up to and including that row.
def triangular_numbers(N_levels):
    zernike_rows = list(np.arange(1, N_levels + 1))
    triangular = {}
    for i, zernike_per_row in enumerate(zernike_rows):
        total = np.sum(zernike_rows[:i + 1])
        triangular[zernike_per_row] = total
    return triangular
[ "def _get_tri_dict(self):\n tri_dict = dict(\n vertices=np.concatenate([self.contour.vertices] + [hole.vertices for hole in self.holes]),\n segments=list(self._segment_pairs())\n )\n if self.holes:\n tri_dict['holes'] = np.array([hole.interior_point for hole in self.holes])\n return tri_dict", "def create_triad_counts():\n triads = [str(i) + str(j) + str(k) for i in range(2) for j in range(2) for k in range(2)]\n triad_counts = {}\n\n for triad in triads:\n triad_counts[triad] = [0, 0]\n\n return triad_counts", "def _make_limb_dict():\n\n return {'left_arm_y': 10, 'right_arm_y': 13,\n 'left_arm_z': 11, 'right_arm_z': 14,\n 'left_leg_y': 4, 'right_leg_y': 7,\n 'left_leg_z': 5, 'right_leg_z': 8,\n 'hip_y': 2, 'hip_x': 1}", "def Pauli2_gates_table(self):\r\n pauli2 = {}\r\n for i in range(16):\r\n circ = self.Pauli2_gates(i)\r\n key = self.Pauli_from_gates(2, circ).index()\r\n pauli2[key] = circ\r\n return pauli2", "def create_dictionary():\n d = {}\n for y in range(HEIGHT):\n if (y % 2) != 0:\n pos = (10*y)+10\n else:\n pos =((10*y)-9)+10 \n for x in range(WIDTH):\n xy_tuple = (x,y)\n d[pos] = xy_tuple\n if (y % 2) != 0:\n pos = pos - 1\n else:\n pos = pos + 1\n \n return d", "def make_zernike_indexes(self):\n zernike_n_m = []\n for n in range(10):\n for m in range(n+1):\n if (m+n) & 1 == 0:\n zernike_n_m.append((n,m))\n return np.array(zernike_n_m)", "def make_pt_2_neighbors(tri):\n pt_dict=dict()\n for vlist in tri.vertices:\n for i in vlist:\n if not i in pt_dict:\n pt_dict[i]=list()\n for k in vlist:\n if k != i:\n pt_dict[i].insert(0,k)\n for i in range(tri.points.shape[0]):\n pt_dict[i]=np.unique(pt_dict[i]).tolist()\n return pt_dict", "def test_get_triangle_dict_all_int(self):\n triangle = {'a': 1, 'b': 2, 'c': 3}\n result = get_triangle_type(triangle)\n self.assertEqual(result, 'scalene')", "def Pauli1_gates_table(self):\r\n pauli1 = {}\r\n for i in range(4):\r\n circ = self.Pauli1_gates(i)\r\n key = self.Pauli_from_gates(1, circ).index()\r\n pauli1[key] = circ\r\n return pauli1", "def _create_pyramid(self,):\n pyramid = [1 for _ in range(len(self.colonies))]\n\n #generate pyramid [1,1,1,1,2,3,4,5,6,7]\n i = 0\n workers_remaining=len(self.workers)-len(self.colonies)\n while workers_remaining>0:\n if i+1<workers_remaining:\n pyramid[i] += i+1\n workers_remaining -= i+1\n else:\n pyramid[i]+=workers_remaining\n workers_remaining=0\n i+=1\n\n pyramid = sorted(pyramid, reverse=True)\n return pyramid", "def traps(self):\n return self.trapezoids.trap_list()", "def dimension_homology_sc(self):\r\n vec_dic = {}\r\n for k in range(self.dimension()+1):\r\n p = k \r\n A = self.matrix_simmetric_representate(p)\r\n dn = 0\r\n dc = 0\r\n if (p == 0):\r\n dn = A.shape[1]\r\n if (p > 0 and (p <= self.dimension())):\r\n null = null_space(A)\r\n if (null.size != 0):\r\n dn = len(null[0])\r\n if (all(elem == 0 for elem in null[0])):\r\n dn = 0 \r\n p = k + 1\r\n if (p>0 and (p <= self.dimension())):\r\n A1=self.matrix_simmetric_representate(p)\r\n col = orth(A1)\r\n if (col.size != 0):\r\n dc = len(col[0])\r\n else: \r\n dc = 0\r\n vec_dic[k] = dn - dc\r\n return vec_dic", "def get_all_potential_edges(self) -> Dict[str,\n Tuple[int, int, int, int]]:\n orig_rows = self.tile_rows\n\n ret = dict()\n\n for i in range(0, 4):\n self.rotate_right(i)\n for j in range(0, 2):\n self.flip_l_r(j)\n for k in range(0, 2):\n self.flip_t_b(k)\n edges = self.get_current_edges()\n if edges not in ret.values():\n ret[f'rr{i}_lr{j}_tb{k}'] = edges\n\n self.tile_rows = orig_rows\n\n for j in range(0, 2):\n 
self.flip_l_r(j)\n for i in range(0, 4):\n self.rotate_right(i)\n for k in range(0, 2):\n self.flip_t_b(k)\n edges = self.get_current_edges()\n if edges not in ret.values():\n ret[f'lr{j}_rr{i}_tb{k}'] = edges\n\n self.tile_rows = orig_rows\n\n for j in range(0, 2):\n self.flip_l_r(j)\n for k in range(0, 2):\n self.flip_t_b(k)\n for i in range(0, 4):\n self.rotate_right(i)\n edges = self.get_current_edges()\n if edges not in ret.values():\n ret[f'lr{j}_tb{k}_rr{i}'] = edges\n\n self.tile_rows = orig_rows\n\n for k in range(0, 2):\n self.flip_t_b(k)\n for j in range(0, 2):\n self.flip_l_r(j)\n for i in range(0, 4):\n self.rotate_right(i)\n edges = self.get_current_edges()\n if edges not in ret.values():\n ret[f'tb{k}_lr{j}_rr{i}'] = edges\n\n self.tile_rows = orig_rows\n\n for k in range(0, 2):\n self.flip_t_b(k)\n for i in range(0, 4):\n self.rotate_right(i)\n for j in range(0, 2):\n self.flip_l_r(j)\n edges = self.get_current_edges()\n if edges not in ret.values():\n ret[f'tb{k}_rr{i}_lr{j}'] = edges\n\n self.tile_rows = orig_rows\n\n for i in range(0, 4):\n self.rotate_right(i)\n for k in range(0, 2):\n self.flip_t_b(k)\n for j in range(0, 2):\n self.flip_l_r(j)\n edges = self.get_current_edges()\n if edges not in ret.values():\n ret[f'rr{i}_tb{k}_lr{j}'] = edges\n\n self.tile_rows = orig_rows\n\n return ret", "def trios(self):\n return self._trios", "def triads_by_type(G):\n # num_triads = o * (o - 1) * (o - 2) // 6\n # if num_triads > TRIAD_LIMIT: print(WARNING)\n all_tri = all_triads(G)\n tri_by_type = defaultdict(list)\n for triad in all_tri:\n name = triad_type(triad)\n tri_by_type[name].append(triad)\n return tri_by_type", "def tripTrian(G):\r\n\tn = len(G)\r\n\ttrip = set()\r\n\ttrian = set()\r\n\tfor u in range(n):\r\n\t\tfor v in G[u]:\r\n\t\t\tfor w in G[v]:\r\n\t\t\t\tif v !=u and v!=w and u != w:\r\n\t\t\t\t\ttrip.add((u,v,w))\r\n\t\t\t\t\ts = getTrip((u,v,w))\r\n\t\t\t\t\tif u in G[w]:\r\n\t\t\t\t\t\ttrip.add((u,v,w))\r\n\t\t\t\t\t\ttrian.update(s)\r\n\t\t\t\t\t\ttrip.update(s)\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\ttrip.update(s)\r\n\r\n\treturn (len(trian)/6,len(trip)/6)", "def Diagonali(self):\r\n V = self.Vertici + self.Vertici # Accodare la lista per poter avere un circolo chiuso dei vertici\r\n Diag = {} # Database vuoto dove raccogliere le diagonali\r\n n_d = (self.Num_lati - 1) - 2 # Numero diagonali per punto\r\n for e in range(self.Num_lati):\r\n for l in range(e + 2, e + 2 + n_d):\r\n d = Segmento(V[e], V[l]) # Segmento della diagonale\r\n # Nome della diagonale da usare come chiave del database\r\n if l >= self.Num_lati:\r\n a = l - self.Num_lati\r\n else:\r\n a = l\r\n name = str(e) + str(a)\r\n Diag[name] = (d)\r\n return (Diag)", "def Zlevel(self):\n \n self.Zlevel = {}\n for i in range(len(self.networks)):\n self.Zlevel[self.networks[i].name] = i*50", "def testh1pyramid(self):\n np.set_printoptions(precision = 4, linewidth=200)\n for k in range(1,6):\n degrees = H1Elements(k)\n points = numpy.array([[0,0,0],[0,1,0],[1,1,0],[1,0,0],[1,1,1]])\n cs = []\n for p in points: cs.append(degrees.vertex(p[np.newaxis,:]))\n for e in [[0,1],[1,2],[2,3],[3,0],[0,4],[1,4],[2,4],[3,4]]: cs.append(degrees.edge(points[e]))\n for t in [[0,1,4],[1,2,4],[2,3,4],[3,0,4]]: cs.append(degrees.triangle(points[t]))\n cs.append(degrees.quad(points[[0,3,1]]))\n elt = degrees.pyramid(points, cs)\n \n# map = buildaffine(points[[0,1,3,4]], points[[0,1,3,4]]) \n# elt = Element(rform, cs, mapbasedpullback(map))\n \n np.set_printoptions(precision = 4, suppress=True, linewidth=200)\n dofvals = 
numpy.concatenate([c.evaluatedofs(elt.values) for c in cs if c is not None], axis=0)\n ndof, nfn = dofvals.shape\n# print k,ndof,nfn\n numpy.testing.assert_array_almost_equal(dofvals, numpy.hstack((numpy.eye(ndof), numpy.zeros((ndof,nfn - ndof )))))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes the (Xc, Yc) coordinates of actuator centres inside a circle of radius rho_aper, assuming there are N_actuators along the [-1, 1] line.
def actuator_centres(N_actuators, rho_aper=RHO_APER, rho_obsc=RHO_OBSC):
    x0 = np.linspace(-1., 1., N_actuators, endpoint=True)
    delta = x0[1] - x0[0]
    N_in_D = 2 * rho_aper / delta   # actuators across the pupil diameter
    print('%.2f actuators in D' % N_in_D)
    max_freq = N_in_D / 2           # Max spatial frequency we can sense
    xx, yy = np.meshgrid(x0, x0)
    x_f = xx.flatten()
    y_f = yy.flatten()
    act = []   # keep only centres inside the annular pupil
    for x_c, y_c in zip(x_f, y_f):
        r = np.sqrt(x_c ** 2 + y_c ** 2)
        if r < 0.97 * rho_aper and r > 1.05 * rho_obsc:
            act.append([x_c, y_c])
    total_act = len(act)
    print('Total Actuators: ', total_act)
    return [act, delta], max_freq
[ "def create_actuator_centres(self, N_actuators, radial=True):\n wave_range = self.slicer_model.wave_range\n wave_ratios = self.slicer_model.waves_ratio\n self.rho_aper = rho_spaxel_scale(self.slicer_model.spaxel_scale, wavelength=wave_range[0])\n self.rho_obsc = CENTRAL_OBS * self.rho_aper\n\n centres = []\n for wave in wave_ratios:\n x0 = np.linspace(-self.rho_aper / wave, self.rho_aper / wave, N_actuators, endpoint=True)\n delta = x0[1] - x0[0]\n xx, yy = np.meshgrid(x0, x0)\n x_f = xx.flatten()\n y_f = yy.flatten()\n\n act = [] # List of actuator centres (Xc, Yc)\n for x_c, y_c in zip(x_f, y_f):\n r = np.sqrt(x_c ** 2 + y_c ** 2)\n # Leave some margin close to the boundary\n if r < (self.rho_aper / wave - delta / 2) and r > (self.rho_obsc / wave + delta / 2):\n act.append([x_c, y_c])\n\n if radial: # Add actuators at the boundaries, keeping a constant angular distance\n for r in [self.rho_aper / wave, self.rho_obsc / wave]:\n N_radial = int(np.floor(2 * np.pi * r / delta))\n d_theta = 2 * np.pi / N_radial\n theta = np.linspace(0, 2 * np.pi - d_theta, N_radial)\n # Super important to do 2Pi - d_theta to avoid placing 2 actuators in the same spot... Degeneracy\n for t in theta:\n act.append([r * np.cos(t), r * np.sin(t)])\n\n centres.append([act, delta])\n return centres", "def center_of_charge(self):\n ret = [0.0, 0.0, 0.0]\n total_c = 0.0\n\n for at in range(self.natom()):\n c = self.charge(at)\n ret = add(ret, scale(self.xyz(at), c))\n total_c += c\n\n ret = scale(ret, 1.0 / total_c)\n return ret", "def circle_center(self):\n return self.container.width / 2, self.container.height / 2", "def _initial_positions_cylinder(n_walkers, radius, R):\n positions = np.zeros((n_walkers, 3))\n positions[:, 1:3] = _fill_circle(n_walkers, radius)\n positions = np.matmul(R, positions.T).T\n return positions", "def get_arc_radius(rad, x_center):\n return [ (x_center+rad*np.cos(a),\n conf[\"ARC_BASE_Y\"]+rad*np.sin(a)) for a in angs ]", "def ellipse_center(self, a):\r\n b, c, d, f, g, a = a[1] / 2, a[2], a[3] / 2, a[4] / 2, a[5], a[0]\r\n num = b * b - a * c\r\n x0 = (c * d - b * f) / num\r\n y0 = (a * f - b * d) / num\r\n return np.array([x0, y0])", "def _circumcircle(self):\n ax, ay = self.p1\n bx, by = self.p2\n cx, cy = self.p3\n d = (ax * (by - cy) + bx * (cy - ay) + cx * (ay - by)) * 2\n x = ((ax**2 + ay**2)*(by - cy) +\n (bx**2 + by**2)*(cy - ay) +\n (cx**2 + cy**2)*(ay - by)) / d\n y = ((ay**2 + ax**2)*(cx - bx) +\n (by**2 + bx**2)*(ax - cx) +\n (cy**2 + cx**2)*(bx - ax)) / d\n r = math.sqrt((x - ax)**2 + (y - ay)**2)\n return ((x, y), r)", "def dcircle(p, xc, yc, r):\n return np.sqrt(((p - np.array([xc, yc])) ** 2).sum(-1)) - r", "def _GetArcCoords(radians):\r\n return [c[0] + r*math.cos(radians), c[1] + r*math.sin(radians)]", "def centroid(cnt):\n m = cv2.moments(cnt)\n cx = int(m['m10'] / m['m00'])\n cy = int(m['m01'] / m['m00'])\n return cx, cy", "def get_circle(a, b, c):\n vec = [a[0]**2 + a[1]**2, b[0]**2 + b[1]**2, c[0]**2 + c[1]**2]\n x_mat = [vec, [a[1], b[1], c[1]], [1]*3]\n y_mat = [vec, [a[0], b[0], c[0]], [1]*3]\n d_mat = [[a[0], b[0], c[0]], [a[1], b[1], c[1]], [1] * 3]\n d = 2 * det(d_mat)\n x = 1 / d * det(x_mat)\n y = -1 / d * det(y_mat)\n center = [x, y]\n #r = norm(center - a)\n r = norm([center[0]-a[0], center[1]-a[1]])\n return center, r", "def plotCentroid(img, cnt, radius = 3, color=(255, 255, 0)):\n\tcx, cy = centroid(cnt)\n\tdrawCircle(img, (cx, cy), radius = radius, color = color)\n\treturn (cx, cy)", "def circle_positions(center, radius):\n x_center, y_center = 
center\n return [(x, y)\n for x in range(x_center - radius, x_center + radius + 1)\n for y in range(y_center - radius, y_center + radius + 1)\n if distance((x, y), center) <= radius]", "def get_circle_coords(self, radius, divider, count,center_x, center_y):\n\n angle_deg = (360/divider)*count\n angle = radians(angle_deg-(90 + (360/divider)))\n x = radius*cos(angle) + center_x;\n y = radius*sin(angle) + center_y;\n return (int(x), int(y))", "def compute_tracking_cen_coords(self):\n if (self.hdulist[2].header['EXTNAME'] != 'ANTPOSGR'):\n raise Exception(\"Method only applies for ANTENNA FITS file\") \n self.obtain_positional_data() \n self.compute_pointing_model()\n self.crossEl = ((self.obsc_az - self.mnt_az - self.pnt_az) * 60.) * numpy.cos(numpy.radians(self.mnt_el))\n self.El = (self.obsc_el - self.mnt_el - self.pnt_el)*60.\n return self.crossEl, self.El", "def get_center_ball(self, output):\n output = output.reshape((360, 640))\n\n # cv2 image must be numpy.uint8, convert numpy.int64 to numpy.uint8\n output = output.astype(np.uint8)\n\n # reshape the image size as original input image\n heatmap = cv2.resize(output, (640, 360))\n\n # heatmap is converted into a binary image by threshold method.\n ret, heatmap = cv2.threshold(heatmap, 127, 255, cv2.THRESH_BINARY)\n\n # find the circle in image with 2<=radius<=7\n circles = cv2.HoughCircles(heatmap, cv2.HOUGH_GRADIENT, dp=1, minDist=1, param1=50, param2=2, minRadius=2,\n maxRadius=7)\n # check if there have any tennis be detected\n if circles is not None:\n # if only one tennis be detected\n if len(circles) == 1:\n x = int(circles[0][0][0])\n y = int(circles[0][0][1])\n\n return x, y\n return None, None", "def _compute_ball_visualization(self, center, radius, angle):\r\n x_coord = [center[0]]\r\n y_coord = [center[1]]\r\n\r\n angles = np.linspace(angle, angle + 2 * np.pi, 100)\r\n\r\n x_coord.extend([center[0] - radius * np.sin(a) for a in angles])\r\n y_coord.extend([center[1] + radius * np.cos(a) for a in angles])\r\n\r\n return [x_coord, y_coord]", "def get_coordinates_around_center(self, angle, distance):\n center = self.center_point\n point = Point(center.x+distance, center.y) # initial point to rotate\n x = center.x + cos(angle) * (point.x - center.x) - sin(angle) * (point.y - center.y)\n y = center.x + sin(angle) * (point.x - center.x) + cos(angle) * (point.y - center.y)\n return x, y", "def pos_on_semicircle(x, r, cxy):\n pos = np.sqrt(r ** 2 - (x - cxy[0]) ** 2) + cxy[1]\n\n return pos" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the PEAK of the PSF without aberrations so that we can normalize everything by it
def peak_PSF(self):
    im, strehl = self.compute_PSF(np.zeros(self.N_act))
    return strehl
[ "def generatePSF(self):\n\n PSF = dict()\n # Load atmosphere and instrument PSF \n if self.information['PSF']['total']['method'] == 'compute': \n for keyword in self.information['PSF']:\n if keyword != \"total\":\n if \"file\" not in self.information['PSF'][keyword]:\n if self.information['PSF'][keyword]['type'] == 'moffat':\n if 'beta' in self.information['PSF'][keyword]:\n beta=self.information['PSF'][keyword]['beta']\n else:\n beta=2\n else:\n beta=2\n if 'seeing' in self.information['PSF'][keyword]:\n seeing=self.information['PSF'][keyword]['seeing']\n else:\n seeing=self.config['seeing']\n\n # If PSF size bigger than image --> Limit PSF size to image size\n if self.information['PSF'][keyword]['size'][0] > self.information['xsize']:\n self.information['PSF'][keyword]['size'][0] = self.information['xsize']\n print ('PSF size along x axis bigger than image size!\\nPSF size limited to image size along x axis now: %d Pixels' % (self.information['xsize']))\n\n if self.information['PSF'][keyword]['size'][1] > self.information['ysize']:\n self.information['PSF'][keyword]['size'][1] = self.information['ysize']\n print ('PSF size along y axis bigger than image size!\\nPSF size limited to image size along y axis now: %d Pixels' % (self.information['ysize']))\n\n PSFUtils.createPSF(filename=self.path+'/data/psf/'+self.information['PSF'][keyword]['output'],PSF_type=self.information['PSF'][keyword]['type'],imsize=self.information['PSF'][keyword]['size'],pixel_size=[self.config['xPixSize'],self.config['yPixSize']],pixel_scale=self.config['pixelScale_X'],eff_wvl=self.config['eff_wvl'],seeing=seeing,DM1=self.config['D_M1'],DM2=self.config['D_M2'],focal_length=self.config['Focal_length'],oversamp=self.config['psfoversampling'],beta=beta,disp=False,unsigned16bit=False)\n \n PSF[keyword] = self.path+'/data/psf/'+self.information['PSF'][keyword]['output']\n\n else:\n # Check pixel size and oversample if needed\n hdr_ = fits.getheader(self.path+'/data/psf/'+self.information['PSF'][keyword]['file']+'.fits')\n try: \n if hdr_['XPIXELSZ'] != self.information['cameras'][self.information['channel']]['Photocell_SizeX'] / oversamp or hdr_['YPIXELSZ'] != self.information['cameras'][self.information['channel']]['Photocell_SizeY'] / oversamp :\n resampling=[self.information['cameras'][self.information['channel']]['Photocell_SizeX'] / oversamp,self.information['cameras'][self.information['channel']]['Photocell_SizeY'] / oversamp]\n\n PSFUtils.resize(filename1=self.path+'/data/psf/'+self.information['PSF']['keyword']['file'],filename2=self.path+self.information['PSF']['keyword']['file']+'_oversammpled',type='factor',resampling=resampling,overwrite=True,unsigned16bit=False)\n\n PSF[keyword] = self.path+'/data/psf/'+self.information['PSF'][keyword]['file']+'_oversampled'\n else:\n PSF[keyword] = self.path+'/data/psf/'+self.information['PSF'][keyword]['file']\n except:\n PSF[keyword] = self.path+'/data/psf/'+self.information['PSF'][keyword]['file']\n print ('PSF convolution')\n # convolve atmosphere and instrument PSF to get the total PSF\n PSFUtils.convolvePSF(filename1=PSF['atmosphere'],filename2=PSF['instrument'],filename3=self.path+'/data/psf/'+self.information['PSF']['total']['file'])\n #PSFUtils.convolvePSF(filename1=PSF['instrument'],filename2=PSF['atmosphere'],filename3=self.path+self.information['PSF']['total']['output']+'_oversampled')\n 
#PSFUtils.resize(filename1=self.path+self.information['PSF']['total']['output']+'_oversampled',filename2=self.path+self.information['PSF']['total']['output'],resampling=32/self.information['psfoversampling'],type='sum')\n #PSFUtils.resize(filename1=self.path+self.information['PSF']['total']['output']+'_oversampled',filename2=self.path+self.information['PSF']['total']['output'],resampling=self.information['psfoversampling']/32,type='zoom')\n print ('done')", "def pulp_smash():", "def testMeasureGoodPsf(self):\n # Apply varying PSF model to the exposure\n varyingPsf = self._computeVaryingPsf()\n exposure, catalog = self._runMeasurementTask(psf=varyingPsf)\n key = measBase.SdssShapeResultKey(catalog.schema[\"base_SdssShape\"])\n # First make sure we did indeed get a varying PSF model across the exposure\n psf = exposure.getPsf()\n # Compare truth PSF at positions of two point sources\n self.assertFloatsNotEqual(psf.computeShape(self.pointCentroid1).getIxx(),\n psf.computeShape(self.pointCentroid2).getIxx(), rtol=1E-1)\n self.assertFloatsNotEqual(psf.computeShape(self.pointCentroid1).getIyy(),\n psf.computeShape(self.pointCentroid2).getIyy(), rtol=1E-1)\n # Compare truth PSF at average position vs. truth PSF at extended source position\n self.assertFloatsNotEqual(psf.computeShape(self.extendedCentroid).getIxx(),\n psf.computeShape().getIxx(), rtol=1E-1)\n self.assertFloatsNotEqual(psf.computeShape(self.extendedCentroid).getIyy(),\n psf.computeShape().getIyy(), rtol=1E-1)\n # Now check the base_SdssShape_psf entries against the PSF truth values\n for record in catalog:\n psfTruth = psf.computeShape(afwGeom.Point2D(record.getX(), record.getY()))\n result = record.get(key)\n psfResult = key.getPsfShape(record)\n self._checkPsfShape(result, psfResult, psfTruth)", "def _computeVaryingPsf(self):\n kernelSize = 31\n sigma1 = 1.75\n sigma2 = 2.0*sigma1\n basisKernelList = []\n for sigma in (sigma1, sigma2):\n basisKernel = afwMath.AnalyticKernel(kernelSize, kernelSize,\n afwMath.GaussianFunction2D(sigma, sigma))\n basisImage = afwImage.ImageD(basisKernel.getDimensions())\n basisKernel.computeImage(basisImage, True)\n basisImage /= np.sum(basisImage.getArray())\n if sigma == sigma1:\n basisImage0 = basisImage\n else:\n basisImage -= basisImage0\n basisKernelList.append(afwMath.FixedKernel(basisImage))\n\n order = 1\n spFunc = afwMath.PolynomialFunction2D(order)\n exactKernel = afwMath.LinearCombinationKernel(basisKernelList, spFunc)\n exactKernel.setSpatialParameters([[1.0, 0, 0], [0.0, 0.5E-2, 0.2E-2]])\n exactPsf = measAlg.PcaPsf(exactKernel)\n\n return exactPsf", "def prob_of_transmission_within_household(Ph, Pt, Pf):\n return 1 - ((1 - Ph) * (1 - Pt) * (1 - Pf))", "def calc_fidelity(inimg,refimg,pbimg='',psfimg='',fudge_factor=1.0,scale_factor=1.0,pb_thresh=0.25,clean_up=True,outfile=''):\n\n ia=iatool()\n\n ia.open(inimg)\n # average over the stokes axis to get it down to 3 axes which is what our other one has\n imvals=np.squeeze(ia.getchunk()) * scale_factor\n img_cs = ia.coordsys()\n # how to trim the freq axis--\n #img_shape = (ia.shape())[0:3]\n img_shape = ia.shape()\n ia.close()\n # get beam info\n hdr = imhead(imagename=inimg,mode='summary')\n bmaj_str = str(hdr['restoringbeam']['major']['value'] * fudge_factor)+hdr['restoringbeam']['major']['unit']\n bmin_str = str(hdr['restoringbeam']['minor']['value'] * fudge_factor)+hdr['restoringbeam']['minor']['unit']\n bpa_str = str(hdr['restoringbeam']['positionangle']['value'])+hdr['restoringbeam']['positionangle']['unit']\n\n # i should probably 
also be setting the beam * fudge_factor in the *header* of the input image\n\n if len(pbimg) > 0:\n ia.open(pbimg)\n pbvals=np.squeeze(ia.getchunk())\n pbvals /= np.max(pbvals)\n pbvals = np.where( pbvals < pb_thresh, 0.0, pbvals)\n #good_pb_ind=np.where( pbvals >= pb_thresh)\n #bad_pb_ind=np.where( pbvals < pb_thresh)\n #pbvals[good_pb_ind] = 1.0\n #if bad_pb_ind[0]:\n # pbvals[bad_pb_ind] = 0.0\n else:\n pbvals = imvals*0.0 + 1.0\n #good_pb_ind = np.where(pbvals)\n #bad_pb_ind = [np.array([])]\n\n ##\n\n ##############\n # open, smooth, and regrid reference image\n #\n\n smo_ref_img = refimg+'.TMP.smo'\n\n # if given a psf image, use that for the convolution. need to regrid onto input\n # model coordinate system first. this is mostly relevant for the single dish\n # if the beam isn't very gaussian (as is the case for alma sim tp)\n if len(psfimg) > 0:\n # consider testing and fixing the case the reference image isn't jy/pix\n ia.open(refimg)\n ref_cs=ia.coordsys()\n ref_shape=ia.shape()\n ia.close()\n ia.open(psfimg)\n psf_reg_im=ia.regrid(csys=ref_cs.torecord(),shape=ref_shape,outfile=psfimg+'.TMP.regrid',overwrite=True,axes=[0,1])\n psf_reg_im.done()\n ia.close()\n ia.open(refimg)\n # default of scale= -1.0 autoscales the PSF to have unit area, which preserves \"flux\" in units of the input map\n # scale=1.0 sets the PSF to have unit *peak*, which results in flux per beam in the output \n ref_convd_im=ia.convolve(outfile=smo_ref_img,kernel=psfimg+'.TMP.regrid',overwrite=True,scale=1.0)\n ref_convd_im.setbrightnessunit('Jy/beam')\n ref_convd_im.done()\n ia.close()\n if clean_up:\n rmtables(psfimg+'.TMP.regrid')\n else:\n # consider testing and fixing the case the reference image isn't jy/pix\n ia.open(refimg) \n im2=ia.convolve2d(outfile=smo_ref_img,axes=[0,1],major=bmaj_str,minor=bmin_str,pa=bpa_str,overwrite=True)\n im2.done()\n ia.close()\n\n smo_ref_img_regridded = smo_ref_img+'.TMP.regrid'\n ia.open(smo_ref_img)\n im2=ia.regrid(csys=img_cs.torecord(),shape=img_shape,outfile=smo_ref_img_regridded,overwrite=True,axes=[0,1])\n refvals=np.squeeze(im2.getchunk())\n im2.done()\n ia.close()\n\n ia.open(smo_ref_img_regridded)\n refvals=np.squeeze(ia.getchunk())\n ia.close()\n\n # set all pixels to zero where the PB is low - to avoid NaN's\n imvals = np.where(pbvals,imvals,0.0)\n refvals = np.where(pbvals,refvals,0.0)\n #if len(bad_pb_ind) > 0:\n #imvals[bad_pb_ind] = 0.0\n #refvals[bad_pb_ind] = 0.0\n\n deltas=(imvals-refvals).flatten()\n # put both image and model values in one array to calculate Beta for F_3- \n allvals = np.array( [np.abs(imvals.flatten()),np.abs(refvals.flatten())])\n # the max of (image_pix_i,model_pix_i), in one flat array of length nixels\n maxvals = allvals.max(axis=0)\n\n # carilli definition. 
rosero eq1\n f_eq1 = 1.0 - np.max(np.abs(deltas))/np.max(refvals)\n f_eq2 = 1.0 - (refvals.flatten() * np.abs(deltas)).sum() / (refvals * imvals).sum()\n f_eq2b = 1.0 - (refvals.flatten() * np.abs(deltas)).sum() / (refvals * refvals).sum()\n #f_eq3 = 1.0 - (maxvals[gi] * np.abs(deltas[gi])).sum() / (maxvals[gi] * maxvals[gi]).sum()\n f_eq3 = 1.0 - (pbvals.flatten() * maxvals * np.abs(deltas)).sum() / (pbvals.flatten() * maxvals * maxvals).sum()\n\n # if an output image was requested, and a pbimg was given; make one.\n if ((len(outfile)>0) & (len(pbimg)>0)):\n weightfile= 'mypbweight.TMP.im'\n rmtables(weightfile)\n immath(imagename=[pbimg],mode='evalexpr',expr='ceil(IM0/max(IM0) - '+str(pb_thresh)+')',outfile=weightfile)\n betafile = 'mybeta.TMP.im'\n rmtables(betafile)\n immath(imagename=[inimg,smo_ref_img_regridded],mode='evalexpr',expr='iif(abs(IM0) > abs(IM1),abs(IM0),abs(IM1))',outfile=betafile)\n # 19sep19 - change to the actual F_3 contrib ie put abs() back in\n rmtables(outfile)\n print(\" Writing fidelity error image: \"+outfile)\n immath(imagename=[inimg,smo_ref_img_regridded,weightfile,betafile],expr='IM3*IM2*abs(IM0-IM1)/sum(IM3*IM3*IM2)',outfile=outfile)\n # 19sep19 - add fractional error (rel to beta) to output\n rmtables(outfile+'.frac')\n print(\" Writing fractional error image: \"+outfile+'.frac')\n immath(imagename=[inimg,smo_ref_img_regridded,weightfile,betafile],expr='IM2*(IM0-IM1)/IM3',outfile=outfile+'.frac')\n if clean_up:\n rmtables(weightfile)\n rmtables(betafile)\n\n # pearson correlation coefficient evaluated above beta = 1% peak reference image\n gi=np.where( np.abs(maxvals) > 0.01 * np.abs(refvals.max()) )\n ii = imvals.flatten()\n mm = refvals.flatten()\n mm -= mm.min()\n # (x-mean(x)) * (y-mean(y)) / sigma_x / sigma_y\n cc = (ii[gi] - ii[gi].mean()) * (mm[gi] - mm[gi].mean()) / (np.std(ii[gi]) * np.std(mm[gi]))\n #cc = (ii[gi] - ii[gi].mean()) * (mm[gi] - mm[gi].mean()) / (np.std(mm[gi]))**2\n corco = cc.sum() / cc.shape[0]\n\n fa = np.abs(mm) / np.abs(mm - ii)\n fa_0p1 = np.median( fa[ (np.abs(ii) > 1e-3 * mm.max()) | (np.abs(mm) > 1e-3 * mm.max()) ])\n fa_1 = np.median( fa[ (np.abs(ii) > 1e-2 * mm.max()) | (np.abs(mm) > 1e-2 * mm.max()) ])\n fa_3 = np.median( fa[ (np.abs(ii) > 3e-2 * mm.max()) | (np.abs(mm) > 3e-2 * mm.max()) ])\n fa_10 = np.median( fa[ (np.abs(ii) > 1e-1 * mm.max()) | (np.abs(mm) > 1e-1 * mm.max()) ] )\n\n #gi2 = (np.abs(ii) > 1e-3 * mm.max()) | (np.abs(mm) > 1e-3 * mm.max()) \n\n print(\"*************************************\")\n print('image: ',inimg,'reference image:',refimg)\n print(\"Eq1 / Eq2 / Eq2b / Eq3 / corrCoeff \")\n print(f_eq1, f_eq2, f_eq2b, f_eq3,corco)\n print(' ALMA (A_0.1%, A_1%, A_3%, A_10%): ',fa_0p1,fa_1,fa_3,fa_10)\n print(\"*************************************\")\n\n fidelity_results = {'f1': f_eq1, 'f2': f_eq2, 'f2b': f_eq2b, 'f3': f_eq3, 'falma': [fa_0p1, fa_1, fa_3, fa_10]}\n\n if clean_up:\n rmtables(smo_ref_img)\n rmtables(smo_ref_img_regridded)\n\n return fidelity_results", "def f_UPPS_pc(v, P_0, r_f, d, s, T, wealth, phi, n_s, n_o, K):\n W_T = f_W_T_pc(v, P_0, r_f, d, s, T, wealth, phi, n_s, n_o, K)\n value = pow(W_T, -gamma) * f_W_T_to_P_T_pc(v, P_0, r_f, d, s, T, wealth, phi, n_s, n_o, K) * f_P_T_to_P_0(v, r_f, d, s, T)\n return value", "def _get_single_PSF(SCA, bandpass, SCA_pos, pupil_bin,\n n_waves, extra_aberrations, wavelength,\n pupil_plane_type, gsparams):\n from .. import OpticalPSF, ChromaticOpticalPSF\n from . 
import diameter\n from ..bandpass import Bandpass\n from .roman_bandpass import getBandpasses\n\n if wavelength is None:\n wave = zemax_wavelength\n elif isinstance(wavelength, Bandpass):\n wave = wavelength = wavelength.effective_wavelength\n else:\n wave = wavelength\n\n # All parameters relevant to the aperture. We may be able to use a cached version.\n aper = _make_aperture(SCA, pupil_plane_type, pupil_bin, wave, gsparams)\n\n # Start reading in the aberrations for that SCA\n aberrations, x_pos, y_pos = _read_aberrations(SCA)\n # Do bilinear interpolation, unless we're exactly at the center (default).\n use_aberrations = _interp_aberrations_bilinear(aberrations, x_pos, y_pos, SCA_pos)\n\n if extra_aberrations is not None:\n use_aberrations[:len(extra_aberrations)] += extra_aberrations\n # We don't want to use piston, tip, or tilt aberrations. The former doesn't affect the\n # appearance of the PSF, and the latter cause centroid shifts. So, we set the first 4\n # numbers (corresponding to a place-holder, piston, tip, and tilt) to zero.\n use_aberrations[0:4] = 0.\n\n # Now set up the PSF, including the option to interpolate over waves\n if wavelength is None:\n PSF = ChromaticOpticalPSF(lam=zemax_wavelength,\n diam=diameter, aberrations=use_aberrations,\n aper=aper, gsparams=gsparams)\n if n_waves is not None:\n # To decide the range of wavelengths to use, check the bandpass.\n bp_dict = getBandpasses()\n bp = bp_dict[bandpass]\n PSF = PSF.interpolate(waves=np.linspace(bp.blue_limit, bp.red_limit, n_waves),\n oversample_fac=1.5)\n else:\n tmp_aberrations = use_aberrations * zemax_wavelength / wavelength\n PSF = OpticalPSF(lam=wavelength, diam=diameter,\n aberrations=tmp_aberrations,\n aper=aper, gsparams=gsparams)\n\n return PSF", "def test_nuke_psfs():\n # Without multiprocessing\n mt.nuke_psfs(mprocessing=False)\n\n # With multiprocessing\n mt.nuke_psfs()", "def calculate_gmpe(src_keys, station, output_file, rrups, gmpe_group_name):\n gmpe_group = gmpe_config.GMPES[gmpe_group_name]\n origin = (src_keys['lon_top_center'], src_keys['lat_top_center'])\n dims = (src_keys['fault_length'], src_keys['dlen'],\n src_keys['fault_width'], src_keys['dwid'],\n src_keys['depth_to_top'])\n mech = (src_keys['strike'], src_keys['dip'], src_keys['rake'])\n\n # Station location\n site_geom = [float(station.lon), float(station.lat), 0.0]\n (fault_trace1, upper_seis_depth,\n lower_seis_depth, ave_dip,\n dummy1, dummy2) = putils.FaultTraceGen(origin, dims, mech)\n rjb, rrup, rx = putils.DistanceToSimpleFaultSurface(site_geom,\n fault_trace1,\n upper_seis_depth,\n lower_seis_depth,\n ave_dip)\n\n print \"station: %s, Rrup: %f\" % (station.scode, rrup)\n rrups.append(rrup)\n\n vs30 = 1000\n z10 = None # Let PyNGA calculate it\n z25 = None # Let PyNGA calculate it\n\n # Compute PSA for this stations\n station_median = []\n for period in gmpe_group[\"periods\"]:\n period_medians = []\n for nga_model in gmpe_group[\"models\"]:\n median = gmpe_config.calculate_gmpe(gmpe_group_name,\n nga_model,\n src_keys['magnitude'],\n rjb, vs30,\n period,\n rake=src_keys['rake'],\n dip=src_keys['dip'],\n W=src_keys['fault_width'],\n Ztor=src_keys['depth_to_top'],\n Rrup=rrup, Rx=rx,\n Z10=z10, Z25=z25)\n period_medians.append(median)\n station_median.append((period, period_medians))\n\n # Create label\n file_label = \"\"\n for nga_model in gmpe_group[\"models\"]:\n file_label = \"%s %s\" % (file_label, nga_model)\n # Output data to file\n outfile = open(output_file, 'w')\n outfile.write(\"#station: %s\\n\" % 
(station.scode))\n outfile.write(\"#period%s\\n\" % (file_label))\n for item in station_median:\n period = item[0]\n vals = item[1]\n out_str = \"%.4f\" % (period)\n for method in vals:\n out_str = out_str + \"\\t%.6f\" % (method)\n outfile.write(\"%s\\n\" % (out_str))\n outfile.close()\n\n # Return list\n return station_median", "def testFitProfile(self):\n image = self.psf.computeKernelImage()\n msf = self.Algorithm.initializeResult(self.ctrl)\n self.Algorithm.fitMoments(msf, self.ctrl, image)\n prev = lsst.shapelet.MultiShapeletFunction(msf)\n self.Algorithm.fitProfile(msf, self.ctrl, image)\n return msf\n\n def getEllipticity(m, c):\n s = lsst.afw.geom.ellipses.SeparableDistortionDeterminantRadius(\n m.getComponents()[c].getEllipse().getCore()\n )\n return numpy.array([s.getE1(), s.getE2()])\n self.assertFloatsAlmostEqual(getEllipticity(prev, 0), getEllipticity(msf, 0), rtol=1E-13)\n self.assertFloatsAlmostEqual(getEllipticity(prev, 1), getEllipticity(msf, 1), rtol=1E-13)\n\n def computeChiSq(m):\n data, model = self.makeImages(m)\n return numpy.sum((data.getArray() - model.getArray())**2)\n bestChiSq = computeChiSq(msf)\n self.assertLessEqual(bestChiSq, computeChiSq(prev))\n step = 1E-4\n for component in msf.getComponents():\n # 0th-order amplitude perturbation\n original = component.getCoefficients()[0]\n component.getCoefficients()[0] = original + step\n self.assertLessEqual(bestChiSq, computeChiSq(msf))\n component.getCoefficients()[0] = original - step\n self.assertLessEqual(bestChiSq, computeChiSq(msf))\n component.getCoefficients()[0] = original\n # Radius perturbation\n original = component.getEllipse()\n component.getEllipse().getCore().scale(1.0 + step)\n self.assertLessEqual(bestChiSq, computeChiSq(msf))\n component.setEllipse(original)\n component.getEllipse().getCore().scale(1.0 - step)\n self.assertLessEqual(bestChiSq, computeChiSq(msf))\n component.setEllipse(original)\n return msf", "def compute_PSSM_self_information(p):\n return -sp.sum(p*sp.log(p))", "def _compute_mpe_state(self, counts):", "def ppf(self, q):\n # A shortcut to the frozen distribution ppf as provided by scipy.\n \n return self.distr.ppf(q)", "def calc_prob_local(self, *args):\n return 0", "def getpval(teststat, statlist):\n \n propzero = 0\n bootvals = []\n for val in statlist:\n if val == 0:\n propzero += 1\n else:\n bootvals.append(val)\n \n propzero = float(propzero) / len(statlist)\n \n shapeinit = getstartingshape(statlist)\n \n shape = optimiselike(getlikeweibull, bootvals, shapeinit)\n scale = (sum(bootvals) / len(bootvals)) / scipy.special.gamma(1 + 1/shape)\n \n pvalue = math.exp(- (teststat/scale) ** shape)\n \n return pvalue * (1 - propzero)", "def do_pnp(pts3d_for_pnp, pts2d_for_pnp, K, iterations=200, reprojThresh=5):\n list_pts3d_for_pnp = pts3d_for_pnp\n list_pts2d_for_pnp = pts2d_for_pnp\n pts3d_for_pnp = np.array(pts3d_for_pnp)\n # pts2d_for_pnp = np.expand_dims(np.squeeze(np.array(pts2d_for_pnp)), axis=1)\n # print(pts3d_for_pnp)\n # print(pts2d_for_pnp.shape)\n num_pts = len(pts3d_for_pnp)\n print(num_pts)\n highest_inliers = 0\n for j in range(iterations):\n pt_idxs = np.random.choice(num_pts, 6, replace=False)\n pts3 = np.array([pts3d_for_pnp[pt_idxs[i]] for i in range(len(pt_idxs))])\n # print(\"pts\",pts3)\n pts2 = np.array([pts2d_for_pnp[pt_idxs[i]] for i in range(len(pt_idxs))])\n _, rvec, tvec = cv2.solvePnP(pts3, pts2, K, distCoeffs=np.array([]), flags=cv2.SOLVEPNP_ITERATIVE)\n R, _ = cv2.Rodrigues(rvec)\n pnp_errors, projpts, avg_err, perc_inliers = 
test_reproj_pnp_points(list_pts3d_for_pnp, list_pts2d_for_pnp, R, tvec, K, rep_thresh=reprojThresh)\n if highest_inliers < perc_inliers:\n highest_inliers = perc_inliers\n best_R = R\n best_tvec = tvec\n R = best_R\n tvec = best_tvec\n # print('rvec:', rvec,'\\n\\ntvec:', tvec)\n print(\"avg\",avg_err)\n print(\"inlier\",perc_inliers)\n return R, tvec", "def posterior_predictive_pvals(self):\n pvals = {}\n for gene in self.ppc:\n z_true = self.sample[gene]\n z = st.laplace.rvs(*st.laplace.fit(self.ppc[gene]), size=100_000)\n # Rule of thumb: for 100,000 samples, report p-values to the thousands place\n # Add pseudocount for instances where outlier is more extreme than every other sample\n pvals[gene] = round((np.sum(z_true < z) + 1) / (len(z) + 1), 3)\n self.ppp = pd.DataFrame(pvals.items(), columns=[\"Gene\", \"Pval\"]).sort_values(\n \"Pval\"\n )\n self.ppp = self.ppp.set_index(\"Gene\", drop=True)", "def test_AFQ_pft():\n _, bids_path, sub_path = get_temp_hardi()\n\n bundle_names = [\"SLF\", \"ARC\", \"CST\", \"FP\"]\n\n f_pve_csf, f_pve_gm, f_pve_wm = get_fnames('stanford_pve_maps')\n os.rename(f_pve_wm, op.join(sub_path, \"sub-01_ses-01_WMprobseg.nii.gz\"))\n os.rename(f_pve_gm, op.join(sub_path, \"sub-01_ses-01_GMprobseg.nii.gz\"))\n os.rename(f_pve_csf, op.join(sub_path, \"sub-01_ses-01_CSFprobseg.nii.gz\"))\n\n stop_mask = PFTMask(\n MaskFile(\"WMprobseg\"),\n MaskFile(\"GMprobseg\"),\n MaskFile(\"CSFprobseg\"))\n\n my_afq = api.AFQ(\n bids_path,\n dmriprep='vistasoft',\n bundle_info=bundle_names,\n tracking_params={\n \"stop_mask\": stop_mask,\n \"stop_threshold\": \"CMC\",\n \"tracker\": \"pft\"\n })\n my_afq.export_streamlines()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Plot an image of the PSF
def plot_PSF(self, coef, wave_idx):
    PSF, strehl = self.compute_PSF(coef, wave_idx)
    plt.figure()
    plt.imshow(PSF)
    plt.title('Strehl: %.3f' %strehl)
    plt.colorbar()
    plt.clim(vmin=0, vmax=1)
[ "def plot_prodata_psf(self,font_size=28,img_name='prodata_psf.pdf',img_id=0):\n rawimage = self.raw_image\n dataimage = self.data\n len_mask = self.lens_mask\n plu_mask_out = self.plu_mask\n\n fig, (ax1, ax2, ax3, ax4,ax5) = plt.subplots(1, 5, figsize=(19, 10))\n ax1.imshow((rawimage), origin='lower', cmap=\"gist_heat\")\n ax1.set_title('Original Image', fontsize=font_size)\n ax1.text(rawimage.shape[0] * 0.55, rawimage.shape[0] * 0.8, 'ID='+repr(img_id), size=12, color='white',\n weight=\"bold\")\n ax1.text(rawimage.shape[0] * 0.2, rawimage.shape[0] * 0.05, 'observation', size=20, color='white', weight=\"bold\")\n ax1.axis('off')\n #\n ax2.imshow((dataimage), origin='lower', cmap=\"gist_heat\")\n ax2.set_title('Image Data', fontsize=font_size)\n ax2.text(dataimage.shape[0] * 0.2, dataimage.shape[0] * 0.05, 'image data', size=20, color='white', weight=\"bold\")\n ax2.axis('off')\n #\n ax3.imshow(len_mask, origin='lower')\n ax3.set_title('Lens light', fontsize=font_size)\n ax3.axis('off')\n #\n ax4.imshow(plu_mask_out, origin='lower')\n ax4.set_title('Mask', fontsize=font_size)\n ax4.axis('off')\n#\n psf=self.psf\n ax5.imshow(np.log10(psf), origin='lower', cmap=\"gist_heat\")\n ax5.set_title('lg(PSF)', fontsize=font_size)\n ax5.axis('off')\n\n plt.show()\n fig.savefig(img_name)\n return 0", "def display_image(image):\n plt.figure()\n plt.imshow(image)\n plt.show()", "def plot_piv(base_f_name, piv_results):\n plt.plot(piv_results[:,0], piv_results[:,1], 'bs')\n plt.title('PIV results')\n plt.xlabel('Y position (pixel)')\n plt.ylabel('Displacement (pixel)')\n out_name = base_f_name + '.png'\n plt.savefig(out_name)\n print(\"Wrote file: {}\".format(out_name))", "def plot(self):\n self.fig = plt.figure('Black hole')\n self.fig.clf() #clear the graph to avoir superposing data from the same set (can be deactivated if need to superpose)\n self.ax = plt.subplot()\n\n if self.img2 is not None:\n self.ax.imshow(self.img2)\n else:\n print(\"No black hole deformation in the memory, displayed the original image instead.\")\n self.ax.imshow(self.img_debut)\n\n self.ax.set_title(\"scrool to zoom in or out \\nright click to add an offset in the background \\nleft click to refresh image \\n close the option windows to stop the program\")\n self.fig.canvas.mpl_connect('scroll_event', self.onscroll)\n self.fig.canvas.mpl_connect('button_press_event', self.onclick)\n self.fig.canvas.mpl_connect('axes_leave_event', self.disconnect)\n self.fig.canvas.mpl_connect('axes_enter_event', self.connect)\n\n self.draw()", "def Plot(self):\n\n ### Create the path names ###\n folder_string = self.params.folder+\"/plots/\"\n u_string = self.params.folder+\"/plots/u.pdf\"\n p_string = self.params.folder+\"/plots/p.pdf\"\n\n ### Check if folder exists ###\n if not os.path.exists(folder_string): os.makedirs(folder_string)\n\n ### Plot the x component of velocity ###\n plot(self.u_next[0],title=\"Velocity in the x Direction\")\n plt.savefig(u_string)\n plt.figure()\n\n ### Plot the pressure ###\n plot(self.p_next,title=\"Pressure\")\n plt.savefig(p_string)\n plt.show()", "def plot(imagePath):\r\n image = mpimg.imread(imagePath)\r\n flatImage = np.reshape(image, (image.shape[0] * image.shape[1], 3))\r\n fig = plt.figure()\r\n ax = fig.add_subplot(111, projection='3d')\r\n colors = [tuple(image[r, c,:] / 255) for r in range(image.shape[0]) for c in range(image.shape[1])]\r\n ax.scatter(\r\n flatImage[:,0],\r\n flatImage[:,1],\r\n flatImage[:,2], c=colors)\r\n ax.set_title(os.path.basename(imagePath), y=1.1)\r\n 
ax.set_xlabel('Red')\r\n ax.xaxis.label.set_color('red')\r\n ax.set_ylabel('Green')\r\n ax.yaxis.label.set_color('green')\r\n ax.set_zlabel('Blue')\r\n ax.zaxis.label.set_color('blue')\r\n\r\n imageaxis = fig.add_axes([-0.15, 0.7, 0.35, 0.3], anchor = \"NE\")\r\n # fig.figimage(im, fig.bbox.xmax - width, height)\r\n imageaxis.imshow(image)\r\n imageaxis.axis('off')\r\n\r\n plt.show()", "def display_image(X):\n\n\tim = X.reshape(28, 28)\n\ttemp = plt.imshow(im)\n\tplt.show()", "def plot_image(self, image_id):\n image = self.images[image_id]\n image = image.reshape(self.image_shape)\n plt.imshow(image)", "def psf(imaging, grid=None, positions=None, include=None, plotter=None):\n\n plotter.plot_array(\n array=imaging.psf, include_origin=include.origin, grid=grid, positions=positions\n )", "def plot(self):\n self.fig = plt.figure('black hole')\n self.fig.clf() #clear the graph to avoir superposing data from the same set (can be deactivated if need to superpose)\n self.ax = plt.subplot()\n\n if self.img2 is not None:\n self.ax.imshow(self.img2)\n else:\n print(\"No black hole deformation in the memory, displayed the original image instead.\")\n self.ax.imshow(self.img_debut)\n\n self.fig.canvas.set_window_title('Black hole')\n self.ax.set_title(\"scrool to zoom in or out \\nright click to add an offset in the background \\nleft click to refresh image \\n close the option windows to stop the program\")\n self.fig.canvas.mpl_connect('scroll_event', self.onscroll)\n self.fig.canvas.mpl_connect('button_press_event', self.onclick)\n self.fig.canvas.mpl_connect('axes_leave_event', self.disconnect)\n self.fig.canvas.mpl_connect('axes_enter_event', self.connect)\n\n self.draw()", "def display_ns_psf(image, vlim=(), fsize=(8, 8), interp='nearest', title='',\n cmap='gray', extent=None, savefile=None, cb=False):\n\n # Display PSF (oversampled and detector levels)\n fig, ax = plt.subplots(figsize=fsize)\n ax.set_title(title)\n ax.set_aspect('equal')\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_ylim(0.0, np.shape(image)[0])\n\n if vlim == ():\n vlim = (image.min(), image.max())\n \n if extent is not None: \n cax = ax.imshow(image, cmap=cmap, interpolation=interp, vmin=vlim[0], \\\n extent=extent-.5, vmax=vlim[1])\n else:\n cax = ax.imshow(image, cmap=cmap, interpolation=interp, vmin=vlim[0], vmax=vlim[1]) \n \n if cb: fig.colorbar(cax, ax=ax, shrink=0.8)\n\n # See plots when not in Notebook environment\n plt.show()\n\n if savefile is not None:\n fig.savefig(savefile)", "def draw_image(self):\n \n pixel_array = self.imageprepare(self.image_path)\n newArr = self.reshape_pixel_array(pixel_array)\n plt.imshow(newArr, interpolation='nearest')\n plt.savefig('MNIST_IMAGE.png')#save MNIST image\n plt.show()#Show / plot that image", "def plot_fr_and_spikes(self, t):\n plt.figure(figsize=(10, 8))\n\n plt.subplot(2, 2, 1)\n self.plot_base_image()\n\n plt.subplot(2, 2, 2)\n self.plot_firing_rates(t, mode='ON')\n plt.title('Retinal Image')\n\n # Spikes\n ax = plt.subplot(2, 2, 3)\n self.plot_spikes(ax, t, mode='ON', moving_average=True)\n\n ax = plt.subplot(2, 2, 4)\n self.plot_spikes(ax, t, mode='OFF', moving_average=True)", "def plot_image_results(res_image, dpc, unit='AU', log=False, p=0.5):\n image = res_image.image\n image_conv = res_image.image_conv\n\n image[image <= 0] = 0\n image_conv[image_conv <= 0] = 0\n\n im_rot = np.rot90(image)\n im_rot[im_rot <= 0] = 1e-20\n im_conv_rot = np.rot90(image_conv)\n im_conv_rot[im_conv_rot <= 0] = 1e-20\n\n # Extract the integrated flux\n tflux = image.sum()\n wl = 
res_image.wl\n\n fov_im = res_image.fov / 1000.\n\n # Convert to good spatial units\n if unit == \"AU\":\n xlabel, ylabel = \"X\", \"Y\"\n fact = dpc\n elif unit == \"cm\":\n fact = dpc * au\n xlabel = \"X\"\n ylabel = \"Y\"\n elif unit == \"mas\":\n xlabel = \"RA offset\"\n ylabel = \"DEC offset\"\n fact = 1000.\n else:\n unit = \"arcsec\"\n xlabel = \"RA offset\"\n ylabel = \"DEC offset\"\n fact = 1\n\n extent = np.array([fov_im / 2.0, -fov_im / 2.0, -\n fov_im / 2.0, fov_im / 2.0]) * fact\n\n vmax = image.max()\n vmin = vmax / 1.e4\n\n norm = PowerNorm(p)\n if log:\n norm = LogNorm()\n\n fig = plt.figure(figsize=(13, 6))\n plt.subplot(1, 2, 1)\n ax = plt.gca()\n ax.set_title(r\"Image ($\\lambda$ = %2.2f $\\mu m$), F$_{tot}$ = %2.4f Jy\" % (wl, tflux),\n fontsize=14,\n color=\"Navy\")\n im_cbar = ax.imshow(im_rot, norm=norm, cmap=\"afmhot\", vmin=vmin, vmax=vmax,\n origin=\"upper\", extent=extent)\n # divider = make_axes_locatable(ax)\n # cax = divider.append_axes(\"right\", size=\"3%\", pad=0.05)\n ax.set_xlabel(\"%s [%s]\" % (xlabel, unit), fontsize=12, color=\"gray\")\n ax.set_ylabel(\"%s [%s]\" % (ylabel, unit), fontsize=12, color=\"gray\")\n # clb = plt.colorbar(im_cbar, cax=cax)\n # clb.set_label('Flux density [Jy]', fontsize = 10, color = 'k')\n plt.subplot(1, 2, 2)\n ax = plt.gca()\n ax.set_title(\n r\"Image convolved ($\\theta$ = $\\lambda$/2B, B = %2.0f m)\" % (res_image.Bmax),\n fontsize=14,\n color=\"Navy\")\n im_cbar = ax.imshow(im_conv_rot, norm=norm, cmap=\"afmhot\", vmin=vmin, vmax=vmax,\n origin=\"upper\", extent=extent)\n divider = make_axes_locatable(ax)\n ax.set_yticks([])\n ax.set_xlabel(\"%s [%s]\" % (xlabel, unit), fontsize=12, color=\"gray\")\n cax = divider.append_axes(\"right\", size=\"3%\", pad=0.05)\n clb = plt.colorbar(im_cbar, cax=cax)\n clb.set_label(\"Flux density [Jy]\")\n plt.tight_layout()\n plt.show(block=False)\n return fig", "def plot_it(image_array: np.array) -> None:\n\tplt.rcParams[\"figure.figsize\"] = (20, 15)\n\n\tfig, ax = plt.subplots(1)\n\tax.imshow(image_array)\n\t# plt.savefig(\"./image_\"+str(image_counter)+\".png\")\n\tplt.show()\n\treturn", "def showImage(self, filename):\n image = misc.imread(filename)\n plt.imshow(image)\n plt.show()", "def plot_sample(x):\n plt.imshow(x[:,:,0])\n plt.title(\"gasf\")\n plt.colorbar()\n plt.show()\n\n plt.imshow(x[:,:,1])\n plt.title(\"gadf\")\n plt.colorbar()\n plt.show()\n\n plt.imshow(x[:,:,2])\n plt.title(\"mtf\")\n plt.colorbar()\n plt.show()", "def show_plot(img, title):\n plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n plt.title(\"Hand Number: \" + title)\n plt.show()", "def draw_image(ax, image):\n ax.imshow(image)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given an oversampled PSF (typically 0.5-1.0 mas spaxels), it calculates the Ensquared Energy of the central spaxel in a new_scale (4, 10, 20 mas). It selects a window of size new_scale and adds up the Intensity of those pixels
def ensquared_one_pix(array, pix_scale, new_scale=40, plot=True):
    n = int(new_scale // pix_scale)
    minPix, maxPix = (pix + 1 - n) // 2, (pix + 1 + n) // 2
    ens = array[minPix:maxPix, minPix:maxPix]
    # print(ens.shape)
    energy = np.sum(ens)
    if plot:
        mapp = 'viridis'
        f, (ax1, ax2) = plt.subplots(1, 2)
        ax1 = plt.subplot(1, 2, 1)
        square = Rectangle((minPix-0.5, minPix-0.5), n, n, linestyle='--', fill=None, color='white')
        ax1.add_patch(square)
        img1 = ax1.imshow(array, cmap=mapp)
        ax1.set_title('%.1f mas pixels' % (pix_scale))
        img1.set_clim(0, 1)
        plt.colorbar(img1, ax=ax1, orientation='horizontal')
        ax2 = plt.subplot(1, 2, 2)
        img2 = ax2.imshow(ens, cmap=mapp)
        ax2.set_title('%d mas window' %new_scale)
        img1.set_clim(0, 1)
        plt.colorbar(img2, ax=ax2, orientation='horizontal')
    return energy
[ "def superPSFResamplewInterp(subPSFName,xCen,yCen,dE=[0.0,0.0,0.0],subSampFactor=105,useMemory='n',useHeaderKernel=False,star=False):\r\n\r\n print xCen,yCen\r\n\r\n trueSubFactor=subSampFactor/5 ###start with 5x supersampled PSFs!\r\n halfSSF=subSampFactor/2 ###will be rounded. eg. 105/2 = 52\r\n\r\n if useMemory == 'n' or subPSFName not in psfsInMemory:\r\n print \"Getting \"+subPSFName+\" file from hd\"\r\n imhan=pyfits.open(subPSFName)\r\n data=imhan[0].data\r\n items=imhan[0].header.items()\r\n header=[]\r\n for iii in range(len(items)):\r\n try:header.append(items[iii][0]+' '+items[iii][1])\r\n except:pass\r\n imhan.close()\r\n\r\n psfsInMemory[subPSFName]=data*1.0\r\n psfHeadersInMemory[subPSFName]=header[:]\r\n else:\r\n data=psfsInMemory[subPSFName]\r\n header=psfHeadersInMemory[subPSFName]\r\n\r\n (Y,X)=data.shape\r\n xVals=num.linspace(0,X,X*trueSubFactor)\r\n yVals=num.linspace(0,Y,Y*trueSubFactor)\r\n\r\n resampData=data*0.0\r\n\r\n\r\n if star:\r\n starShiftsX=[]\r\n starShiftsY=[]\r\n for i in range(len(star)):\r\n starShiftsX.append(star[i][0])\r\n starShiftsY.append(star[i][1])\r\n else:\r\n starShiftsX=[0.]\r\n starShiftsY=[0.]\r\n\r\n\r\n for ss in range(len(starShiftsX)):\r\n sx=starShiftsX[ss]\r\n sy=starShiftsY[ss]\r\n\r\n offX=(xCen-int(xCen))*5.\r\n offY=(yCen-int(yCen))*5.\r\n\r\n print offX,offY,sx,sy\r\n\r\n xPoints=num.arange(len(data)) + offX + sx*5.\r\n yPoints=num.arange(len(data)) + offY + sy*5.\r\n\r\n f=interp.RectBivariateSpline(xPoints,yPoints,data)#,bbox=[-10*subSampFactor,(X+10)*subSampFactor,-10*subSampFactor,(Y+10)*subSampFactor])\r\n\r\n #break below into sub forloops because we don't have the memory to brute force it.\r\n for ii in range(trueSubFactor):\r\n xv=xVals[ii::trueSubFactor]\r\n for jj in range(trueSubFactor):\r\n yv=yVals[jj::trueSubFactor]\r\n a=f(xv,yv)\r\n a=num.clip(a,0.0,num.max(a))\r\n resampData+=a\r\n resampData/=num.sum(resampData)\r\n\r\n offX=int((xCen-int(xCen))*5)\r\n offY=int((yCen-int(yCen))*5)\r\n\r\n outData=num.zeros([Y/5,X/5],dtype=num.float32)\r\n for jjjjj in range(Y/5):\r\n for iiiii in range(X/5):\r\n xMin=max(0,iiiii*5-offX-2)\r\n xMax=min(X,iiiii*5-offX+3)\r\n yMin=max(0,jjjjj*5-offY-2)\r\n yMax=min(Y,jjjjj*5-offY+3)\r\n\r\n npix=(xMax-xMin)*(yMax-yMin)\r\n if npix<=0: continue\r\n outData[jjjjj][iiiii]+=num.sum(resampData[yMin:yMax,xMin:xMax])*25./npix\r\n #this should be commented out for speed!\r\n #if outData[jjjjj][iiiii]>1e14:\r\n # print data[yMin:yMax,xMin:xMax]\r\n # print xMin,xMax,yMin,yMax,npix,y,x,npix\r\n # sys.exit()\r\n \r\n #print num.max(outData)\r\n #print num.sum(outData)\r\n #sys.exit()\r\n #now convolve the entire thing with the global Kernel\r\n if useHeaderKernel or dE==[0.0,0.0,0.0]:\r\n kernel=[]\r\n for iiiii in range(len(header)-3,len(header)):\r\n #print header[iiiii]\r\n s=str(header[iiiii]).split()\r\n kernel.append([float(s[1]),float(s[2]),float(s[3])])\r\n kernel=num.array(kernel)\r\n print \"Using header kernel for CTE.\"\r\n else:\r\n kernel=GU.getKernel(dE[0],dE[1],dE[2])\r\n convData=convolve2d(outData,kernel)\r\n return convData", "def synaptic_scaling(self):\n self.W_ee.ss()\n self.W_ei.ss() # this was also found in the EM study\n if self.W_eu.c.has_key('eta_stdp') and self.W_eu.c.eta_stdp>0:\n self.W_eu.ss()", "def superPSFResample(subPSFName,xCen,yCen,dE=[0.0,0.0,0.0],subSampFactor=105,useMemory='n',useHeaderKernel=False,star=False):\r\n trueSubFactor=subSampFactor/5 ###start with 5x supersampled PSFs!\r\n halfSSF=subSampFactor/2 ###will be rounded. eg. 
105/2 = 52\r\n \r\n if useMemory == 'n' or subPSFName not in psfsInMemory:\r\n print \"Getting \"+subPSFName+\" file from hd\"\r\n imhan=pyfits.open(subPSFName)\r\n data=imhan[0].data\r\n items=imhan[0].header.items()\r\n header=[]\r\n for iii in range(len(items)):\r\n try:header.append(items[iii][0]+' '+items[iii][1])\r\n except:pass\r\n imhan.close()\r\n \r\n psfsInMemory[subPSFName]=data*1.0\r\n psfHeadersInMemory[subPSFName]=header[:]\r\n else:\r\n data=psfsInMemory[subPSFName]\r\n header=psfHeadersInMemory[subPSFName]\r\n \r\n \r\n #subsample the data\r\n data=num.repeat(num.repeat(data,trueSubFactor,axis=0),trueSubFactor,axis=1)\r\n (aa,bb)=data.shape\r\n\r\n \r\n #add multiple psfs to account for trailing.\r\n if star:\r\n starShiftsX=[]\r\n starShiftsY=[]\r\n for i in range(len(star)):\r\n starShiftsX.append(int(subSampFactor*star[i][0]))\r\n starShiftsY.append(int(subSampFactor*star[i][1]))\r\n else:\r\n starShiftsX=[0.]\r\n starShiftsY=[0.]\r\n \r\n \r\n if len(starShiftsX)>1:\r\n origData=data*1.0\r\n data*=0.0\r\n for i in range(len(starShiftsX)):\r\n if starShiftsY[i]>=0:\r\n ymin=starShiftsY[i]\r\n ymax=aa\r\n Ymin=0\r\n Ymax=aa-starShiftsY[i]\r\n else:\r\n ymin=0\r\n ymax=aa-abs(starShiftsY[i])\r\n Ymin=abs(starShiftsY[i])\r\n Ymax=aa\r\n if starShiftsX[i]>=0:\r\n xmin=starShiftsX[i]\r\n xmax=bb\r\n Xmin=0\r\n Xmax=bb-starShiftsX[i]\r\n else:\r\n xmin=0\r\n xmax=bb-abs(starShiftsX[i])\r\n Xmin=abs(starShiftsX[i])\r\n Xmax=bb\r\n \r\n #print ymin,ymax,xmin,xmax,Ymin,Ymax,Xmin,Xmax,starShiftsX,starShiftsY\r\n data[ymin:ymax,xmin:xmax]+=origData[Ymin:Ymax,Xmin:Xmax]\r\n data/=num.sum(data)\r\n\r\n\r\n #trim excess edge pixels to make sure we are commensurate \r\n #with the subSampFactor\r\n trim_y=aa%subSampFactor\r\n trim_x=bb%subSampFactor\r\n\r\n trim_bottom=int(round(float(trim_y)/2))\r\n trim_top=trim_y-trim_bottom\r\n trim_left=int(round(float(trim_x)/2))\r\n trim_right=trim_x-trim_left\r\n\r\n data=data[trim_bottom:aa-trim_top,trim_left:bb-trim_right]\r\n (aa,bb)=data.shape\r\n\r\n\r\n ####below will add a point pixel with value 1\r\n ####Just for testing purposes.\r\n #(max_i,max_j)=num.unravel_index(data.argmax(),data.shape)\r\n #data*=0.0\r\n #data[max_i-subSampFactor/2:max_i+subSampFactor/2+1,max_j-subSampFactor/2:max_j+subSampFactor/2+1]=1./(subSampFactor*subSampFactor)\r\n ####\r\n \r\n\r\n #now trim the edges to offset the subsampled array\r\n offX=int(round(divmod(xCen,1)[1]*subSampFactor))\r\n offY=int(round(divmod(yCen,1)[1]*subSampFactor))\r\n\r\n\r\n trim_left=subSampFactor-offX\r\n trim_right=offX\r\n trim_bottom=subSampFactor-offY\r\n trim_top=offY\r\n\r\n data=data[trim_bottom:aa-trim_top,trim_left:bb-trim_right]\r\n (aa,bb)=data.shape\r\n\r\n\r\n #now do the binning to get to the desired shape\r\n data_view=data.reshape(aa//subSampFactor, subSampFactor , bb//subSampFactor, subSampFactor)\r\n\r\n trimmed_data=num.sum(data_view,axis=(3,1))\r\n trimmed_data/=num.sum(trimmed_data)\r\n\r\n\r\n\r\n #now convolve the entire thing with the global Kernel\r\n if useHeaderKernel or dE==[0.0,0.0,0.0]:\r\n kernel=[]\r\n for iiiii in range(len(header)-3,len(header)):\r\n #print header[iiiii]\r\n s=str(header[iiiii]).split()\r\n kernel.append([float(s[1]),float(s[2]),float(s[3])])\r\n kernel=num.array(kernel)\r\n print \"Using header kernel for CTE.\"\r\n else:\r\n kernel=GU.getKernel(dE[0],dE[1],dE[2])\r\n convData=convolve2d(trimmed_data,kernel) \r\n return convData", "def createIntegratedPsf(self):\n\n (wavelengths, weights) = self.filter\n for i in 
range(len(wavelengths)):\n\n wavelength = wavelengths[i]\n weight = weights[i]\n self.convertToOpd(wavelength) # creates self.opd\n opd = self.embedOpd()\n zf = numpy.fft.fft2(opd)\n del opd\n # Compute the amplitude squared.\n # (psf is not really the point spread function yet)\n psf = np.conjugate(zf)\n # psf will now be the point spread function, but still complex\n np.multiply(psf, zf, psf)\n del zf\n # normalize the PSF, and convert to single precision\n psf = psf.real / psf.size\n psf = psf.astype(np.float32)\n\n self.center(psf)\n\n # This describes the image scale if no resampling is done.\n cdelt_before_resampling = (wavelength * MICRONStoMETERS) / \\\n (self.D * self.oversample) * RADIANStoDEGREES\n if self.pixel_size is None:\n # we won't resample the output image\n self.cdelt = cdelt_before_resampling\n # Extract a subset.\n if self.output_size < self.npix:\n o_npix = self.output_size\n n0 = (self.npix - o_npix) // 2\n self.integrated_psf += \\\n (psf[n0:n0 + o_npix, n0:n0 + o_npix] * weight)\n else:\n self.integrated_psf += (psf * weight)\n else:\n # we'll resample to this image scale\n self.cdelt = self.pixel_size / self.oversample * ARCSECtoDEGREES\n # These three parameters are only used by mapPsf and for\n # normalizing the weight after resampling.\n self.rescale = self.cdelt / cdelt_before_resampling\n self.input_center = (self.npix + 1) // 2\n self.output_center = (self.output_size + 1) // 2\n sub_psf = np.zeros((self.output_size, self.output_size),\n dtype=np.float32)\n # Do the resampling, writing the output to sub_psf.\n ndimage.geometric_transform(psf, self.mapPsf,\n output_shape=(self.output_size, self.output_size),\n output=sub_psf, prefilter=True)\n weight = weight * self.rescale**2\n self.integrated_psf += (sub_psf * weight)\n del sub_psf\n\n if self.verbose:\n print(\"PSF for wavelength %g has been computed\" % wavelength)", "def new_scaled_energy(run, smoother=\"pol2\"):\n get_from_ccdb(run)\n endpoint_calib = ROOT.pstags().endpoint_calib\n endpoint_energy = ROOT.pstags().endpoint_energy\n fout = open(f\"new_scaled_energy.{run}\", \"w\")\n Eps_tagm = ROOT.gROOT.FindObject(\"Epair_Etagm_fit\")\n if not Eps_tagm:\n Eps_tagm = ROOT.gROOT.FindObject(\"Epair_Etagm\")\n if not Eps_tagm:\n Eps_tagm = plot_Etagm_Epair(run)[0]\n Eps_tagm.Fit(smoother)\n for func in Eps_tagm.GetListOfFunctions():\n ntagm = Eps_tagm.GetNbinsX()\n for i in range(ntagm):\n Elow = Eps_tagm.GetXaxis().GetBinLowEdge(102-i)\n Ehigh = Eps_tagm.GetXaxis().GetBinUpEdge(102-i)\n f = [(endpoint_calib - endpoint_energy + func.Eval(E)) /\n endpoint_calib for E in (Elow, Ehigh)]\n fout.write(f\"{i+1} {f[0]} {f[1]}\\n\")\n break", "def _scale_edisp(self, input_irf_file, config):\n\n # Reading the Energy parameters\n self._edisp = dict()\n self._edisp['Mlow'] = input_irf_file['ENERGY DISPERSION'].data['MIGRA_LO'][0].copy()\n self._edisp['Mhigh'] = input_irf_file['ENERGY DISPERSION'].data['MIGRA_HI'][0].copy()\n self._edisp['M'] = (self._edisp['Mlow'] + self._edisp['Mhigh']) / 2.0\n\n # -------------------------------------------\n # Scaling the Energy dependence\n\n # Constant error function\n if config['energy_scaling']['err_func_type'] == \"constant\":\n scaling_params = config['energy_scaling']['constant']['scale']\n self._edisp['Mhigh_new'] = self._edisp['Mhigh'] * (scaling_params)\n self._edisp['Mlow_new'] = self._edisp['Mlow'] * (scaling_params)\n\n # Gradients error function\n elif config['energy_scaling']['err_func_type'] == \"gradient\":\n scaling_params = config['energy_scaling']['gradient']\n 
self._edisp['Mhigh_new'] = self._edisp['Mhigh'] * (\n 1. + scaling_params['scale'] * gradient(scipy.log10(self._edisp['Mhigh']),\n scipy.log10(scaling_params['range_min']),\n scipy.log10(scaling_params['range_max'])) \n )\n self._edisp['Mlow_new'] = self._edisp['Mlow'] * (\n 1. + scaling_params['scale'] * gradient(scipy.log10(self._edisp['Mlow']),\n scipy.log10(scaling_params['range_min']),\n scipy.log10(scaling_params['range_max'])) \n )\n # Step error function\n elif config['energy_scaling']['err_func_type'] == \"step\":\n scaling_params = config['energy_scaling']['step']\n break_points = list(zip(scipy.log10(scaling_params['transition_pos']),\n scaling_params['transition_widths']))\n self._edisp['Mhigh_new'] = self._edisp['Mhigh']* (\n 1 + scaling_params['scale'] * step(scipy.log10(self._edisp['Mhigh']), break_points)\n )\n self._edisp['Mlow_new'] = self._edisp['Mlow']* (\n 1 + scaling_params['scale'] * step(scipy.log10(self._edisp['Mlow']), break_points)\n )\n else:\n raise ValueError(\"Edisp energy scaling: unknown scaling function type '{:s}'\"\n .format(config['energy_scaling']['err_func_type'])\n )\n # ------------------------------------------\n # Recording the scaled variables\n input_irf_file['ENERGY DISPERSION'].data['MIGRA_HI'][0] = self._edisp['Mhigh_new']\n input_irf_file['ENERGY DISPERSION'].data['MIGRA_LO'][0] = self._edisp['Mlow_new']\n self._edisp['M_new'] = (self._edisp['Mlow_new'] + self._edisp['Mhigh_new']) / 2.0", "def generate_fgs_fsw_coefficients(siaf=None, verbose=False, scale=0.06738281367):\n if siaf is None:\n siaf = pysiaf.Siaf('fgs')\n\n instrument = 'FGS'\n\n pre_delivery_dir = os.path.join(JWST_DELIVERY_DATA_ROOT, instrument)\n if not os.path.isdir(pre_delivery_dir):\n os.makedirs(pre_delivery_dir)\n\n for aperture_name in ['FGS1_FULL_OSS', 'FGS2_FULL_OSS']:\n\n aperture = siaf[aperture_name]\n\n # center_offset_x = 1023.5\n # center_offset_y = 1023.5\n center_offset_x = aperture.XSciRef - 1.\n center_offset_y = aperture.YSciRef - 1.\n\n if verbose:\n print('External scale {}'.format(scale))\n print(aperture.get_polynomial_scales())\n\n # get SIAF coefficients\n coefficients = aperture.get_polynomial_coefficients()\n\n ar = coefficients['Sci2IdlX']\n br = coefficients['Sci2IdlY']\n cr = coefficients['Idl2SciX']\n dr = coefficients['Idl2SciY']\n\n a_fsw, b_fsw, c_fsw, d_fsw = polynomial.rescale(ar, br, cr, dr, 1. 
/ scale)\n factor = -1.\n\n if 'FGS1' in aperture_name:\n b_fsw *= -1\n c_fsw = polynomial.flip_y(c_fsw)\n d_fsw = polynomial.flip_y(d_fsw)\n\n a_fsw = polynomial.shift_coefficients(a_fsw, factor * center_offset_x,\n factor * center_offset_y)\n b_fsw = polynomial.shift_coefficients(b_fsw, factor * center_offset_x,\n factor * center_offset_y)\n c_fsw = polynomial.shift_coefficients(c_fsw, factor * center_offset_x,\n factor * center_offset_y)\n d_fsw = polynomial.shift_coefficients(d_fsw, factor * center_offset_x,\n factor * center_offset_y)\n\n a_fsw[0] += center_offset_x\n b_fsw[0] += center_offset_y\n c_fsw[0] += center_offset_x\n d_fsw[0] += center_offset_y\n\n # print FSW coefficients to screen\n fsw_coefficients = Table((c_fsw, d_fsw, a_fsw, b_fsw), names=(\n 'IDEALPTOREALPXCOE', 'IDEALPTOREALPYCOE', 'REALPTOIDEALPXCOE', 'REALPTOIDEALPYCOE'))\n if verbose:\n fsw_coefficients.pprint()\n\n table = Table(names=('parameter_name', 'value'), dtype=(object, float))\n table.add_row(['XOFFSET', center_offset_x])\n table.add_row(['YOFFSET', center_offset_y])\n table.add_row(['PLATESCALE', scale])\n for colname in fsw_coefficients.colnames:\n for i in range(len(fsw_coefficients[colname])):\n table.add_row(['{}_{}'.format(colname, i), fsw_coefficients[colname][i]])\n table['parameter_name'] = np.array(table['parameter_name']).astype(str)\n\n # write to file\n fsw_distortion_file = os.path.join(pre_delivery_dir, 'ifgs{}_distortion_tbl.txt'.format(aperture_name[3]))\n comments = []\n comments.append('FGS distortion coefficients for FSW')\n comments.append('')\n comments.append('Derived from SIAF distortion coefficients.')\n comments.append('')\n comments.append('Generated {} {}'.format(timestamp.isot, timestamp.scale))\n comments.append('by {}'.format(username))\n comments.append('')\n table.meta['comments'] = comments\n formats={'parameter_name': '%-20s', 'value': '%+2.6e'}\n table.write(fsw_distortion_file, format='ascii.fixed_width',\n delimiter=',', delimiter_pad=' ', bookend=False,\n overwrite=True, formats=formats)", "def get_psf_scale_map(self):\n\n scale_map = dict()\n\n scale_map['E_edges'] = scipy.concatenate((self._psf['Elow'], [self._psf['Ehigh'][-1]]))\n scale_map['Theta_edges'] = scipy.concatenate((self._psf['ThetaLow'], [self._psf['ThetaHi'][-1]]))\n\n # Find all \"sigma\" values - tells how many PSF components we have in the IRF file\n column_names = self._psf.keys()\n sigma_columns = list(filter(lambda s: (\"sigma\" in s.lower()) and not (\"new\" in s.lower()),\n column_names))\n\n for sigma_column in sigma_columns:\n # Avoiding division by zero\n can_divide = self._psf[sigma_column] > 0\n\n scale_map[sigma_column] = scipy.zeros_like(self._psf[sigma_column])\n scale_map[sigma_column][can_divide] = self._psf[sigma_column + '_new'][can_divide] / self._psf[sigma_column][can_divide]\n\n wh_nan = scipy.where(scipy.isnan(scale_map[sigma_column]))\n scale_map[sigma_column][wh_nan] = 0\n scale_map[sigma_column] -= 1\n\n return scale_map", "def reponse(ind_sf,ind_sp,sframe,spmod):\n\n from extinction import ccm89, apply\n from pyphot import get_library\n\n lib = get_library()\n filter = lib['Gaia_MAW_G']\n\n #info on calibration stars\n bx,by,rx,ry,zx,zy,h2 = read_spmod(spmod)\n\n #observations\n sf=fits.open(sframe)\n fmp=sf['fibermap'].data\n x=sf['wavelength'].data\n y=sf['flux'].data\n ivar=sf['ivar'].data\n\n #ext = F99(Rv=3.1)\n\n k = 0\n for i in ind_sp: \n j = ind_sf[k]\n if x[0] < 4000.: \n model = interp(x,bx,by['fit'][i,:])\n elif x[0] < 6000.: \n model = 
interp(x,rx,ry['fit'][i,:])\n else: \n model = interp(x,zx,zy['fit'][i,:])\n\n newx = x.byteswap().newbyteorder() # force native byteorder for calling ccm89\n model = model * 4. * pi # Hlambda to Flambda\n model = apply( ccm89(newx, fmp['ebv'][j]*3.1, 3.1), model) #redden model\n model = model * x / (hplanck*1e7) / (clight*1e2) # erg/cm2/s/AA -> photons/cm2/s/AA\n\n #scale = median(model)/median(y[j,:])\n #print('scale1=',scale)\n\n x_brz = concatenate((bx[(bx < min(rx))],\n rx,\n zx[(zx > max(rx))]))\n model_brz = concatenate((by['fit'][i,(bx < min(rx))],\n ry['fit'][i,:],\n zy['fit'][i,(zx > max(rx))]))\n\n model_brz = model_brz * 4. * pi # Hlambda to Flambda\n\n scale = filter.get_flux(x_brz,model_brz).value / 10.**( \n (fmp['gaia_phot_g_mean_mag'][j] + filter.Vega_zero_mag)/(-2.5) )\n\n #print('scale2=',scale)\n\n r = y[j,:]/model*scale\n w = ivar[j,:]*(model/scale)**2 \n if k == 0: \n rr = r\n ww = w\n else:\n rr = vstack((rr,r))\n ww = vstack((ww,w))\n k += 1\n\n #plt.clf()\n #ma = mean(rr,0) #straight mean response across spectra\n #ema = std(rr,0)/sqrt(k) #uncertainty in mean\n mw = sum(ww*rr,0)/sum(ww,0)#weighted mean\n emw = 1./sqrt(sum(ww,0)) #uncertainty in w. mean\n me = median(rr,0) #median \n ms = smooth(mw,51) #smoothed \n mws = mw - ms #scatter around the smoothed data\n ems = zeros(len(mw))\n length = 51\n for i in range(len(mw)): ems[i] = std(mws[max([0,i-length]):min([len(mws)-1,i+length])]) \n\n #now we compute the relative fiber transmission (including flux losses due to centering)\n k = 0\n a = zeros(len(ind_sp))\n for i in ind_sp: \n j = ind_sf[k]\n a[k] = mean(rr[k,:] / ms)\n print(i,j,a[k],fmp['fiber_ra'][j],fmp['fiber_dec'][j],fmp['gaia_phot_g_mean_mag'][j],mean(y[j,:]))\n k += 1\n\n print('n, median(emw/mw), median(ems/mw)=',k, median(emw/mw),median(ems/mw))\n \n\n return(x,mw,emw,ms,ems,a)", "def get_rescaled_energy_spectrum_saddoughi():\n k = np.asarray(\n [1.27151, 0.554731, 0.21884, 0.139643, 0.0648844, 0.0198547, 0.00558913, 0.00128828, 0.000676395, 0.000254346])\n e = np.asarray([0.00095661, 0.0581971, 2.84666, 11.283, 59.4552, 381.78, 2695.48, 30341.9, 122983, 728530])\n return e, k", "def _compute_pixel_scale(fvc2fp) :\n xc=3000.\n yc=3000.\n eps=0.1\n xpix=np.array([xc,xc+eps,xc])\n ypix=np.array([yc,yc,yc+eps])\n xfp,yfp = fvc2fp.fvc2fp(xpix,ypix)\n J=np.zeros((2,2))\n J[0,0]=(xfp[1]-xfp[0])/eps\n J[0,1]=(xfp[2]-xfp[0])/eps\n J[1,0]=(yfp[1]-yfp[0])/eps\n J[1,1]=(yfp[2]-yfp[0])/eps\n return np.sqrt(np.abs(np.linalg.det(J))) # mm per pixel", "def finalSES(mat, C):\n\tscaleToMin, medianScore, maxScore = SES(mat, C)\n\tprint(f\"Background Score: {scaleToMin} \\tMax Score: {maxScore} \\tMedian Score: {medianScore}\")\n\treturn scaling(mat, medianScore)", "def scale_psf_fluxes(frame, psf):\n scale_factor = (max_flux(frame) / max_flux(psf))\n return psf.profile * scale_factor, psf.fluxes * scale_factor", "def pixelScaleToHandScale(self, s):\n\n return (self.zoom * self.maxTargetSize / self.upscale) / s", "def scale(self):", "def _feature_scaling(self):", "def shear_est(self, gal_image, psf_image, noise=None, F=False):\n # gal_ps = self.pow_spec(gal_image)\n gal_ps = gal_image\n # gal_ps = hk_tool_box.smooth(gal_ps,self.size)\n if noise is not None:\n nbg = self.pow_spec(noise)\n self.flux2 = numpy.sqrt(gal_ps[int(self.size/2), int(self.size/2)]/numpy.sum(self.rim*gal_ps)*numpy.sum(self.rim))\n # nbg = hk_tool_box.smooth(nbg,self.size)\n # rim = self.border(2, size)\n # n = numpy.sum(rim)\n # gal_pn = numpy.sum(gal_ps*rim)/n # the Possion noise of galaxy 
image\n # nbg_pn = numpy.sum(nbg*rim)/n # the Possion noise of background noise image\n gal_ps = gal_ps - nbg# + nbg_pn - gal_pn\n\n if F:\n psf_ps = psf_image\n else:\n psf_ps = self.pow_spec(psf_image)\n # self.get_radius_new(psf_ps, 2)\n wb, beta = self.wbeta(self.hlr)\n maxi = numpy.max(psf_ps)\n idx = psf_ps < maxi / 100000.\n wb[idx] = 0\n psf_ps[idx] = 1.\n tk = wb/psf_ps * gal_ps\n\n # ky, kx = self.ky, self.kx\n # #\n # kx2 = kx*kx\n # ky2 = ky*ky\n # kxy = kx*ky\n # k2 = kx2 + ky2\n # k4 = k2*k2\n # mn1 = (-0.5)*(kx2 - ky2) # (-0.5)*(kx**2 - ky**2)\n # mn2 = -kxy # -kx*ky\n # mn3 = k2 - 0.5*beta**2*k4 # kx**2 + ky**2 - 0.5*beta**2*(kx**2 + ky**2)**2\n # mn4 = k4 - 8*kx2*ky2 # kx**4 - 6*kx**2*ky**2 + ky**4\n # mn5 = kxy*(kx2 - ky2) # kx**3*ky - kx*ky**3\n\n # mn1 = self.mn1\n # mn2 = self.mn2\n mn3 = self.k2 - 0.5*beta**2*self.k4\n # mn4 = self.mn4\n # mn5 = self.mn5\n\n mg1 = numpy.sum(self.mn1 * tk)*self.alpha\n mg2 = numpy.sum(self.mn2 * tk)*self.alpha\n mn = numpy.sum(mn3 * tk)*self.alpha\n mu = numpy.sum(self.mn4 * tk)*(-0.5*beta**2)*self.alpha\n mv = numpy.sum(self.mn5 * tk)*(-2.*beta**2)*self.alpha\n\n return mg1, mg2, mn, mu, mv", "def scale(self, spectrum, dimension, **kwargs):\n prescale_sum = spectrum.sum()\n interpolation = spectrum.interpolate1d(dimension, **kwargs)\n sf = self.get_scale_factor()\n scaled_spec = copy.copy(spectrum)\n scaled_spec._name = spectrum._name + \"_sf\" + str(sf)\n scaled_spec._data = numpy.zeros(spectrum._data.shape)\n n_dim = len(spectrum._data.shape)\n axis = spectrum.get_config().get_index(dimension)\n par = spectrum.get_config().get_par(dimension)\n low = par._low\n high = par._high\n n_bins = par._bins\n step = par.get_width()\n for bin in range(n_bins):\n x = par.get_bin_centre(bin)\n ratio = x/sf\n if ratio < low or ratio >= high:\n continue # Trying to scale values outside range (Unknown)\n elif ratio < low + 0.5*step:\n ratio = low + 0.5*step\n elif ratio > high - 0.5*step:\n ratio = high - 0.5*step - 1e-6 # Floating point issue\n y = interpolation(ratio)\n if y <= 0.:\n continue\n old_bin1 = par.get_bin(ratio)\n old_bin_centre1 = par.get_bin_centre(old_bin1)\n if par.get_bin_centre(old_bin1) > ratio:\n old_bin2 = old_bin1 - 1\n if old_bin2 >= 0:\n x_low1 = old_bin_centre1 - 0.5*step # Equals x_high2\n x_high1 = ratio + 0.5*step\n if x_high1 > high - 0.5*step:\n x_high1 = high - 0.5*step - 1e-6\n area1 = numpy.fabs(0.5 * (x_high1 - x_low1) *\n (interpolation(x_high1) +\n interpolation(x_low1)))\n x_low2 = ratio - 0.5*step\n area2 = numpy.fabs(0.5 * (x_low1 - x_low2) *\n (interpolation(x_low1) +\n interpolation(x_low2)))\n else:\n old_bin2 = 0\n area2 = 0. # This will set scale2 == 0\n area1 = 1.\n else:\n old_bin2 = old_bin1 + 1\n if old_bin2 < n_bins:\n x_low1 = ratio - 0.5*step\n if x_low1 < low + 0.5*step:\n x_low1 = low + 0.5*step\n x_high1 = old_bin_centre1 + 0.5*step # = x_low2\n area1 = numpy.fabs(0.5 * (x_high1 - x_low1) *\n (interpolation(x_high1) +\n interpolation(x_low1)))\n x_high2 = ratio + 0.5*step\n area2 = numpy.fabs(0.5 * (x_high2 - x_high1) *\n (interpolation(x_high2) +\n interpolation(x_high1)))\n else:\n old_bin2 = n_bins - 1\n area2 = 0. # This will set scale2 == 0\n area1 = 1.\n if area1 == 0. and area2 == 0.:\n continue\n scale1 = area1 / (area1 + area2)\n scale2 = area2 / (area1 + area2)\n # Prepare array split. 
Is there a better way to do this not using\n # eval and exec?\n cur_slice = \"[\"\n old_slice1 = \"[\"\n old_slice2 = \"[\"\n for dim in range(n_dim):\n if dim == axis:\n if bin < n_bins - 1:\n cur_slice += str(bin) + \":\" + str(bin + 1) + \",\"\n else:\n cur_slice += str(bin) + \":,\"\n if old_bin1 < n_bins - 1:\n old_slice1 += (str(old_bin1) + \":\" +\n str(old_bin1 + 1) + \",\")\n else:\n old_slice1 += str(old_bin1) + \":,\"\n if old_bin2 < n_bins - 1:\n old_slice2 += (str(old_bin2) + \":\" +\n str(old_bin2 + 1) + \",\")\n else:\n old_slice2 += str(old_bin2) + \":,\"\n else:\n cur_slice += \":,\"\n old_slice1 += \":,\"\n old_slice2 += \":,\"\n cur_slice = cur_slice[:-1] + \"]\"\n old_slice1 = old_slice1[:-1] + \"]\"\n old_slice2 = old_slice2[:-1] + \"]\"\n old_data1 = eval(\"spectrum._data\"+old_slice1)\n unscaled_sum1 = float(old_data1.sum())\n old_data2 = eval(\"spectrum._data\"+old_slice2)\n unscaled_sum2 = float(old_data2.sum())\n # Check to see if there is data to scale and counts is positive\n if unscaled_sum1 <= 0. and unscaled_sum2 <= 0.:\n continue\n elif unscaled_sum1 <= 0.:\n fill_cmd = (\"scaled_spec._data\" + cur_slice + \"+= old_data2 * \"\n \"(y / unscaled_sum2)\")\n exec(fill_cmd)\n elif unscaled_sum2 <= 0.:\n fill_cmd = (\"scaled_spec._data\" + cur_slice + \"+= old_data1 * \"\n \"(y / unscaled_sum1)\")\n exec(fill_cmd)\n\n else:\n fill_cmd = (\"scaled_spec._data\" + cur_slice + \"+= old_data1 * \"\n \"scale1 * (y / unscaled_sum1) + old_data2 * \"\n \"scale2 * (y / unscaled_sum2)\")\n exec(fill_cmd)\n # renormalise to prescale number of counts\n scaled_spec._num_decays = scaled_spec.sum()\n scaled_spec.scale(prescale_sum)\n scaled_spec._num_decays = spectrum._num_decays\n return scaled_spec", "def ellipse_sbprofile(ellipsefit, minerr=0.0, snrmin=1.0, sma_not_radius=False,\n cut_on_cog=False, sdss=False, linear=False):\n sbprofile = dict()\n bands = ellipsefit['bands']\n if 'refpixscale' in ellipsefit.keys():\n pixscale = ellipsefit['refpixscale']\n else:\n pixscale = ellipsefit['pixscale']\n eps = ellipsefit['eps_moment']\n if 'redshift' in ellipsefit.keys():\n sbprofile['redshift'] = ellipsefit['redshift'] \n \n for filt in bands:\n psfkey = 'psfsize_{}'.format(filt.lower())\n if psfkey in ellipsefit.keys():\n sbprofile[psfkey] = ellipsefit[psfkey]\n\n sbprofile['minerr'] = minerr\n sbprofile['smaunit'] = 'pixels'\n sbprofile['radiusunit'] = 'arcsec'\n\n # semi-major axis and circularized radius\n #sbprofile['sma'] = ellipsefit[bands[0]].sma * pixscale # [arcsec]\n\n for filt in bands:\n #area = ellipsefit[filt].sarea[indx] * pixscale**2\n\n sma = np.atleast_1d(ellipsefit['sma_{}'.format(filt.lower())]) # semi-major axis [pixels]\n sb = np.atleast_1d(ellipsefit['intens_{}'.format(filt.lower())]) # [nanomaggies/arcsec2]\n sberr = np.atleast_1d(np.sqrt(ellipsefit['intens_err_{}'.format(filt.lower())]**2 + (0.4 * np.log(10) * sb * minerr)**2))\n \n if sma_not_radius:\n radius = sma * pixscale # [arcsec]\n else:\n radius = sma * np.sqrt(1 - eps) * pixscale # circularized radius [arcsec]\n\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n if linear:\n keep = np.isfinite(sb)\n else:\n keep = np.isfinite(sb) * ((sb / sberr) > snrmin)\n #if filt == 'FUV':\n # pdb.set_trace()\n \n if cut_on_cog:\n keep *= (ellipsefit['sma_{}'.format(filt.lower())] * pixscale) <= np.max(ellipsefit['cog_sma_{}'.format(filt.lower())])\n keep = np.where(keep)[0]\n \n sbprofile['keep_{}'.format(filt.lower())] = keep\n\n if len(keep) == 0 or sma[0] == -1:\n 
sbprofile['sma_{}'.format(filt.lower())] = np.array([-1.0]).astype('f4') # [pixels]\n sbprofile['radius_{}'.format(filt.lower())] = np.array([-1.0]).astype('f4') # [arcsec]\n sbprofile['mu_{}'.format(filt.lower())] = np.array([-1.0]).astype('f4') # [nanomaggies/arcsec2]\n sbprofile['muerr_{}'.format(filt.lower())] = np.array([-1.0]).astype('f4') # [nanomaggies/arcsec2]\n else:\n sbprofile['sma_{}'.format(filt.lower())] = sma[keep] # [pixels]\n sbprofile['radius_{}'.format(filt.lower())] = radius[keep] # [arcsec]\n if linear:\n sbprofile['mu_{}'.format(filt.lower())] = sb[keep] # [nanomaggies/arcsec2]\n sbprofile['muerr_{}'.format(filt.lower())] = sberr[keep] # [nanomaggies/arcsec2]\n continue\n else:\n sbprofile['mu_{}'.format(filt.lower())] = 22.5 - 2.5 * np.log10(sb[keep]) # [mag/arcsec2]\n sbprofile['muerr_{}'.format(filt.lower())] = 2.5 * sberr[keep] / sb[keep] / np.log(10) # [mag/arcsec2]\n\n #sbprofile[filt] = 22.5 - 2.5 * np.log10(ellipsefit[filt].intens)\n #sbprofile['mu_{}_err'.format(filt.lower())] = 2.5 * ellipsefit[filt].int_err / \\\n # ellipsefit[filt].intens / np.log(10)\n #sbprofile['mu_{}_err'.format(filt.lower())] = np.sqrt(sbprofile['mu_{}_err'.format(filt.lower())]**2 + minerr**2)\n\n # Just for the plot use a minimum uncertainty\n #sbprofile['{}_err'.format(filt.lower())][sbprofile['{}_err'.format(filt.lower())] < minerr] = minerr\n\n if 'g' in bands and 'r' in bands and 'z' in bands:\n radius_gr, indx_g, indx_r = np.intersect1d(sbprofile['radius_g'], sbprofile['radius_r'], return_indices=True)\n sbprofile['gr'] = sbprofile['mu_g'][indx_g] - sbprofile['mu_r'][indx_r]\n sbprofile['gr_err'] = np.sqrt(sbprofile['muerr_g'][indx_g]**2 + sbprofile['muerr_r'][indx_r]**2)\n sbprofile['radius_gr'] = radius_gr\n\n radius_rz, indx_r, indx_z = np.intersect1d(sbprofile['radius_r'], sbprofile['radius_z'], return_indices=True)\n sbprofile['rz'] = sbprofile['mu_r'][indx_r] - sbprofile['mu_z'][indx_z]\n sbprofile['rz_err'] = np.sqrt(sbprofile['muerr_r'][indx_r]**2 + sbprofile['muerr_z'][indx_z]**2)\n sbprofile['radius_rz'] = radius_rz\n \n # SDSS\n if sdss and 'g' in bands and 'r' in bands and 'i' in bands:\n radius_gr, indx_g, indx_r = np.intersect1d(sbprofile['radius_g'], sbprofile['radius_r'], return_indices=True)\n sbprofile['gr'] = sbprofile['mu_g'][indx_g] - sbprofile['mu_r'][indx_r]\n sbprofile['gr_err'] = np.sqrt(sbprofile['muerr_g'][indx_g]**2 + sbprofile['muerr_r'][indx_r]**2)\n sbprofile['radius_gr'] = radius_gr\n\n radius_ri, indx_r, indx_i = np.intersect1d(sbprofile['radius_r'], sbprofile['radius_i'], return_indices=True)\n sbprofile['ri'] = sbprofile['mu_r'][indx_r] - sbprofile['mu_i'][indx_i]\n sbprofile['ri_err'] = np.sqrt(sbprofile['muerr_r'][indx_r]**2 + sbprofile['muerr_i'][indx_i]**2)\n sbprofile['radius_ri'] = radius_ri\n \n # Just for the plot use a minimum uncertainty\n #sbprofile['gr_err'][sbprofile['gr_err'] < minerr] = minerr\n #sbprofile['rz_err'][sbprofile['rz_err'] < minerr] = minerr\n\n # # Add the effective wavelength of each bandpass, although this needs to take\n # # into account the DECaLS vs BASS/MzLS filter curves.\n # from speclite import filters\n # filt = filters.load_filters('decam2014-g', 'decam2014-r', 'decam2014-z', 'wise2010-W1', 'wise2010-W2')\n # for ii, band in enumerate(('g', 'r', 'z', 'W1', 'W2')):\n # sbprofile.update({'{}_wave_eff'.format(band): filt.effective_wavelengths[ii].value})\n\n return sbprofile" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
update kth (0-indexed) value with a
def set_val(self, k, a):
    k += self.n - 1
    self.dat[k] = a
    while k > 0:
        k = (k - 1) // 2  # parent
        self.dat[k] = self.op(self.dat[k * 2 + 1], self.dat[k * 2 + 2])
[ "def update(self, k, x):\n k = self.num + k\n self.tree[k] = x\n while k > 1:\n k = k // 2\n self.tree[k] = self.func(self.tree[k * 2], self.tree[k * 2 + 1])", "def update(self, x: int, k: int) -> None:\n while x <= self.n:\n self.tree[x] += k\n x += self.lowbit(x)", "def _update(self, idx, value):\r\n pass", "def setitem(v, k, val):\n assert k in range(v.size)\n v.store[k] = val", "def _bucket_setitem(self, j, k, v):\n if self._table[j] is None:\n self._table[j] = UnsortedTableMap() # create new bucket at index j\n oldSize = len(self._table[j])\n self._table[j][k] = v\n if len(self._table[j]) > oldSize: # key is new to the table\n self._n += 1", "def _bucket_setitem(self, j, k, v):\n pass", "def set_val(self, k, v):\n k += self.n - 1\n self.dat_v[k] = v\n self.dat_f[k] = self.idf()\n self._re_calculate_above(k)", "def insert(self, k, value):\n\n if self._n == self._capacity: \n self._resize(2 * self._capacity) \n for j in range(self._n, k, -1): \n self._A[j] = self._A[j-1]\n self._A[k] = value \n self._n += 1", "def __setitem__(self, k, value):\n self._coords[k] = value", "def insert(self, k, value):\n # (for simplicity, we assume 0 <= k <= n in this verion)\n if self._n == self._capacity: # not enough room\n self._resize(2 * self._capacity) # so double capacity\n for j in range(self._n, k, -1): # shift rightmost first\n self._A[j] = self._A[j - 1]\n self._A[k] = value # store newest element\n self._n += 1", "def insert(self, k, value):\n if self._n == self._capacity: # not enough room\n self._resize(2 * self._capacity) # so double capacity\n for j in range(self._n, k, -1): # shift rightmost first\n self._A[j] = self._A[j-1]\n self._A[k] = value # store newest element\n self._n += 1", "def update(self, idx, value):\n idx = self.__capacity - 1 + idx\n self.__tree[idx] = value\n self.__update(idx)", "def modify_pos(self, k, delta):\n self.pos[k] += delta", "def update(self, index, value):\n\n if not 0 <= index <= self._size - 1:\n raise IndexError('Invalid index: {0}'.format(index))\n\n index += 1 # position w.r.t a one-indexed array.\n while index <= self._size:\n self._tree[index - 1] += value\n index += (index & -index)", "def __getitem__(self, k):\n if not 0 <= k < self._n:\n raise IndexError('invalid index')\n return self._A[k] # retrieve from array", "def change(self, i, k, delta):\n self._change(1, 0, self.N, i, k, delta)", "def assign(self, k, v):\n if self.dict == []:\n self.dict.append([k, v])\n else:\n for tup in self.dict:\n if tup[0] == k:\n tup[1] = v\n else:\n self.dict.append([k, v])", "def update_e_k(ss, k):\n e_k = cal_error_k(ss, k)\n ss.error_cache[k] = [1, e_k]", "def incr(self, idx, val):\n idx += 1\n while idx < len(self.tree):\n self.tree[idx] += val\n idx += (idx & (-idx))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decodes and yields each game event from the contents byte string.
def decode_replay_game_events(contents):
    decoder = BitPackedDecoder(contents, typeinfos)
    for event in _decode_event_stream(decoder,
                                      game_eventid_typeid,
                                      game_event_types,
                                      decode_user_id=True):
        yield event
[ "def decode_replay_game_events(contents):\r\n decoder = BitPackedDecoder(contents, typeinfos)\r\n for event in _decode_event_stream(decoder,\r\n game_eventid_typeid,\r\n game_event_types,\r\n decode_user_id=True):\r\n yield event", "def decode_replay_message_events(contents):\r\n decoder = BitPackedDecoder(contents, typeinfos)\r\n for event in _decode_event_stream(decoder,\r\n message_eventid_typeid,\r\n message_event_types,\r\n decode_user_id=True):\r\n yield event", "def decode_replay_message_events(contents):\n decoder = BitPackedDecoder(contents, typeinfos)\n for event in _decode_event_stream(decoder,\n message_eventid_typeid,\n message_event_types,\n decode_user_id=True):\n yield event", "def decode_replay_tracker_events(contents):\r\n decoder = VersionedDecoder(contents, typeinfos)\r\n for event in _decode_event_stream(decoder,\r\n tracker_eventid_typeid,\r\n tracker_event_types,\r\n decode_user_id=False):\r\n yield event", "def decode_replay_tracker_events(contents):\n decoder = VersionedDecoder(contents, typeinfos)\n for event in _decode_event_stream(decoder,\n tracker_eventid_typeid,\n tracker_event_types,\n decode_user_id=False):\n yield event", "def chunks(raw):\n for i in range(0, len(raw), EVENT_SIZE):\n yield struct.unpack(EVENT_FORMAT, raw[i:i+EVENT_SIZE])", "def _parse_event_buffer(event_buffer):\n i = 0\n while i + 16 <= len(event_buffer):\n wd, mask, cookie, length = struct.unpack_from(\"iIII\", event_buffer, i)\n name = event_buffer[i + 16 : i + 16 + length].rstrip(b\"\\0\")\n i += 16 + length\n yield wd, mask, cookie, name", "def decode(self, s):\r\n (tsec, tfrac, self.eventType, self.eventCode,\r\n self.eventValue) = struct.unpack(Format.Event, s)\r\n\r\n self.time = tsec + tfrac / 1000000.0", "def parse_bytes(self, bytes_):\n for byte in bytes_:\n self.parse_byte(byte)", "def test_textAsEvent_encoding(self):\n self.assertEquals(\n textAsEvent(u\"S\\xe1nchez\"),\n b\"data: S\\xc3\\xa1nchez\\n\\n\"\n )", "def decode_replay_attributes_events(contents):\n buffer = BitPackedBuffer(contents, 'little')\n attributes = {}\n if not buffer.done():\n attributes['source'] = buffer.read_bits(8)\n attributes['mapNamespace'] = buffer.read_bits(32)\n count = buffer.read_bits(32)\n attributes['scopes'] = {}\n while not buffer.done():\n value = {}\n value['namespace'] = buffer.read_bits(32)\n value['attrid'] = attrid = buffer.read_bits(32)\n scope = buffer.read_bits(8)\n value['value'] = buffer.read_aligned_bytes(4)[::-1].strip(b'\\x00')\n if not scope in attributes['scopes']:\n attributes['scopes'][scope] = {}\n if not attrid in attributes['scopes'][scope]:\n attributes['scopes'][scope][attrid] = []\n attributes['scopes'][scope][attrid].append(value)\n return attributes", "def decode(self):\n instr = self.fetch()", "def events_from_bytes(cls, data, res, frame_num):\n\t\tall_events = [np.zeros(res) for t in range(frame_num - 1)]\n\t\tfor i in range(res[0]):\n\t\t\tfor j in range(res[1]):\n\t\t\t\tevents = cls._pixel_events_from_bytes(data)\n\t\t\t\tfor event in events:\n\t\t\t\t\tall_events[event[1]][i, j] = event[0]\n\n\t\treturn all_events", "def parse_event(self, stream, skip_content=False):\n preamble = self.parse_preamble(stream)\n header = self.parse_header(preamble.header, stream)\n content = None\n read_length = None\n if skip_content:\n stream.seek(preamble.content, SEEK_CUR)\n else:\n content = stream.read(preamble.content)\n read_length = len(content)\n content = content.decode(self.encoding)\n\n stream.seek(1, SEEK_CUR) # new line after each event\n\n if not skip_content 
and read_length != preamble.content:\n raise Exception('Invalid content size. The stream is either unreadable or corrupted. ' +\n 'Preamble declares %d bytes, but content length is %d' % (preamble.content, len(content)))\n\n return Event(id=header.id, source=header.source, timestamp=header.timestamp,\n tags=header.tags, content=content)", "def process(self, alert_bytes):\n # \"\"\"Expects that alert was in form of dict before converting to bytes for PS\"\"\"\n # alert_dict = json.loads(alert_bytes.decode('utf-8')) \n\n alert_dict = lass.deserialize_alert(alert_bytes)\n return [alert_dict]", "def msgs_from_bytes(self, b):\n msgs = []\n # User remainder bytes\n parse_bytes = self.remainder + b.decode('ascii')\n # Find the first frame delimiter\n i = parse_bytes.find('\\r\\n')\n while i >= 0:\n # Try to parse a single message\n m = self._parse_msg(parse_bytes[:i])\n # Remove parsed bytes and delimter\n parse_bytes = parse_bytes[i+2:]\n # Add parsed message, if any\n if m:\n msgs.append(m)\n self.logger.debug('Parsed ASCII frame: address={}, function={}, len={}'.format(m.address, m.function, len(m.data) if m.data else 0))\n #else - warn?\n i = parse_bytes.find('\\r\\n')\n # Store any remaining bytes for the next pass\n self.remainder = parse_bytes\n return msgs", "def load(f):\n while True:\n c = f.read(1)\n if len(c) == 1:\n msg_len = _read_int(f, already_read=c)\n msg_str = f.read(msg_len)\n if len(msg_str) < msg_len:\n raise ValueError(\"Unexpected EOF while parsing message\")\n yield javascript.loads(msg_str.decode())\n else:\n break", "def decode(self, s: str) -> [str]:", "def decode_stream(self):\n io = self.io\n result = None\n\n while True:\n opcode = io.read(1)\n if not opcode:\n break\n else:\n opcode = ord(opcode)\n\n klass = MicroOpDecoder.opcode_to_class.get(opcode)\n yield klass.decode(io)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decodes and yields each message event from the contents byte string.
def decode_replay_message_events(contents):
    decoder = BitPackedDecoder(contents, typeinfos)
    for event in _decode_event_stream(decoder,
                                      message_eventid_typeid,
                                      message_event_types,
                                      decode_user_id=True):
        yield event
[ "def decode_replay_message_events(contents):\r\n decoder = BitPackedDecoder(contents, typeinfos)\r\n for event in _decode_event_stream(decoder,\r\n message_eventid_typeid,\r\n message_event_types,\r\n decode_user_id=True):\r\n yield event", "def decode_replay_game_events(contents):\r\n decoder = BitPackedDecoder(contents, typeinfos)\r\n for event in _decode_event_stream(decoder,\r\n game_eventid_typeid,\r\n game_event_types,\r\n decode_user_id=True):\r\n yield event", "def decode_replay_game_events(contents):\n decoder = BitPackedDecoder(contents, typeinfos)\n for event in _decode_event_stream(decoder,\n game_eventid_typeid,\n game_event_types,\n decode_user_id=True):\n yield event", "def decode_replay_tracker_events(contents):\r\n decoder = VersionedDecoder(contents, typeinfos)\r\n for event in _decode_event_stream(decoder,\r\n tracker_eventid_typeid,\r\n tracker_event_types,\r\n decode_user_id=False):\r\n yield event", "def decode_replay_tracker_events(contents):\n decoder = VersionedDecoder(contents, typeinfos)\n for event in _decode_event_stream(decoder,\n tracker_eventid_typeid,\n tracker_event_types,\n decode_user_id=False):\n yield event", "def msgs_from_bytes(self, b):\n msgs = []\n # User remainder bytes\n parse_bytes = self.remainder + b.decode('ascii')\n # Find the first frame delimiter\n i = parse_bytes.find('\\r\\n')\n while i >= 0:\n # Try to parse a single message\n m = self._parse_msg(parse_bytes[:i])\n # Remove parsed bytes and delimter\n parse_bytes = parse_bytes[i+2:]\n # Add parsed message, if any\n if m:\n msgs.append(m)\n self.logger.debug('Parsed ASCII frame: address={}, function={}, len={}'.format(m.address, m.function, len(m.data) if m.data else 0))\n #else - warn?\n i = parse_bytes.find('\\r\\n')\n # Store any remaining bytes for the next pass\n self.remainder = parse_bytes\n return msgs", "def parse_bytes(self, bytes_):\n for byte in bytes_:\n self.parse_byte(byte)", "def decode_message(message):", "def decode(self, msg_bytes):\n return json.loads(Stream.decode(self, msg_bytes))", "def recv(self):\n esc = False\n data = b''\n msg = b''\n while True:\n data += self.socket.recv(1024)\n if 0 == len(data):\n return\n cut_off = 0\n for c in data:\n cut_off += 1\n if esc:\n msg += bytes([c])\n esc = False\n elif chr(c) == '\\\\':\n esc = True\n elif chr(c) == '$':\n try:\n yield msg.decode()\n except UnicodeDecodeError:\n yield None\n data = data[cut_off:]\n msg = b''\n else:\n msg += bytes([c])", "def _parse_messages (self):\n msgs = []\n end_idx = 0\n buf = self._buf\n while buf:\n frame_type = ord(buf[0])\n if frame_type == 0:\n # Normal message.\n end_idx = buf.find(\"\\xFF\")\n if end_idx == -1: #pragma NO COVER\n break\n msgs.append(buf[1:end_idx].decode('utf-8', 'replace'))\n buf = buf[end_idx + 1:]\n elif frame_type == 255:\n # Closing handshake.\n assert ord(buf[1]) == 0, \"Unexpected closing handshake: %r\" % buf\n self.websocket_closed = True\n break\n else:\n raise ValueError(\"Don't understand how to parse this type of message: %r\" % buf)\n self._buf = buf\n return msgs", "def chunks(raw):\n for i in range(0, len(raw), EVENT_SIZE):\n yield struct.unpack(EVENT_FORMAT, raw[i:i+EVENT_SIZE])", "def _parse_messages(self):\r\n msgs = []\r\n end_idx = 0\r\n buf = self._buf\r\n while buf:\r\n frame_type = ord(buf[0])\r\n if frame_type == 0:\r\n # Normal message.\r\n end_idx = buf.find(\"\\xFF\")\r\n if end_idx == -1: #pragma NO COVER\r\n break\r\n msgs.append(buf[1:end_idx].decode('utf-8', 'replace'))\r\n buf = buf[end_idx+1:]\r\n elif frame_type == 255:\r\n # 
Closing handshake.\r\n assert ord(buf[1]) == 0, \"Unexpected closing handshake: %r\" % buf\r\n self.websocket_closed = True\r\n break\r\n else:\r\n raise ValueError(\"Don't understand how to parse this type of message: %r\" % buf)\r\n self._buf = buf\r\n return msgs", "def many_from_bytes(cls, bytestring):\n messages = []\n matches = list(cls.regexp.finditer(bytestring))\n for match in matches:\n substring = match.group(0)\n try:\n message = cls.from_bytes(substring)\n except sungrow.BadBinaryMessage:\n LOG.debug('bad substring {0!r}'.format(substring))\n continue\n ## see docstring for explanation of why -len(substring)\n item = (match.start(), -len(substring), message)\n messages.append(item)\n return messages", "def parse_messages(event):\n messages = [get_message_body(record) for record in event[\"Records\"]]\n print(str(messages))\n return messages", "def _parse_event_buffer(event_buffer):\n i = 0\n while i + 16 <= len(event_buffer):\n wd, mask, cookie, length = struct.unpack_from(\"iIII\", event_buffer, i)\n name = event_buffer[i + 16 : i + 16 + length].rstrip(b\"\\0\")\n i += 16 + length\n yield wd, mask, cookie, name", "def process(self, alert_bytes):\n # \"\"\"Expects that alert was in form of dict before converting to bytes for PS\"\"\"\n # alert_dict = json.loads(alert_bytes.decode('utf-8')) \n\n alert_dict = lass.deserialize_alert(alert_bytes)\n return [alert_dict]", "def decode_message(cls, data):\n N = len(data)\n (magic,) = struct.unpack('>B', data[0:1])\n if magic == 0:\n # version 0\n (crc,) = struct.unpack('>i', data[1:5])\n payload = data[5:N]\n assert zlib.crc32(payload) == crc\n msg = Message(magic, None, crc, payload)\n log.debug(\"Got v0 Message, %s\", msg)\n yield msg\n elif magic == 1:\n # version 1\n (att, crc) = struct.unpack('>Bi', data[1:6])\n payload = data[6:N]\n assert zlib.crc32(payload) == crc\n if att & KafkaClient.ATTRIBUTE_CODEC_MASK == 0:\n # Uncompressed, just a single Message\n msg = Message(magic, att, crc, payload)\n log.debug(\"Got v1 Message, %s\", msg)\n yield msg\n elif att & KafkaClient.ATTRIBUTE_CODEC_MASK == 1:\n # Gzip encoded Message\n gz = gzip_decode(payload)\n (msgs, _) = cls.read_message_set(gz)\n for msg in msgs:\n yield msg\n elif att & KafkaClient.ATTRIBUTE_CODEC_MASK == 2:\n # Snappy encoded Message\n snp = snappy_decode(payload)\n (msgs, _) = cls.read_message_set(snp)\n for msg in msgs:\n yield msg\n else:\n raise RuntimeError(\"Unsupported compression type: %d\" % (att & KafkaClient.ATTRIBUTE_CODEC_MASK))", "def load(f):\n while True:\n c = f.read(1)\n if len(c) == 1:\n msg_len = _read_int(f, already_read=c)\n msg_str = f.read(msg_len)\n if len(msg_str) < msg_len:\n raise ValueError(\"Unexpected EOF while parsing message\")\n yield javascript.loads(msg_str.decode())\n else:\n break" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decodes and yields each tracker event from the contents byte string.
def decode_replay_tracker_events(contents):
    decoder = VersionedDecoder(contents, typeinfos)
    for event in _decode_event_stream(decoder,
                                      tracker_eventid_typeid,
                                      tracker_event_types,
                                      decode_user_id=False):
        yield event
[ "def decode_replay_tracker_events(contents):\r\n decoder = VersionedDecoder(contents, typeinfos)\r\n for event in _decode_event_stream(decoder,\r\n tracker_eventid_typeid,\r\n tracker_event_types,\r\n decode_user_id=False):\r\n yield event", "def decode_replay_message_events(contents):\r\n decoder = BitPackedDecoder(contents, typeinfos)\r\n for event in _decode_event_stream(decoder,\r\n message_eventid_typeid,\r\n message_event_types,\r\n decode_user_id=True):\r\n yield event", "def decode_replay_message_events(contents):\n decoder = BitPackedDecoder(contents, typeinfos)\n for event in _decode_event_stream(decoder,\n message_eventid_typeid,\n message_event_types,\n decode_user_id=True):\n yield event", "def decode_replay_game_events(contents):\r\n decoder = BitPackedDecoder(contents, typeinfos)\r\n for event in _decode_event_stream(decoder,\r\n game_eventid_typeid,\r\n game_event_types,\r\n decode_user_id=True):\r\n yield event", "def decode_replay_game_events(contents):\n decoder = BitPackedDecoder(contents, typeinfos)\n for event in _decode_event_stream(decoder,\n game_eventid_typeid,\n game_event_types,\n decode_user_id=True):\n yield event", "def decode(self, s):\r\n (tsec, tfrac, self.eventType, self.eventCode,\r\n self.eventValue) = struct.unpack(Format.Event, s)\r\n\r\n self.time = tsec + tfrac / 1000000.0", "def chunks(raw):\n for i in range(0, len(raw), EVENT_SIZE):\n yield struct.unpack(EVENT_FORMAT, raw[i:i+EVENT_SIZE])", "def parse_bytes(self, bytes_):\n for byte in bytes_:\n self.parse_byte(byte)", "def _parse_event_buffer(event_buffer):\n i = 0\n while i + 16 <= len(event_buffer):\n wd, mask, cookie, length = struct.unpack_from(\"iIII\", event_buffer, i)\n name = event_buffer[i + 16 : i + 16 + length].rstrip(b\"\\0\")\n i += 16 + length\n yield wd, mask, cookie, name", "def process(self, alert_bytes):\n # \"\"\"Expects that alert was in form of dict before converting to bytes for PS\"\"\"\n # alert_dict = json.loads(alert_bytes.decode('utf-8')) \n\n alert_dict = lass.deserialize_alert(alert_bytes)\n return [alert_dict]", "def decode(self, s: str) -> [str]:", "def decode_replay_attributes_events(contents):\n buffer = BitPackedBuffer(contents, 'little')\n attributes = {}\n if not buffer.done():\n attributes['source'] = buffer.read_bits(8)\n attributes['mapNamespace'] = buffer.read_bits(32)\n count = buffer.read_bits(32)\n attributes['scopes'] = {}\n while not buffer.done():\n value = {}\n value['namespace'] = buffer.read_bits(32)\n value['attrid'] = attrid = buffer.read_bits(32)\n scope = buffer.read_bits(8)\n value['value'] = buffer.read_aligned_bytes(4)[::-1].strip(b'\\x00')\n if not scope in attributes['scopes']:\n attributes['scopes'][scope] = {}\n if not attrid in attributes['scopes'][scope]:\n attributes['scopes'][scope][attrid] = []\n attributes['scopes'][scope][attrid].append(value)\n return attributes", "def decode(self):\n instr = self.fetch()", "def msgs_from_bytes(self, b):\n msgs = []\n # User remainder bytes\n parse_bytes = self.remainder + b.decode('ascii')\n # Find the first frame delimiter\n i = parse_bytes.find('\\r\\n')\n while i >= 0:\n # Try to parse a single message\n m = self._parse_msg(parse_bytes[:i])\n # Remove parsed bytes and delimter\n parse_bytes = parse_bytes[i+2:]\n # Add parsed message, if any\n if m:\n msgs.append(m)\n self.logger.debug('Parsed ASCII frame: address={}, function={}, len={}'.format(m.address, m.function, len(m.data) if m.data else 0))\n #else - warn?\n i = parse_bytes.find('\\r\\n')\n # Store any remaining bytes for the next 
pass\n self.remainder = parse_bytes\n return msgs", "def test_decode_trace(self):\n self.assertEqual(td.trace(), decoder.decode_trace(BytesIO(td.trace(True))))", "def iterparse(source, encoding='utf-8'):\n if hasattr(source, 'read'):\n for event in _parse2(source):\n yield event\n else:\n with io.open(source, encoding=encoding) as fh:\n for event in _parse2(fh):\n yield event", "def carve(self, bs, dataFile, verbose=False):\n _bs = bs\n records = []\n headers = []\n\n i = 0\n # Find all occurrences of the magic string\n found = _bs.findall(evt_header.MagicString, bytealigned=False)\n readSoFarBits = 0\n for idx in found:\n _bs.pos = idx\n r = EvtRecord()\n r.setPathname(dataFile)\n r.setPosition(_bs.pos)\n\n # Read an EVT header field:\n # The algorithm here is to find the message separator \n # and use that as a basis for locating the other fields.\n # Since we split large input files, \"offset\" fields are\n # invalid. \n\n # Message length\n fieldBits = 32\n lenIdx = idx - fieldBits # Set position to idx of length\n _bs.pos = lenIdx\n recordLength = _bs.read(fieldBits).uintle\n r.setField(\"length\", recordLength)\n readSoFarBits += fieldBits\n\n # Calculate size of variable data at end of record \n varDataSize = evt_record.FixedSize - recordLength \n # When reading the size in a header\n if varDataSize < 0: \n varDataSize = 0\n\n # Reset stream position\n _bs.pos = idx\n\n # Message separator\n fieldBits = 32 \n # Check to see if we are reading past end of stream\n data = self.carveField(_bs, \"reserved\", \"uint\",\\\n fieldBits, verbose)\n if data == self.ERROR_END_OF_STREAM:\n break\n r.setField(\"reserved\", data)\n\n # Record number\n fieldBits = 32 \n data = self.carveField(_bs, \"recordNumber\", \"uintle\",\\\n fieldBits, verbose)\n if data == self.ERROR_END_OF_STREAM:\n break\n r.setField(\"recordNumber\", data)\n\n # Date created\n fieldBits = 32 \n data = self.carveField(_bs, \"timeGenerated\", \"uintle\",\\\n fieldBits, verbose)\n if data == self.ERROR_END_OF_STREAM:\n break\n r.setField(\"timeGenerated\", data)\n\n # Date written\n fieldBits = 32 \n data = self.carveField(_bs, \"timeWritten\", \"uintle\",\\\n fieldBits, verbose)\n if data == self.ERROR_END_OF_STREAM:\n break\n r.setField(\"timeWritten\", data)\n\n # Event ID\n fieldBits = 16 \n data = self.carveField(_bs, \"eventID\", \"uintle\",\\\n fieldBits, verbose)\n if data == self.ERROR_END_OF_STREAM:\n break\n r.setField(\"eventID\", data)\n \n # Event RVA offset\n fieldBits = 16 \n data = self.carveField(_bs, \"eventRVA\", \"uintle\",\\\n fieldBits, verbose)\n if data == self.ERROR_END_OF_STREAM:\n break\n r.setField(\"eventRVA\", data)\n\n # Event type\n fieldBits = 16 \n data = self.carveField(_bs, \"eventType\", \"uintle\",\\\n fieldBits, verbose)\n if data == self.ERROR_END_OF_STREAM:\n break\n r.setField(\"eventType\", data)\n\n # Num strings\n fieldBits = 16 \n data = self.carveField(_bs, \"numStrings\", \"uintle\",\\\n fieldBits, verbose)\n if data == self.ERROR_END_OF_STREAM:\n break\n r.setField(\"numStrings\", data)\n\n # Category\n fieldBits = 16 \n data = self.carveField(_bs, \"eventCategory\", \"uintle\",\\\n fieldBits, verbose)\n if data == self.ERROR_END_OF_STREAM:\n break\n r.setField(\"eventCategory\", data)\n\n # Reserved flags \n fieldBits = 16 \n data = self.carveField(_bs, \"reservedFlags\", \"uint\",\\\n fieldBits, verbose)\n if data == self.ERROR_END_OF_STREAM:\n break\n r.setField(\"reservedFlags\", data)\n\n # Closing record number\n fieldBits = 32 \n data = self.carveField(_bs, 
\"closingRecordNumber\", \"uint\",\\\n fieldBits, verbose)\n if data == self.ERROR_END_OF_STREAM:\n break\n r.setField(\"closingRecordNumber\", data)\n\n # String offset\n fieldBits = 32 \n data = self.carveField(_bs, \"stringOffset\", \"uint\",\\\n fieldBits, verbose)\n if data == self.ERROR_END_OF_STREAM:\n break\n r.setField(\"stringOffset\", data)\n\n # User SID length\n fieldBits = 32\n data = self.carveField(_bs, \"userSidLength\", \"uintle\",\\\n fieldBits, verbose)\n if data == self.ERROR_END_OF_STREAM:\n break\n r.setField(\"userSidLength\", data)\n\n # User SID offset\n fieldBits = 32 \n data = self.carveField(_bs, \"userSidOffset\", \"uintle\",\\\n fieldBits, verbose)\n if data == self.ERROR_END_OF_STREAM:\n break\n r.setField(\"userSidOffset\", data)\n\n # Data length\n fieldBits = 32 \n data = self.carveField(_bs, \"dataLength\", \"uintle\",\\\n fieldBits, verbose)\n if data == self.ERROR_END_OF_STREAM:\n break\n r.setField(\"dataLength\", data)\n\n # Data offset\n fieldBits = 32\n data = self.carveField(_bs, \"dataOffset\", \"uintle\",\\\n fieldBits, verbose)\n if data == self.ERROR_END_OF_STREAM:\n break\n r.setField(\"dataOffset\", data)\n\n # Variable data\n # FIXME: dont rely on peek() to avoid reading past end of stream\n fieldBits = int(r.getField(\"length\"))\n try:\n data = _bs.peek(\"bytes\" + \":\" + str(fieldBits))\n except bitstring.ReadError:\n if verbose:\n print \"[EVT]: Unable to read EVT data field; \"\\\n \"it would be truncated\"\n break\n data = self.carveField(_bs, \"varData\", \"bytes\",\\\n fieldBits, verbose)\n if data == self.ERROR_END_OF_STREAM:\n break\n r.setField(\"varData\", data)\n\n # SID\n # FIXME: find out why sidLength is so weird\n #sidLength = r.getField(\"userSidLength\")\n #if sidLength > 0:\n # sidOffset = r.getField(\"userSidOffset\")\n # if sidOffset <= _bs.length:\n # _bs.pos = sidOffset\n # fieldBits = sidLength\n # if readSoFarBits + fieldBits >= _bs.len:\n # fieldBits = _bs.len - _bs.pos\n # sid = _bs.read(fieldBits).uint\n # r.setField(\"sid\", sid)\n # break\n # sid = _bs.read(fieldBits).uint\n # r.setField(\"sid\", sid)\n #readSoFarBits += fieldBits\n records.append(r)\n return (headers, records)", "def _server_events(self, response):\n with closing(response):\n for line in response.iter_lines(chunk_size=1):\n if not line:\n continue\n yield json.loads(line)", "def events_from_bytes(cls, data, res, frame_num):\n\t\tall_events = [np.zeros(res) for t in range(frame_num - 1)]\n\t\tfor i in range(res[0]):\n\t\t\tfor j in range(res[1]):\n\t\t\t\tevents = cls._pixel_events_from_bytes(data)\n\t\t\t\tfor event in events:\n\t\t\t\t\tall_events[event[1]][i, j] = event[0]\n\n\t\treturn all_events" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decodes and returns the replay header from the contents byte string.
def decode_replay_header(contents):
    decoder = VersionedDecoder(contents, typeinfos)
    return decoder.instance(replay_header_typeid)
[ "def decode_replay_header(contents):\r\n decoder = VersionedDecoder(contents, typeinfos)\r\n return decoder.instance(replay_header_typeid)", "def decode_header(byte_iter):\n try:\n return MMSDecoder.decode_mms_header(byte_iter)\n except wsp_pdu.DecodeError:\n return wsp_pdu.Decoder.decode_header(byte_iter)", "def decode_header(cls, stream):\n read = stream.read\n data = read(2)\n\n if len(data) != 2:\n print 'data:',data\n raise WebSocketError(\"Unexpected EOF while decoding header\")\n\n first_byte, second_byte = struct.unpack('!BB', data)\n\n header = cls(\n fin=first_byte & cls.FIN_MASK == cls.FIN_MASK,\n opcode=first_byte & cls.OPCODE_MASK,\n flags=first_byte & cls.HEADER_FLAG_MASK,\n length=second_byte & cls.LENGTH_MASK)\n\n has_mask = second_byte & cls.MASK_MASK == cls.MASK_MASK\n\n if header.opcode > 0x07:\n if not header.fin:\n raise ProtocolError(\n \"Received fragmented control frame: {0!r}\".format(data))\n\n # Control frames MUST have a payload length of 125 bytes or less\n if header.length > 125:\n raise FrameTooLargeException(\n \"Control frame cannot be larger than 125 bytes: \"\n \"{0!r}\".format(data))\n\n if header.length == 126:\n # 16 bit length\n data = read(2)\n\n if len(data) != 2:\n raise WebSocketError('Unexpected EOF while decoding header')\n\n header.length = struct.unpack('!H', data)[0]\n elif header.length == 127:\n # 64 bit length\n data = read(8)\n\n if len(data) != 8:\n raise WebSocketError('Unexpected EOF while decoding header')\n\n header.length = struct.unpack('!Q', data)[0]\n\n if has_mask:\n mask = read(4)\n\n if len(mask) != 4:\n raise WebSocketError('Unexpected EOF while decoding header')\n\n header.mask = mask\n\n return header", "def get_decoded_header(value):\n decoded_header_items = decode_header(value)\n decoded_header_value = ''\n for item in decoded_header_items:\n try:\n decoded_item = item[0].decode(item[1], 'ignore') if item[1] is not None else item[0]\n except:\n logger.warning(f\"Decoding went wrong for value '{value}'!\")\n # Pretend decoded item is empty :-(\n decoded_item = ''\n if isinstance(decoded_item, bytes):\n decoded_item = decoded_item.decode('ascii', 'ignore')\n decoded_header_value += decoded_item\n return decoded_header_value", "def decode_replay_details(contents):\r\n decoder = VersionedDecoder(contents, typeinfos)\r\n return decoder.instance(game_details_typeid)", "def decode_replay_details(contents):\n decoder = VersionedDecoder(contents, typeinfos)\n return decoder.instance(game_details_typeid)", "def parse(cls, bytes):\n h = cls.parse_header(bytes[:16])\n start_data = h.header_size\n end_data = h.header_size + h.data_size\n data = bytes[start_data:end_data].decode()\n assert len(data) == h.data_size\n return data", "def unpackRecHeader(self):\n return self.unpack('4s3i',16,'REC_HEAD')", "def decode_content(raw_content):\n return raw_content", "def decode_headers(header_lines):\n header_string = header_lines.decode(errors='ignore')\n header_native = re.sub(r'\\r\\n', '\\n', header_string)\n return header_native", "def header_content(self):\n\n if not self.has_header():\n pass\n\n return binascii.unhexlify(binascii.hexlify(bytes(bytearray(self.records[0].data)))).decode('ascii')", "def decode(cls, raw: bytes) -> \"EthernetHeader\":\n # unsigned char dmac[6];\n # unsigned char smac[6];\n # uint16_t ethertype;\n # unsigned char payload[];\n dmac = raw[:6]\n smac = raw[6:12]\n typ = socket.htons(struct.unpack(\"H\", raw[12:14])[0])\n payload = raw[14:]\n return EthernetHeader(dmac=dmac, smac=smac, typ=typ, 
payload=payload)", "def _unserialize_header(self, data, persistent_start):\n name = \"\"\n sbuffer = data\n # Skip characters until a valid message id appears\n while len(sbuffer) >= self.header_size:\n header = sbuffer[:self.header_size]\n if repr(header) in self.messages:\n name = header\n break\n if not persistent_start:\n break\n sbuffer = sbuffer[1:]\n return name, len(data) - len(sbuffer)", "def decode_replay_initdata(contents):\n decoder = BitPackedDecoder(contents, typeinfos)\n return decoder.instance(replay_initdata_typeid)", "def read_header(self):\n return struct.unpack(self.header_format, self.reader.read(8))", "def decode_replay(replay_file_obj):\n decoder = zstd.ZstdDecompressor()\n # Rewind to the beginning of the file obj, because\n # gcloud might have read it first\n replay_file_obj.seek(0)\n replay_data = replay_file_obj.read()\n try:\n decoded_data = decoder.decompress(replay_data)\n json_data = json.loads(decoded_data.decode('utf-8').strip())\n return json_data\n except zstd.ZstdError:\n # The replay file can't be decoded.\n return None\n finally:\n # Seek the replay file back to start so we can upload it.\n replay_file_obj.seek(0)", "def unpack_header(self, data):\r\n return struct.unpack(self.struct_header, data[:self.struct_header_size])", "def getDecodedHeaders(msg, cset='utf-8'):\n\n headers = ''\n for h, v in msg.items():\n uvalue = u''\n try:\n v = decode_header(re.sub('\\n\\s', ' ', v))\n except HeaderParseError:\n v = [(v, 'us-ascii')]\n for frag, cs in v:\n if not cs:\n cs = 'us-ascii'\n try:\n uvalue += unicode(frag, cs, 'replace')\n except LookupError:\n # The encoding charset is unknown. At this point, frag\n # has been QP or base64 decoded into a byte string whose\n # charset we don't know how to handle. We will try to\n # unicode it as iso-8859-1 which may result in a garbled\n # mess, but we have to do something.\n uvalue += unicode(frag, 'iso-8859-1', 'replace')\n headers += '%s: %s\\n' % (h, uvalue.encode(cset, 'replace'))\n return headers", "def ws_decode_frame(self):\n\n buf = self.ws_buffer\n if len(buf) < 14:\n return ''\n start = 2\n opcode = ord(buf[0]) & 0xf\n\n if opcode == 0x8: # close frame\n if self.ws_state == \"open\":\n self.ws_state = \"gotclose\"\n raise WException(1000)\n\n length = ord(buf[1]) & 0x7f\n if length == 126:\n length, = struct.unpack(\"!H\", buf[2:4])\n start += 2\n elif length == 127:\n length, = struct.unpack(\"!Q\", buf[2:10])\n start += 8\n\n mask = [ord(b) for b in buf[start:start + 4]]\n start += 4\n\n if len(buf) < start + length:\n return ''\n\n payload = buf[start:start + length]\n self.ws_buffer = buf[start + length:]\n\n clear = ''\n for i in range(len(payload)):\n clear += chr(mask[i % 4] ^ ord(payload[i]))\n\n if opcode == 0x1:\n clear = clear.decode(\"UTF8\")\n return clear" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decodes and returns the game details from the contents byte string.
def decode_replay_details(contents):
    decoder = VersionedDecoder(contents, typeinfos)
    return decoder.instance(game_details_typeid)
[ "def decode_replay_details(contents):\r\n decoder = VersionedDecoder(contents, typeinfos)\r\n return decoder.instance(game_details_typeid)", "def decode_content(raw_content):\n return raw_content", "def decode_replay_header(contents):\r\n decoder = VersionedDecoder(contents, typeinfos)\r\n return decoder.instance(replay_header_typeid)", "def decode_replay_header(contents):\n decoder = VersionedDecoder(contents, typeinfos)\n return decoder.instance(replay_header_typeid)", "def decode_message(message):", "def decode(byte_string):\n obj = pickle.loads(byte_string)\n return obj", "def decode(self):\n instr = self.fetch()", "def unpack_string(self):\n return Unpacker.unpack_string(self).decode(\"ascii\")", "def decode_message(self, raw):\n return raw.decode('utf-8')", "def decode(self, encoded):", "def decode(self, session_data):\r\n pickled = base64.decodestring(session_data)\r\n return pickle.loads(pickled)", "def decode_replay_initdata(contents):\n decoder = BitPackedDecoder(contents, typeinfos)\n return decoder.instance(replay_initdata_typeid)", "def Decode(self, encoded_data):", "def decode_content(self, raw_content):\n try:\n obj = pickle.loads(raw_content)\n return obj\n except Exception:\n raise IkatsException(\"Failed to load picked object. Context={}\".format(str(self)))", "def decode(binary):\n return json_mod.loads(binary.decode(\"utf-8\"))", "def decode(cls, data):\n h = struct.unpack('B', data)[0]\n # Bits 7-5 define the message type\n mtype = (h & 224) >> 5\n # Bits 1-0 define the major version\n major = h & 3\n m = MACHeader(mtype, major)\n return m", "def parse(cls, bytes):\n h = cls.parse_header(bytes[:16])\n start_data = h.header_size\n end_data = h.header_size + h.data_size\n data = bytes[start_data:end_data].decode()\n assert len(data) == h.data_size\n return data", "def _decode_str(self, buf):\n length = self._decode_vint(buf)\n result = buf.read(length)\n if len(result) != length:\n raise EndOfMessage(True)\n return result", "def _decode_text(self):\n\n print(f\"Hex decode; received message is {self.message}\")\n return bytes.fromhex(self.message).decode('utf-8')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decodes and returns the replay init data from the contents byte string.
def decode_replay_initdata(contents):
    decoder = BitPackedDecoder(contents, typeinfos)
    return decoder.instance(replay_initdata_typeid)
[ "def decode_replay_header(contents):\r\n decoder = VersionedDecoder(contents, typeinfos)\r\n return decoder.instance(replay_header_typeid)", "def decode_replay_header(contents):\n decoder = VersionedDecoder(contents, typeinfos)\n return decoder.instance(replay_header_typeid)", "def decode(byte_string):\n obj = pickle.loads(byte_string)\n return obj", "def loads(self, s):\n self._data = self.decoder.decode(s)", "def decode_replay_details(contents):\r\n decoder = VersionedDecoder(contents, typeinfos)\r\n return decoder.instance(game_details_typeid)", "def decode_replay_details(contents):\n decoder = VersionedDecoder(contents, typeinfos)\n return decoder.instance(game_details_typeid)", "def decode_content(raw_content):\n return raw_content", "def decode(self, data):\n return self.__cipher.decrypt(data)", "def Decode(self, encoded_data):", "def decode(self, s: str) -> [str]:", "def decode(self, session_data):\r\n pickled = base64.decodestring(session_data)\r\n return pickle.loads(pickled)", "def decode(self, encoded):", "def deserialize(self, str):\n try:\n end = 0\n _x = self\n start = end\n end += 152\n (_x.tcp, _x.ori, _x.zone, _x.vacuum, _x.workx, _x.worky, _x.workz, _x.workq0, _x.workqx, _x.workqy, _x.workqz, _x.toolx, _x.tooly, _x.toolz, _x.toolq0, _x.toolqx, _x.toolqy, _x.toolqz, _x.ret,) = _struct_2d2q14dq.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.msg = str[start:end].decode('utf-8')\n else:\n self.msg = str[start:end]\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def test_decode(self):\n pass # TODO(tlarsen)", "def decode (cls, bytes, cmddict):\n attrs = SeqCmdAttrs.decode(bytes[0:1])\n delay = SeqDelay .decode(bytes[1:4])\n cmd = cmddict .decode(bytes[4:] )\n return cls(cmd, delay, attrs)", "def decode(self):\n instr = self.fetch()", "def unpack_string(self):\n return Unpacker.unpack_string(self).decode(\"ascii\")", "def _decode_str(self, buf):\n length = self._decode_vint(buf)\n result = buf.read(length)\n if len(result) != length:\n raise EndOfMessage(True)\n return result", "def extract(self, data):\n return ujson.loads(self.cipher.decrypt(data))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decodes and yields each attribute from the contents byte string.
def decode_replay_attributes_events(contents):
    buffer = BitPackedBuffer(contents, 'little')
    attributes = {}
    if not buffer.done():
        attributes['source'] = buffer.read_bits(8)
        attributes['mapNamespace'] = buffer.read_bits(32)
        count = buffer.read_bits(32)
        attributes['scopes'] = {}
        while not buffer.done():
            value = {}
            value['namespace'] = buffer.read_bits(32)
            value['attrid'] = attrid = buffer.read_bits(32)
            scope = buffer.read_bits(8)
            value['value'] = buffer.read_aligned_bytes(4)[::-1].strip(b'\x00')
            if scope not in attributes['scopes']:
                attributes['scopes'][scope] = {}
            if attrid not in attributes['scopes'][scope]:
                attributes['scopes'][scope][attrid] = []
            attributes['scopes'][scope][attrid].append(value)
    return attributes
[ "def unpack(self, buff, offset=0):\n begin = offset\n for name, value in self.get_class_attributes():\n size = self._unpack_attribute(name, value, buff, begin)\n begin += size", "def decode(self, b):\n # Check what is included in this attributes file\n flags = int.from_bytes(b[0:4], byteorder='big', signed=False)\n\n # Use FILEXFER_names as a guide\n has_size = flags&1 == 1\n has_uidgid = flags&2 == 2\n has_permissions = flags&4 == 4\n has_acmodtime = flags&8 == 8\n has_extended = flags&16 == 16\n\n # Extract the easy ints\n i = 4\n if has_size:\n self.size = int.from_bytes(b[i:i+8], byteorder='big', signed=False)\n i += 8\n if has_uidgid:\n self.uid = int.from_bytes(b[i:i+4], byteorder='big', signed=False)\n self.gid = int.from_bytes(b[i+4:i+8], byteorder='big', signed=False)\n i += 8\n if has_permissions:\n self.permissions = int.from_bytes(b[i:i+4], byteorder='big', signed=False)\n i += 4\n if has_acmodtime:\n self.atime = int.from_bytes(b[i:i+4], byteorder='big', signed=False)\n self.mtime = int.from_bytes(b[i+4:i+8], byteorder='big', signed=False)\n i += 8\n\n # Get the extensions (if included)\n if has_extended:\n num_extensions = int.from_bytes(b[i:i+4], byteorder='big', signed=False)\n i+= 4\n\n for extension in range(1, num_extensions):\n string_len = int.from_bytes(b[i:i+4], byteorder='big', signed=False)\n type = b[i+4:i+4+string_len].decode(\"utf-8\")\n\n self.extended_type.append(type)\n\n i += 4 + string_len\n\n string_len = int.from_bytes(b[i:i + 4], byteorder='big', signed=False)\n data = b[i+4:i+4+string_len].decode(\"utf-8\")\n\n\n self.extended_data.append(data)\n\n i += 4 + string_len\n\n # Record byte length for parsing purposes\n self.byte_length = i", "def unpack(self, buff, offset=0):\n begin = offset\n for name, value in self.get_class_attributes():\n if type(value).__name__ != \"Header\":\n size = self._unpack_attribute(name, value, buff, begin)\n begin += size", "def parse_attrs(buf):\r\n attrs = []\r\n while buf:\r\n t = ord(buf[0])\r\n l = ord(buf[1])\r\n if l < 2:\r\n break\r\n d, buf = buf[2:l], buf[l:]\r\n attrs.append((t, d))\r\n return attrs", "def read(self, istream):\n super(GetAttributeListResponsePayload, self).read(istream)\n tstream = utils.BytearrayStream(istream.read(self.length))\n\n if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, tstream):\n uid = primitives.TextString(tag=enums.Tags.UNIQUE_IDENTIFIER)\n uid.read(tstream)\n self.uid = uid.value\n else:\n raise exceptions.InvalidKmipEncoding(\n \"expected uid encoding not found\")\n\n names = list()\n while(self.is_tag_next(enums.Tags.ATTRIBUTE_NAME, tstream)):\n name = primitives.TextString(tag=enums.Tags.ATTRIBUTE_NAME)\n name.read(tstream)\n names.append(name.value)\n self.attribute_names = names\n\n self.is_oversized(tstream)\n self.validate()", "def unpack(self, s):\n\n raise NotImplementedError()", "def _iterattrs(self, handle=\"\"):\n if not handle:\n handle = self.handle\n attr = gv.firstattr(handle)\n while gv.ok(attr):\n yield gv.nameof(attr), decode_page(gv.getv(handle, attr))\n attr = gv.nextattr(handle, attr)", "def decode(self, s: str) -> [str]:", "def loads(self, s):\n self._data = self.decoder.decode(s)", "def decode_attributes(response: dict):\n decoded = {}\n for var, attr in cls.DECODER_ATTRIBUTE_MAP.items():\n if var in response:\n decoded[attr] = response[var]\n\n content = json.loads(response[\"Content\"])\n decoded = {**decoded, **content}\n\n return decoded", "def decode(self):\n instr = self.fetch()", "def decode (cls, bytes, cmddict=None):\n byte = struct.unpack('B', 
bytes)[0]\n self = cls()\n defval = self.default\n\n for bit, name, value0, value1, default in SeqCmdAttrs.Table:\n mask = 1 << bit\n bitset = mask & byte\n defset = mask & defval\n if bitset != defset:\n if bitset:\n self.attrs[name] = value1\n else:\n self.attrs[name] = value0\n\n return self", "def read(self, istream):\n super(GetAttributeListResponsePayload, self).read(istream)\n tstream = utils.BytearrayStream(istream.read(self.length))\n\n if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, tstream):\n self._unique_identifier = primitives.TextString(\n tag=enums.Tags.UNIQUE_IDENTIFIER\n )\n self._unique_identifier.read(tstream)\n else:\n self._unique_identifier = None\n\n names = list()\n while self.is_tag_next(enums.Tags.ATTRIBUTE_NAME, tstream):\n name = primitives.TextString(tag=enums.Tags.ATTRIBUTE_NAME)\n name.read(tstream)\n names.append(name)\n self._attribute_names = names\n\n self.is_oversized(tstream)", "def decode(self, encoded):", "def parse_bytes(self, bytes_):\n for byte in bytes_:\n self.parse_byte(byte)", "def decode(self, data_gen):\n\t\tfor mb in data_gen:\n\t\t\tyield self.mojibake_to_bytes(mb)", "def decrypt_attr(data, key):\n data = MegaCrypto.base64_decode(data)\n k, iv, meta_mac = MegaCrypto.get_cipher_key(key)\n attr = MegaCrypto.cbc_decrypt(data, k)\n\n #: Data is padded, 0-bytes must be stripped\n return json.loads(\n re.search(r'{.+?}', attr).group(0)) if attr[:6] == 'MEGA{\"' else False", "def read(self,):\n b = bytearray()\n c = None\n while True:\n c = self.s.read(1)\n # If it's an escape character, get the next char\n if c == ESCAPE_CHAR:\n c = self.s.read(1)\n if c == '':\n return\n yield c\n continue\n\n if len(c) == 0 or c == REPLY_TERMINATOR:\n return\n yield c\n return", "def from_bytes(self, string):\n msg = srsly.msgpack_loads(gzip.decompress(string))\n self.attrs = msg[\"attrs\"]\n self.strings = set(msg[\"strings\"])\n lengths = numpy.fromstring(msg[\"lengths\"], dtype=\"int32\")\n flat_spaces = numpy.fromstring(msg[\"spaces\"], dtype=bool)\n flat_tokens = numpy.fromstring(msg[\"tokens\"], dtype=\"uint64\")\n shape = (flat_tokens.size // len(self.attrs), len(self.attrs))\n flat_tokens = flat_tokens.reshape(shape)\n flat_spaces = flat_spaces.reshape((flat_spaces.size, 1))\n self.tokens = NumpyOps().unflatten(flat_tokens, lengths)\n self.spaces = NumpyOps().unflatten(flat_spaces, lengths)\n for tokens in self.tokens:\n assert len(tokens.shape) == 2, tokens.shape\n return self" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes the square root Wiener filter (WF) gain function.
def srwf(xi):
    return np.sqrt(wienergain(xi))  # SRWF gain function.
[ "def get_filter_gain(b, a, f_gain, fs):\n # Save the passband gain\n w, h = signal.freqz(b, a)\n w_gain = f_gain * 2 * np.pi / fs\n\n ind = np.where(w >= w_gain)[0][0]\n gain = abs(h[ind])\n\n return gain", "def gain(dB):\n return 10.**(dB/10.)", "def A_weighting(fs):\n b, a = A_weighting_analog()\n\n # Use the bilinear transformation to get the digital filter.\n return bilinear(b, a, fs)", "def compute_wf(self, state): \n\n # For the coarse Wigner function the dimension is that of the \n # underlying affine plane.\n W = np.zeros((self.coarse_field.dim, self.coarse_field.dim)) \n\n # Turn kets into density operators if need be.\n if state.shape[0] == 1: \n state = np.outer(state, np.conj(state)) \n \n # A sorted copy of the subfield for plotting\n sorted_els = sorted(self.coarse_field.elements)\n\n # The coarse Wigner function is indexed by the subfield, so use this.\n for alpha in self.subfield: \n for beta in self.subfield: \n coarse_point = (self.subfield_map[alpha], self.subfield_map[beta])\n mat = np.trace(np.dot(state, self.coarse_kernel[coarse_point]))\n \n # Determine where in the Wigner matrix to put this value\n a = sorted_els.index(coarse_point[0])\n b = sorted_els.index(coarse_point[1])\n\n W[a][b] = (1.0 / self.field.dim) * mat \n\n return W", "def butterworth_filter(freq):\n\tf_raw = 1/(0.00000002*100*33)\n\tb = np.array([[-32092,15750],[-31238,14895]])*2.0**(-14)\n\tomega = 2*np.pi*freq/f_raw\n\te1, e2 = np.exp(-1j*omega), np.exp(-2j*omega)\n\ttmp = (1+2*e1+e2)**2/(1+b[0,0]*e1+b[0,1]*e2)/(1+b[1,0]*e1+b[1,1]*e2)\n\treturn tmp * (1+sum(b[0]))*(1+sum(b[1]))/16", "def WB(x, Fs):\n\n N = len(x)\n w = np.hamming(N)\n\n x1 = x * w\n x1 = lfilter([1], [1., 0.63], x1)\n\n # Fs = spf.getframerate()\n ncoeff = 2 + Fs / 1000\n A = librosa.lpc(x1, int(ncoeff))\n rts = np.roots(A)\n rts = [r for r in rts if np.imag(r) >= 0]\n angz = np.arctan2(np.imag(rts), np.real(rts))\n frqs = angz * (Fs / (2 * math.pi))\n rts = np.array(rts)\n bw = -1 / 2 * (Fs / (2 * np.pi)) * np.log(abs(rts))\n\n frqs = [round(frqs[i], 2) for i in range(0, len(frqs))]\n frqs = sorted(frqs)\n bw = [x for _, x in sorted(zip(frqs, bw))]\n F1, F2 = frqs[1], frqs[2]\n bwF1, bwF2 = bw[1], bw[2]\n # print(bwF1)\n # print(bwF2)\n if bwF1 > 400 and bwF2 > 600:\n return True\n return False", "def calc_weight(self):\n self.weight = (self.Profile.Atot*1e-6 * self.lStiff*1e-3\n * self.Material.density\n )\n pass", "def sharpness_penalty(self):\n # This polynomial function gives the gain for peaking filter which achieves 18 dB / octave max derivative\n # The polynomial estimate is accurate in the vicinity of 18 dB / octave\n gain_limit = -0.09503189270199464 + 20.575128011847003 * (1 / self.q)\n # Scaled sigmoid function as penalty coefficient\n x = self.gain / gain_limit - 1\n sharpness_penalty_coefficient = 1 / (1 + np.e ** (-x * 100))\n return np.mean(np.square(self.fr * sharpness_penalty_coefficient))", "def sharpen_weights(w, gamma):\n\n w = w**gamma\n w /= np.sum(w)\n\n return w", "def Equ_wave (previous_U):\n return lambda U: (U-previous_U)/DELTA_t+G((U + previous_U)/2)", "def weighting(wb, m, a):\n s = control.tf([1, 0], [1])\n return (s/m + wb) / (s + wb*a)", "def band_penalty(self):\n fc_ix = np.argmin(np.abs(self.f - self.fc)) # Index to frequency array closes to center frequency\n # Number of indexes on each side of center frequency, not extending outside, only up to 10 kHz\n n = min(fc_ix, self.ix10k - fc_ix)\n if n == 0:\n return 0.0\n return np.mean(np.square(self.fr[fc_ix - n:fc_ix] - (self.gain - self.fr[fc_ix 
+ n - 1:fc_ix - 1:-1])))", "def evolve_fqe_givens(wfn: Wavefunction, u: np.ndarray) -> Wavefunction:\n wfn = evolve_fqe_givens_sector(wfn, u, sector='alpha')\n wfn = evolve_fqe_givens_sector(wfn, u, sector='beta')\n return wfn", "def blackbody_flux_wavelength(T, wvlngth):\n\n wvlngth = wvlngth*(1.0E-9) #convert from nm to m\n flux = 2.0*H*C**2.0/wvlngth**5.0*1.0/(exp(H*C/(wvlngth*KB*T))-1.0)\n flux = flux*(1.0E-9) #convert back to nm-1\n return flux*pi", "def gaussbroad(w, s, hwhm):\n \"\"\"\n History\n --------\n Dec-90 GB,GM\n Rewrote with fourier convolution algorithm.\n Jul-91 AL\n Translated from ANA to IDL.\n 22-Sep-91 JAV\n Relaxed constant dispersion check# vectorized, 50% faster.\n 05-Jul-92 JAV\n Converted to function, handle nonpositive hwhm.\n Oct-18 AW\n Python version\n \"\"\"\n\n # Warn user if hwhm is negative.\n if hwhm < 0:\n logger.warning(\"Forcing negative smoothing width to zero.\")\n\n # Return input argument if half-width is nonpositive.\n if hwhm <= 0:\n return s # true: no broadening\n\n # Calculate (uniform) dispersion.\n nw = len(w) ## points in spectrum\n wrange = w[-1] - w[0]\n dw = wrange / (nw - 1) # wavelength change per pixel\n\n # Make smoothing gaussian; extend to 4 sigma.\n # 4.0 / sqrt(2.0 * alog(2.0)) = 3.3972872\n # sqrt(alog(2.0)) = 0.83255461\n # sqrt(alog(2.0) / pi) = 0.46971864\n # (*1.0000632 to correct for >4 sigma wings)\n if hwhm >= 5 * wrange:\n return np.full(nw, np.sum(s) / nw)\n ## points in half gaussian\n nhalf = int(3.3972872 * hwhm / dw)\n ## points in gaussian (odd!)\n ng = 2 * nhalf + 1\n # wavelength scale of gaussian\n wg = dw * (np.arange(ng, dtype=float) - (ng - 1) / 2)\n # convenient absisca\n xg = (0.83255461 / hwhm) * wg\n # unit area gaussian w / FWHM\n gpro = (0.46974832 * dw / hwhm) * np.exp(-xg * xg)\n gpro = gpro / np.sum(gpro)\n\n # Pad spectrum ends to minimize impact of Fourier ringing.\n sout = convolve(s, gpro, mode=\"nearest\")\n\n return sout", "def landweber(Gw, uw, tau, f, eq_shift):\n\n # We have to shift fw in time to account for the negative part\n # time values in the signal\n gp1 = f + tau * np.real(ifft((np.conj(Gw) * eq_shift * (uw - Gw * fft(\n f)))))\n\n return gp1", "def generate_coefficients(self):\n A = 10**(self.G/40.0)\n w0 = 2.0 * np.pi * (self.fc / self.rate)\n alpha = np.sin(w0) / (2.0 * self.Q)\n\n if self.filter_type == 'high_shelf':\n b0 = A * ( (A+1) + (A-1) * np.cos(w0) + 2 * np.sqrt(A) * alpha )\n b1 = -2 * A * ( (A-1) + (A+1) * np.cos(w0) )\n b2 = A * ( (A+1) + (A-1) * np.cos(w0) - 2 * np.sqrt(A) * alpha )\n a0 = (A+1) - (A-1) * np.cos(w0) + 2 * np.sqrt(A) * alpha\n a1 = 2 * ( (A-1) - (A+1) * np.cos(w0) )\n a2 = (A+1) - (A-1) * np.cos(w0) - 2 * np.sqrt(A) * alpha\n elif self.filter_type == 'low_shelf':\n b0 = A * ( (A+1) - (A-1) * np.cos(w0) + 2 * np.sqrt(A) * alpha )\n b1 = 2 * A * ( (A-1) - (A+1) * np.cos(w0) )\n b2 = A * ( (A+1) - (A-1) * np.cos(w0) - 2 * np.sqrt(A) * alpha )\n a0 = (A+1) + (A-1) * np.cos(w0) + 2 * np.sqrt(A) * alpha\n a1 = -2 * ( (A-1) + (A+1) * np.cos(w0) )\n a2 = (A+1) + (A-1) * np.cos(w0) - 2 * np.sqrt(A) * alpha\n elif self.filter_type == 'high_pass':\n b0 = (1 + np.cos(w0))/2\n b1 = -(1 + np.cos(w0))\n b2 = (1 + np.cos(w0))/2\n a0 = 1 + alpha\n a1 = -2 * np.cos(w0)\n a2 = 1 - alpha\n elif self.filter_type == 'low_pass':\n b0 = (1 - np.cos(w0))/2\n b1 = (1 - np.cos(w0))\n b2 = (1 - np.cos(w0))/2\n a0 = 1 + alpha\n a1 = -2 * np.cos(w0)\n a2 = 1 - alpha\n elif self.filter_type == 'peaking':\n b0 = 1 + alpha * A\n b1 = -2 * np.cos(w0)\n b2 = 1 - alpha * A\n a0 = 1 + 
alpha / A\n a1 = -2 * np.cos(w0)\n a2 = 1 - alpha / A\n elif self.filter_type == 'notch':\n b0 = 1 \n b1 = -2 * np.cos(w0)\n b2 = 1\n a0 = 1 + alpha\n a1 = -2 * np.cos(w0)\n a2 = 1 - alpha\n elif self.filter_type == 'high_shelf_DeMan':\n K = np.tan(np.pi * self.fc / self.rate) \n Vh = np.power(10.0, self.G / 20.0)\n Vb = np.power(Vh, 0.499666774155)\n a0_ = 1.0 + K / self.Q + K * K\n b0 = (Vh + Vb * K / self.Q + K * K) / a0_\n b1 = 2.0 * (K * K - Vh) / a0_\n b2 = (Vh - Vb * K / self.Q + K * K) / a0_\n a0 = 1.0\n a1 = 2.0 * (K * K - 1.0) / a0_\n a2 = (1.0 - K / self.Q + K * K) / a0_\n elif self.filter_type == 'high_pass_DeMan':\n K = np.tan(np.pi * self.fc / self.rate)\n a0 = 1.0\n a1 = 2.0 * (K * K - 1.0) / (1.0 + K / self.Q + K * K)\n a2 = (1.0 - K / self.Q + K * K) / (1.0 + K / self.Q + K * K)\n b0 = 1.0\n b1 = -2.0\n b2 = 1.0\n else:\n raise ValueError(\"Invalid filter type\", self.filter_type) \n\n self.b, self.a = np.array([b0, b1, b2])/a0, np.array([a0, a1, a2])/a0\n self.reset_state()", "def get_weight(ew1, ew2):\n dw = flu.delta_epiweeks(ew1, ew2)\n yr = 52.2\n hl1, hl2, bw = yr, 1, 4\n a = 0.05\n #b = (np.cos(2 * np.pi * (dw / yr)) + 1) / 2\n b = np.exp(-((min(dw % yr, yr - dw % yr) / bw) ** 2))\n c = 2 ** -(dw / hl1)\n d = 1 - 2 ** -(dw / hl2)\n return (a + (1 - a) * b) * c * d", "def A_weight(signal, fs):\n\n b, a = A_weighting(fs)\n return lfilter(b, a, signal)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the xpath to the user folder link
def get_user_folder_link_xpath():
    return links['users_folder'].get('folder_xpath')
[ "def get_enrolment_methods_link_xpath():\n return links['users_folder']['enrolment_link'].get('xpath')", "def get_home_page_link_xpath():\n return links['home_page_link'].get('xpath')", "def _getLDAPUserFolder(self, user):\n return getattr(user.acl_users, self.id)._getLDAPUserFolder()", "def get_home_dir(user):\n username = user.username\n root_dir = File.objects.filter(parent=None)[0]\n user_dir = root_dir.parent_node.filter(name='user')[0]\n home_dir = user_dir.parent_node.filter(name=username)[0]\n return home_dir", "def getRootPath(self, _pRootPath):\r\n resp = self.do_request('GET', \"/\" + _pRootPath + \"/\")\r\n status = resp.status\r\n if status == 401:\r\n raise ValueError(\"Authentication failed. Exiting!\")\r\n elif status == 404:\r\n raise ValueError(\"Exchange user path doesn't exist. Exiting!\")\r\n elif status == 200:\r\n pass\r\n else:\r\n raise ValueError(\"Status = %s. Exiting!\" % status)\r\n\r\n # get the respons text\r\n text = resp.read()\r\n\r\n # find the content of the <BASE href=\"...\"> element\r\n # ^^^ \r\n a = re.compile('<BASE href=\"([^\"]+)\">')\r\n m = a.search(text)\r\n\r\n if not m:\r\n raise ValueError(\"Could not find <BASE href=\\\"..\\\"> tag. Exiting!\")\r\n \r\n return m.group(1)", "def GetHomeFolder(self): # real signature unknown; restored from __doc__\n pass", "def foaflink(username):\n return userlink(username) + 'data/foaf'", "def userHomeLinkURL(self):\r\n member = self.portalState.member()\r\n userid = member.getId()\r\n return \"%s/author/%s\" % (self.portalState.navigation_root_url(), userid)", "def get_user_path(user, exact=False):\n FM_ROOT = settings.FILE_MANAGER_ROOT\n if user.is_superuser and not exact:\n return FM_ROOT\n else:\n return os.path.join(FM_ROOT, user.username)", "def folder_path(self) -> str:\n return pulumi.get(self, \"folder_path\")", "def user_directories():\r\n section = document.add_section()\r\n new_width, new_height = section.page_height, section.page_width\r\n section.orientation = WD_ORIENT.LANDSCAPE\r\n section.page_width = 10058400\r\n section.page_height = 7772400\r\n document.add_heading('User Directories', level=1)\r\n userdirectories = get_qlik_sense.get_userdirectory()\r\n num_of_udc = len(userdirectories)\r\n table = document.add_table(rows=num_of_udc+1, cols=6)\r\n table.style = 'Grid Table 1 Light Accent 1'\r\n row = table.rows[0]\r\n row.cells[0].text = 'name'\r\n row.cells[1].text = 'userDirectoryName'\r\n row.cells[2].text = 'configured'\r\n row.cells[3].text = 'operational'\r\n row.cells[4].text = 'type'\r\n row.cells[5].text = 'syncOnlyLoggedInUsers'\r\n for directory in range(num_of_udc):\r\n row = table.rows[directory+1]\r\n row.cells[0].text = str(userdirectories[directory][0])\r\n row.cells[1].text = str(userdirectories[directory][1])\r\n row.cells[2].text = str(userdirectories[directory][2])\r\n row.cells[3].text = str(userdirectories[directory][3])\r\n row.cells[4].text = str(userdirectories[directory][4])\r\n row.cells[5].text = str(userdirectories[directory][5])\r\n\r\n # document.add_page_break()\r", "def build_user_paths():\n for path in USER_DIRLIST:\n if os.path.islink(path):\n pass # ok\n elif not os.path.isdir(path):\n os.makedirs(path)", "def _get_xpath(elem, root_xpath):\n if elem.classes._get_class_value():\n return '//%s[@class=\"%s\"]' % (elem.tag, elem.classes._get_class_value())\n else:\n return '%s/%s' % (root_xpath, str(elem.tag))", "def get_main_courses_link_xpath():\n return links['main_courses_page_link'].get('xpath')", "def list_user_folders(self, token):\n parameters = 
dict()\n parameters['token'] = token\n response = self.request('midas.user.folders', parameters)\n return response", "def getLoginLink(self):\r\n if users.get_current_user():\r\n url = users.create_logout_url(self.request.uri)\r\n url_linktext = 'Logout'\r\n else:\r\n url = users.create_login_url(self.request.uri)\r\n url_linktext = 'Login'\r\n return (url, url_linktext)", "def userFolder():\n #path=os.path.abspath(tempfile.gettempdir()+\"/swhlab/\")\n #don't use tempdir! it will get deleted easily.\n path=os.path.expanduser(\"~\")+\"/.swhlab/\" # works on windows or linux\n # for me, path=r\"C:\\Users\\swharden\\.swhlab\"\n if not os.path.exists(path):\n print(\"creating\",path)\n os.mkdir(path)\n return os.path.abspath(path)", "def get_absolute_url(self) -> str:\n return \"/users/%s/\" % self.email", "def test_getLinkrelToParentDirectory(self):\n linkrel = self.builder.getLinkrel(FilePath(\"/foo\"),\n FilePath(\"/foo/bar\"))\n self.assertEquals(linkrel, \"../\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the xpath to the enrolment methods link
def get_enrolment_methods_link_xpath():
    return links['users_folder']['enrolment_link'].get('xpath')
[ "def get_main_courses_link_xpath():\n return links['main_courses_page_link'].get('xpath')", "def clickOnEnrollButton(self):\n self.elementClick(locator=self._enroll_button)", "def functionURI(self):\n ret = libxml2mod.xmlXPathGetFunctionURI(self._o)\n return ret", "def get_xpath_next_button(self) -> str:\n\n return self.__xpath_next_button", "def getLink(self):", "def get_xpath_reservation_button(self) -> str:\n\n return self.__xpath_reservation_button", "def get_resume_link(self, element, **kwargs):", "def get_accomplishment_link(element):\n try:\n return element.find_element_by_class_name(\n \"pv-accomplishment-entity__external-source\"\n ).get_attribute(\"href\")\n except NoSuchElementException:\n return \"\"", "def navigate_to():\n return Navi.navigate_to(\"XML Repoll Files\")", "def etlWorkflowUrl(self):\n return self.sdaUrl + \"/workflows/_etl\"", "def oed_url(self):\n return 'http://www.oed.com/view/th/class/%d' % self.id", "def test_view_enabled(self, method, url):\n with override_waffle_switch(COURSE_ENROLLMENT_ADMIN_SWITCH, active=True):\n response = getattr(self.client, method)(url)\n assert response.status_code == 200", "def navigate_to():\n return Navi.navigate_to(\"till audit\")", "def test_getLinkZopeAdapterRegistry(self):\n sut = self.getInventoryWithZope()\n\n result = sut.getLink(\"zope.interface.adapter.AdapterRegistry\")\n\n self.assertEqual(\"https://zope.tld/adapter.html\", result)", "def get_home_page_link_xpath():\n return links['home_page_link'].get('xpath')", "def _get_xpath(elem, root_xpath):\n if elem.classes._get_class_value():\n return '//%s[@class=\"%s\"]' % (elem.tag, elem.classes._get_class_value())\n else:\n return '%s/%s' % (root_xpath, str(elem.tag))", "def actionURL(self):\n raise NotImplementedError()", "def clickEnrollSubmitButton(self):\n self.elementClick(locator=self._submit_enroll, locatorType=\"xpath\")", "def get_xpath(xpath=\"\"):\n query = {\"type\": \"config\", \"action\": \"get\", \"xpath\": xpath}\n\n return __proxy__[\"panos.call\"](query)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a custom boolean operator. This method is shorthand for calling ``op()`` with ``is_comparison=True``.
def bool_op(
    self,
    opstring: str,
    precedence: int = 0,
    python_impl: Optional[Callable[..., Any]] = None,
) -> Callable[[Any], Operators]:
    return self.op(
        opstring,
        precedence=precedence,
        is_comparison=True,
        python_impl=python_impl,
    )
[ "def __bool__(self):\n if self.operator in (operators.eq, operators.ne):\n # this is using the eq/ne operator given int hash values,\n # rather than Operator, so that \"bool\" can be based on\n # identity\n return self.operator(*self._orig) # type: ignore\n else:\n raise TypeError(\"Boolean value of this clause is not defined\")", "def logical_operator(self):\n return self._logical_operator", "def is_binary_operator(oper):\n # definition:\n # memeber in class\n # ret-type operator symbol(arg)\n # globally\n # ret-type operator symbol( arg1, arg2 )\n symbols = [\n ',', '()', '[]', '!=', '%', '%=', '&', '&&', '&=', '*', '*=', '+',\n '+=', '-', '-=', '->', '->*', '/', '/=', '<', '<<', '<<=', '<=', '=',\n '==', '>', '>=', '>>', '>>=', '^', '^=', '|', '|=', '||']\n if not isinstance(oper, calldef.operator_t):\n return False\n if oper.symbol not in symbols:\n return False\n if isinstance(oper, calldef.member_operator_t):\n if 1 == len(oper.arguments):\n return True\n else:\n return False\n else:\n if 2 == len(oper.arguments):\n return True\n else:\n return False", "def visit_BoolOp(self, node):\n raise NotImplementedError(\"Boolean operations are not supported: \"\n \"Line Number: {} Column Offset: \"\n \"{}\".format(node.lineno, node.col_offset))", "def is_operator(self):\n\n return self.get_type() == TOKEN_TYPE_OPERATOR", "def isOperator(self, *args):\n return _libsbml.ASTBasePlugin_isOperator(self, *args)", "def on_true(self) -> global___Expression:", "def is_operator(cls, method_name):\n try:\n getattr(cls, method_name)\n except Exception:\n return False\n return Scenario.meta(cls, \"operator\", method_name, default=False)", "def __or__(self, obj):\n return self._boolean_operation(obj, operator.__or__)", "def pl_true(exp, model={}):\n op, args = exp.op, exp.args\n if exp == TRUE:\n return True\n elif exp == FALSE:\n return False\n elif is_prop_symbol(op):\n return model.get(exp)\n elif op == '~':\n p = pl_true(args[0], model)\n if p is None: return None\n else: return not p\n elif op == '|':\n result = False\n for arg in args:\n p = pl_true(arg, model)\n if p is True: return True\n if p is None: result = None\n return result\n elif op == '&':\n result = True\n for arg in args:\n p = pl_true(arg, model)\n if p is False: return False\n if p is None: result = None\n return result\n p, q = args\n if op == '>>':\n return pl_true(~p | q, model)\n elif op == '<<':\n return pl_true(p | ~q, model)\n pt = pl_true(p, model)\n if pt is None: return None\n qt = pl_true(q, model)\n if qt is None: return None\n if op == '<=>':\n return pt == qt\n elif op == '^':\n return pt != qt\n else:\n raise ValueError, \"illegal operator in logic expression\" + str(exp)", "def isOperator(self):\n return _libsbml.ASTNode_isOperator(self)", "def test04_boolean_operator(self):\n\n import _cppyy\n number = _cppyy.gbl.number\n\n n = number(20)\n assert n\n\n n = number(0)\n assert not n", "def _true(*args, **kwargs):\n return True", "def _eq(self, value: Any) -> bool:\n return value == self.op", "def is_operator(formula):\n return is_binary_operator(formula) or isinstance(formula, Not)", "def operator(self) -> Optional[LogicalOperator]:\n return self.__operator", "def __and__(self, obj):\n return self._boolean_operation(obj, operator.__and__)", "def match_operator(self, operator: str) -> bool:\n if self.is_operator(operator):\n self.next()\n return True\n else:\n return False", "def operator_present(input_str): # HELPER\n operator_list = ['+','-','*','/','**','<<','>>']\n\n if input_str in operator_list:\n return True\n 
else: return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Implement the ``<`` operator. In a column context, produces the clause ``a < b``.
def __lt__(self, other: Any) -> ColumnOperators: return self.operate(lt, other)
[ "def __lt__(self, other):\n return self.calculate(lt, other)", "def __lt__(self, other):\n return OPERATOR_MAP[self.value][1] < OPERATOR_MAP[other.value][1]", "def lt(x, y):\n return x < y", "def __lt__(self, other_feature_or_val):\n from featuretools.primitives import LessThan\n return LessThan(self, other_feature_or_val)", "def __gt__(self, other: Any) -> ColumnOperators:\n return self.operate(gt, other)", "def __lt__(self, other):\n return self.x > other.x", "def less_than(self, value):\n\n return self._add_condition(f\"<{value}\", lambda other: other < value)", "def __lt__(self, other):\n return self.element() < other.element()", "def test_less_than(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"lessThan\"),\n torch.randn(3, 4, 5),\n torch.randn(3, 4, 5),\n fusible_ops={\"aten::lt\"},\n )", "def __lt__(self, rhs):\n return _table.Connection___lt__(self, rhs)", "def less(x1, x2):\n return compare_chararrays(x1, x2, '<', True)", "def __lt__(self, other):\n pass", "def lt(self, e1, e2):\n return self._poset.lt(e1, e2)", "def __lt__(self, other) -> bool:\n return self.result < other", "def __lt__(self, other):\n\n return self.points_function(self, other)", "def less_than(field: str, value: Any) -> Expression:\n return Expression(_criterion(field, \"lessThan\", value))", "def __lt__(self, other):\n return self.sortval < other.sortval", "def _less_than_op(spec):", "def test_less_than_bcast(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"lessThan\"),\n torch.randn(3, 4, 5),\n torch.randn(4, 5),\n fusible_ops={\"aten::lt\"},\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Implement the ``==`` operator. In a column context, produces the clause ``a = b``. If the target is ``None``, produces ``a IS NULL``.
def __eq__(self, other: Any) -> ColumnOperators: # type: ignore[override] return self.operate(eq, other)
[ "def sql_equals(self):\n return getattr(self.manager.modelclass, self.manager.column) == self.value", "def equals(field: str, value: Any) -> Expression:\n return Expression(_criterion(field, \"equals\", value))", "def __eq__(self, rhs):\n return _table.Connection___eq__(self, rhs)", "def _eq(self, value: Any) -> bool:\n return value == self.op", "def __eq__(self, other):\n if (self.table_name == other.table_name and\n self.column_name == other.column_name and\n self.fk_column == other.fk_column):\n return True\n return False", "def __eq__(self, other):\n if isinstance(self, VariableExpression) and \\\n isinstance(other, VariableExpression):\n return self.variable == other.variable\n else:\n return False", "def __eq__(self, other):\n return ZeroaryOperator.__eq__(self, other) and \\\n self.relation_key == other.relation_key", "def __eq__(self, other):\n return self.sql_id == other.sql_id", "def test_equality_method(self):\r\n wc1 = WhereClause('a', EqualsOperator(), 'c')\r\n wc2 = WhereClause('a', EqualsOperator(), 'c')\r\n assert wc1 == wc2", "def test_query_filter_strategy_eq(self):\n filter_ = Filter('', [], ColumnType('name', Person, None), ('eq', ['Fred']))\n\n models = self.session.query(Person).apply_filter(filter_).all()\n self.assertTrue(len(models) == 1)\n self.assertTrue(models[0].name == 'Fred')", "def __eq__(self, other):\n return isinstance(other, Expr) and self.op == other.op and self.args == other.args", "def _logical_equal(x, y):\n x_ = _static_value(x)\n y_ = _static_value(y)\n if x_ is None or y_ is None:\n return math_ops.equal(x, y)\n return constant_op.constant(np.array_equal(x_, y_))", "def test_columnComparison(self):\n self.assertEquals(\n Select(\n From=self.schema.FOO,\n Where=self.schema.FOO.BAR == self.schema.FOO.BAZ\n ).toSQL(),\n SQLFragment(\"select * from FOO where BAR = BAZ\", [])\n )", "def equals(self, val, target = None):\n\n if not target:\n target = self\n\n if isinstance(val, (np.int32, np.float32, int, float)):\n err_code = _cudanet.equals_scalar(self.p_mat, ct.c_float(val), target.p_mat)\n else:\n err_code = _cudanet.equals(self.p_mat, val.p_mat, target.p_mat)\n\n if err_code:\n raise generate_exception(err_code)\n\n return target", "def dialect_eq(lhs, rhs):\n # type: (csv.Dialect, csv.Dialect) -> bool\n return (lhs.delimiter == rhs.delimiter and\n lhs.quotechar == rhs.quotechar and\n lhs.doublequote == rhs.doublequote and\n lhs.escapechar == rhs.escapechar and\n lhs.quoting == rhs.quoting and\n lhs.skipinitialspace == rhs.skipinitialspace)", "def test_custom_eq():\n class Model(BaseModel):\n id = Column(Integer, hash_key=True)\n\n def __eq__(self, other):\n return self.id == other.id\n same = Model(id=3)\n other = Model(id=3)\n assert same == other\n assert Model.__hash__ is object.__hash__", "def __eq__(self,other):\n\t\tif other != None:\n\t\t\treturn self.id==other.id and \\\n\t\t\t\t self.length == other.length and \\\n\t\t\t\t self.value==other.value\n\t\telse:\n\t\t\treturn False", "def __eq__(self, other):\n if not isinstance(other, Expression):\n return False\n\n return self.evaluate() == other.evaluate()", "def test_columnEqualityTruth(self):\n s = self.schema\n self.assertEquals(bool(s.FOO.BAR == s.FOO.BAR), True)\n self.assertEquals(bool(s.FOO.BAR != s.FOO.BAR), False)\n self.assertEquals(bool(s.FOO.BAZ != s.FOO.BAR), True)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Implement the ``IS DISTINCT FROM`` operator. Renders "a IS DISTINCT FROM b" on most platforms; on some such as SQLite may render "a IS NOT b".
def is_distinct_from(self, other: Any) -> ColumnOperators: return self.operate(is_distinct_from, other)
[ "def is_not_distinct_from(self, other: Any) -> ColumnOperators:\n return self.operate(is_not_distinct_from, other)", "def test_distinct(self):\n self.assertEquals(\n Select(\n [self.schema.FOO.BAR],\n From=self.schema.FOO,\n Distinct=True\n ).toSQL(),\n SQLFragment(\"select distinct BAR from FOO\")\n )", "def is_distinct(value, *args):\n return value not in args", "def distinct_sql(self, fields):\n if fields:\n raise NotImplementedError('DISTINCT ON fields is not supported by this database backend')\n else:\n return 'DISTINCT'", "def isdistinct(seq):\n return len(seq) == len(set(seq))", "def isdistinct(token):\n\n # Token is the distinct keyword\n return token and token.lower() in Token.DISTINCT", "def is_pairwise_distinct(*args, **kw):\n # this gives the same result as: distinct_values(args, None)\n # it's probably faster to use a builtin...\n #return len(set(args)) == len(args)\n ## even through the following may do fewer tests:\n #for i in xrange(len(args) - 1):\n # if args[i] in args[i + 1:]: return False\n #return True\n return seq_all_different(args, **kw)", "def test_distinct(self):\n self.Person(name=\"Mr Orange\", age=20).save()\n self.Person(name=\"Mr White\", age=20).save()\n self.Person(name=\"Mr Orange\", age=30).save()\n self.Person(name=\"Mr Pink\", age=30).save()\n assert set(self.Person.objects.distinct(\"name\")) == {\n \"Mr Orange\",\n \"Mr White\",\n \"Mr Pink\",\n }\n assert set(self.Person.objects.distinct(\"age\")) == {20, 30}\n assert set(self.Person.objects(age=30).distinct(\"name\")) == {\n \"Mr Orange\",\n \"Mr Pink\",\n }", "def distinct(self):\n return DistinctQuery(self)", "def test_no_duplicates(self):\n with Historical_ROAs_Table() as t:\n sql = f\"SELECT DISTINCT({','.join(t.columns[:-1])}) FROM {t.name}\"\n distinct = len(t.execute(sql))\n sql = f\"SELECT * FROM {t.name}\"\n assert len(t.execute(sql)) == distinct", "def test_select_distinct(self):\n p = tensor.fmatrix()\n u = tensor.fvector()\n n = tensor.iscalar()\n m = multinomial.MultinomialWOReplacementFromUniform('auto')(p, u, n)\n\n f = function([p, u, n], m, allow_input_downcast=True)\n\n n_elements = 1000\n all_indices = range(n_elements)\n np.random.seed(12345)\n for i in [5, 10, 50, 100, 500, n_elements]:\n uni = np.random.rand(i).astype(config.floatX)\n pvals = np.random.randint(1, 100, (1, n_elements)).astype(config.floatX)\n pvals /= pvals.sum(1)\n res = f(pvals, uni, i)\n res = np.squeeze(res)\n assert len(res) == i, res\n assert np.all(np.in1d(np.unique(res), all_indices)), res", "def test_select_distinct(self):\n th_rng = RandomStreams(12345)\n\n p = tensor.fmatrix()\n n = tensor.iscalar()\n m = th_rng.multinomial_wo_replacement(pvals=p, n=n)\n\n f = function([p, n], m, allow_input_downcast=True)\n\n n_elements = 1000\n all_indices = range(n_elements)\n np.random.seed(12345)\n for i in [5, 10, 50, 100, 500, n_elements]:\n pvals = np.random.randint(1, 100, (1, n_elements)).astype(config.floatX)\n pvals /= pvals.sum(1)\n res = f(pvals, i)\n res = np.squeeze(res)\n assert len(res) == i\n assert np.all(np.in1d(np.unique(res), all_indices)), res", "def is_distinct(n):\n nstr = str(n)\n return len(nstr) == len(set(nstr))", "def single_distinct(self, table_to_select, table_data):\n\t\theader = ''\n\t\tcolumn_data = {}\n\t\tdata = []\n\n\t\ttable_needed, column = self.search_column(self.distinct_process[0], table_to_select)\n\t\theader += table_needed + '.' + column + ', '\n\t\tprint '%9s' % (table_needed + '.' 
+ column),\n\t\tfor i in self.columns:\n\t\t\tprint '%9s' %i,\n\t\tprint\n\n\t\tremaining_data = []\n\n\t\tfor j in table_data:\n\t\t\tvalue = j[self.schema[table_needed].index(self.distinct_process[0])]\n\t\t\t# print value\n\t\t\ttemp_remaining_data = []\n\t\t\tif value not in data:\n\t\t\t\tdata.append(value)\n\t\t\t\tprint '%9s' %(value),\n\t\t\t\tfor k in self.columns:\n\t\t\t\t\ttemp_remaining_data.append(j[self.schema[table_needed].index(k)])\n\t\t\t\t\tprint '%9s' %j[self.schema[table_needed].index(k)],\n\t\t\t\tprint\n\t\t\t\tremaining_data.append(temp_remaining_data)\n\n\n\t\t\t# SUPPOSE IM HAVING A QUERY SELECT DISTINCT(A), B FROM TABLE1, \n\t\t\t# I NEED TO TAKE RECORDS WHICH HAVE REDUNDANT A's BUT BASICALLY DIFFERENT B'S\n\t\t\t# THE FOLLOWING CODE DOES THAT\n\t\t\telse:\n\t\t\t\tcount = 0\n\t\t\t\tflag = False\n\t\t\t\tif len(self.columns) > 0:\n\t\t\t\t\tfor l in self.columns:\n\t\t\t\t\t\ttempvar = j[self.schema[table_needed].index(l)]\n\t\t\t\t\t\ttemp_remaining_data.append(j[self.schema[table_needed].index(l)])\n\t\t\t\t\t\tif count == 0:\n\t\t\t\t\t\t\tfor k in range(len(remaining_data)):\n\t\t\t\t\t\t\t\tif tempvar == remaining_data[k][count]:\n\t\t\t\t\t\t\t\t\tflag = True\n\t\t\t\t\t\tcount += 1\n\t\t\t\t\tif flag == False:\n\t\t\t\t\t\tdata.append(value)\n\t\t\t\t\t\tprint '%9s' %value,\n\t\t\t\t\t\tremaining_data.append(temp_remaining_data)\n\t\t\t\t\t\tfor i in temp_remaining_data:\n\t\t\t\t\t\t\tprint '%9s' %i,\n\t\t\t\t\t\tprint", "def __IsMultiQuery(self, condition):\n return condition.lower() in ('in', '!=')", "def distinct(self):\n return Query(self.as_set)", "def test_distinct(self):\n pkgs = [\n make_package(factory=DynamoPackage),\n make_package(version=\"1.3\", filename=\"mypath3\", factory=DynamoPackage),\n make_package(\"mypkg2\", \"1.3.4\", \"my/other/path\", factory=DynamoPackage),\n ]\n self._save_pkgs(*pkgs)\n saved_pkgs = self.db.distinct()\n self.assertCountEqual(saved_pkgs, set([p.name for p in pkgs]))", "def is_unique(self):\n raise NotImplementedError(\n 'operation is_unique(...) not yet implemented')", "def isdistinct(coll):\n most_common = collections.Counter(coll).most_common(1)\n return not most_common[0][1] > 1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Implement the ``IS NOT DISTINCT FROM`` operator. Renders "a IS NOT DISTINCT FROM b" on most platforms; on some such as SQLite may render "a IS b".
def is_not_distinct_from(self, other: Any) -> ColumnOperators: return self.operate(is_not_distinct_from, other)
[ "def is_distinct_from(self, other: Any) -> ColumnOperators:\n return self.operate(is_distinct_from, other)", "def is_distinct(value, *args):\n return value not in args", "def __invert__(self):\n return NotAny(self)", "def test_distinct(self):\n self.assertEquals(\n Select(\n [self.schema.FOO.BAR],\n From=self.schema.FOO,\n Distinct=True\n ).toSQL(),\n SQLFragment(\"select distinct BAR from FOO\")\n )", "def __ne__(self, nqubit):\n\n return not self == nqubit", "def isNot(self, value):\n newq = self.copy()\n newq.setOp(Query.Op.IsNot)\n newq.setValue(value)\n return newq", "def not1(a):\n \n return ~a", "def __ne__(self, other):\n return not_equal(self, other)", "def filter_neq(self, value):\n return self.filter(lambda x: x != value)", "def __ne__(self, other):\n return not self._field1 == other._field1", "def __ne__(self, other):\r\n\t\treturn (self.type != other.type or self.value != other.value)", "def are_not_equal(value1, value2):\n return not ObjectComparator.are_equal(value1, value2)", "def not_equal_to(self, value):\n\n return self._add_condition(f\"!= {value}\", lambda other: other != value)", "def test_not_equal(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"notEqual\"),\n torch.randn(3, 4, 5),\n torch.randn(3, 4, 5),\n fusible_ops={\"aten::ne\"},\n )", "def distinct_sql(self, fields):\n if fields:\n raise NotImplementedError('DISTINCT ON fields is not supported by this database backend')\n else:\n return 'DISTINCT'", "def __ne__(self, other):\r\n return not (self._field1 == other._field1)", "def CNOT(p, q, *args, **kwargs):\n p.cnot(q, *args, **kwargs); return p, q", "def not_in(self, other: Any) -> ColumnOperators:\n return self.operate(not_in_op, other)", "def __ne__(self, other_feature_or_val):\n from featuretools.primitives import NotEquals\n return NotEquals(self, other_feature_or_val)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Implement the ``>`` operator. In a column context, produces the clause ``a > b``.
def __gt__(self, other: Any) -> ColumnOperators: return self.operate(gt, other)
[ "def __lt__(self, other: Any) -> ColumnOperators:\n return self.operate(lt, other)", "def test_greater_than(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"greaterThan\"),\n torch.randn(3, 4, 5),\n torch.randn(3, 4, 5),\n fusible_ops={\"aten::gt\"},\n )", "def __gt__(self, other):\n return greater(self, other)", "def greater_than(field: str, value: Any) -> Expression:\n return Expression(_criterion(field, \"greaterThan\", value))", "def greater_than(self, value):\n\n return self._add_condition(f\">{value}\", lambda other: other > value)", "def _greater_than_op(spec):", "def greater(x1, x2):\n return compare_chararrays(x1, x2, '>', True)", "def test_greater_than_bcast(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"greaterThan\"),\n torch.randn(3, 4, 5),\n torch.randn(4, 5),\n fusible_ops={\"aten::gt\"},\n )", "def greater(x1, x2, out=None):\n return _ufunc_helper(x1, x2, _npi.greater, _np.greater, _npi.greater_scalar,\n _npi.less_scalar, out)", "def __gt__(self, other_feature_or_val):\n from featuretools.primitives import GreaterThan\n return GreaterThan(self, other_feature_or_val)", "def greater_than_or_equal_to(self, value):\n\n return self._add_condition(f\">={value}\", lambda other: other >= value)", "def comparison(self) -> Expr:\n expr = self.term()\n\n # GREATER, GREATER_EQUAL, LESS, LESS_EQUAL\n match_list = [TokenType.GREATER, TokenType.GREATER_EQUAL,\n TokenType.LESS, TokenType.LESS_EQUAL]\n while self.match(match_list):\n operator = self.previous()\n right = self.term()\n expr = Binary(expr, operator, right)\n\n return expr", "def test_greater_than(self):\n operator = 'greaterThan'\n\n val = fake.random_int(0, 100)\n\n val_true = val - fake.random_int(1, val)\n val_false = val + fake.random_int(0, 100)\n\n condition = Condition(operator=operator, values=values(value=val_true))\n self.assertTrue(condition.evaluate(val))\n\n # Not\n condition = Condition(operator=operator, values=values(value=val_false))\n self.assertFalse(condition.evaluate(val))", "def cmpGreaterThan(self, conn1, sql1, conn2, sql2):\n for row in self.get_query_results(conn1, sql1):\n res1 = row[0]\n for row in self.get_query_results(conn2, sql2):\n res2 = row[0]\n self.log.info(\n \"cmpGreaterThan:: task: {}, value1: {}, value2: {}\".format(\n self.task_id, str(res1), str(res2)\n )\n )\n\n if res1 <= res2:\n raise AirflowException(\n \"EtlValidation cmpGreaterThanError: query {}\".format(sql1 + \"<=\" + sql2)\n )", "def __gt__(self, other):\n return self.sortval > other.sortval", "def __gt__(self, other): # __gt__ is a method for implementation of >\n return self.votes > other.votes # return True when arguement is correct", "def __gt__(self, other):\n return self.element() > other.element()", "def _greater_than_or_equal_to_op(spec):", "def convert_broadcast_greater(node, **kwargs):\n return create_basic_op_node('Greater', node, kwargs)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Implement the ``>>`` operator. Not used by SQLAlchemy core; this is provided for custom operator systems which want to use ``>>`` as an extension point.
def __rshift__(self, other: Any) -> ColumnOperators: return self.operate(rshift, other)
[ "def __rshift__(self, other):\n other.set_upstream(self)\n # return other so a >> b >> c works\n return other", "def __rshift__(self, monadic):\n if not callable(monadic):\n return NotImplemented\n # pylint: disable = star-args\n composed = lambda *args, **kwargs: self(*args, **kwargs) >> monadic\n return self.__class__(composed)", "def __mod__(self, other: Any) -> ColumnOperators:\n return self.operate(mod, other)", "def bitwise_rshift(self, other: Any) -> ColumnOperators:\n\n return self.operate(bitwise_rshift_op, other)", "def bitwise_lshift(self, other: Any) -> ColumnOperators:\n\n return self.operate(bitwise_lshift_op, other)", "def shift_right(self):\n if self.check_stack(2, \">>\"):\n value2 = self.stack.pop()\n value1 = self.stack.pop()\n if isinstance(value1, int) and isinstance(value2, int):\n self.stack.append(value1 >> value2)\n else:\n print(\"This operation requires 2 int\")\n self.stack.append(value1)\n self.stack.append(value2)", "def __lshift__(self, other):\r\n # TODO: extend to secret offset\r\n if not isinstance(other, int):\r\n return NotImplemented\r\n\r\n return runtime.mul(self, 1<<other)", "def __lshift__(self, value):\n\t\tif isinstance(value, str):\n\t\t\tself.setState(value)\n\t\telse:\n\t\t\tself.execute(value)\n\t\treturn self", "def __rshift__(self, f: Callable[[R], \"Outcome[L, Any]\"]) -> \"Outcome[L, Any]\":\n return self.then(f)", "def __lt__(self, other: Any) -> ColumnOperators:\n return self.operate(lt, other)", "def lshift(self, attr):\n return self.set_child_and_return(shifter.lshift(self.statement, attr))", "def __rshift__(self, other):\n return rule(self, other)", "def piped(self):\n\t\tpass", "def pipe_filter(self, pred: Predicate) -> Stream[RecordT]:\n self.ops.append(functools.partial(takewhile, pred))\n return self", "def shift_left(self):\n if self.check_stack(2, \"<<\"):\n value2 = self.stack.pop()\n value1 = self.stack.pop()\n if isinstance(value1, int) and isinstance(value2, int):\n self.stack.append(value1 << value2)\n else:\n print(\"This operation requires 2 int\")\n self.stack.append(value1)\n self.stack.append(value2)", "def base_operator(self):\n raise NotImplementedError()", "def rshift(self, attr):\n return self.set_child_and_return(shifter.rshift(self.statement, attr))", "def __rrshift__(self, other):\n return rule(other, self)", "def __rlshift__(self, *args):\n return _libsbml.string___rlshift__(self, *args)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Implement the 'concat' operator. In a column context, produces the clause ``a || b``, or uses the ``concat()`` operator on MySQL.
def concat(self, other: Any) -> ColumnOperators: return self.operate(concat_op, other)
[ "def _rconcat(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(concat_op, other)", "def concatpair(self, compiler, connection, **extra_context):\n # Spanner's CONCAT function returns null if any of its arguments are null.\n # Prevent that by converting null arguments to an empty string.\n clone = self.copy()\n clone.set_source_expressions(\n IfNull(e, Value(\"\")) for e in self.get_source_expressions()\n )\n return clone.as_sql(compiler, connection, **extra_context)", "def concat(cls, c1, c2, op):\r\n if c1.clause and c2.clause:\r\n return cls('({}) {} ({})'.format(c1.clause, op, c2.clause), c1.params + c2.params)\r\n elif c1.clause:\r\n return c1\r\n elif c2.clause:\r\n return c2\r\n else:\r\n return cls('', ())", "def CONCAT(string, *more_strings):\n return CONCATENATE(string, *more_strings)", "def _resolve_concat_operator(self, op: placeholder_pb2.ConcatOperator) -> str:\n return \"\".join(str(self.resolve(e)) for e in op.expressions)", "def concat(*args, sep=\"/\"):\n return sep.join(args)", "def concat(*args, **kw):\n sep = kw.pop('sep', '')\n enc = kw.pop('enc', '')\n if kw: raise TypeError(str.format(\"concat: unknown arguments {kw}\", kw=seq2str(kw.keys())))\n if len(args) == 1:\n try:\n return join(args[0], sep=sep, enc=enc)\n except TypeError:\n pass\n except:\n raise\n return join(args, sep=sep, enc=enc)", "def _handle_concat(self, column, node):\n\n # can assume that all columns present in input\n # are also present in the output.\n col_name = node.out_rel.columns[column.idx].name\n column.name = col_name\n\n return self._continue_traversal(column, node)", "def concatenate(v1, v2):\n if isinstance(v1, StringVal) and isinstance(v2, StringVal):\n return str(v1) + ' ' + str(v2)\n elif isinstance(v1, NoneVal):\n return v2\n elif isinstance(v2, NoneVal):\n return v1\n else:\n ValueFactory.log.warning(\"concatenation not implemented for %s + %s\" % (v1, v2))\n return NoneVal()", "def concat_cols(df, cols, delim):\n\n cols_str = [df[x].astype(str) for x in cols]\n\n return reduce(lambda a, b: a + delim + b, cols_str)", "def concat_with_pipe(x, cols=None):\n cols = cols or x.index\n return \"|\".join([x[i] for i in cols if x[i] not in [None, np.nan]])", "def concatenate_columns(params: List[str]) -> str:\n convert_columns_to_string = [f'string({col})' for col in params]\n\n return f\"concat({','.join(convert_columns_to_string)})\"", "def concatStr(a, b):\n if a and b:\n _tmpstr = ' '\n else:\n _tmpstr = ''\n return str(a) + _tmpstr + str(b)", "def test_evaluate_concat_expression(self):\n value = self.evaluate_common(\"concat('starts','with')\")\n self.assertTrue(\n value.type_code == edm.SimpleType.String, \"Expected String\")\n self.assertTrue(value.value == \"startswith\")\n value = self.evaluate_common(\"concat('3.1',concat('4','159'))\")\n self.assertTrue(value.value == \"3.14159\")\n try:\n value = self.evaluate_common(\"concat('3.14',1)\")\n self.fail(\"integer as parameter\")\n except odata.EvaluationError:\n pass\n try:\n value = self.evaluate_common(\"concat('3.14')\")\n self.fail(\"1 parameter\")\n except odata.EvaluationError:\n pass\n try:\n value = self.evaluate_common(\"concat('3.1','4','159')\")\n self.fail(\"3 parameters\")\n except odata.EvaluationError:\n pass", "def concat(seq1, seq2):\n if type_tag(seq1) == type_tag(seq2):\n return seq1 + seq2\n else:\n types = (type_tag(seq1), type_tag(seq2))\n if types in concat.adders:\n return concat.adders[types](seq1, seq2)", "def str_join(df:pd.DataFrame, # dataframe \n sep:str, # separation character 
between items\n *cols:list # list of columns to make into a list\n )->str: # string of values\n from functools import reduce\n return reduce(lambda x, y: x.astype(str).str.cat(y.astype(str), sep=sep),\n [df[col] for col in cols])", "def concat2concatws(payload, **kwargs):\n if payload:\n payload = payload.replace(\"CONCAT(\", \"CONCAT_WS(MID(CHAR(0),0,0),\")\n return payload", "def concat_pattern():\n pattern = is_tuple(None)\n pattern = is_op(\"concatenate\")(pattern)\n\n return pattern", "def concat(values, sep=', '):\n concat_str = None\n try:\n concat_str = sep.join([str(v) for v in values if not is_empty(v)])\n except Exception as e:\n pass\n return concat_str" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Implement an 'rconcat' operator. This is for internal use at the moment.
def _rconcat(self, other: Any) -> ColumnOperators: return self.reverse_operate(concat_op, other)
[ "def __radd__(self, left_arr):\n concat_arr = left_arr.copy() # Create new instance to return\n concat_arr.extend(self)\n return concat_arr", "def concat(self, other: Any) -> ColumnOperators:\n return self.operate(concat_op, other)", "def concatenate_data():", "def _concat(self, other):\n w = self._dtype.bit_length()\n try:\n other._dtype.bit_length\n except AttributeError:\n raise TypeError(\"Can not concat Bits and\", other)\n\n self = self._vec()\n if areHValues(self, other):\n return self._concat__val(other)\n else:\n w = self._dtype.bit_length()\n other_w = other._dtype.bit_length()\n resWidth = w + other_w\n Bits = self._dtype.__class__\n resT = Bits(resWidth, signed=self._dtype.signed, force_vector=True)\n # is instance of signal\n if isinstance(other, InterfaceBase):\n other = other._sig\n\n if other._dtype == BOOL:\n other = other._auto_cast(BIT)\n elif isinstance(other._dtype, Bits):\n if other._dtype.signed is not None:\n other = other._vec()\n else:\n raise TypeError(other._dtype)\n\n if self._dtype.signed is not None:\n self = self._vec()\n\n return Operator.withRes(AllOps.CONCAT, [self, other], resT)\\\n ._auto_cast(Bits(resWidth,\n signed=self._dtype.signed))", "def _rewrite_concat(self, node: saldag.Concat):\n\n if node.is_lower_boundary():\n\n out_stored_with = node.out_rel.stored_with\n for par in node.parents:\n if not par.is_root():\n par.out_rel.stored_with = copy.copy(out_stored_with)\n node.is_mpc = False", "def concat(seq1, seq2):\n if type_tag(seq1) == type_tag(seq2):\n return seq1 + seq2\n else:\n types = (type_tag(seq1), type_tag(seq2))\n if types in concat.adders:\n return concat.adders[types](seq1, seq2)", "def __rshift__(self, other):\n if isinstance(other, list):\n for o in other:\n self.set_downstream(o)\n else:\n self.set_downstream(other)\n return other", "def __add__(self, right_arr):\n concat_arr = self.copy() # Create new instance to return\n concat_arr.extend(right_arr)\n return concat_arr", "def __rshift__(self, other):\n other.set_upstream(self)\n # return other so a >> b >> c works\n return other", "def concat(self, acc, arg=None, *args):\n # print 'concat call({}, {}, {})'.format(acc, arg, args)\n if arg is None:\n arg = acc\n acc = []\n\n # print 'concat fixed({}, {}, {})'.format(acc, arg, args)\n\n if not isinstance(acc, list):\n # Support upgrading a single item to a list\n acc = [acc]\n\n # print 'concat action({}, {})'.format(acc, arg)\n\n return acc + [arg]", "def __rshift__(self, other: Any) -> ColumnOperators:\n return self.operate(rshift, other)", "def concat(self):\n return True", "def concatv(*seqs):\n return concat(seqs)", "def lconcat(seqs):\n return list(toolz.concat(seqs))", "def concat(self, other: \"Linked[T]\") -> None:\n first_self = self\n last_self = self.backward\n\n first_other = other\n last_other = other.backward\n # self ++ other\n # consider last_self and first_other\n last_self._join(first_other)\n last_other._join(first_self)", "def _concat(self, partial: Optional[O], outputs: O):\n raise NotImplementedError", "def _concatenate_inner(self, chunks, direction):\n tmp_bucket = []\n source_chunks = chunks if direction else chunks[::-1]\n target_chunks = ChunkList()\n for chunk in source_chunks:\n if (\n # if the chunk has matched dependency, do concatenation.\n chunk.dependency == direction or\n # if the chunk is SPACE, concatenate to the previous chunk.\n (direction == False and chunk.is_space())\n ):\n tmp_bucket.append(chunk)\n continue\n tmp_bucket.append(chunk)\n if not direction: tmp_bucket = tmp_bucket[::-1]\n 
new_word = ''.join([tmp_chunk.word for tmp_chunk in tmp_bucket])\n chunk.update_word(new_word)\n target_chunks.append(chunk)\n tmp_bucket = []\n if tmp_bucket: target_chunks += tmp_bucket\n return target_chunks if direction else target_chunks[::-1]", "def concat(cls, c1, c2, op):\r\n if c1.clause and c2.clause:\r\n return cls('({}) {} ({})'.format(c1.clause, op, c2.clause), c1.params + c2.params)\r\n elif c1.clause:\r\n return c1\r\n elif c2.clause:\r\n return c2\r\n else:\r\n return cls('', ())", "def concatenate(self, other):\n return as_stream_iterator(_flatten_stream_from_reversed_list([other, self]))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r"""Implement the ``like`` operator.
def like( self, other: Any, escape: Optional[str] = None ) -> ColumnOperators: return self.operate(like_op, other, escape=escape)
[ "def _checkNotLike(match):\n if match.group(\"op\").upper() == \"LIKE\":\n raise RuntimeError(\"comparisons using LIKE are not supported\")", "def postfix(cls, __and=True, __key=None, **kwargs):\r\n return _queries(\"LIKE\", __key, __and, [(k, f\"%{_escape_like(v)}\") for k, v in kwargs.items()])", "def prefix(cls, __and=True, __key=None, **kwargs):\r\n return _queries(\"LIKE\", __key, __and, [(k, f\"{_escape_like(v)}%\") for k, v in kwargs.items()])", "def parse_like_term(term):\n case_insensitive = term.startswith('*')\n if case_insensitive:\n term = term[1:]\n # apply operators\n if term.startswith('^'):\n oper = 'startswith'\n term = term[1:]\n elif term.startswith('='):\n oper = 'exact'\n term = term[1:]\n else:\n oper = 'contains'\n # add case insensitive flag\n if case_insensitive:\n oper = 'i' + oper\n return oper, term", "def is_like(self, q):\n q = q.lower()\n return q in self.title.lower() or q in self.url.lower() or q in self.media_type.lower()", "def isStarlike(self):\n \n pass", "def like(text, pattern):\n\n # TODO 处理`,`\n re_pattern = pattern.replace(\"?\", \".?\")\n re_pattern = re_pattern.replace(\"*\", \".*?\")\n m = re.match(re_pattern, text)\n if m:\n return True\n return False", "def Like(text, pattern):\n return fnmatch.fnmatch(text, pattern)", "def like(self, uid):\n return self._get('like/{}'.format(uid))['match']", "def test_query_filter_strategy_like(self):\n filter_ = Filter('', [], ColumnType('name', Person, None), ('like', ['red']))\n\n models = self.session.query(Person).apply_filter(filter_).all()\n self.assertTrue(len(models) == 1)\n self.assertTrue(models[0].name == 'Fred')", "def convert_like(pattern, column_name):\n pattern2 = ''\n if pattern[-1] == '%':\n # Wildcard at end - Use xx>= and yy< instead of LIKE to ensure Index can be used\n txt = f'({column_name} >= ? and {column_name} < ?)'\n pattern2 = inc_key(pattern)\n elif '%' in pattern:\n # Wildcard in middle - use LIKE\n txt = f'{column_name} LIKE ?'\n else:\n txt = f'{column_name} = ?'\n\n # print(f'[{txt}] ')\n return txt, pattern, pattern2", "def post_like(self, entry, **args):\n args.update(entry=entry)\n return self.fetch(\"/like\", post_args=args)", "def not_like(\n self, other: Any, escape: Optional[str] = None\n ) -> ColumnOperators:\n return self.operate(not_like_op, other, escape=escape)", "def prep_for_like_query(self, x):\n # http://msdn2.microsoft.com/en-us/library/ms179859.aspx\n return smart_text(x).replace('%', '\\%').replace('_', '\\_')", "def by_post(cls, post):\n likes = Like.gql(\"WHERE post = :1\", post.key())\n return likes", "def equaltolike(payload, **kwargs):\n def process(match):\n word = match.group()\n word = \"%sLIKE%s\" % (\" \" if word[0] != \" \" else \"\", \" \" if word[-1] != \" \" else \"\")\n\n return word\n retVal = payload\n if payload:\n retVal = re.sub(r\"\\s*=\\s*\", lambda match: process(match), retVal)\n return retVal", "def test_wildcards_both_inside_and_outside_literal(self):\n qs = '\"Fo? t*\" said the *'\n qs_escaped, wildcard = wildcard_escape(qs)\n\n self.assertEqual(\n qs_escaped,\n r'\"Fo\\? t\\*\" said the *',\n \"Wildcards in literal should be escaped\",\n )\n self.assertTrue(wildcard, \"Wildcard should be detected\")\n self.assertIsInstance(\n Q_(\"match\", \"title\", qs),\n type(index.Q(\"wildcard\", title=r'\"Fo\\? t\\*\" said the *')),\n \"Wildcard Q object should be generated\",\n )", "def test_findlike():\n parser = CmdParser([findlike])\n out = parser.parse(\"findlike . 
-name foo\")\n assert out[0].arguments[0].present == True\n assert out[0].arguments[0].value == \"foo\"\n assert out[0].arguments[1].present == True\n assert out[0].arguments[1].value == \".\"\n assert out[0].as_shell_string() == \"findlike . -name foo\"", "def convert_to_like(column_value: str) -> str:\n like_query = \"%\".join(column_value)\n like_query = \"%\" + like_query + \"%\"\n return like_query" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Produce a bitwise XOR operation, typically via the ``^`` operator, or ``#`` for PostgreSQL.
def bitwise_xor(self, other: Any) -> ColumnOperators: return self.operate(bitwise_xor_op, other)
[ "def bitwise_xor(self, other):\n return math_funcs.bitwise_xor(self, other)", "def xor(a, b):", "def my_xor(x, y):\r\n return (x | y) & (~x | ~y)", "def logical_xor(a, b):\n return bool(a) ^ bool(b)", "def lc_xor(*args):\n return ''.join([chr(byte) for byte in [ reduce(lambda x,y: x^y, items) for items in zip(*[[ ord(ch) for ch in arg] for arg in args])]])", "def __xor__(self, other):\n return _flagOp(xor, self, other)", "def logical_xor(self, other):\n return self.operation(other, lambda x, y: int(bool(x) ^ bool(y)))", "def xor(a, b):\n\n return np.logical_xor(a, b, dtype='uint8').astype(\"uint8\")", "def __xor__(self, obj):\n return self._boolean_operation(obj, operator.__xor__)", "def xor(stroka,key):\n s = \"\"\n for symbol in stroka:\n s += chr(ord(symbol)^key)\n return s", "def XOR(str1, str2):\r\n return bytes(a ^ b for a, b in zip(str1, str2))", "def logical_xor(x1, x2, out=None):\n return _ufunc_helper(x1, x2, _npi.logical_xor, _np.logical_xor, _npi.logical_xor_scalar, None, out)", "def special_xor(expr):\n \n if type(expr) == xor_t and expr.op1 == expr.op2:\n return value_t(0)\n \n return", "def XOR(A, B):\n if A!=B:\n return 1\n else:\n return 0", "def XOR(str1, str2):\r\n return \"\".join(chr(ord(a) ^ ord(b)) for a, b in zip(str1, str2))", "def xor(x, y):\n return bool(x) != bool(y)", "def logical_xor(self, a, b):\n a = _convert_other(a, raiseit=True)\n return a.logical_xor(b, context=self)", "def convert_broadcast_logical_xor(node, **kwargs):\n return create_basic_op_node('Xor', node, kwargs)", "def xor(a, b):\n out = bytearray()\n for i,c in enumerate(a):\n out.append(c ^ b[i])\n return bytes(out)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Produce a bitwise NOT operation, typically via the ``~`` operator.
def bitwise_not(self) -> ColumnOperators: return self.operate(bitwise_not_op)
[ "def bitwise_not_(self):\n return math_funcs.bitwise_not(self, self)", "def bitwise_not(data):\n return _make.bitwise_not(data)", "def bitwise_not(x, out=None, **kwargs):\n return _unary_func_helper(x, _npi.bitwise_not, _np.bitwise_not, out=out, **kwargs)", "def logical_not(data):\n return _make.logical_not(data)", "def logical_not(x, out=None, **kwargs):\n return _unary_func_helper(x, _npi.logical_not, _np.logical_not, out=out, **kwargs)", "def convert_logical_not(node, **kwargs):\n return create_basic_op_node('Not', node, kwargs)", "def logic_not(f_args) :\n\treturn logic_xor(f_args, inversed_logic=True)", "def logical_not(x, f=None):\n return _cur_framework(x, f=f).logical_not(x)", "def logical_not(x, name=None):\n result = _op_def_lib.apply_op(\"LogicalNot\", x=x, name=name)\n return result", "def not1(a):\n \n return ~a", "def _logical_not(x):\n x_ = _static_value(x)\n if x_ is None:\n return math_ops.logical_not(x)\n return constant_op.constant(np.logical_not(x_))", "def logical_not(node: NodeInput, name: Optional[str] = None) -> Node:\n return _get_node_factory_opset1().create(\"LogicalNot\", [node])", "def not_(bits: int) -> int:\n # The `& ALL_` is necessary so python doesn't treat bits as 2's compliment\n return ~bits & ALL_", "def logical_negation(self):\n return H2OFrame._expr(expr=ExprNode(\"not\", self), cache=self._ex._cache)", "def _negate_in_binary(self, negated_op, original_op):\n return self", "def __xor__(self, other):\n return _flagOp(xor, self, other)", "def get_not(self):\n return LogicVar.OPERATORS[self.get_operator_type()]", "def nand_gate(self, input_a, input_b):\n return self.not_gate(\n self.and_gate(input_a, input_b)\n )", "def negate(x):\n return x ^ 1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Produce a bitwise LSHIFT operation, typically via the ``<<`` operator.
def bitwise_lshift(self, other: Any) -> ColumnOperators: return self.operate(bitwise_lshift_op, other)
[ "def lshift(self, value):\n return self.clone().lshift_(value)", "def __lshift__(self, other):\r\n # TODO: extend to secret offset\r\n if not isinstance(other, int):\r\n return NotImplemented\r\n\r\n return runtime.mul(self, 1<<other)", "def __rlshift__(self, *args):\n return _libsbml.string___rlshift__(self, *args)", "def leftshift(x, c):\n return x << c", "def _left_rotate(n, l):\n return ((n << l) | (n >> (32 - l))) & 0xffffffff", "def left_shift(key,shift):\n if shift > len(key):\n shift = shift % len(key)\n return key[shift:] + key[:shift]", "def right_shift(key,shift):\n if shift > len(key):\n shift = shift % len(key)\n return key[-shift:] + key[:-shift]", "def shift_left(self, size, a, b, flags = None):\n\t\treturn self.expr(core.LLIL_LSL, a.index, b.index, size = size, flags = flags)", "def shift(x, offset, dim, wrap, name=None):\n return ShiftOperation(x, offset, dim, wrap, name=name).outputs[0]", "def lshift(b, n):\n return b[n:] + b[:n]", "def function_2_5_acii_shifting(l1: 'string for shifting characters') -> 'shifted string':\n return [\"\".join([chr(ord(a)+5) if ord(a) <= 117 else chr(ord(a)+5-26) for a in l1])]", "def rotl(x, count):\n ret = 0\n for i in range(64):\n bit = (x >> i) & 1\n ret |= bit << ((i + count) % 64)\n return ret", "def __left_shift__(self, word, n):\n\n return ((word << n) % (1 << self.word_size))", "def lshift(self, attr):\n return self.set_child_and_return(shifter.lshift(self.statement, attr))", "def right_shift(lhs, rhs):\n return _make.right_shift(lhs, rhs)", "def shift_left_bit_length(x: int) -> int:\n return 1 << (x - 1).bit_length()", "def logical_shift_right(self, size, a, b, flags = None):\n\t\treturn self.expr(core.LLIL_LSR, a.index, b.index, size = size, flags = flags)", "def left_shift(lhs, rhs):\n return _make.left_shift(lhs, rhs)", "def shift_bits(x, k):\n if (k >= 0):\n return x << k\n else:\n return x >> -k" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Produce a bitwise RSHIFT operation, typically via the ``>>`` operator.
def bitwise_rshift(self, other: Any) -> ColumnOperators: return self.operate(bitwise_rshift_op, other)
[ "def right_shift(lhs, rhs):\n return _make.right_shift(lhs, rhs)", "def __rshift__(self, other: Any) -> ColumnOperators:\n return self.operate(rshift, other)", "def rshift(self, value):\n return self.clone().rshift_(value)", "def right_shift(key,shift):\n if shift > len(key):\n shift = shift % len(key)\n return key[-shift:] + key[:-shift]", "def __rlshift__(self, *args):\n return _libsbml.string___rlshift__(self, *args)", "def rshift(self):\n self.lcd_byte(0x1C, LCD_CMD)", "def shr(self, bitVal, shiftNum):\n return bitVal >> shiftNum", "def __rshift__(self, other):\n other.set_upstream(self)\n # return other so a >> b >> c works\n return other", "def right_shift_by_1(input, output):\n\n q_math.right_shift(input, output, 1)", "def right_rotate_in_place(circuit, shift):\n output_size = len(circuit._outputs)\n shift %= output_size\n\n i = output_size - shift\n circuit._outputs = circuit._outputs[i:] + circuit._outputs[:i]\n return circuit", "def __rshift__(self, other):\n return rule(self, other)", "def __right_shift__(self, word, n):\n\n return ((word >> n) % (1 << self.word_size))", "def _rightshift(inputInteger):\n _checkInt(inputInteger, minvalue=0)\n\n shifted = inputInteger >> 1\n carrybit = inputInteger & 1\n return shifted, carrybit", "def right_rotate(circuit, shift):\n output_size = len(circuit._outputs)\n shift %= output_size\n\n i = output_size - shift\n\n result = Circuit(circuit.name, len(circuit._inputs), len(circuit._outputs))\n result._inputs = circuit._inputs[:]\n result._outputs = circuit._outputs[i:] + circuit._outputs[:i]\n\n return result", "def undo_right_shift_xor(result, shift_len):\n original = 0\n for i in range(32):\n next_bit = get_bit(result, i) ^ get_bit(original, i - shift_len)\n if next_bit == 1:\n original = set_bit_to_one(original, i)\n\n return original", "def arith_shift_right(self, size, a, b, flags = None):\n\t\treturn self.expr(core.LLIL_ASR, a.index, b.index, size = size, flags = flags)", "def shift(x, b):\n @_convert\n def rrt(x):\n \"\"\"RegRefTransform function\"\"\"\n return b+x\n return rrt(x)", "def shift_right(self):\n self.pointer = (self.pointer + 1) % len(self.data)", "def logical_shift_right(self, size, a, b, flags = None):\n\t\treturn self.expr(core.LLIL_LSR, a.index, b.index, size = size, flags = flags)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Implement the ``NOT IN`` operator. This is equivalent to using negation with ``ColumnOperators.in_()``, i.e. ``~x.in_(y)``.
def not_in(self, other: Any) -> ColumnOperators: return self.operate(not_in_op, other)
[ "def notIn(self, value):\n newq = self.copy()\n newq.setOp(Query.Op.IsNotIn)\n\n if isinstance(value, orb.Collection):\n newq.setValue(value)\n elif not isinstance(value, (set, list, tuple)):\n newq.setValue((value,))\n else:\n newq.setValue(tuple(value))\n\n return newq", "def _not_in(self, value: Any) -> bool:\n return value not in self.op", "def test_not_in(self):\n operator = 'notIn'\n\n val = fake.word()\n\n condition = Condition(operator=operator, values=values(value=val))\n self.assertFalse(condition.evaluate(val))\n\n # Not\n condition = Condition(operator=operator, values=values(value=fake.word()))\n self.assertTrue(condition.evaluate(val))", "def value_not_in(self, value_not_in):\n\n self._value_not_in = value_not_in", "def make_where_not_in(cls, key, value_list):\n\n return \"%s NOT IN (%s)\" % (\n cls.to_attr_str(key), \", \".join(cls.to_value_str_list(value_list)))", "def id_not_in(self, id_not_in):\n\n self._id_not_in = id_not_in", "def l_assertNotIn(self, a, b, stop_on_fail=None):\n self.assertNotIn(a, b)\n self.logger.info(\"AssertNotIn : %s is not in %s\" % (a, b))", "def where_not_in(self, column, wheres=[]):\n if isinstance(wheres, QueryBuilder):\n self._wheres += (\n (QueryExpression(column, \"NOT IN\", SubSelectExpression(wheres))),\n )\n else:\n wheres = [str(x) for x in wheres]\n self._wheres += ((QueryExpression(column, \"NOT IN\", wheres)),)\n return self", "def isNot(self, value):\n newq = self.copy()\n newq.setOp(Query.Op.IsNot)\n newq.setValue(value)\n return newq", "def name_not_in(self, name_not_in):\n\n self._name_not_in = name_not_in", "def key_not_in(self, key_not_in):\n\n self._key_not_in = key_not_in", "def negate(self):\n return Clause([ (- x) for x in self ])", "def exclude(self, **query):\n\n if self._query != '':\n query = '(%s) AND NOT (%s)' % (self._query, self._build_query(**query))\n else:\n query = 'NOT (%s)' % self._build_query(**query)\n\n return QueryList(self.model,\n query,\n order_by=self._order_by,\n fields=self._fields,\n limit=self._limit,\n offset=self._offset,\n links_to_names=self._links_to_names)", "def filter_not(self, *arguments, **kwargs):\n from jetengine.query_builder.node import Q, QCombination, QNot\n\n if arguments and len(arguments) == 1 and isinstance(arguments[0], (Q, QCombination)):\n self.filter(QNot(arguments[0]))\n else:\n self.filter(QNot(Q(**kwargs)))\n\n return self", "def not_expr(self, size, value, flags = None):\n\t\treturn self.expr(core.LLIL_NOT, value.index, size = size, flags = flags)", "def test_searchNot(self):\n return self._messageSetSearchTest('NOT 3', [1, 2, 4, 5])", "def vds_num_not_in(self, vds_num_not_in):\n\n self._vds_num_not_in = vds_num_not_in", "def logical_not(data):\n return _make.logical_not(data)", "def bitwise_not(self) -> ColumnOperators:\n\n return self.operate(bitwise_not_op)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Implement the ``NOT LIKE`` operator. This is equivalent to using negation with ``ColumnOperators.like()``, i.e. ``~x.like(y)``.
def not_like( self, other: Any, escape: Optional[str] = None ) -> ColumnOperators: return self.operate(not_like_op, other, escape=escape)
[ "def _checkNotLike(match):\n if match.group(\"op\").upper() == \"LIKE\":\n raise RuntimeError(\"comparisons using LIKE are not supported\")", "def test_searchNot(self):\n return self._messageSetSearchTest('NOT 3', [1, 2, 4, 5])", "def not_ilike(\n self, other: Any, escape: Optional[str] = None\n ) -> ColumnOperators:\n return self.operate(not_ilike_op, other, escape=escape)", "def not_equals(field: str, value: Any) -> Expression:\n return Expression(_criterion(field, \"iNotEqual\", value))", "def doesNotMatch(self, value, caseSensitive=True):\n newq = self.copy()\n newq.setOp(Query.Op.DoesNotMatch)\n newq.setValue(value)\n newq.setCaseSensitive(caseSensitive)\n return newq", "def _match_not(self):\n return not self.args['search_key'].match(self.ctx)", "def privacy_pass_phrase_not_contains(self, privacy_pass_phrase_not_contains):\n\n self._privacy_pass_phrase_not_contains = privacy_pass_phrase_not_contains", "def isNot(self, value):\n newq = self.copy()\n newq.setOp(Query.Op.IsNot)\n newq.setValue(value)\n return newq", "def privacy_pass_phrase_not_starts_with(self, privacy_pass_phrase_not_starts_with):\n\n self._privacy_pass_phrase_not_starts_with = privacy_pass_phrase_not_starts_with", "def __ne__(self, *args):\n return _libsbml.string___ne__(self, *args)", "def username_not_contains(self, username_not_contains):\n\n self._username_not_contains = username_not_contains", "def bitwise_not(self) -> ColumnOperators:\n\n return self.operate(bitwise_not_op)", "def logical_not(data):\n return _make.logical_not(data)", "def test_both_exist_pos_match_neg_no_match(self):\n eq_(\"foobar\",grepit(\"foobar\",[\"foo\"],[\"nomatch\"]))", "def filter_not(self, *arguments, **kwargs):\n from jetengine.query_builder.node import Q, QCombination, QNot\n\n if arguments and len(arguments) == 1 and isinstance(arguments[0], (Q, QCombination)):\n self.filter(QNot(arguments[0]))\n else:\n self.filter(QNot(Q(**kwargs)))\n\n return self", "def negated(input_words, include_nt=True):\n neg_words = NEGATE\n if any(word.lower() in neg_words for word in input_words):\n return True\n if include_nt:\n if any(\"n't\" in word.lower() for word in input_words):\n return True\n return False", "def test_negation():\n char1 = Character(court=['winter'])\n char2 = Character()\n char3 = Character(court=['summer'])\n res = npc.commands.find_characters([\"court~:winter\"], [char1, char2, char3])\n assert char1 not in res\n assert char2 in res\n assert char3 in res", "def name_not_contains(self, name_not_contains):\n\n self._name_not_contains = name_not_contains", "def notEqual(self, *args):\n return _yarp.ConstString_notEqual(self, *args)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Implement the ``NOT ILIKE`` operator. This is equivalent to using negation with ``ColumnOperators.ilike()``, i.e. ``~x.ilike(y)``.
def not_ilike( self, other: Any, escape: Optional[str] = None ) -> ColumnOperators: return self.operate(not_ilike_op, other, escape=escape)
[ "def not_like(\n self, other: Any, escape: Optional[str] = None\n ) -> ColumnOperators:\n return self.operate(not_like_op, other, escape=escape)", "def _checkNotLike(match):\n if match.group(\"op\").upper() == \"LIKE\":\n raise RuntimeError(\"comparisons using LIKE are not supported\")", "def test_searchNot(self):\n return self._messageSetSearchTest('NOT 3', [1, 2, 4, 5])", "def not_equals(field: str, value: Any) -> Expression:\n return Expression(_criterion(field, \"iNotEqual\", value))", "def _match_not(self):\n return not self.args['search_key'].match(self.ctx)", "def doesNotMatch(self, value, caseSensitive=True):\n newq = self.copy()\n newq.setOp(Query.Op.DoesNotMatch)\n newq.setValue(value)\n newq.setCaseSensitive(caseSensitive)\n return newq", "def test_both_exist_pos_match_neg_no_match(self):\n eq_(\"foobar\",grepit(\"foobar\",[\"foo\"],[\"nomatch\"]))", "def username_not_contains(self, username_not_contains):\n\n self._username_not_contains = username_not_contains", "def privacy_pass_phrase_not_contains(self, privacy_pass_phrase_not_contains):\n\n self._privacy_pass_phrase_not_contains = privacy_pass_phrase_not_contains", "def name_not_contains(self, name_not_contains):\n\n self._name_not_contains = name_not_contains", "def negated(input_words, include_nt=True):\n neg_words = NEGATE\n if any(word.lower() in neg_words for word in input_words):\n return True\n if include_nt:\n if any(\"n't\" in word.lower() for word in input_words):\n return True\n return False", "def privacy_pass_phrase_not_starts_with(self, privacy_pass_phrase_not_starts_with):\n\n self._privacy_pass_phrase_not_starts_with = privacy_pass_phrase_not_starts_with", "def assert_not_match(pattern, string, msg=None):\n assert_none(re.search(pattern, string), msg)", "def isNot(self, value):\n newq = self.copy()\n newq.setOp(Query.Op.IsNot)\n newq.setValue(value)\n return newq", "def __ne__(self, *args):\n return _libsbml.string___ne__(self, *args)", "def test_negation():\n char1 = Character(court=['winter'])\n char2 = Character()\n char3 = Character(court=['summer'])\n res = npc.commands.find_characters([\"court~:winter\"], [char1, char2, char3])\n assert char1 not in res\n assert char2 in res\n assert char3 in res", "def assert_not_match(pattern, string, msg=None):\r\n assert_none(re.search(pattern, string), msg)", "def not_in(self, other: Any) -> ColumnOperators:\n return self.operate(not_in_op, other)", "def not_found(\n expr: str\n ) -> bool:\n return expr.startswith(\"N\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Implements a database-specific 'match' operator.
def match(self, other: Any, **kwargs: Any) -> ColumnOperators: return self.operate(match_op, other, **kwargs)
[ "def _query_one(self, session: Session, match_value: str, match_key: str = None) -> SqlAlchemyBase:\n if match_key is None:\n match_key = self.primary_key\n\n return session.query(self.sql_model).filter_by(**{match_key: match_value}).one()", "def testSearchWithMatchesAndQuestionMarkWildcard(self):\n objectID1 = uuid4()\n objectID2 = uuid4()\n yield self.index.update(\n {objectID1: {u'test/tag': u'red stone'},\n objectID2: {u'test/tag': u'get rid of the body'},\n uuid4(): {u'test/tag': u'run, forest, run'}})\n yield self.index.commit()\n query = parseQuery(u'test/tag matches \"r?d\"')\n result = yield self.index.search(query)\n self.assertEqual(set([objectID1, objectID2]), result)", "def match(self, ctx):\n self.ctx = ctx\n\n # We look up the method on ourselves that is the search op we\n # are to perform and we call that operation.\n #\n return getattr(self, '_match_%s' % self.op)()", "def testSearchWithMatchesAndStarWildcardAtTheBegining(self):\n objectID1 = uuid4()\n objectID2 = uuid4()\n yield self.index.update(\n {objectID1: {u'test/tag': u'book:Moby Dick'},\n objectID2: {u'test/tag': u'movie:Moby Dick'},\n uuid4(): {u'test/tag': u'One Book'}})\n yield self.index.commit()\n query = parseQuery(u'test/tag matches \"*moby\"')\n result = yield self.index.search(query)\n self.assertEqual(set([objectID1, objectID2]), result)", "def match(self, table_name, field, regex=None, test=None):\n table = self.db.table(table_name)\n if test is not None:\n LOGGER.debug('%r: search(where(%r).test(%r))' % (table_name, field, test))\n return table.search(where(field).test(test))\n elif regex is not None:\n LOGGER.debug('%r: search(where(%r).matches(%r))' % (table_name, field, regex))\n return table.search(where(field).matches(regex))\n else:\n LOGGER.debug(\"%r: search(where(%r).matches('.*'))\" % (table_name, field))\n return table.search(where(field).matches('.*'))", "def test_matches(self):\n operator = 'matches'\n\n val = fake.word()\n\n length = len(val) // 2\n\n substr = val[fake.random_int(0, length):fake.random_int(1, length)]\n regex = \".*{}.*\".format(substr)\n\n condition = Condition(operator=operator, values=values(value=regex))\n self.assertTrue(condition.evaluate(val))\n\n # Not\n condition = Condition(operator=operator, values=values(value=fake.word()))\n self.assertFalse(condition.evaluate(val))", "def sample_match_query(self, env, result):\n table = env.db.fetch_table(\"users\", columns=[\"name\", \"email\"])\n result.table.log(table, description=\"Two columns.\")\n\n expected_table = [\n [\"name\", \"email\"],\n [\"John\", \"john@email\"],\n [\"Mary\", \"mary@email\"],\n ]\n\n # Match the table fetched against the expected.\n result.table.match(actual=table, expected=expected_table)", "def test_regex_case_insensitive_match(self):\n cursor = self.dbh.cursor()\n try:\n expr = self.dbh.get_regex_clause(\"'ABC'\", 'a.*', False)\n qry = self.dbh.get_expr_exec_format() % \"'TRUE'\"\n qry += ' WHERE ' + expr\n\n cursor.execute(qry)\n\n self.assertEqual(cursor.fetchone()[0], 'TRUE')\n finally:\n self.dbh.rollback()\n cursor.close()", "def matches(self, value, caseSensitive=True):\n newq = self.copy()\n newq.setOp(Query.Op.Matches)\n newq.setValue(value)\n newq.setCaseSensitive(caseSensitive)\n return newq", "def testSearchWithMatches(self):\n objectID = uuid4()\n yield self.index.update({objectID: {u'test/tag': u'value'},\n uuid4(): {u'test/tag': u'devalue'}})\n yield self.index.commit()\n query = parseQuery(u'test/tag matches \"value\"')\n result = yield self.index.search(query)\n 
self.assertEqual(set([objectID]), result)", "def test_regex_case_sensitive_match(self):\n cursor = self.dbh.cursor()\n try:\n expr = self.dbh.get_regex_clause(\"'abc'\", 'a.*')\n qry = self.dbh.get_expr_exec_format() % \"'TRUE'\"\n qry += ' WHERE ' + expr\n\n cursor.execute(qry)\n\n self.assertEqual(cursor.fetchone()[0], 'TRUE')\n finally:\n self.dbh.rollback()\n cursor.close()", "def generic_match(obj, value, *args, **kwargs):\n raise NotImplementedError()", "def get_match_clause(self):\n return self.match_clause", "def test_match_sub_eq(self, subdocument):\n assert subdocument.match({\"and.the\": \"drake\"})\n assert not subdocument.match({\"and.no\": \"drake\"})", "def testSearchWithMatchesIsCaseInsensitive(self):\n objectID1 = uuid4()\n objectID2 = uuid4()\n objectID3 = uuid4()\n yield self.index.update({objectID1: {u'test/tag': u'VALUE'},\n objectID2: {u'test/tag': u'value'},\n objectID3: {u'test/tag': u'VaLuE'},\n uuid4(): {u'test/tag': u'devalue'}})\n yield self.index.commit()\n query = parseQuery(u'test/tag matches \"vAlUe\"')\n result = yield self.index.search(query)\n self.assertEqual(set([objectID1, objectID2, objectID3]), result)", "def test_find_match(self):\n conn, cursor = get_db_cursor()\n build = \"toy_build\"\n transcript_dict = init_refs.make_transcript_dict(cursor, build)\n\n edges = ( 2, 3 )\n matches = talon.search_for_ISM(edges, transcript_dict)\n\n # Make sure that correct match got returned\n correct_gene_ID = fetch_correct_ID(\"TG1\", \"gene\", cursor)\n\n assert matches[0][\"gene_ID\"] == correct_gene_ID\n conn.close()", "def translate_text_match_op_to_sql(query, expr, state):\n # Unnamed invocations require the referenced variable to uniquely select a graph\n # and associated text index; if there is ambiguity, an error will be raised:\n # (1) --match '...(x)-[]->(l)...' --where '...textmatch(l, \"foo bar\")...' --return '...bm25(l)...'\n # column-based match on 'l' only, requires 'l' to be associated with a unique graph and\n # the graph be associated with a unique text index that indexes the node2 column\n # (2) --match '...(x)-[r]->(l)...' --where '...textmatch(r, \"foo bar\")...' --return '...bm25(r)...'\n # all-indexed-column-based match, requires 'r' to be associated with a unique graph and\n # the graph to be associated with a unique text index, for the associated score function\n # either r or l could be used as long as l is as discriminative as r\n # (3) --match '...(x)-[r]->(l)...' --where '...textmatch(r.node2, \"foo bar\")...' --return '...bm25(r.node2)...'\n # column-based match on node2 only, requires 'r' to be associated with a unique graph and\n # the graph be associated with a unique text index that indexes the node2 column; can be used\n # in case the associated l variable does not uniquely specify a text index\n # (4) --match '...(x)-[r]->(l)...' --where '...textmatch(r.id, \"foo bar\")...' --return '...bm25(r.id)...'\n # column-based match on id only, requires 'r' to be associated with a unique graph and\n # the graph be associated with a unique text index that indexes the id column\n #\n # Named invocations specify the name of a specific index to use in addition to a graph variable.\n # This can be used for explicit disambiguation, for example, if a graph has multiple text indexes\n # associated with it. 
In this case the named index needs to be usable for the specified variable.\n # Index names can be provided with a variable whose name should not match any match variables.\n # We can then use property syntax if we want to specify specific match variables. In the examples\n # below we assume we have a text index named 'myidx2'.\n # (1) --match '...(x)-[]->(l)...' --where '...textmatch(myidx2.l, \"foo bar\")...' --return '...bm25(myidx2.l)...'\n # column-based match on 'l' only, requires 'l' to be associated with the graph of myidx2\n # and myidx2 to index the node2 column\n # (2) --match '...(x)-[r]->(l)...' --where '...textmatch(myidx2.r, \"foo bar\")...' --return '...bm25(myidx2.r)...'\n # all-indexed-column-based match, requires 'r' to be associated with the graph of myidx2\n # (3) --match '...(x)-[r]->(l)...' --where '...textmatch(myidx2.r.node2, \"foo bar\")...' --return '...bm25(myidx2.r.node2)...'\n # column-based match on node2 only, requires 'r' to be associated with the graph of myidx2\n # and myidx2 to index the node2 column\n # (4) --match '...(x)-[r]->(l)...' --where '...textmatch(myidx2.r.id, \"foo bar\")...' --return '...bm25(myidx2.r.id)...'\n # column-based match on id only, requires 'r' to be associated with the graph of myidx2\n # and myidx2 to index the node2 column\n \n normfun = normalize_text_match_operator(expr.function)\n if not normfun:\n raise KGTKException(f\"Unsupported text match function: {expr.function}\")\n arguments = expr.args\n arity = len(arguments)\n if arity < 1:\n raise KGTKException(f\"Missing arguments in {expr.function} expression\")\n\n # handle first argument which can be a variable or property expression (possibly starting with an index name):\n # ('Variable', {'name': 'l'})\n # ('Expression2', {'arg1': ('Variable', {'name': 'r'}), 'arg2': [('PropertyLookup', {'property': 'node2'})]})\n # ('Expression2', {'arg1': ('Variable', {'name': 'myidx'}), 'arg2': [('PropertyLookup', {'property': 'r'})]})\n # ('Expression2', {'arg1': ('Variable', {'name': 'myidx'}), 'arg2': [('PropertyLookup', {'property': 'r'}), ('PropertyLookup', {'property': 'node2'})]})\n arg1 = arguments[0]\n index_name = None\n if isinstance(arg1, parser.Expression2) and isinstance(arg1.arg1, parser.Variable):\n # we have a property reference, check if base variable is an index name:\n sql_vars = state.lookup_variable(arg1.arg1.name, error=False)\n if sql_vars is None:\n index_name = arg1.arg1.name\n props = arg1.arg2\n arg1.arg1.name = props[0].property\n arg1.arg2 = props[1:]\n if len(props) == 1:\n arg1 = arg1.arg1\n\n # figure out the graph and column name:\n # TO DO: for now we ignore the possibility of graph or column-ambiguous variables\n # and simply go with the SQL translation, but this needs to be generalized:\n if isinstance(arg1, parser.Variable):\n # key in on Variableness to figure out whether this might refer to whole index or just a column:\n graph_alias, column_name, sql = query.variable_to_sql(arg1, state)\n elif isinstance(arg1, parser.Expression2) and isinstance(arg1.arg1, parser.Variable):\n graph_alias, column_name, sql = query.property_to_sql(arg1, state)\n else:\n raise KGTKException(f\"First argument to {expr.function} needs to be variable or property\")\n\n # find applicable indexes:\n graph = state.get_alias_table(graph_alias)\n if column_name == query.get_id_column(graph) and isinstance(arg1, parser.Variable):\n # we have an all-indexed-columns match:\n column = None\n else:\n # we have a column-specific match:\n column = column_name\n indexes = 
find_matching_text_indexes(query, graph, index_name, column)\n if len(indexes) == 0:\n # for now, until we support mode:autotext:\n raise KGTKException(f\"No usable text index found for {expr.function}\")\n elif len(indexes) > 1:\n raise KGTKException(f\"Multiple applicable text indexes found for {expr.function}\")\n\n # generate the SQL translation:\n index = indexes[0]\n index_table = index.get_name()\n qualified_index_table = query.store.get_qualified_table_name(index_table, db=index.db)\n index_alias = state.get_table_aliases(qualified_index_table, create_prefix='txtidx')[0]\n index_column = index_table\n if not (column_name == query.get_id_column(graph) and isinstance(arg1, parser.Variable)):\n # we have a column-specific search:\n index_column = column_name\n index_column = sql_quote_ident(index_column)\n state.add_match_clause_aux_table(qualified_index_table, index_alias)\n\n if normfun in ('match', 'like', 'glob'):\n if arity != 2:\n raise KGTKException(f\"Extraneous {expr.function} arguments\")\n operator = normfun.upper()\n arg2 = query.expression_to_sql(arguments[1], state)\n return f'{index_alias}.{index_column} {operator} {arg2} and {index_alias}.rowid = {graph_alias}.rowid'\n elif normfun in ('score'):\n # scoring function always needs the special table column, even for column-based match:\n return f'BM25({index_alias}.{index_table})'", "def match_external(database, query_patient, node=None):\n # trigger the matching and save the matching id to variable\n matching_obj = external_matcher(database, query_patient, node)\n # save matching object to database only if there are results or error messages\n if matching_obj and (matching_obj.get('has_matches') or matching_obj.get('errors')):\n database['matches'].insert_one(matching_obj)\n return matching_obj", "def test_match_gte(self, document):\n assert document.match({\"_id\": {\"$gte\": 1}})\n assert document.match({\"_id\": {\"$gte\": 0}})\n assert not document.match({\"_id\": {\"$gte\": 2}})" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Implements a database-specific 'regexp match' operator.
def regexp_match(
    self, pattern: Any, flags: Optional[str] = None
) -> ColumnOperators:
    return self.operate(regexp_match_op, pattern, flags=flags)
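As a usage sketch (assuming SQLAlchemy 1.4+; the column name is invented for illustration), the operator composes like any other column comparison and compiles to the dialect's native regex syntax, e.g. ``~`` on PostgreSQL and ``REGEXP`` on MySQL:

from sqlalchemy import String, column, select

name = column("name", String)
stmt = select(name).where(name.regexp_match(r"^[A-Z]+$"))
# PostgreSQL compiles the predicate roughly as: name ~ %(name_1)s
# MySQL compiles it roughly as:                 name REGEXP %(name_1)s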
[ "def _regex_operation(self, left, right):\n if self.dialect == 'mysql':\n return literal(left).op('REGEXP', is_comparison=True)(right)\n elif self.dialect == 'postgresql':\n return literal(left).op('~', is_comparison=True)(right)\n elif self.dialect == 'oracle':\n return func.REGEXP_LIKE(left, right)\n return None", "def _add_regexp_listener(dbapi_con, con_record):\n\n def regexp(expr, item):\n reg = re.compile(expr)\n return reg.search(six.text_type(item)) is not None\n dbapi_con.create_function('regexp', 2, regexp)", "def main(self, regex_string):\n sql_sen = regex_string[0][0]\n reg = \"\\$\\w+\"\n if re.search(reg, sql_sen, re.I):\n\n p = re.compile(reg)\n match = p.findall(sql_sen)\n return match\n return None", "def test_regex_case_insensitive_match(self):\n cursor = self.dbh.cursor()\n try:\n expr = self.dbh.get_regex_clause(\"'ABC'\", 'a.*', False)\n qry = self.dbh.get_expr_exec_format() % \"'TRUE'\"\n qry += ' WHERE ' + expr\n\n cursor.execute(qry)\n\n self.assertEqual(cursor.fetchone()[0], 'TRUE')\n finally:\n self.dbh.rollback()\n cursor.close()", "def test_regex_case_sensitive_match(self):\n cursor = self.dbh.cursor()\n try:\n expr = self.dbh.get_regex_clause(\"'abc'\", 'a.*')\n qry = self.dbh.get_expr_exec_format() % \"'TRUE'\"\n qry += ' WHERE ' + expr\n\n cursor.execute(qry)\n\n self.assertEqual(cursor.fetchone()[0], 'TRUE')\n finally:\n self.dbh.rollback()\n cursor.close()", "def regexp_predicate(value):\n return re.compile(value).match", "def match(self, table_name, field, regex=None, test=None):\n table = self.db.table(table_name)\n if test is not None:\n LOGGER.debug('%r: search(where(%r).test(%r))' % (table_name, field, test))\n return table.search(where(field).test(test))\n elif regex is not None:\n LOGGER.debug('%r: search(where(%r).matches(%r))' % (table_name, field, regex))\n return table.search(where(field).matches(regex))\n else:\n LOGGER.debug(\"%r: search(where(%r).matches('.*'))\" % (table_name, field))\n return table.search(where(field).matches('.*'))", "def convertSQL_LIKE2REGEXP(sql_like_pattern):\n # Replace '_' by equivalent regexp, except when precede by '\\'\n # (escape character)\n regexp = re.sub(r'(?<!\\\\)_', '.', sql_like_pattern)\n # Replace '%' by equivalent regexp, except when precede by '\\'\n # (escape character)\n regexp = re.sub(r'(?<!\\\\)%', '.*', regexp)\n # Set regexp to ignore cases; SQL patterns are case-insensitive by default.\n regexp = \"(?i)^(\" + regexp + \")$\"\n return regexp", "def _supports_regex_operator(self):\n return self.dialect in ['mysql', 'postgresql', 'oracle']", "def match(rule,thestr):\r\n return compile(rule).match(thestr)", "def test_matches(self):\n operator = 'matches'\n\n val = fake.word()\n\n length = len(val) // 2\n\n substr = val[fake.random_int(0, length):fake.random_int(1, length)]\n regex = \".*{}.*\".format(substr)\n\n condition = Condition(operator=operator, values=values(value=regex))\n self.assertTrue(condition.evaluate(val))\n\n # Not\n condition = Condition(operator=operator, values=values(value=fake.word()))\n self.assertFalse(condition.evaluate(val))", "def match(self, regexp, flags=None):\r\n mp = self.match_position\r\n try:\r\n reg = _regexp_cache[(regexp, flags)]\r\n except KeyError:\r\n if flags:\r\n reg = re.compile(regexp, flags)\r\n else:\r\n reg = re.compile(regexp)\r\n _regexp_cache[(regexp, flags)] = reg\r\n\r\n match = reg.match(self.text, self.match_position)\r\n if match:\r\n (start, end) = match.span()\r\n if end == start:\r\n self.match_position = end + 1\r\n else:\r\n 
self.match_position = end\r\n self.matched_lineno = self.lineno\r\n lines = re.findall(r\"\\n\", self.text[mp:self.match_position])\r\n cp = mp - 1\r\n while (cp >= 0 and cp<self.textlength and self.text[cp] != '\\n'):\r\n cp -=1\r\n self.matched_charpos = mp - cp\r\n self.lineno += len(lines)\r\n #print \"MATCHED:\", match.group(0), \"LINE START:\", self.matched_lineno, \"LINE END:\", self.lineno\r\n #print \"MATCH:\", regexp, \"\\n\", self.text[mp : mp + 15], (match and \"TRUE\" or \"FALSE\")\r\n return match", "def _emulate_python_fullmatch(regex, string, flags=0):\n return re.match(\"(?:\" + regex + r\")\\Z\", string, flags=flags)", "def test_match(self):\n\n # Test of the rematch case.\n regex = r\"([a-z]{1,})\\s([a-z]{1,})\\s\"\n expected = \"is\"\n actual = Regex(self.data, regex, rematch=True, group=1).match()\n\n self.assertEqual(expected, actual)\n\n # Test of the group case\n regex = \"e\"\n expected = \"e\"\n actual = Regex(self.data, regex, group=0).match()\n\n self.assertEqual(expected, actual)", "def testRegex(regex, example):", "def test_regex_case_sensitive_nomatch(self):\n cursor = self.dbh.cursor()\n try:\n expr = self.dbh.get_regex_clause(\"'ABC'\", 'a.*')\n qry = self.dbh.get_expr_exec_format() % \"'TRUE'\"\n qry += ' WHERE ' + expr\n\n cursor.execute(qry)\n\n self.assertIsNone(cursor.fetchone())\n finally:\n self.dbh.rollback()\n cursor.close()", "def get_search_pattern(self):", "def test_multi_match_return_expr(self):\n eq_(self.line,line_matches_greps(self.line,[\"foo\",\"bar\"]))", "def translate_text_match_op_to_sql(query, expr, state):\n # Unnamed invocations require the referenced variable to uniquely select a graph\n # and associated text index; if there is ambiguity, an error will be raised:\n # (1) --match '...(x)-[]->(l)...' --where '...textmatch(l, \"foo bar\")...' --return '...bm25(l)...'\n # column-based match on 'l' only, requires 'l' to be associated with a unique graph and\n # the graph be associated with a unique text index that indexes the node2 column\n # (2) --match '...(x)-[r]->(l)...' --where '...textmatch(r, \"foo bar\")...' --return '...bm25(r)...'\n # all-indexed-column-based match, requires 'r' to be associated with a unique graph and\n # the graph to be associated with a unique text index, for the associated score function\n # either r or l could be used as long as l is as discriminative as r\n # (3) --match '...(x)-[r]->(l)...' --where '...textmatch(r.node2, \"foo bar\")...' --return '...bm25(r.node2)...'\n # column-based match on node2 only, requires 'r' to be associated with a unique graph and\n # the graph be associated with a unique text index that indexes the node2 column; can be used\n # in case the associated l variable does not uniquely specify a text index\n # (4) --match '...(x)-[r]->(l)...' --where '...textmatch(r.id, \"foo bar\")...' --return '...bm25(r.id)...'\n # column-based match on id only, requires 'r' to be associated with a unique graph and\n # the graph be associated with a unique text index that indexes the id column\n #\n # Named invocations specify the name of a specific index to use in addition to a graph variable.\n # This can be used for explicit disambiguation, for example, if a graph has multiple text indexes\n # associated with it. In this case the named index needs to be usable for the specified variable.\n # Index names can be provided with a variable whose name should not match any match variables.\n # We can then use property syntax if we want to specify specific match variables. 
In the examples\n # below we assume we have a text index named 'myidx2'.\n # (1) --match '...(x)-[]->(l)...' --where '...textmatch(myidx2.l, \"foo bar\")...' --return '...bm25(myidx2.l)...'\n # column-based match on 'l' only, requires 'l' to be associated with the graph of myidx2\n # and myidx2 to index the node2 column\n # (2) --match '...(x)-[r]->(l)...' --where '...textmatch(myidx2.r, \"foo bar\")...' --return '...bm25(myidx2.r)...'\n # all-indexed-column-based match, requires 'r' to be associated with the graph of myidx2\n # (3) --match '...(x)-[r]->(l)...' --where '...textmatch(myidx2.r.node2, \"foo bar\")...' --return '...bm25(myidx2.r.node2)...'\n # column-based match on node2 only, requires 'r' to be associated with the graph of myidx2\n # and myidx2 to index the node2 column\n # (4) --match '...(x)-[r]->(l)...' --where '...textmatch(myidx2.r.id, \"foo bar\")...' --return '...bm25(myidx2.r.id)...'\n # column-based match on id only, requires 'r' to be associated with the graph of myidx2\n # and myidx2 to index the node2 column\n \n normfun = normalize_text_match_operator(expr.function)\n if not normfun:\n raise KGTKException(f\"Unsupported text match function: {expr.function}\")\n arguments = expr.args\n arity = len(arguments)\n if arity < 1:\n raise KGTKException(f\"Missing arguments in {expr.function} expression\")\n\n # handle first argument which can be a variable or property expression (possibly starting with an index name):\n # ('Variable', {'name': 'l'})\n # ('Expression2', {'arg1': ('Variable', {'name': 'r'}), 'arg2': [('PropertyLookup', {'property': 'node2'})]})\n # ('Expression2', {'arg1': ('Variable', {'name': 'myidx'}), 'arg2': [('PropertyLookup', {'property': 'r'})]})\n # ('Expression2', {'arg1': ('Variable', {'name': 'myidx'}), 'arg2': [('PropertyLookup', {'property': 'r'}), ('PropertyLookup', {'property': 'node2'})]})\n arg1 = arguments[0]\n index_name = None\n if isinstance(arg1, parser.Expression2) and isinstance(arg1.arg1, parser.Variable):\n # we have a property reference, check if base variable is an index name:\n sql_vars = state.lookup_variable(arg1.arg1.name, error=False)\n if sql_vars is None:\n index_name = arg1.arg1.name\n props = arg1.arg2\n arg1.arg1.name = props[0].property\n arg1.arg2 = props[1:]\n if len(props) == 1:\n arg1 = arg1.arg1\n\n # figure out the graph and column name:\n # TO DO: for now we ignore the possibility of graph or column-ambiguous variables\n # and simply go with the SQL translation, but this needs to be generalized:\n if isinstance(arg1, parser.Variable):\n # key in on Variableness to figure out whether this might refer to whole index or just a column:\n graph_alias, column_name, sql = query.variable_to_sql(arg1, state)\n elif isinstance(arg1, parser.Expression2) and isinstance(arg1.arg1, parser.Variable):\n graph_alias, column_name, sql = query.property_to_sql(arg1, state)\n else:\n raise KGTKException(f\"First argument to {expr.function} needs to be variable or property\")\n\n # find applicable indexes:\n graph = state.get_alias_table(graph_alias)\n if column_name == query.get_id_column(graph) and isinstance(arg1, parser.Variable):\n # we have an all-indexed-columns match:\n column = None\n else:\n # we have a column-specific match:\n column = column_name\n indexes = find_matching_text_indexes(query, graph, index_name, column)\n if len(indexes) == 0:\n # for now, until we support mode:autotext:\n raise KGTKException(f\"No usable text index found for {expr.function}\")\n elif len(indexes) > 1:\n raise KGTKException(f\"Multiple 
applicable text indexes found for {expr.function}\")\n\n # generate the SQL translation:\n index = indexes[0]\n index_table = index.get_name()\n qualified_index_table = query.store.get_qualified_table_name(index_table, db=index.db)\n index_alias = state.get_table_aliases(qualified_index_table, create_prefix='txtidx')[0]\n index_column = index_table\n if not (column_name == query.get_id_column(graph) and isinstance(arg1, parser.Variable)):\n # we have a column-specific search:\n index_column = column_name\n index_column = sql_quote_ident(index_column)\n state.add_match_clause_aux_table(qualified_index_table, index_alias)\n\n if normfun in ('match', 'like', 'glob'):\n if arity != 2:\n raise KGTKException(f\"Extraneous {expr.function} arguments\")\n operator = normfun.upper()\n arg2 = query.expression_to_sql(arguments[1], state)\n return f'{index_alias}.{index_column} {operator} {arg2} and {index_alias}.rowid = {graph_alias}.rowid'\n elif normfun in ('score'):\n # scoring function always needs the special table column, even for column-based match:\n return f'BM25({index_alias}.{index_table})'" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Implements a database-specific 'regexp replace' operator.
def regexp_replace(
    self, pattern: Any, replacement: Any, flags: Optional[str] = None
) -> ColumnOperators:
    return self.operate(
        regexp_replace_op,
        pattern,
        replacement=replacement,
        flags=flags,
    )
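A companion sketch for the replace form (same assumptions as above; the exact rendering of ``flags="g"`` is dialect-dependent and shown here only approximately):

from sqlalchemy import String, column, select

name = column("name", String)
stmt = select(name.regexp_replace(r"\s+", " ", flags="g"))
# PostgreSQL compiles roughly as: REGEXP_REPLACE(name, %(name_1)s, %(name_2)s, 'g')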
[ "def replace_regex(self, args):\n bound_char = args[0]\n pattern = args.split(bound_char)\n if len(pattern) != 4:\n print \"ERROR, format is: !reg /REGEX/REPLACE/ and / can be any char.\"\n else:\n reg, rep = pattern[1], pattern[2]\n regex = re.compile(reg)\n self.save_replace(args)\n for i, line in enumerate(self.data):\n if self.test_regex(regex, line):\n print re.sub(regex, rep, line),", "def rx_replace(rx, sql, new_part):\r\n m = re.search(rx, sql, re.I)\r\n if not m:\r\n raise Exception('rx_replace failed: rx=%r sql=%r new=%r' % (rx, sql, new_part))\r\n p1 = sql[:m.start()]\r\n p2 = sql[m.end():]\r\n return p1 + new_part + p2", "def gsub(self, pattern, replacement, ignore_case=False):\n return H2OFrame(expr=ExprNode(\"gsub\", pattern, replacement, self, ignore_case))", "def replace_params(self):\n raw_sql = self.raw_sql\n for placeholder in self.to_replace:\n newreg = re.compile(placeholder)\n repl = self.get_replacement_value(placeholder)\n if repl:\n raw_sql = newreg.sub(str(repl), raw_sql)\n self.sql = raw_sql", "def find_values_to_replace(self):\n regexp = re.compile(self.raw_pattern)\n self.to_replace = regexp.findall(self.raw_sql)", "def _regex_operation(self, left, right):\n if self.dialect == 'mysql':\n return literal(left).op('REGEXP', is_comparison=True)(right)\n elif self.dialect == 'postgresql':\n return literal(left).op('~', is_comparison=True)(right)\n elif self.dialect == 'oracle':\n return func.REGEXP_LIKE(left, right)\n return None", "def convertSQL_LIKE2REGEXP(sql_like_pattern):\n # Replace '_' by equivalent regexp, except when precede by '\\'\n # (escape character)\n regexp = re.sub(r'(?<!\\\\)_', '.', sql_like_pattern)\n # Replace '%' by equivalent regexp, except when precede by '\\'\n # (escape character)\n regexp = re.sub(r'(?<!\\\\)%', '.*', regexp)\n # Set regexp to ignore cases; SQL patterns are case-insensitive by default.\n regexp = \"(?i)^(\" + regexp + \")$\"\n return regexp", "def applyRegexp(dataset, tag):\n element = dataset.get(tag)\n if element is not None:\n element.value = re.sub(options['find'], options['replace'], element.value)", "def _add_regexp_listener(dbapi_con, con_record):\n\n def regexp(expr, item):\n reg = re.compile(expr)\n return reg.search(six.text_type(item)) is not None\n dbapi_con.create_function('regexp', 2, regexp)", "def __init__(self, char_regex, replacement):\n self.regex = re.compile(char_regex)\n self.repl = replacement", "def replace_all(\n self, pattern: str | Expr, value: str | Expr, *, literal: bool = False\n ) -> Expr:\n pattern = parse_as_expression(pattern, str_as_lit=True)\n value = parse_as_expression(value, str_as_lit=True)\n return wrap_expr(self._pyexpr.str_replace_all(pattern, value, literal))", "def test_evaluate_replace_expression(self):\n value = self.evaluate_common(\"replace('startswith','tart','cake')\")\n self.assertTrue(\n value.type_code == edm.SimpleType.String, \"Expected String\")\n self.assertTrue(value.value == \"scakeswith\")\n value = self.evaluate_common(\"replace('startswith','t','x')\")\n self.assertTrue(value.value == \"sxarxswixh\")\n # not case insensitive\n value = self.evaluate_common(\"replace('sTartswith','t','x')\")\n self.assertTrue(value.value == \"sTarxswixh\")\n value = self.evaluate_common(\"replace('startswith','t','tx')\")\n self.assertTrue(value.value == \"stxartxswitxh\")\n try:\n value = self.evaluate_common(\"replace('3.14','1',2)\")\n self.fail(\"integer as parameter\")\n except odata.EvaluationError:\n pass\n try:\n value = self.evaluate_common(\"replace('3.14','1')\")\n 
self.fail(\"2 parameter\")\n except odata.EvaluationError:\n pass", "def REGEXREPLACE(text, regular_expression, replacement):\n return re.sub(regular_expression, replacement, text)", "def replace(self, variable, expression):\n raise NotImplementedError", "def redacorator(func):\n def _replace(match):\n ori = match.group()\n text = match.group().strip().lower()\n return func(text, ori)\n return _replace", "def replace(self, pat, repl, n=-1, case=None, flags=0, regex=True):\n if case is not None:\n raise NotImplementedError(\"`case` parameter is not yet supported\")\n elif flags != 0:\n raise NotImplementedError(\"`flags` parameter is not yet supported\")\n\n # Pandas treats 0 as all\n if n == 0:\n n = -1\n\n from cudf.core import Series\n\n return Series(\n self._parent.data.replace(pat, repl, n=n, regex=regex),\n index=self._index,\n name=self._parent.name,\n )", "def test_substitutions_with_regex_chars(self):\n m = strutils.MultiReplace({'cat.+': 'kedi', r'purple': 'mor', })\n self.assertEqual(m.sub('The cat.+ is purple'), 'The kedi is mor')", "def lreplace(pattern, sub, string):\n return re.sub('^%s' % pattern, sub, string)", "def preprocess_regex_check_and_replace(pattern: str, replacement: str) -> Callable[[str], str]:\n\n def preprocess(orig: str) -> str:\n new = re.sub(pattern, replacement, orig, flags=re.DOTALL)\n assert orig != new\n return new\n\n return preprocess" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Implement the ``+`` operator. In a column context, produces the clause ``a + b`` if the parent object has non-string affinity. If the parent object has a string affinity, produces the concatenation operator, ``a || b``
def __add__(self, other: Any) -> ColumnOperators:
    return self.operate(add, other)
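The type-dependent dispatch described in the docstring is easy to see on SQLAlchemy's default dialect; a minimal sketch (column names arbitrary):

from sqlalchemy import Integer, String, column

qty = column("qty", Integer)
label = column("label", String)
print(qty + 5)       # qty + :qty_1        (numeric affinity: addition)
print(label + "-x")  # label || :label_1   (string affinity: concatenation)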
[ "def concat(self, other: Any) -> ColumnOperators:\n return self.operate(concat_op, other)", "def __add__(self, o):\n if isinstance(o, str):\n o = LiteralString(o)\n if not isinstance(o, (LiteralString, CMacro, CStringExpression)):\n raise TypeError(f\"unsupported operand type(s) for +: '{self.__class__}' and '{type(o)}'\")\n return CStringExpression(*self._expression, o)", "def _rconcat(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(concat_op, other)", "def concat(cls, c1, c2, op):\r\n if c1.clause and c2.clause:\r\n return cls('({}) {} ({})'.format(c1.clause, op, c2.clause), c1.params + c2.params)\r\n elif c1.clause:\r\n return c1\r\n elif c2.clause:\r\n return c2\r\n else:\r\n return cls('', ())", "def plus(self, left):\n self.match('PLUS')\n right = self.expression()\n # return {'type': 'PLUS', 'left': left, 'right': right}\n return production.AddExpr(left, right)", "def __add__(self, other):\n if not (isNumeric(other) or isinstance(other, Expression)):\n error_msg = (\n f'Invalid expression during addition to {self}: [{other}]'\n )\n raise excep.biogemeError(error_msg)\n return Plus(self, other)", "def plus_part(self):\n return self.act_right(Matrix(2,2,[1,0,0,-1])) + self", "def __radd__(self, other: Any) -> Var:\n return Var(f\"{other}+{self}\")", "def __radd__(\n self, other: Union[\"LinearOperator\", Float[Array, \"N N\"]]\n ) -> \"LinearOperator\":\n return self + other", "def plus(self, a):\n a = _convert_other(a, raiseit=True)\n return a.__pos__(context=self)", "def _AddBinaryOperator(self, string=None, **unused_kwargs):\n expression = expressions.BinaryExpression(operator=string)\n self._stack.append(expression)\n\n return None", "def operator_addition(A, B):", "def __radd__(self, other): \n return self + other", "def __iadd__(self, other):\n if isinstance(other, String):\n self._pfp__value += other._pfp__value\n else:\n self._pfp__value += PYSTR(other)\n return self", "def visit_string_expr_with_string_part(self, ctx):\n expr = self.visit_string_expr_part(ctx.string_expr_part())\n part = self.visit_string_part(ctx.string_part())\n\n if not part:\n return expr\n\n return f'{expr} + {part}'", "def __radd__(self, other):\n if isinstance(other, Op):\n return Op(lambda x: self._f(x) + other._f(x))\n elif isinstance(other, FunctionType) or isinstance(other, LambdaType):\n return Op(lambda x: self._f(x) + other(x))\n else:\n try:\n return Op(lambda x: self._f(x) + other)\n except:\n return NotImplemented", "def append(self, o):\n if isinstance(o, str):\n o = LiteralString(o)\n if not isinstance(o, (LiteralString, CMacro, CStringExpression)):\n raise TypeError(f\"unsupported operand type(s) for append: '{self.__class__}' and '{type(o)}'\")\n self._expression += (o,)\n o.set_current_user_node(self)", "def __add__(self, other: Any) -> Union[Var, AdditionPart]: # type: ignore\n if is_quantified_unit(other):\n if other.unit == self.unit:\n return QuantifiedUnit(self.value + other.value, self.unit)\n return Addition([self, other])\n if is_addition_part(other):\n return other.__radd__(self)\n return Var(f\"{self}+{other}\")", "def __iadd__(self, other): \n return self + other" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Implement the ``%`` operator. In a column context, produces the clause ``a % b``.
def __mod__(self, other: Any) -> ColumnOperators:
    return self.operate(mod, other)
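A one-line sketch of the resulting SQL on the default dialect (column name arbitrary):

from sqlalchemy import Integer, column

n = column("n", Integer)
print(n % 3)  # compiles as: n % :n_1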
[ "def mod(a,b) :\n return a%b", "def mod(x, y):\n\n return x % y", "def __floordiv__(self, other: Any) -> ColumnOperators:\n return self.operate(floordiv, other)", "def percent(a, b):\n if a is None:\n return '0.00%'\n else:\n return '%0.2f%%' % (100.0 * a / b)", "def mydivmod(a, b):\n pass", "def _sample_using_mod(\n self,\n column_name,\n mod: int,\n value: int,\n ):\n return sa.column(column_name) % mod == value", "def modulation(x, y):\n return x % y", "def modulo(val1, val2):\n if coerce_to_int(val2) == 0:\n return None\n return coerce_to_int(val1) % coerce_to_int(val2)", "def mod(num1, num2):\n remainder = num1 % num2\n return remainder", "def the_remainder_of_the_division(numb1, numb2):\r\n return f\"Your result: {numb1%numb2}\"", "def div_mod_p(self, a, b):\n a = a % self.p\n b = b % self.p\n return a * self.pow_mod_p(b, self.p - 2, self.p) % self.p", "def like(\n self, other: Any, escape: Optional[str] = None\n ) -> ColumnOperators:\n return self.operate(like_op, other, escape=escape)", "def percentage(a, b):\n return (a * 100.0) / b", "def percent_of(part, whole):\n return part * 100 / whole", "def resol_modulo(a,b, mod):\r\n\tfor i in range(mod): # Pour tous les nombres du modulo\r\n\t\tif (a*i) % mod == b: # Si a*i modulo mod = b\r\n\t\t\treturn i # Alors on a trouvé ! On renvoit i\r\n\treturn None", "def div(self, a, b):\n return divmod(a, b)", "def percent(value):\n return f\"{value:,.2f} %\"", "def mod(value, modulus):\n quotient = value // modulus\n return value - quotient * modulus", "def escPercent(text):\n pat = re.compile(r'%(?!\\()')\n return pat.sub('%%', text)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Implement the ``//`` operator. In a column context, produces the clause ``a / b``, which is the same as "truediv", but considers the result type to be integer.
def __floordiv__(self, other: Any) -> ColumnOperators:
    return self.operate(floordiv, other)
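A sketch of how the integer result type plays out on the default dialect; the FLOOR() wrapping for non-integer operands is stated as an assumption about recent SQLAlchemy compilers, not verified for every dialect:

from sqlalchemy import Integer, Numeric, column

i = column("i", Integer)
x = column("x", Numeric)
print(i // 4)  # i / :i_1         -- plain division; integer ops truncate in SQL
print(x // 4)  # FLOOR(x / :x_1)  -- approximate rendering for numeric operands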
[ "def exquo(self, a, b):\n return a / b", "def divu(arg1, arg2):\n R_LO = ExprOp('udiv', arg1, arg2)\n R_HI = ExprOp('umod', arg1, arg2)", "def convert_div_scalar(node, **kwargs):\n return scalar_op_helper(node, 'Div', **kwargs)", "def division(operand1, operand2):\n return operand1/operand2", "def convert_rdiv_scalar(node, **kwargs):\n return scalar_op_helper(node, 'Div', **kwargs)", "def test_scalar_division(self):\n\n a1 = tuples.Tuple([\"a\", \"b\", \"c\", \"d\"], 1, -2, 3, -4)\n\n a2 = a1 / 2\n\n self.assertEqual(a2,\n tuples.Tuple([\"a\", \"b\", \"c\", \"d\"], 0.5, -1, 1.5, -2))", "def __rtruediv__(self, other):\n if type(other) == int:\n other = float(other)\n\n if type(other) == float:\n other = Tensor(other)\n\n return F.Div.apply(other, self)", "def test_div_operation(self):\n div_op = lib.DIV_OPERATOR\n context = env.empty_context()\n\n # Case 1: Two int values -> int\n args = [INT_VALUE, INT_VALUE]\n self.assertEqual(div_op.eval(args, context), INT_VALUE)\n # Case 2: Two float values -> float\n args = [FLOAT_VALUE, FLOAT_VALUE]\n self.assertEqual(div_op.eval(args, context), FLOAT_VALUE)\n # Case 3: One int, one float -> int\n args = [INT_VALUE, FLOAT_VALUE]\n self.assertEqual(div_op.eval(args, context), INT_VALUE)\n # Case 4: Division by 0 int\n args = [INT_VALUE, INT0_VALUE]\n self.assertRaises(env.RuntimeException, div_op.eval, args, context)\n # Case 5: Division by 0 float\n args = [FLOAT_VALUE, FLOAT0_VALUE]\n self.assertRaises(env.RuntimeException, div_op.eval, args, context)", "def _vector_divlike_op(self, other, op) -> np.ndarray | Self:\n # Let numpy handle it\n result = op(self._ndarray, np.asarray(other))\n\n if (is_integer_dtype(other.dtype) or is_float_dtype(other.dtype)) and op in [\n operator.truediv,\n operator.floordiv,\n ]:\n return type(self)._simple_new(result, dtype=result.dtype)\n\n if op in [operator.floordiv, roperator.rfloordiv]:\n mask = self.isna() | isna(other)\n if mask.any():\n result = result.astype(np.float64)\n np.putmask(result, mask, np.nan)\n\n return result", "def __truediv__(self, other):\n if type(other) == int:\n other = float(other)\n\n if type(other) == float:\n other = Tensor(other)\n\n return F.Div.apply(self, other)", "def division(self, first_value, second_value):", "def old_div(a, b):\n if isinstance(a, numbers.Integral) and isinstance(b, numbers.Integral):\n return a // b\n else:\n return a / b", "def test_truediv(self):\n a = int(3)\n self.assertEqual(a / 2, 1) # since \"from __future__ import division\"\n # is in effect\n self.assertEqual(type(a / 2), int)\n\n b = int(2)\n self.assertEqual(a / b, 1) # since \"from __future__ import division\"\n # is in effect\n self.assertEqual(type(a / b), int)\n\n c = int(3) / b\n self.assertEqual(c, 1)\n self.assertTrue(isinstance(c, int))\n\n d = int(5)\n d /= 5\n self.assertEqual(d, 1)\n self.assertTrue(isinstance(d, int))\n\n e = int(10)\n f = int(20)\n e /= f\n self.assertEqual(e, 0)\n self.assertTrue(isinstance(e, int))", "def divf(a, b):\n return int(a // b)", "def rf_local_divide(left_tile_col: Column_type, rhs: Union[float, int, Column_type]) -> Column:\n if isinstance(rhs, (float, int)):\n rhs = lit(rhs)\n return _apply_column_function(\"rf_local_divide\", left_tile_col, rhs)", "def __div__(self, value):\n out = self.copy()\n out.addMath(Query.Math.Divide, value)\n return out", "def divide(self, a, b):\n return a / b", "def __truediv__(self,value):\n \n if not(type(value) in [vector,coordinate]):\n try:\n x = self.x / value\n y = self.y / value\n z = self.z / value\n return 
self.create(x,y,z)\n except:\n raise TypeError(\"pas possible de diviser par 0\")\n \n \n else:\n raise TypeError(\"pas possible de diviser 2 vecteurs/coordonnées\")", "def divide(x):\n return x // 2" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verifies the bot by solving the website's captcha
def solve_captcha(self):
    # Switch to the Captcha's iframe
    captcha = CapatchaSolver(self.driver)
    while True:
        self.driver.switch_to.frame(self.driver.find_element_by_tag_name("iframe"))
        captcha.solve_captcha()
        # Check if we passed the captcha part by checking the page title
        wait = WebDriverWait(self.driver, 10)
        try:
            wait.until_not(EC.title_is(consts.BLOCKED))
            break
        except TimeoutException:
            self.driver.refresh()
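The snippet above leans on three standard Selenium imports plus two project-local names (``CapatchaSolver`` and ``consts``, which are specific to this codebase); the import paths below are the stock Selenium ones:

from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException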
[ "def funcbot():\n msg.askyesno(\"Captcha\", \"Are you human\")", "def test_bad_captcha(self):\r\n self.F.UserFactory.create(username=\"test\", password=\"sekrit\")\r\n\r\n session_data = {}\r\n\r\n with patch_session(session_data):\r\n res = self.get()\r\n for i in range(6):\r\n res = res.forms[\"loginform\"].submit()\r\n\r\n form = res.forms[\"loginform\"]\r\n answer = session_data[\"auth_captcha_answer\"]\r\n form[\"captcha\"] = answer + 1 # oops, wrong answer!\r\n form[\"username\"] = \"test\"\r\n form[\"password\"] = \"sekrit\"\r\n res = form.submit(status=200)\r\n\r\n res.mustcontain(\"not the answer we were looking for\")", "def captcha_validation(token: str):\n url = \"https://www.google.com/recaptcha/api/siteverify\"\n secret = json.loads(get_secret(\"CAPTCHA_SECRET\"))['CAPTCHA_SECRET']\n payload = {\n \"secret\": secret,\n \"response\": token\n }\n response_raw = requests.post(url, data=payload)\n response_text = response_raw.text\n logger.debug(response_text)\n response = json.loads(response_text)\n return response['success']", "def test_good_captcha(self):\r\n self.F.UserFactory.create(username=\"test\", password=\"sekrit\")\r\n\r\n session_data = {}\r\n\r\n with patch_session(session_data):\r\n res = self.get()\r\n for i in range(6):\r\n res = res.forms[\"loginform\"].submit()\r\n\r\n form = res.forms[\"loginform\"]\r\n answer = session_data[\"auth_captcha_answer\"]\r\n form[\"captcha\"] = answer\r\n form[\"username\"] = \"test\"\r\n form[\"password\"] = \"sekrit\"\r\n res = form.submit(status=302)\r\n\r\n self.assertRedirects(res, reverse(\"home\"))", "def captcha(self):\n notification.send_sms(message=message)\n notification.send_emails(emails=email, message=message)\n sleep(25)\n\n ### this code snippet is for reference only, not to be used ###\n # sleep(3)\n # captcha = self.driver.find_element_by_xpath('/html/body/div/iframe[0]')\n # self.driver.switch_to.frame(captcha)\n # captcha_loc = captcha.location\n # print(captcha_loc)\n # captcha_x = captcha_loc[\"x\"]\n # captcha_y = captcha_loc[\"y\"]\n # self.actions.tap_and_hold(captcha_x, captcha_y)\n # sleep(5)\n # self.actions.release(captcha_x, captcha_y)\n # self.search_input()", "def bypass_captcha(self, rps):\n viewstate_pattern = r\"id=\\\"__VIEWSTATE\\\".*\\\"(.*)\\\"\"\n viewstategenerator_pattern = r\"id=\\\"__VIEWSTATEGENERATOR\\\".*\\\"(.*)\\\"\"\n CAPTCHA_PATTERN = r\"id=\\\"ctl00_ContentPlaceHolder1_ctl00_lblCapcha\\\".*?>(.*?)<\\/span>\"\n viewstate = re.search(viewstate_pattern, rps)\n if viewstate:\n viewstate = viewstate.group(1)\n else:\n print(\"VIEWSTATE value not found!\")\n viewstategenerator = re.search(viewstategenerator_pattern, rps)\n if viewstategenerator:\n viewstategenerator = viewstategenerator.group(1)\n captcha = re.search(CAPTCHA_PATTERN, rps)\n if captcha:\n captcha_text = captcha.group(1)\n print(\"[*] CAPTCHA -> [{}]\".format(captcha_text))\n payload = {\n 'ctl00$ContentPlaceHolder1$ctl00$txtCaptcha':captcha_text,\n '__VIEWSTATE':viewstate,\n '__VIEWSTATEGENERATOR':viewstategenerator,\n '__EVENTARGUMENT':'',\n '__EVENTTARGET':'',\n 'ctl00$ContentPlaceHolder1$ctl00$btnXacNhan': 'Vào website'\n }\n rps = self.session.post(url = home_url, headers = BROWSER_HEADERS, data=payload)\n if CAPTCHA_ELEMENT_ID not in rps.text:\n print(\"[*] CAPTCHA BYPASSED\")\n return True\n else:\n print(\"CAPTCHA NOT BYPASSED! 
PLEASE REPORT TO DEVELOPER BACHVKHOA!\")\n else:\n print(\"[*] CAPTCHA NOT FOUND\")\n return False", "def askForCaptcha(self, url):\n try:\n import webbrowser\n wikipedia.output(u'Opening CAPTCHA in your web browser...')\n if webbrowser.open(url):\n return wikipedia.input(\n u'What is the solution of the CAPTCHA that is shown in '\n u'your web browser?')\n else:\n raise\n except:\n wikipedia.output(u'Error in opening web browser: %s'\n % sys.exc_info()[0])\n wikipedia.output(\n u'Please copy this url to your web browser and open it:\\n %s'\n % url)\n return wikipedia.input(\n u'What is the solution of the CAPTCHA at this url ?')", "def test_display_captcha(self):\r\n res = self.get()\r\n for i in range(6):\r\n res = res.forms[\"loginform\"].submit()\r\n\r\n form = res.forms[\"loginform\"]\r\n\r\n self.assertIn(\"captcha\", form.fields)", "def handle_captcha(self):\n self.webdriver.save_screenshot('./out/captcha.png')\n sleep(20)\n\n with open('./out/captcha', 'r') as f:\n try:\n self.webdriver.find_element_by_xpath(\"//input[@aria-label='Type the text you hear or see']\").send_keys(f.read())\n except:\n log.error('Captcha input failed. Possibly incorrect captcha?')\n raise\n\n self.webdriver.find_element_by_xpath('//*[@id=\"identifierNext\"]').click()\n sleep(4)\n\n self.webdriver.find_element_by_css_selector(\"input[type=password]\").send_keys(self.bot.getPassword())", "def process_captcha(self, req, resp):\n self.logger.debug(\n \"RPM over Anti-Automation threshold %s\",\n self.cfg.MAX_RPM\n )\n # test the aa cookie if they provided it\n if 'aa' in req.cookies and self.web_util.test_captcha(req.cookies['aa']):\n self.logger.debug('Captcha completed successfully')\n #reset their counter\n now = int(time.time())\n self.session_tracker[req.remote_addr] = {\n 'c':1,\n 't':now\n }\n resp.unset_cookie('aa')\n #if they provided and failed set new one and throw error\n elif 'aa' in req.cookies:\n self.set_captcha_required(resp)\n raise falcon.HTTPError(\n falcon.HTTP_401, #Forbidden\n 'Error',\n \"Captcha Rejected\"\n )\n else:\n self.set_captcha_required(resp)\n raise falcon.HTTPError(\n falcon.HTTP_401, #Forbidden\n 'Error',\n \"Captcha Required\"\n )", "def confirm(client_ip_address, recaptcha_challenge_field, recaptcha_response_field):\r\n result = False\r\n reply = check(client_ip_address, recaptcha_challenge_field, recaptcha_response_field)\r\n if reply:\r\n if reply.lower().startswith('true'):\r\n result = True\r\n return result", "def baixa_captcha(self):\n url = \"https://www.receita.fazenda.gov.br/PessoaJuridica/CNPJ/cnpjreva/captcha/gerarCaptcha.asp\"\n pagina = self.sessao.get(url)\n open('teste.png','wb').write(pagina)\n imagem_data = (ndimage.imread('teste.png'))\n# plt.imshow(imagem_data)\n# plt.show()\n\n # Site da receita exige tempo de espera\n time.sleep(1)\n\n imagem_data = imagem_data.reshape(1,50,180,4)\n predicao = quebra_captcha(imagem_data).flatten()\n predicao = ''.join([ classes[x] for x in predicao ]).lower()\n return(predicao)", "def clean_captcha(self):\r\n answer = self.cleaned_data.get(\"captcha\")\r\n if answer != self.captcha_answer:\r\n raise forms.ValidationError(\r\n \"Sorry, that's not the answer we were looking for.\")", "def captcha(self, url):\n fname = 'captcha.jpg'\n resp = self.session.get(url)\n with open(fname, 'wb') as f:\n try:\n f.write(resp.content)\n return True\n except Exception as e:\n print(\"Erro ao salvar o captcha: %s\" % e)\n return False", "def validate_recaptcha_token(token):\n url = 
\"https://www.google.com/recaptcha/api/siteverify\"\n data = {\"secret\": settings.RECAPTCHA_SECRET_KEY, \"response\": token}\n response = requests.post(url, data=data)\n return response.json()", "def check(client_ip_address, recaptcha_challenge_field, recaptcha_response_field):\r\n params = urllib.urlencode(dict(privatekey=recaptcha_private_key,\r\n remoteip=client_ip_address,\r\n challenge=recaptcha_challenge_field,\r\n response=to_bytestring(recaptcha_response_field)))\r\n data = None\r\n try:\r\n f = urllib2.urlopen(recaptcha_server_name, params)\r\n data = f.read()\r\n f.close()\r\n except urllib2.HTTPError:\r\n pass\r\n except urllib2.URLError:\r\n pass\r\n return data", "def _validate_captcha(data):\n settings = api.config.get_settings()[\"captcha\"]\n\n post_data = urllib.parse.urlencode(\n {\n \"secret\": settings[\"reCAPTCHA_private_key\"],\n \"response\": data[\"g-recaptcha-response\"],\n \"remoteip\": flask.request.remote_addr,\n }\n ).encode(\"utf-8\")\n\n request = urllib.request.Request(settings[\"captcha_url\"], post_data, method=\"POST\")\n response = urllib.request.urlopen(request).read().decode(\"utf-8\")\n parsed_response = json.loads(response)\n return parsed_response[\"success\"] is True", "def handle_verify_code(self, code):\n r = self.session.get(self.image_url_format.format(code=code))\n\n # FIXME use terminal better\n img_path = os.path.expanduser('~/') + 'pansh.{}.vcode.png'.format(hash(self.username))\n with open(img_path, mode='wb') as fp:\n fp.write(r.content)\n print(\"Saved verification code to {}\".format(os.path.dirname(img_path)))\n vcode = raw_input(\"Please input the captcha:\\n\")\n return vcode", "def check_solution(self, captcha_id: str) -> str:\n data = self.backend.get_check_solution_request_data(captcha_id)\n response = request(\n data[\"url\"],\n data[\"post_data\"],\n timeout=self.network_config[\"timeout\"],\n )\n return self.backend.parse_check_solution_response(response)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Obtains a generic title for a product review
def get_review_title(self, language):
    comment_generator = CommentGenerator(language)
    return comment_generator.generateTitle()
[ "def _getReviewTitle(self):\n try:\n return re.search('reviewLink\">\"(.+?)\"</a>', self.reviewHTML).group(1)\n except:\n return ''", "def review_restaurant_name(review):\n return review[0]", "def get_product_title(product_page_object):\n return product_page_object.find(\"p\", {\"class\": \"offer-thumb__title\"}).get(\"title\")", "def _create_title_from_description(product: Dict[str, Any]) -> str:\n if 'description' in product:\n title = f'{product[\"description\"][:_CHARS_TO_USE_WHEN_CREATING_TITLE].strip()}…'\n else:\n title = ''\n\n logging.info('Modified item %s: Created title: %s',\n product.get('offerId', ''), title)\n\n return title", "def get_title(title: str):\n return title", "def get_title(self):\n # self.title can be a Struct (for SurveyDetails) or just a string\n if isinstance(self.title, Struct):\n return self.title.text\n else:\n return self.title", "def review_rating(review):\n return review[1]", "def title(self) -> Optional[str]:\n if self._title is not None:\n return self._title\n if self._target_object is not None and isinstance(\n self._target_object, pystac.Catalog\n ):\n return self._target_object.title\n return None", "def get_title(rating):\n title = \"\"\n if rating < 1200:\n title = [\"Newbie\", \"grey-text\"]\n elif rating < 1400:\n title = [\"Pupil\", \"light-green-text\"]\n elif rating < 1600:\n title = [\"Specialist\", \"cyan-text\"]\n elif rating < 1900:\n title = [\"Expert\", \"indigo-text\"]\n elif rating < 2100:\n title = [\"Candidate Master\", \"purple-text\"]\n elif rating < 2300:\n title = [\"Master\", \"amber-text\"]\n elif rating < 2400:\n title = [\"International Master\", \"orange-text\"]\n elif rating < 2600:\n title = [\"Grandmaster\", \"red-text\"]\n elif rating < 3000:\n title = [\"International Grandmaster\", \"red-text\"]\n else:\n title = [\"Legendary Grandmaster\", \"red-text\"]\n return title", "def fetch_name(self, product_id):\n product_url = urljoin(self.endpoint, str(product_id)) + \"?excludes={}\".format(self.excludes) + \"&key={}\".format(self.key)\n\n result = requests.get(product_url)\n\n if result.status_code != requests.codes[\"ok\"]:\n raise ProductNotFoundError(\"could not find product name for ID {}\".format(product_id))\n\n data = result.json()\n\n try:\n name = data[\"product\"][\"item\"][\"product_description\"][\"title\"]\n except KeyError:\n name = None\n\n return name", "def get_title(rating):\n\ttitle = \"\"\n\tif rating < 1200:\n\t\ttitle = [\"Newbie\", \"grey-text\"]\n\telif rating < 1400:\n\t\ttitle = [\"Pupil\", \"light-green-text\"]\n\telif rating < 1600:\n\t\ttitle = [\"Specialist\", \"cyan-text\"]\n\telif rating < 1900:\n\t\ttitle = [\"Expert\", \"indigo-text\"]\n\telif rating < 2100:\n\t\ttitle = [\"Candidate Master\", \"purple-text\"]\n\telif rating < 2300:\n\t\ttitle = [\"Master\", \"amber-text\"]\n\telif rating < 2400:\n\t\ttitle = [\"International Master\", \"orange-text\"]\n\telif rating < 2600:\n\t\ttitle = [\"Grandmaster\", \"red-text\"]\n\telif rating < 3000:\n\t\ttitle = [\"International Grandmaster\", \"red-text\"]\n\telse:\n\t\ttitle = [\"Legendary Grandmaster\", \"red-text\"]\n\treturn title", "def title_or_id(context):\n title = getattr(context, 'title', '')\n if not title:\n if hasattr(context, '__name__'):\n title = getattr(context, '__name__', '')\n elif hasattr(context, 'getId'):\n title = context.getId()\n return title", "def get_title(self):\n\t\treturn self.title", "def title(self):\n type_titles = ['article_title', 'thesis_title', 'conference_title', 'link_title']\n\n for title in 
type_titles:\n if getattr(self, title):\n return getattr(self, title)", "def item_title(self, item):\n return item.title", "def get_product_name(container) -> str:\r\n title_container = container.findAll(\"a\", {\"class\": \"item-title\"})\r\n # product_title: List[] = title_container[0].text\r\n return title_container[0].text", "def get_title(self):\n return self.metadata['title']", "def get_child_product_title(self):\n title = self.title\n if self.parent_id:\n if not self.title:\n title = self.parent.title\n return unicode(title)", "def get_recipe_title(soup_recipe):\n return soup_recipe.find(\"h1\", {\"itemprop\": \"name\"}).get_text()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Leaves a review on a product page
def leave_review(self, product_url, review, review_title):
    raise NotImplementedError
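Since the method is deliberately abstract, a subclass supplies the site-specific flow. A hypothetical override (the form locators are invented for illustration, not the project's real selectors) could look like:

def leave_review(self, product_url, review, review_title):
    # Hypothetical subclass implementation using the same Selenium 3 style
    # as the rest of the codebase; every locator below is an assumption.
    self.driver.get(product_url)
    self.driver.find_element_by_name("review_title").send_keys(review_title)
    self.driver.find_element_by_name("review_body").send_keys(review)
    self.driver.find_element_by_css_selector("button[type=submit]").click()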
[ "def delete_prod_review(request, pk):\n review = get_object_or_404(ProductReview, pk=pk)\n product = review.product_id\n if review.user == request.user:\n review.delete()\n sweetify.success(\n request,\n \"Review deleted\",\n icon='success',\n timer='2500',\n toast='true',\n position='center',\n background='#181818',\n )\n return redirect(single_prod, product)", "def delete_review(request, product_slug, product_id, review_id):\n product = get_object_or_404(Product, slug=product_slug, pk=product_id)\n review = get_object_or_404(Review, pk=review_id)\n # make sure user is the review owner\n if request.user.id == review.buyer_id:\n review.delete()\n messages.success(request, 'You have successfully deleted your review')\n return redirect(Product.get_absolute_url(product))\n else:\n # if not product owner, raise 403 forbidden exception and render\n # 403.html template\n messages.error(request, 'You cannot delete this review')\n raise PermissionDenied", "def review_prod(request, pk):\n product = get_object_or_404(Product, pk=pk)\n user = request.user\n if request.method == \"POST\":\n if ProductReview.objects.filter(user=user, product=product).exists():\n form = ProdReviewForm(request.POST)\n sweetify.error(\n request,\n \"Already reviewed this product\",\n icon='info',\n timer='2500',\n toast='true',\n position='center',\n background='#181818',\n )\n return redirect(single_prod, product.pk)\n else:\n form = ProdReviewForm(request.POST)\n if form.is_valid():\n review = form.save(commit=False)\n review.product = product\n form.instance.user = request.user\n review.save()\n sweetify.success(\n request,\n \"Review added, thanking you\",\n icon='success',\n timer='2500',\n toast='true',\n position='top',\n )\n return redirect(single_prod, product.pk)\n else:\n form = ProdReviewForm()\n return render(request, 'prodreview.html', {\n 'form': form, 'product': product.pk\n }\n )", "def delete_review(review_id):\n\n product_reviewed = mongo.db.reviews.find_one({\"_id\": ObjectId(review_id)})\n product_model = product_reviewed['product_model']\n\n mongo.db.products.update_one({\"product_model\": product_model},\n {'$inc': {\"product_reviews\": -1}})\n\n mongo.db.reviews.delete_one({\"_id\": ObjectId(review_id)})\n flash(\"Your review has been Deleted\")\n\n return redirect(url_for('profile', username=session['user']))", "def add_review(request, product_id):\n product = get_object_or_404(Product, pk=product_id)\n\n if request.method == 'POST': \n review_form = ReviewForm(request.POST)\n if review_form.is_valid():\n review = review_form.save(commit=False)\n review.product = product\n review.user = request.user\n review.save()\n messages.info(request, \"Your review has been received! 
Thank you for your interest.\")\n return redirect(reverse('product_detail', args=[product_id]))\n else:\n print(review_form.errors)\n \n return redirect(reverse('product_detail', args=[product_id]))", "def submit_review():\n product = db(db.Product_Table.id == request.args[0]).select().first()\n db.Product_Review.product_reference.default = product\n db.Product_Review.product_reference.represent = lambda id, row: \\\n A(product.product_name, _href=URL('default', 'product_page', args=[product.id]))\n db.Product_Review.product_reference.label = \"Your review describes this product\"\n form = SQLFORM(db.Product_Review)\n \n if form.process().accepted:\n tpr = product.total_possible_ratings\n tr = product.total_ratings\n ts = product.total_stock\n ar = ((tr+form.vars.score)/(tpr+10))*100\n product.update_record(\n total_possible_ratings=tpr + 10, \n total_ratings=form.vars.score + tr,\n avg_rating = ar,\n )\n redirect(URL('default', 'product_page', args=[product.id]))\n logger.info(\"My session is: %r\" % session)\n return dict(form=form)", "def uploadProductReview(request):\n reviewHtml = \"uploadProductReview.html\"\n user = request.user\n orderHistoryInstance = orderHistory.objects.filter(user=user)[0]\n items = orderHistoryInstance.products.all\n context = {\n \"items\": items\n }\n\n if request.method == 'POST':\n productName = request.POST['product']\n review = request.POST['review']\n rate = request.POST['rate']\n rate = int(rate)\n products = orderHistory.objects.filter(user=user)[0].products.all()\n for product in products:\n if product.title == productName:\n productInstance = product\n if productInstance:\n print(\"Instance created\")\n productReviewInstance = productReview(reviewer=user, product=productInstance, review=review,\n reviewScore=rate)\n productReviewInstance.save()\n orderHistory.objects.filter(user=request.user)[0].products.remove(productInstance)\n return render(request, reviewHtml, context=context)\n\n return render(request, reviewHtml, context=context)", "def product_detail(request, slug, product_id):\n # get product object\n product = get_object_or_404(Product, slug=slug, pk=product_id)\n # check if product is active\n if product.active:\n\n # get all reviews for product\n product_reviews = product.reviews.all()\n\n # clock up the number of product views\n product.view_count += 1\n product.save()\n # check if user already reviewed. 
Only 1 review/product allowed\n already_reviewed = False\n if product_reviews.filter(buyer_id=request.user.id).count() >= 1:\n already_reviewed = True\n\n # get a list of user's purchased products\n user_owned_products = Order.objects.purchased_products(request.user)\n\n # load product review form\n form = ReviewForm()\n form_action = Product.get_absolute_url(product)\n form_button = \"Add Review\"\n if request.method == \"POST\":\n if product in user_owned_products:\n if not already_reviewed:\n form = ReviewForm(request.POST)\n if form.is_valid():\n review = form.save(commit=False)\n review.buyer = request.user\n review.product = product\n review.save()\n messages.success(\n request,\n 'You have successfully added a product review'\n )\n # redirect back to the product\n return redirect(Product.get_absolute_url(product))\n else:\n messages.success(\n request, 'You have already reviewed this product')\n return redirect(Product.get_absolute_url(product))\n else:\n messages.success(\n request, \n 'You need to have purchase the product to leave a review'\n )\n return redirect(Product.get_absolute_url(product))\n\n context = {\"product\": product, \"product_reviews\": product_reviews,\n \"form\": form, \"already_reviewed\": already_reviewed,\n \"form_action\": form_action, \"form_button\": form_button,\n \"owned_assets\": user_owned_products}\n\n return render(request, \"product_detail.html\", context)\n\n else:\n messages.error(request, 'Product is no longer available')\n return redirect('products_list')", "def post(self, request, **kwargs):\n product_id = kwargs['pk']\n\n request.session.modified = True\n request.session['reviewed_products'].append(product_id)\n\n form = ReviewForm({'text': request.POST['text']})\n add_product_field = form.save(commit=False)\n if form.is_valid():\n add_product_field.product_id = product_id\n form.save()\n return redirect('product_detail', pk=product_id)\n else:\n return HttpResponse('Заполните форму корректно!')", "def go_product_reviews_page(self, driver, product_id, website):\n try:\n tab_list = driver.find_element_by_id(\"divProductDetailsCustomerReviewOptions\")\n review_tab = tab_list.find_element_by_id(\"tabProductDetailCustomerReviewNav1\")\n review_tab.click()\n except (NoSuchElementException, ElementNotVisibleException):\n pass\n time.sleep(1)", "def edit_review_prod(request, pk):\n review = get_object_or_404(ProductReview, pk=pk)\n product = review.product_id\n if request.method == \"POST\":\n form = ProdReviewForm(request.POST, instance=review)\n if form.is_valid():\n review = form.save(commit=False)\n form.instance.user = request.user\n review.save()\n sweetify.success(\n request,\n \"Review updated\",\n icon='success',\n timer='2500',\n toast='true',\n position='top',\n )\n return redirect(single_prod, product)\n else:\n form = ProdReviewForm(instance=review)\n\n return render(request, 'editprodreview.html', {\n 'form': form, 'product': product\n }\n )", "def edit_review(request, product_slug, product_id, review_id):\n product = get_object_or_404(Product, slug=product_slug, pk=product_id)\n review = get_object_or_404(Review, pk=review_id)\n # make sure user is the review owner\n if request.user.id == review.buyer_id:\n if request.method == \"POST\":\n form = ReviewForm(request.POST, instance=review)\n if form.is_valid():\n form.save()\n messages.success(\n request, 'You have successfully updated your review')\n # redirect to the new product after save\n return redirect(Product.get_absolute_url(product))\n else:\n form = 
ReviewForm(instance=review)\n\n form_action = Review.get_edit_review_url(review)\n form_button = \"Save Changes\"\n\n context = {'form': form, 'product': product, 'form_action': form_action,\n 'form_button': form_button, 'review': review}\n return render(request, 'review_form_edit.html', context)\n\n else:\n # if not product owner, raise 403 forbidden exception and render\n # 403.html template\n messages.error(request, 'You cannot edit this review')\n raise PermissionDenied", "def review(self, review):\n self._review = review", "def add_review(request, product_id):\n\n product = get_object_or_404(Product, id=product_id) # get product \n\n if request.method == 'POST': # validate form\n form = ReviewForm(request.POST)\n if form.is_valid(): \n instance = form.save(commit=False) # form save/form\n # user session\n instance.user = UserProfile.objects.get(user=request.user)\n instance.product = product\n instance.save() # save form instance\n \n messages.success(request, 'Your Review has been Added!')\n return redirect(reverse('product_detail', args=[product.id]))\n else:\n messages.error(\n request,\n 'Failed to add Review. Please ensure the form is valid.'\n )\n else:\n form = ReviewForm()\n\n template = 'reviews_list/add_review.html' # render add reviews page\n context = {\n 'form': form,\n 'product': product,\n }\n\n return render(request, template, context)", "def edit_review(review_id):\n if request.method == \"POST\":\n update_review = {\n \"product_model\": request.form.get(\"product_model\"),\n \"product_review\": request.form.get(\"product_review\"),\n \"created_by\": session[\"user\"]\n }\n mongo.db.reviews.update({\"_id\": ObjectId(review_id)}, update_review)\n flash(\"Your Review Has Been Successfully Updated\")\n return redirect(url_for('profile', username=session[\"user\"]))\n\n review = mongo.db.reviews.find_one({\"_id\": ObjectId(review_id)})\n\n # Page Title\n title = 'Edit-Review'\n return render_template(\"edit_review.html\", review=review, title=title)", "def add_review(request):\n slug = request.POST.get('slug')\n product = Product.active.get(slug=slug)\n\n # errors = True\n content = request.POST.get('content')\n template = \"catalog/product_review.html\"\n if content.strip() != \"\":\n review = ProductReview.objects.create(\n product=product,\n user=request.user,\n content=content\n )\n review.save()\n html = render_to_string(template, {'review': review})\n response = json.dumps({'success': 'True', 'html': html})\n else:\n response = json.dumps({'success': 'False', 'html': \"\"})\n return HttpResponse(response, content_type='application/javascript; charset=utf-8')", "def add_review(self, review):\n review_issue = IParentGetter(review).get_parent_object_of_type(\"Issue\")\n if review_issue is None:\n review_issue = IParentGetter(review).get_parent_object_of_type(\"Volume\")\n if self.current_issue != review_issue:\n if self.current_issue:\n self.finish_issue()\n self.current_issue = review_issue\n self.reviews_xml.append(review.restrictedTraverse(self.xml_view_name)())", "def review():\r\n\r\n # Ensure isbn_number is submitted\r\n if not request.form.get(\"isbn_number\"):\r\n return apology(\"Invalid book\", 403)\r\n\r\n # Ensure review is submitted\r\n if not request.form.get(\"review\"):\r\n return apology(\"Text is not submitted\", 403)\r\n\r\n # Check if book exist, if not error out\r\n\r\n # add review to db\r\n\r\n return redirect(url_for(details, isbn_number=request.form.get(\"isbn_number\")))", "def myProductReview(request):\n reviewHtml = 
\"myproductreview.html\"\n user = request.user\n productNameAndProductReview = []\n products = Product.objects.filter(seller=user)\n for eachProduct in products:\n mySingleProductReview = productReview.objects.filter(product=eachProduct)\n if len(mySingleProductReview) > 0:\n print(mySingleProductReview)\n productNameAndProductReview += mySingleProductReview\n\n context = {\n \"reviews\": productNameAndProductReview\n }\n print(productNameAndProductReview)\n return render(request, reviewHtml, context=context)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Wait for the current page to change
def wait_for_page_change(self, current_page):\n    WebDriverWait(self.driver, 5).until(EC.url_changes(current_page))
[ "def wati_until_page_change(driver, url):\n while driver.current_url == url:\n time.sleep(10)", "def wait_until_lemonade_insurance_page_displayed(self):", "def wait_for_page_load(self):\n old_page = self.browser.find_element_by_tag_name('html')\n yield\n WebDriverWait(self.browser, self.wait).until(\n staleness_of(old_page)\n )", "def wait_for_page_load(self):\n # For right now, just wait for 2 seconds since webdriver returns when loaded.\n # TODO: switch to waiting for network idle\n time.sleep(2)", "def wait_until_insurance_displayed(self):", "def wait(self):\n time.sleep(self.next())", "def second_page_execution(self):\n self.errors_and_correct_input_values_helper()\n self.third_page.wait_for_page()", "def wait_until_confirmation_page_is_displayed(self):\n raise NotImplementedError", "def _go_to_page(self):\n self.salesforce.go_to_setup_home()\n self.eda.wait_for_new_window(\"Home | Salesforce\")\n self.selenium.switch_window(\"Home | Salesforce\")\n self.salesforce.wait_until_loading_is_complete()", "def wait_for_page_changes(driver, page_title_to_change_to, wait_for_seconds=60):\n Log.info(f\"Current page title is {driver.title}\")\n Log.info(f\"Waiting {wait_for_seconds} seconds for page to change to {page_title_to_change_to}\")\n try:\n WebDriverWait(driver, wait_for_seconds).until(expected_conditions.title_contains(page_title_to_change_to))\n except TimeoutException as e:\n Log.error(f\"Timed out with page title at {driver.title}\")\n take_screenshot(driver)\n driver.quit()\n raise TimeoutException(f\"Timed out with page title at {driver.title}. {e}\")\n Log.info(f\"Page has changed to {page_title_to_change_to}\")", "def next_page():\n\tprint('-> \\nClicking next page')\n\told_html = driver.find_element_by_tag_name('html').text\n\tlink = driver.find_element_by_xpath(XPATHS['next_page']) \n\tlink.click()\n\treturn wait_for(old_html)", "def _is_current_page(self):\n self.selenium.wait_until_location_contains(\n \"/view\", timeout=60, message=\"Detail view did not open in 1 min\"\n )\n self.selenium.wait_until_page_contains(\"Service Schedule Name\")", "def wait_page_loaded():\n if CONFIG[\"DefaultDocument\"]:\n driver = browser.get_current_browser()\n if driver is None:\n raise QWebDriverError(\"No browser open. Use OpenBrowser keyword\"\n \" to open browser first\")\n try:\n driver.switch_to_default_content()\n except InvalidSessionIdException:\n CONFIG.set_value(\"OSScreenshots\", True)\n raise QWebBrowserError(\"Browser session lost. Did browser crash?\")\n except (NoSuchWindowException, WebDriverException) as e:\n logger.warn(\n 'Cannot switch to default context, maybe window is closed. 
Err: {}'.format(e))\n if any(s in str(e) for s in FATAL_MESSAGES):\n CONFIG.set_value(\"OSScreenshots\", True)\n raise QWebBrowserError(e)\n driver.switch_to_default_content()\n timeout = CONFIG['XHRTimeout']\n if timeout.lower() == \"none\":\n return\n try:\n xhr.wait_xhr(timestr_to_secs(timeout))\n except(WebDriverException, QWebDriverError) as e:\n logger.info('Unable to check AJAX requests due error: {}'.format(e))", "def third_page_execution(self):\n self.errors_and_correct_input_values_helper()\n self.fourth_page.wait_for_page()", "def wait_step(self):\n pass", "def wait_for(old_html, timeout=60):\n\tstart_time = time.time() \n\twhile time.time() < start_time + timeout: \n\t\tif check_new_page_loaded(old_html): \n\t\t\treturn time.time() - start_time \n\t\telse: \n\t\t\ttime.sleep(0.1) \n\traise Exception('WebPage Load Timeout')", "def first_page_execution(self):\n self.errors_and_correct_input_values_helper(wrong_pattern_error=True)\n self.utility_page.click_next_button()\n self.utility_page.click_next_button()\n self.second_page.wait_for_page()", "def wait(self, state):\n pass", "def wait_for_page_to_load(self):\n self.wait.until(lambda s: self.is_page_loaded())\n return self" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify the mail sent to the mail service
def verify_mail(self):\n    raise NotImplementedError
[ "def test_send_mail(self):\n response = self.client.post(reverse('contact-form'), self.valid_data, follow=True)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].subject, self.valid_data['subject'])\n self.assertEqual(mail.outbox[0].from_email, self.valid_data['sender_email'])\n self.assertEqual(mail.outbox[0].to[1], self.valid_data['sender_email'])", "def test_sendEmailVerification(self, testUser):\n with mail.record_messages() as outbox:\n testUser.send_email_verification()\n assert len(outbox) == 1\n msg = outbox[0]\n assert \"jjones@yahoo.com\" in msg.recipients\n assert msg.subject == 'Ask Your Peeps: Email Verification'\n assert 'To verify your email' in msg.body\n assert 'Dear John' in msg.body", "def test_api_auth_password_reset_mail_send_success(self):\n data = {\"email\": \"testuser@test.com\"}\n response = self.client.post(self.password_reset_url, data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(mail.outbox), 1)\n self.assertContains(response, \"Password reset e-mail has been sent.\")", "def test_email_confirmation_wrong_mail(self):\n res = self.testapp.reset()\n res = self.testapp.get(\n '/verify/NOTEXISTS@shri.de/ABCDEFGHIJ', status=200)\n #print(res.body)\n self.failUnless(\"Please enter your password.\" in res.body)", "def test_email_sent_on_failure(self):\n self._authorize()\n data = {\n 'Subject_Number': '000-1111',\n 'Pin_Code': '1234',\n 'Date_Enrolled': datetime.datetime.now().strftime('%b %d %Y '),\n 'Mobile_Number': '2223334444',\n }\n patient = self.create_xml_patient(data)\n payload = self.create_xml_payload([patient])\n response = self._post(payload)\n self.assertEqual(response.status_code, 500)\n self.assertEqual(len(mail.outbox), 1)", "def fetch_mail(self):\n from interlink import DEFAULT_MAIL_CHECKER\n checker = DEFAULT_MAIL_CHECKER(self)\n try:\n checker.fetch_mail()\n return True\n except:\n traceback.print_exc()\n return False", "def test_django_email(self):\n message = mail.EmailMessage(**self.email_data)\n message.send()\n # this email should not have a UUID\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].subject, self.email_data['subject'])\n self.assertNotIn('X-SMTPAPI', mail.outbox[0].extra_headers)", "def test_mail(self):\n\n assert(not self.do_query('mail'))\n assert(not self.do_query('mail', 'POST'))\n\n retour = self.do_query('mail', 'POST', postParameters={'response_id': ''})\n\n assert(retour)\n assert(retour.json()['result'] != 'Ok')\n\n retour = self.do_query('mail', 'POST', postParameters={'response_id': '42'})\n\n assert(retour)\n assert(retour.json()['result'] == 'Ok')", "def run_mailcheck (self):\n\t\t# TODO: add function in backend to check if all needed things are set\n\t\t# like server/pass/user/... 
- if not, show error\n\t\t# if it is not currently refreshing\n\t\tif not self.__mailbackend.refreshing:\n\t\t\tself.__status = mail.MailCheckStatus.REFRESH \n\t\t\tself.redraw_canvas()\n\t\t\tself.__mailbackend.start()\n\t\treturn False\t# in case we are run as a timeout", "def test_email_resent_verification_email(self):\n user = UserFactory(is_active=False)\n self.assertEmailCount('Ativa a tua conta', 1)\n\n url = reverse('resend-verification')\n data = {'email': user.email}\n \n response = self.client.post(url, data)\n self.assertEmailCount('Ativa a tua conta', 2)", "def test_send_email(self):\n pass", "def test_send_verification_mail(self):\n self.email_verification = {\"user\": {\n \"username\": \"Ronny\",\n \"email\": \"ronnymageh@gmail.com\",\n \"password\": \"myPass123!\"\n }\n }\n response = self.client.post(\n self.reg_url,\n self.email_verification,\n format=\"json\")\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].subject, \"Activate your account.\")\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)", "def test_email_good(get_email, capsys):\n e = get_email\n e.send()\n out, err = capsys.readouterr()\n assert \"Message sent\" in out", "def test_email_good(self):\n ad_rep_leads = list(\n AD_REP_LEAD_FACTORY.create_ad_rep_leads(create_count=2))\n for ad_rep_lead in ad_rep_leads:\n ad_rep_lead.create_datetime -= datetime.timedelta(1)\n ad_rep_lead.save()\n AD_REP_INVITE_TASK.run()\n self.assertEqual(len(mail.outbox), 2)\n ad_rep_lead_1_found = False\n ad_rep_lead_2_found = False\n for email in mail.outbox:\n if ad_rep_leads[0].first_name in email.alternatives[0][0]:\n ad_rep_lead_1_found = True\n self.assertTrue(ad_rep_leads[0].first_name in email.body)\n elif ad_rep_leads[1].first_name in email.alternatives[0][0]:\n ad_rep_lead_2_found = True\n self.assertTrue(ad_rep_leads[1].first_name in email.body)\n self.assertTrue(ad_rep_lead_1_found and ad_rep_lead_2_found)", "def test_send_email(self):\n send_mail('test_message.email', {'context_var': \"1\"}, \"from_email\", [\"to_email\"])\n self.assertEqual(len(mail.outbox), 1)\n email = mail.outbox[0]\n # now check the details are correct\n self.assertEqual(email.to[0], \"to_email\")\n self.assertEqual(email.from_email, \"from_email\")\n self.assertEqual(email.subject, \"Test Message\")\n self.assertEqual(email.template_name, \"test_message.email\")\n self.assertEqual(email.body, \"Test Message Body\")\n self.assertEqual(email.message().get_content_type(), \"multipart/alternative\")\n self.assertIn(\"<p>Test Message HTML</p>\", email.message().as_string())", "def test_send_test_email(self):\n pass", "def test_email(self):\n # No email should be send\n self.assertEqual(len(mail.outbox), 0)\n\n # enable plugin and set mail setting to true\n plugin = registry.plugins.get('inventreecorenotificationsplugin')\n plugin.set_setting('ENABLE_NOTIFICATION_EMAILS', True)\n NotificationUserSetting.set_setting(\n key='NOTIFICATION_METHOD_MAIL',\n value=True,\n change_user=self.user,\n user=self.user,\n method=InvenTreeCoreNotificationsPlugin.EmailNotification.METHOD_NAME\n )\n\n # run through\n self._notification_run(InvenTreeCoreNotificationsPlugin.EmailNotification)\n\n # Now one mail should be send\n self.assertEqual(len(mail.outbox), 1)", "def sendRedemptionEmail():\n return", "def process_email(self, mail_, download_, log_):\n log_.info(mail_['subject'])\n return 'return meaningful result here'" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of the positions of all locations that meet the target_func criteria
def get_x_in_range(self, start, target_func, max_distance, sort_func=None):\n    if sort_func is None:\n        targets = []\n        for x in range(-max_distance, max_distance + 1):\n            for y in range(-max_distance, max_distance + 1):\n                distance = abs(x) + abs(y)\n                if distance > max_distance:\n                    continue\n                pos = Position(start.x + x, start.y + y)\n                if target_func(pos, distance):\n                    targets.append(pos)\n        return targets\n    else:\n        targets = PriorityQueue()\n        for x in range(-max_distance, max_distance + 1):\n            for y in range(-max_distance, max_distance + 1):\n                distance = abs(x) + abs(y)\n                if distance > max_distance:\n                    continue\n                pos = Position(start.x + x, start.y + y)\n                if target_func(pos, distance):\n                    targets.enqueue(sort_func(pos, distance), pos)\n        return targets.to_list()
[ "def locate_target(grid, target):\n\n grid_size = len(grid)\n return [(r, c)\n for r in range(grid_size)\n for c in range(grid_size)\n if grid[r][c] == target]", "def _inter_pos_list(obs, target):\r\n pos_list = [0]\r\n if len(obs) != 0:\r\n pos_list = [i for i, o in enumerate(obs, start=1) if o in target]\r\n if len(pos_list) == 0:\r\n pos_list = [0]\r\n return pos_list", "def get_camera_target_positions(self):\n raise NotImplementedError('Not implemented')", "def extract_lists(classes, target):\r\n coords = list()\r\n xs = list()\r\n ys = list()\r\n\r\n for element in classes:\r\n if classes[element] == target:\r\n xs.append(element[0])\r\n ys.append(element[1])\r\n\r\n coords.append(xs)\r\n coords.append(ys)\r\n return coords", "def get_positions(sample, target_residue):\n \n positions = [] #All positions of target residue in sample sequence\n #Find positions of target residue in sample sequence\n for p in range(0, len(sample)):\n if (sample[p] == target_residue):\n positions.append(p)\n return positions", "def find_all_elements(grid, target):\n \n indices = []\n \n ### This pattern of iterating through row and col indices is very common\n for row_number in range(len(grid)):\n for col_number in range(len(grid[row_number])):\n \n if grid[row_number][col_number] == target:\n indices.append((row_number, col_number))\n \n return indices", "def searchRange(self, nums, target):\n left = self.leftIndex(nums, target)\n right = self.rightIndex(nums,target)\n \n return [left, right]", "def get_all_possible_locations(loc: int, mask: Mask) -> List[str]:\n mask_loc = apply_mask_to_location(loc, mask)\n mask_loc_ary = np.array(list(mask_loc))\n possible_locs = []\n float_values = [[0, 1] for _ in range(mask.num_floats)]\n for float_value in product(*float_values):\n mask_loc_ary = np.array(list(mask_loc))\n mask_loc_ary[mask_loc_ary == \"X\"] = np.array(float_value)\n possible_locs.append(\"\".join(list(mask_loc_ary)))\n return possible_locs", "def find_value(lists, target):\n loc = []\n l = len(lists)\n for i in range(0, l, 1):\n if(lists[i] == target):\n loc.append(i)\n else:\n continue\n return loc", "def _FindLocations(input_api, search_regexes, files_to_check, files_to_skip):\n def FilterFile(affected_file):\n return input_api.FilterSourceFile(\n affected_file,\n files_to_check=files_to_check,\n files_to_skip=files_to_skip)\n\n no_presubmit = r\"// no-presubmit-check\"\n locations = []\n for f in input_api.AffectedSourceFiles(FilterFile):\n for line_num, line in f.ChangedContents():\n for search_regex in search_regexes:\n if (input_api.re.search(search_regex, line) and\n not input_api.re.search(no_presubmit, line)):\n locations.append(\" %s:%d\" % (f.LocalPath(), line_num))\n break\n return locations", "def find_targets(self):\n for asteroid in self.asteroid_map:\n self.targets.append([angle(self.station, asteroid),\n distance(self.station, asteroid),\n asteroid])", "def _find_locs_w_def_units(state):\n locations = []\n def_units = []\n\n for x in range(state.ARENA_SIZE):\n for y in range(state.HALF_ARENA):\n units = state.game_map[[x, y]]\n if units is None or units == []:\n continue\n else:\n for unit in units:\n if unit.unit_type in [DESTRUCTOR, FILTER, ENCRYPTOR]:\n locations.append([x, y])\n def_units.append(unit)\n # gamelib.debug_write(\"Unit --> \", unit)\n\n return zip(locations, def_units)", "def __dfs_search(self, target_location: Room):\n rooms = self.__recursive_dfs_search(target_location,\n self._start_location, [])\n if rooms[-1] != target_location:\n rooms = [None]\n return 
rooms", "def hittable_targets(self):\n return [self.current_level.getPlayer()]", "def get_candidate_locations(cur_location, radius, row_num, col_num):\n cur_y, cur_x = cur_location\n delta = int(radius)\n max_x = cur_x + delta if cur_x + delta < col_num else col_num - 1\n min_x = cur_x - delta if cur_x - delta >= 0 else 0\n max_y = cur_y + delta if cur_y + delta < row_num else row_num - 1\n min_y = cur_y - delta if cur_y - delta >= 0 else 0\n candidates = []\n for x in range(min_x, max_x + 1):\n for y in range(min_y, max_y + 1):\n if distance(cur_x, cur_y, x, y) < radius:\n candidates.append((y, x))\n return candidates", "def _gen_matches(target_units, source_units, stoplist_set, features_size):\n for hits2positions in gen_hits2positions(\n target_units, source_units, stoplist_set, features_size):\n overhits2positions = {\n k: np.array(v) for k, v in hits2positions.items()\n if len(v) >= 2}\n for (t_ind, s_ind), positions in overhits2positions.items():\n yield (t_ind, s_ind, positions)", "def __get_position(self, value, state):\n coords = np.argwhere(state == value).flatten()\n return coords", "def extract_target_pixel_location(self):\n #Respective Image location\n pixel_array = self.imageprepare(self.image_path)\n\n #Select less_than_target color point --> must be calibrated\n #?? Should we use an abstract class here instead of an if statment ??\n if self.color == \"g\":\n less_than_target = .15\n else:\n raise ValueError(\"Unknown color value\")\n\n #Chooses target pixels as well as it's location\n target_pixels = []\n for pixel in enumerate(pixel_array):\n if pixel[1] < less_than_target:\n target_pixels.append(pixel[0])\n\n return target_pixels", "def get_target_points_and_indices(fspace, boundary_ids):\n # if just passed an int, convert to an iterable of ints\n # so that just one case to deal with\n if isinstance(boundary_ids, int):\n boundary_ids = [boundary_ids]\n target_markers = set(boundary_ids)\n\n # Check that bdy ids are valid\n if not target_markers <= set(fspace.mesh().exterior_facets.unique_markers):\n raise ValueError(\n \"The following bdy ids are not exterior facet ids: %s\" %\n (target_markers - set(fspace.mesh().exterior_facets.unique_markers)))\n\n if not target_markers & set(fspace.mesh().exterior_facets.unique_markers):\n raise ValueError(\"No bdy ids are exterior facet ids\")\n\n target_indices = set()\n for marker in target_markers:\n target_indices |= set(\n fspace.boundary_nodes(marker, 'topological'))\n target_indices = np.array(list(target_indices), dtype=np.int32)\n\n target_indices = np.array(target_indices, dtype=np.int32)\n # Get coordinates of nodes\n coords = SpatialCoordinate(fspace.mesh())\n function_space_dim = VectorFunctionSpace(\n fspace.mesh(),\n fspace.ufl_element().family(),\n degree=fspace.ufl_element().degree())\n\n coords = Function(function_space_dim).interpolate(coords)\n coords = np.real(coords.dat.data)\n\n target_pts = coords[target_indices]\n # change from [nnodes][ambient_dim] to [ambient_dim][nnodes]\n target_pts = np.transpose(target_pts).copy()\n return (target_indices, PointsTarget(target_pts))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decorator to modify the docstring of an object. For all provided strings, unused empty lines are removed, and the indentation of the first nonempty line is removed from all lines if possible. This allows better indentation when used as a decorator. "Unused empty lines" means the initial empty lines for ``pre`` and the final empty lines for ``post``.
def docstring(\n    docstring: str = None, *, pre: str = None, post: str = None\n) -> Callable[[U], U]:\n    def edit_docstring(obj: U) -> U:\n        obj.__doc__ = "".join(\n            (\n                clean_docstring(pre or "", unused="pre"),\n                clean_docstring(docstring or (obj.__doc__ or "")),\n                clean_docstring(post or "", unused="post"),\n            )\n        )\n        return obj\n\n    return edit_docstring
[ "def docstring(obj: Any):\n obj_doc = obj.__doc__\n if obj_doc:\n docs = []\n indent, obj_doc = dedent_docs(obj_doc)\n for section, lines in doc_sections(obj_doc):\n section_lines = docstring_section(obj, section, lines, indent)\n if section.lower() == 'parameters':\n try:\n name = obj.name\n section_lines = [\n 'To use this normalizer in the :func:`vlnm.normalize` function, ',\n \"use ``method='{}'``.\".format(name),\n '',\n ''\n ] + section_lines\n except AttributeError:\n pass\n docs.extend(section_lines)\n\n obj.__doc__ = textwrap.indent('\\n'.join(docs), indent)\n else:\n if obj.__name__ in REPLACEMENTS:\n obj.__doc__ = REPLACEMENTS[obj.__name__]\n return obj", "def dedent_docstring(text):\n # Problem is that first line might often have no offset, so might\n # need to be ignored from dedent call\n if text is None:\n return None\n if not text.startswith(' '):\n lines = text.split('\\n')\n if len(lines) == 1:\n # single line, no indentation, nothing to do\n return text\n text2 = '\\n'.join(lines[1:])\n return lines[0] + \"\\n\" + textwrap.dedent(text2)\n else:\n return textwrap.dedent(text)", "def indent_docstring_by_1(s):\r\n # In reST, it's useful to have strings that are similarly-indented.\r\n # If we have a classdoc indented by 2 next to an __init__ funcdoc indented\r\n # by 4, reST doesn't format things nicely. Oh, totally-dedenting doesn't\r\n # format nicely either.\r\n\r\n # Docstring indentation: more gnarly than you'd think:\r\n # http://www.python.org/dev/peps/pep-0257/#handling-docstring-indentation\r\n if not s: return s\r\n # Convert tabs to spaces (following the normal Python rules)\r\n # and split into a list of lines:\r\n lines = s.expandtabs().splitlines()\r\n # Determine minimum indentation (first line doesn't count):\r\n indent = 999\r\n for line in lines[1:]:\r\n stripped = line.lstrip()\r\n if stripped:\r\n indent = min(indent, len(line) - len(stripped))\r\n # Remove indentation (first line is special):\r\n trimmed = [lines[0].strip()]\r\n if indent < 999:\r\n for line in lines[1:]:\r\n trimmed.append(line[indent:].rstrip())\r\n # Strip off trailing and leading blank lines:\r\n while trimmed and not trimmed[-1]:\r\n trimmed.pop()\r\n while trimmed and not trimmed[0]:\r\n trimmed.pop(0)\r\n # Return a single string:\r\n return '\\n'.join([\" \" + t for t in trimmed])", "def prepare_docstring(s, ignore=1):\n lines = s.expandtabs().splitlines()\n # Find minimum indentation of any non-blank lines after ignored lines.\n margin = sys.maxsize\n for line in lines[ignore:]:\n content = len(line.lstrip())\n if content:\n indent = len(line) - content\n margin = min(margin, indent)\n # Remove indentation from ignored lines.\n for i in range(ignore):\n if i < len(lines):\n lines[i] = lines[i].lstrip()\n if margin < sys.maxsize:\n for i in range(ignore, len(lines)):\n lines[i] = lines[i][margin:]\n # Remove any leading blank lines.\n while lines and not lines[0]:\n lines.pop(0)\n # make sure there is an empty line at the end\n if lines and lines[-1]:\n lines.append('')\n return lines", "def docstring_format(*values):\n\n def _decorator_(function):\n function.__doc__ = function.__doc__.format(*values).replace('_', '\\_')\n return function\n\n return _decorator_", "def format_docstring_to_markdown(docstr: str) -> str:\n r = re.compile(r\"\\s\\s+\", re.MULTILINE)\n clean_docstr_list = []\n prev_line = None\n in_code_block = False\n in_param = False\n first_code_indentation = None\n\n # Parse each line to determine if it needs formatting\n for original_line in 
docstr.split(\"\\n\"):\n # Remove excess spaces from lines formed by concatenated docstring lines.\n line = r.sub(\" \", original_line)\n # In some old docstrings, this indicates the start of an example block.\n if line.strip() == \"::\":\n in_code_block = True\n clean_docstr_list.append(\"```\")\n\n # All of our parameter/arg/etc lists start after a line ending in ':'.\n elif line.strip().endswith(\":\"):\n in_param = True\n # This adds a blank line before the header if one doesn't already exist.\n if prev_line != \"\":\n clean_docstr_list.append(\"\")\n # Turn the line into an H4 header\n clean_docstr_list.append(f\"#### {line.strip()}\")\n elif line.strip() == \"\" and prev_line != \"::\":\n # All of our parameter groups end with a line break, but we don't want to exit a parameter block due to a\n # line break in a code block. However, some code blocks start with a blank first line, so we want to make\n # sure we aren't immediately exiting the code block (hence the test for '::' on the previous line.\n in_param = False\n # Add the markdown indicator to close a code block, since we aren't in one now.\n if in_code_block:\n clean_docstr_list.append(\"```\")\n in_code_block = False\n first_code_indentation = None\n clean_docstr_list.append(line)\n else:\n if in_code_block:\n # Determine the number of spaces indenting the first line of code so they can be removed from all lines\n # in the code block without wrecking the hierarchical indentation levels of future lines.\n if first_code_indentation == None and line.strip() != \"\":\n first_code_indentation = len(\n re.match(r\"\\s*\", original_line, re.UNICODE).group(0)\n )\n if line.strip() == \"\" and prev_line == \"::\":\n # If the first line of the code block is a blank one, just skip it.\n pass\n else:\n # Append the line of code, minus the extra indentation from being written in an indented docstring.\n clean_docstr_list.append(original_line[first_code_indentation:])\n elif \":\" in line.replace(\":ref:\", \"\") and in_param:\n # This indicates a parameter. arg. 
or other definition.\n clean_docstr_list.append(f\"- {line.strip()}\")\n else:\n # This indicates a regular line of text.\n clean_docstr_list.append(f\"{line.strip()}\")\n prev_line = line.strip()\n clean_docstr = \"\\n\".join(clean_docstr_list)\n return clean_docstr", "def autodoc_process_docstring(app, what, name, obj, options, lines):\n\n for i, line in enumerate(lines):\n lines[i] = line.replace(\"# noqa\", \"\")", "def new_docstring(self):\n newlines = list(self.lines_before_args)\n if self.args:\n newlines.append(' '*self.arg_indent + self.arg_section_name + ':')\n newlines.extend(self._indent(list(self.args.values()), self.arg_indent+4))\n newlines.append('')\n newlines.extend(self.lines_after_args)\n\n return '\\n'.join(newlines)", "def trim_docstring(docstring):\r\n lines = docstring.expandtabs().splitlines()\r\n\r\n # Find minimum indentation of any non-blank lines after first line.\r\n from sys import maxint\r\n margin = maxint\r\n for line in lines[1:]:\r\n content = len(line.lstrip())\r\n if content:\r\n indent = len(line) - content\r\n margin = min(margin, indent)\r\n\r\n # Remove indentation.\r\n if lines:\r\n lines[0] = lines[0].lstrip()\r\n if margin < maxint:\r\n for i in range(1, len(lines)):\r\n lines[i] = lines[i][margin:]\r\n\r\n # Remove any trailing or leading blank lines.\r\n while lines and not lines[-1]:\r\n lines.pop()\r\n while lines and not lines[0]:\r\n lines.pop(0)\r\n return '\\n'.join(lines)", "def rewriteDocstringForPerl (docstring):\n\n # Get rid of the /** ... */ and leading *'s.\n docstring = docstring.replace('/**', '').replace('*/', '').replace('*', ' ')\n\n # Get rid of indentation\n p = re.compile('^\\s+(\\S*\\s*)', re.MULTILINE)\n docstring = p.sub(r'\\1', docstring)\n\n # Get rid of paragraph indentation not caught by the code above.\n p = re.compile('^[ \\t]+(\\S)', re.MULTILINE)\n docstring = p.sub(r'\\1', docstring)\n\n # Get rid of blank lines.\n p = re.compile('^[ \\t]+$', re.MULTILINE)\n docstring = p.sub(r'', docstring)\n\n # Get rid of the %foo quoting.\n docstring = re.sub('(\\s)%(\\w)', r'\\1\\2', docstring)\n\n # The following are done in pairs because I couldn't come up with a\n # better way to catch the case where @c and @em end up alone at the end\n # of a line and the thing to be formatted starts on the next one after\n # the comment '*' character on the beginning of the line.\n\n docstring = re.sub('@c *([^ ,.:;()/*\\n\\t]+)', r'C<\\1>', docstring)\n docstring = re.sub('@c(\\n[ \\t]*\\*[ \\t]*)([^ ,.:;()/*\\n\\t]+)', r'\\1C<\\2>', docstring)\n docstring = re.sub('@p +([^ ,.:;()/*\\n\\t]+)', r'C<\\1>', docstring)\n docstring = re.sub('@p(\\n[ \\t]*\\*[ \\t]+)([^ ,.:;()/*\\n\\t]+)', r'\\1C<\\2>', docstring)\n docstring = re.sub('@em *([^ ,.:;()/*\\n\\t]+)', r'I<\\1>', docstring)\n docstring = re.sub('@em(\\n[ \\t]*\\*[ \\t]*)([^ ,.:;()/*\\n\\t]+)', r'\\1I<\\2>', docstring)\n\n docstring = docstring.replace('<ul>', '\\n=over\\n')\n docstring = docstring.replace('<li> ', '\\n=item\\n\\n')\n docstring = docstring.replace('</ul>', '\\n=back\\n')\n\n docstring = docstring.replace('@return', 'Returns')\n docstring = docstring.replace(' < ', ' E<lt> ').replace(' > ', ' E<gt> ')\n docstring = re.sub('<code>([^<]*)</code>', r'C<\\1>', docstring)\n docstring = re.sub('<b>([^<]*)</b>', r'B<\\1>', docstring) \n\n return docstring", "def wrapper(func):\n docstring = func.__doc__\n helpdict = parse_docstring(\n docstring, key_symbol=key_symbol,\n description_symbol=description_symbol)\n func.helpdict = helpdict\n # remove markers\n docstring = 
docstring.replace(key_symbol, '')\n func.__doc__ = docstring.replace(description_symbol, '')\n return func", "def docstring_or_continued(line, i, obj): # v1.0.0\n global _in_doc_string, _in_continued_line, _in_multiline_string\n if _in_doc_string:\n if '\"\"\"' in line:\n _in_doc_string = False\n return True\n python_token_line = obj.extra['python_token_lines'][i-1]\n if not _in_continued_line and line.startswith('\"\"\"'):\n if i == 1 or has_python_token(i-1, obj, 'key', 'def'):\n if not '\"\"\"' in line[3:]:\n _in_doc_string = True # Multi-line doc string\n return True\n elif line.endswith('\\\\'):\n if _in_continued_line:\n return True\n _in_continued_line = True\n elif has_python_token(i, obj, 'str', ('\"\"\"', \"'''\"), starting_with=True): # Multi-line string\n if not has_python_token(i, obj, 'str', ('\"\"\"', \"'''\"), ending_with=True): # Doesn't end on same line\n if _in_multiline_string:\n _in_continued_line = False\n _in_multiline_string = False\n return True\n _in_multiline_string = True\n if _in_continued_line:\n _in_continued_line = False\n return True\n elif _in_multiline_string:\n if has_python_token(i, obj, 'str', ('\"\"\"', \"'''\"), ending_with=True): # End of multi-line string\n _in_multiline_string = False\n return True\n elif _in_continued_line:\n _in_continued_line = False\n return True\n \n return False", "def describe_docstring(doc_string, indentation=None):\n text = escape_triple_quotes(doc_string)\n text = u'\"\"\"\\n' + text + '\\n\"\"\"\\n'\n\n if indentation:\n text = indent(text, indentation)\n return text", "def dedent_docs(docs: str, indent: str = None):\n _indent = indent or get_doc_indent(docs)\n if not docs.startswith(_indent):\n docs = '\\n' + _indent + docs\n if indent is not None:\n return textwrap.dedent(docs)\n return _indent, textwrap.dedent(docs)", "def docstring_section(obj: Any, section: str, lines: List[str], indent: str):\n docs = []\n for line in lines:\n replaced = False\n for pattern, replacer in PATTERNS:\n match = re.match(pattern, line.strip())\n if match:\n key = match.groups()[0]\n context = type(obj).__name__.lower()\n if section:\n context += '.{}'.format(section).lower()\n replacements = REPLACEMENTS.get(\n context, REPLACEMENTS.get('default'))\n if replacements:\n replacement = replacements.get(key)\n if replacement:\n docs.extend(replacer(key, replacement, indent))\n replaced = True\n break\n if not replaced:\n docs.append(line)\n return docs", "def docstring_parameter(*args, **kwargs):\n\n def dec(obj):\n obj.__doc__ = obj.__doc__.format(*args, **kwargs)\n return obj\n\n return dec", "def format_docstring(self, docstring):\n min_indent = _get_minimum_indentation(docstring)\n for param_name, param_doc in self.items():\n param_doc = textwrap.indent(param_doc, min_indent + \" \" * 4)\n if not param_doc.startswith(\"\\n\"):\n param_doc = \"\\n\" + param_doc\n docstring = _replace_placeholder(docstring, param_name, param_doc)\n\n return docstring", "def expand_docstring(**kwargs):\n def _fn_wrapped(fn):\n \"\"\"Original function with modified `__doc__` attribute.\"\"\"\n doc = _trim(fn.__doc__)\n for k, v in six.iteritems(kwargs):\n # Capture each @{k} reference to replace with v.\n # We wrap the replacement in a function so no backslash escapes\n # are processed.\n pattern = r'@\\{' + str(k) + r'\\}'\n doc = re.sub(pattern, lambda match: v, doc) # pylint: disable=cell-var-from-loop\n fn.__doc__ = doc\n return fn\n return _fn_wrapped", "def format_docstring(param_docs):\n param_docs = ParamDocs(param_docs)\n\n def decorator(func):\n 
func.__doc__ = param_docs.format_docstring(func.__doc__)\n return func\n\n return decorator" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Class decorator to autoformat string arguments in the __init__ method. Modifies the class __init__ method in place by wrapping it. The wrapped class will call the format() method of the arguments specified in `params` that exist in the original signature, passing all other arguments as a dictionary to str.format().
def autoformat(\n    cls: Type[U] = None,\n    /,\n    params: Union[str, Iterable[str]] = (  # pylint: disable=unsubscriptable-object\n        "message",\n        "msg",\n    ),\n):\n    if isinstance(params, str):\n        params = (params,)\n    if cls is None:\n        return functools.partial(autoformat, params=params)\n    orig_init = cls.__init__\n    signature = inspect.signature(orig_init)\n    params = signature.parameters.keys() & set(params)\n\n    @functools.wraps(orig_init)\n    def init(*args, **kwargs):\n        bounds = signature.bind(*args, **kwargs)\n        bounds.apply_defaults()\n        pre_formatted = {\n            name: bounds.arguments.pop(name)\n            for name in params\n            if name in bounds.arguments\n        }\n        formatted = {\n            name: string.format(**bounds.arguments)\n            for name, string in pre_formatted.items()\n        }\n        for name, arg in formatted.items():\n            bounds.arguments[name] = arg\n        return orig_init(*bounds.args, **bounds.kwargs)\n\n    # init.__signature__ = signature\n    setattr(cls, "__init__", init)\n    return cls
[ "def format(self, *args, **kwargs) -> String:\n pass", "def format(*args, **kwargs):\n\n pass", "def create_initstring(classname, base, method, excludes):\n from inspect import getargspec\n\n # creates line: def __init__(self, ...):\n # keywords are deduced from arguments with defaults.\n # others will not be added.\n args = getargspec(method)\n result = \"def __init__(self\"\n if args.defaults is not None: \n nargs = len(args.args) - len(args.defaults)\n for key, value in zip(args.args[nargs:], args.defaults):\n if key in excludes: continue\n result += \", {0}={1!r}\".format(key, value)\n result += \", copy=None, **kwargs):\\n\"\n\n # adds standard doc string.\n result +=\\\n \" \\\"\\\"\\\" Initializes {0} instance.\\n\\n\" \\\n \" This function is created automagically from\\n\" \\\n \" :py:func:`{1.__module__}.{1.func_name}`. Please see that function\\n\" \\\n \" for the description of its parameters.\\n\\n\" \\\n \" :param {2.__name__} copy:\\n\" \\\n \" Deep-copies attributes from this instance to the new (derived)\\n\" \\\n \" object. This parameter makes easy to create meta-functional from\\n\"\\\n \" the most basic wrappers.\\n\" \\\n \" \\\"\\\"\\\"\\n\".format(classname, method, base)\n\n # creates line: from copy import deepcopy\n # used by the copy keyword argument below.\n result += \" from copy import deepcopy\\n\"\n # creates line: super(BASECLASS, self).__init__(...)\n # arguments are taken from BASECLASS.__init__\n result += \" super(self.__class__, self).__init__(\"\n initargs = getargspec(base.__init__)\n if initargs.args is not None and len(initargs) > 1:\n # first add args without defaults.\n # fails if not present in method's default arguments.\n ninitargs = len(initargs.args) - len(initargs.defaults)\n for i, key in enumerate(initargs.args[1:ninitargs]):\n if key in excludes: \n raise Exception('Cannot ignore {1} when synthesizing {0}.'.format(classname, key))\n if key not in args.args[nargs:]:\n raise Exception('Could not synthesize {0}. Missing default argument.'.format(classname))\n result += \", {0}\".format(key)\n if initargs.defaults is not None and args.defaults is not None: \n # then add keyword arguments, ignoring thosse that are not in method\n for i, (key, value) in enumerate(zip(initargs.args[nargs:], initargs.defaults)):\n if key in args.args[ninitargs:]: result += \", {0} = {0}\".format(key)\n # add a keyword dict if present in initargs\n if initargs.keywords is not None or initargs.defaults is not None: result += ', **kwargs'\n result += ')\\n\\n'\n # deals with issues on how to print first argument.\n result = result.replace('(, ', '(')\n\n # create lines: self.attr = value\n # where attr is something in method which is not in baseclass.__init__\n if args.defaults is not None: \n for key, value in zip(args.args[nargs:], args.defaults):\n if key in excludes or key in initargs.args: continue\n result += \" self.{0} = {0}\\n\".format(key)\n\n # create lines which deep-copies base-class attributes to new derived attributes,\n # eg, using copy. 
Does not include previously set parameters and anything in\n # excludes.\n avoid = set(initargs.args[:ninitargs]) | set(args.args[nargs:]) | set(excludes)\n result += \" if copy is not None:\\n\" \\\n \" avoid = {0!r}\\n\" \\\n \" for key, value in copy.__dict__.iteritems():\\n\" \\\n \" if key not in avoid and key not in kwargs:\\n\" \\\n \" setattr(self, key, deepcopy(value))\\n\" \\\n .format(avoid)\n return result", "def params(cls):\n def method_decorator(method):\n @wraps(method)\n def wrapper(self, *args):\n return method(self, *map(cls, args))\n return wrapper\n return method_decorator", "def setup_from_format(self, *args, **kwargs):\n if \"format\" in kwargs:\n # set the format and call the according init_from_<format> method\n # which initializes the instance with the given vaules (from data)\n # e.g. Model(format=json, data={data})\n f = getattr(self, \"init_from_\" + kwargs[\"format\"], None)\n if f:\n f(kwargs)", "def __call__(self, cls):\n cls_dict = dict(cls.__dict__)\n\n def wrap_str(w_self):\n return self.pformat(w_self)\n\n cls_dict['__repr__'] = wrap_str\n return type(cls.__name__, cls.__bases__ if hasattr(cls, \"__bases__\") else (), cls_dict)", "def __init__(self, class_name, param_dict):\n super(Parameters, self).__init__(class_name, param_dict)", "def __init__(self, *args, **kwargs):\n super(Entry, self).__init__(args[0])\n self._format_func = super(Entry, self).__repr__\n if 'format_func' in kwargs:\n self._format_func = kwargs['format_func']", "def __init__(self, fmt, datefmt=None):\n logging.Formatter.__init__(self, fmt, datefmt)", "def formatter(func, _, params):\n pa = params.args\n format_args = [pa[i] for i in range(0, len(pa)) if i in args]\n\n return func.__doc__.format(*format_args)", "def make_class_decor_paramless(*filters):\n\n def __decor(func_decor):\n return make_decor_paramless(\n make_class_decor_params(*filters)(\n make_decor_params(func_decor)\n )\n )\n\n return __decor", "def __init__(self, format=None, date_format=None, max_width=79):\n self.wrapper = textwrap.TextWrapper(width=max_width,\n subsequent_indent=' ')\n logging.Formatter.__init__(self, format, date_format)", "def __init__(self, *args):\n _snap.TStrTAttrPr_swiginit(self, _snap.new_TStrTAttrPr(*args))", "def __init__(self, *args):\n _snap.TStrPrStrH_swiginit(self, _snap.new_TStrPrStrH(*args))", "def __init__(self, *args):\n _snap.TStrStrPrH_swiginit(self, _snap.new_TStrStrPrH(*args))", "def __init__(self, *args):\n _snap.TStr_swiginit(self, _snap.new_TStr(*args))", "def __init__(self, *args):\n _snap.TStrPr_swiginit(self, _snap.new_TStrPr(*args))", "def __init__(self, *args):\n _snap.TRStr_swiginit(self, _snap.new_TRStr(*args))", "def __init__(self, propertiesDict):\n\t\tself.name = propertiesDict.pop('name', None)\n\n\t\tsuper(BaseLogFormatter, self).__init__(\n\t\t\tpropertiesDict.pop('messagefmt', DEFAULT_FORMAT),\n\t\t\tpropertiesDict.pop('datefmt', None) )\n\t\t\n\t\tif propertiesDict: raise Exception('Unknown formatter option(s) specified: %s'%', '.join(list(propertiesDict.keys())))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Removes a parameter from a Signature object. If param is an int, remove the parameter at that position; otherwise remove any parameter with that name.
def _sig_without(sig: inspect.Signature, param: Union[int, str]) -> inspect.Signature:\n    if isinstance(param, int):\n        params = list(sig.parameters.values())\n        params.pop(param)\n    else:\n        params = [p for name, p in sig.parameters.items() if name != param]\n    return sig.replace(parameters=params)
[ "def remove(self, param):\n if param in self.params:\n self.params.remove(param)\n if param in self.Parser.params:\n self.Parser.params.remove(param)", "def removeParameter(self, *args):\n return _libsbml.KineticLaw_removeParameter(self, *args)", "def param_remove(params, arg):\n d = params.copy()\n if arg in d:\n del d[arg]\n return d.urlencode()", "def delete_parameter(self, Name: str) -> Dict:\n pass", "def removeParameter(self, *args):\n return _libsbml.Model_removeParameter(self, *args)", "def remove_param(self, step_id, name):\n if step_id in self._params:\n del self._params[step_id][name]", "def RemoveParameter(self, name):\n callResult = self._Call(\"RemoveParameter\", name)\n\n if callResult is None:\n return None\n\n return callResult", "def delete_parametertype(request, parametertype, **_kwargs):\n pass", "def delete_parameter(request, parameter, **_kwargs):\n pass", "def removeParameter(self, name):\r\n try:\r\n self._parameters.pop(name).destroy()\r\n except KeyError:\r\n raise InvalidRequest('Can not remove a non existent node '\r\n \"'{0}' from the container.\".format(name))", "def remove_parameter(self, obj):\n try:\n index = self.parameters.index(obj)\n self.parameters.pop(index)\n return True\n except ValueError:\n # the object cannot be removed because it is not present\n logger.warn(\"Parameter {0} not present, can't be remove from the list\".format(obj))\n return False", "def removeParameter(self, name):\n try:\n self._parameters.pop(name).destroy()\n except KeyError:\n raise InvalidRequest('Can not remove a non existent node '\n \"'{0}' from the container.\".format(name))", "def remove_parameters(self):\n self.parameters = []", "def removeLocalParameter(self, *args):\n return _libsbml.KineticLaw_removeLocalParameter(self, *args)", "def remove(self, p_int, p_int_1=None): # real signature unknown; restored from __doc__ with multiple overloads\n pass", "def remove_argument(self, name):\n if name not in self.argnames:\n return\n del self.argnames[name]\n for i, arg in enumerate(self.args):\n if arg.name == name:\n break\n else:\n return # pragma: nocover\n del self.args[i]", "def remove_constraints(self, parameter):\r\n self._parameters[parameter] = []", "def removeParameter(self):\n row = self.ui.parameterList.currentRow()\n\n if row != -1:\n self.ui.parameterList.removeRow(row)", "def unregisterParameter(self, paramName):\n if not self.viewRegistered: return\n self.__lock.acquire()\n try:\n if paramName in self.__paramData:\n LOG(\"Unregistering parameter: \" + paramName)\n # May raise a HiflyException\n self.tmPro.unregisterTM( paramName )\n except:\n LOG(\"Error unregistering parameter \" + repr(paramName))\n finally:\n del self.__paramData[paramName]\n self.__lock.release()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Merges two Signature objects, dropping the return annotations.
def _sig_merge(lsig: inspect.Signature, rsig: inspect.Signature) -> inspect.Signature:\n    return inspect.Signature(\n        sorted(\n            list(lsig.parameters.values()) + list(rsig.parameters.values()),\n            key=lambda param: param.kind,\n        )\n    )
[ "def ir_merge(target, other):\n if not target[\"params\"]:\n target[\"params\"] = other[\"params\"]\n elif other[\"params\"]:\n target_params, other_params = map(itemgetter(\"params\"), (target, other))\n\n for name in other_params.keys() & target_params.keys():\n if not target_params[name].get(\"doc\") and other_params[name].get(\"doc\"):\n target_params[name][\"doc\"] = other_params[name][\"doc\"]\n if target_params[name].get(\"typ\") is None and other_params[name].get(\"typ\"):\n target_params[name][\"typ\"] = other_params[name][\"typ\"]\n if (\n target_params[name].get(\"default\") in (None, \"None\", NoneStr)\n and \"default\" in other_params[name]\n ):\n target_params[name][\"default\"] = other_params[name][\"default\"]\n\n for name in other_params.keys() - target_params.keys():\n target_params[name] = other_params[name]\n\n target[\"params\"] = target_params\n\n if \"return_type\" not in (target.get(\"returns\") or iter(())):\n target[\"returns\"] = other[\"returns\"]\n elif other[\"returns\"]:\n target[\"returns\"][\"return_type\"] = _join_non_none(\n target[\"returns\"][\"return_type\"], other[\"returns\"][\"return_type\"]\n )\n if \"return_type\" in target.get(\"params\", frozenset()):\n target[\"returns\"][\"return_type\"] = _join_non_none(\n target[\"returns\"][\"return_type\"], target[\"params\"].pop(\"return_type\")\n )\n\n other_internal = other.get(\"_internal\", {})\n if other_internal.get(\"body\"):\n if \"_internal\" in target:\n # Merging internal bodies would be a bad idea IMHO\n target[\"_internal\"].update(other_internal)\n else:\n target[\"_internal\"] = other_internal\n\n return target", "def merge_two_calls(self) -> None:", "def merge_annot(self, other, overwrite=True):\n for k,v in other.get_annotations():\n if k not in self.annotation or overwrite:\n self.add_annotation(k,v)", "def merge(self, other):\n raise Exception()", "def test_mergedFunctionBehavesLikeMergeTarget(self):\n foo_object = object()\n bar_object = object()\n\n def foo():\n return foo_object\n\n def bar(x, y, ab, c=10, *d, **e):\n (a, b) = ab\n return bar_object\n\n baz = util.mergeFunctionMetadata(foo, bar)\n self.assertIs(baz(1, 2, (3, 4), quux=10), bar_object)", "def merge(): #Status: WIP\r\n pass", "def merge(out, left, right, separator=\"\"):\n b = util.read_annotation(right)\n OUT = {}\n\n for key_a, val_a in util.read_annotation_iteritems(left):\n val = [x for x in [val_a, b[key_a]] if x != separator]\n OUT[key_a] = separator.join(list(val)) if val else separator\n\n util.write_annotation(out, OUT)", "def test_ir_merge_same_len_returns(self) -> None:\n target = {\n \"params\": OrderedDict(),\n \"returns\": OrderedDict(\n (\n (\n \"return_type\",\n {\"typ\": \"str\"},\n ),\n )\n ),\n }\n other = {\n \"params\": OrderedDict(),\n \"returns\": OrderedDict(\n (\n (\n \"return_type\",\n {\"doc\": \"so stringy\"},\n ),\n )\n ),\n }\n self.assertDictEqual(\n ir_merge(deepcopy(target), other),\n {\n \"params\": OrderedDict(),\n \"returns\": OrderedDict(\n ((\"return_type\", {\"typ\": \"str\", \"doc\": \"so stringy\"}),)\n ),\n },\n )", "def variant_add(v1: dict, v2: dict) -> Dict[str, Any]:\n left = set(v1.keys()).difference(v2.keys())\n right = set(v2.keys()).difference(v1.keys())\n joint = set(v1.keys()) & set(v2.keys())\n\n # deal with __migrator: ordering\n if \"__migrator\" in v2:\n ordering = v2[\"__migrator\"].get(\"ordering\", {})\n operation = v2[\"__migrator\"].get(\"operation\")\n # handle special operations\n if operation:\n return VARIANT_OP[operation](v1, v2)\n else:\n ordering = 
{}\n\n # special keys\n if \"__migrator\" in right:\n right.remove(\"__migrator\")\n\n # special keys in joint\n special_variants = {}\n if \"pin_run_as_build\" in joint:\n # For run_as_build we enforce the migrator's pin\n # TODO: should this just be a normal ordering merge, favoring more exact pins?\n joint.remove(\"pin_run_as_build\")\n special_variants[\"pin_run_as_build\"] = {\n **v1[\"pin_run_as_build\"],\n **v2[\"pin_run_as_build\"],\n }\n\n if \"zip_keys\" in joint:\n # zip_keys is a bit weird to join on as we don't have a particularly good way of identifying\n # a block. Longer term having these be named blocks would make life WAY simpler\n # That does require changes to conda-build itself though\n #\n # A zip_keys block is deemed mergeable if zkₛ,ᵢ ⊂ zkₘ,ᵢ\n zk_out = []\n zk_l = {frozenset(e) for e in v1[\"zip_keys\"]}\n zk_r = {frozenset(e) for e in v2[\"zip_keys\"]}\n\n for zk_r_i in sorted(zk_r, key=lambda x: -len(x)):\n for zk_l_i in sorted(zk_l, key=lambda x: -len(x)):\n # Merge the longest common zk first\n if zk_l_i.issubset(zk_r_i):\n zk_l.remove(zk_l_i)\n zk_r.remove(zk_r_i)\n zk_out.append(zk_r_i)\n break\n else:\n # Nothing to do\n pass\n\n zk_out.extend(zk_l)\n zk_out.extend(zk_r)\n zk_out = sorted(\n [sorted(zk) for zk in zk_out], key=lambda x: (len(x), str(x))\n )\n\n joint.remove(\"zip_keys\")\n special_variants[\"zip_keys\"] = zk_out\n\n joint_variant = {}\n for k in joint:\n v_left, v_right = ensure_list(v1[k]), ensure_list(v2[k])\n joint_variant[k] = variant_key_add(\n k, v_left, v_right, ordering=ordering.get(k, None)\n )\n\n out = {\n **toolz.keyfilter(lambda k: k in left, v1),\n **toolz.keyfilter(lambda k: k in right, v2),\n **joint_variant,\n **special_variants,\n }\n\n return out", "def merge(self, other):\n self._moments = merge_pqc([self, other])._moments\n self._parameters = sp.symarray(self.parameter_symbol, len(self.symbols))\n if self.flatten_circuit:\n self.flatten()", "def _merge_two(self, obj1, obj2):\r\n for uniq_ident in obj2.keys():\r\n if (uniq_ident not in obj1) \\\r\n or (obj1[uniq_ident]['modified'] \\\r\n < obj2[uniq_ident]['modified']):\r\n obj1[uniq_ident] = obj2[uniq_ident]\r\n\r\n return obj1 # self._dict_to_list(obj1)\r", "def _merge_tensor_signatures(self, signatures):\n sorted_update = []\n if self._num_signature_dimensions() > 1:\n signature_indices = self._signature_types()\n for _, val in sorted(signatures.items(),\n key=lambda item: signature_indices[item[0]]):\n sorted_update.append(val)\n updates = array_ops_stack.stack(\n sorted_update, axis=0, name='merge_single_op_signatures')\n elif self._num_signature_dimensions() == 1:\n # Avoid stack operation if there is only a single signature.\n (_, val), = signatures.items()\n updates = val\n else:\n raise ValueError('Cannot merge 0 signatures. 
Check the value passed for '\n 'flag --signatures.')\n return updates", "def merge(self, first, second):\n return second if self.failed(first) else first", "def merge(self, *other):\n # Compute union of Fingerprints\n union = set().union(self, *other)\n # Create new fingerprint from union\n result = super(Fingerprint, type(self)).__new__(type(self), union)\n # Set n_flows to combination of self and other\n result.__setattr__('n_flows', self.n_flows + sum(o.n_flows for o in other))\n # Return result\n return result", "def merge(*args):\n from ..operators.observable.merge import merge_\n return merge_(*args)", "def test_instanceDictionaryIsMerged(self):\n\n def foo():\n pass\n\n foo.a = 1\n foo.b = 2\n\n def bar():\n pass\n\n bar.b = 3\n bar.c = 4\n\n baz = util.mergeFunctionMetadata(foo, bar)\n self.assertEqual(foo.a, baz.a)\n self.assertEqual(foo.b, baz.b)\n self.assertEqual(bar.c, baz.c)", "def merge(self, other: AssertionVerificationTrace) -> None:\n for pos, assertions in other.failed.items():\n self.failed[pos].update(assertions)\n for pos, assertions in other.error.items():\n self.error[pos].update(assertions)", "def merge_extras(extras1, extras2):\n if not extras1:\n return extras2\n if not extras2:\n return extras1\n return tuple(sorted(set(extras1) | set(extras2)))", "def canBeMergedWith(self, other):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Class decorator to automatically support __post_init__() on classes. This is useful for attr.s decorated classes, because __attrs_post_init__() doesn't support additional arguments. This decorator wraps the class __init__ in a new function that accepts merged arguments, and dispatches them to __init__ and then __post_init__().
def post_init(cls: Type[U]) -> Type[U]:\n    if not isinstance(cls, type):\n        raise TypeError("Can only decorate classes")\n    if not hasattr(cls, "__post_init__"):\n        raise TypeError("The class must have a __post_init__() method")\n    # Ignore the first argument which is the "self" argument\n    sig = init_sig = _sig_without(inspect.signature(cls.__init__), 0)\n    previous = [(cls, "__init__", sig)]\n    for parent in reversed(cls.__mro__):\n        if hasattr(parent, "__post_init__"):\n            post_sig = _sig_without(\n                inspect.signature(getattr(parent, "__post_init__")), 0\n            )\n            try:\n                sig = _sig_merge(sig, post_sig)\n            except Exception as err:\n                # find the incompatibility\n                for parent, method, psig in previous:\n                    try:\n                        _sig_merge(psig, post_sig)\n                    except Exception:\n                        break\n                else:\n                    raise TypeError(\n                        "__post_init__ signature is incompatible with the class"\n                    ) from err\n                raise TypeError(\n                    f"__post_init__() is incompatible with {parent.__qualname__}{method}()"\n                ) from err\n            # No exception\n            previous.append((parent, "__post_init__", post_sig))\n    # handles type annotations and defaults\n    # inspired by the dataclasses modules\n    params = list(sig.parameters.values())\n    localns = (\n        {\n            f"__type_{p.name}": p.annotation\n            for p in params\n            if p.annotation is not inspect.Parameter.empty\n        }\n        | {\n            f"__default_{p.name}": p.default\n            for p in params\n            if p.default is not inspect.Parameter.empty\n        }\n        | cls.__dict__\n    )\n    for i, p in enumerate(params):\n        if p.default is not inspect.Parameter.empty:\n            p = p.replace(default=Variable(f"__default_{p.name}"))\n        if p.annotation is not inspect.Parameter.empty:\n            p = p.replace(annotation=f"__type_{p.name}")\n        params[i] = p\n    new_sig = inspect.Signature(params)\n    # Build the new __init__ source code\n    self_ = "self" if "self" not in sig.parameters else "__post_init_self"\n    init_lines = [\n        f"def __init__({self_}, {_sig_to_def(new_sig)}) -> None:",\n        f"__original_init({self_}, {_sig_to_call(init_sig)})",\n    ]\n    for parent, method, psig in previous[1:]:\n        if hasattr(parent, "__post_init__"):\n            if parent is not cls:\n                init_lines.append(\n                    f"super({parent.__qualname__}, {self_}).{method}({_sig_to_call(psig)})"\n                )\n            else:\n                init_lines.append(f"{self_}.{method}({_sig_to_call(psig)})")\n    init_src = "\n        ".join(init_lines)\n    # Build the factory function source code\n    local_vars = ", ".join(localns.keys())\n    factory_src = (\n        f"def __make_init__(__original_init, {local_vars}):\n"\n        f"    {init_src}\n"\n        "    return __init__"\n    )\n    # Create new __init__ with the factory\n    globalns = inspect.getmodule(cls).__dict__\n    ns: dict[str, Any] = {}\n    exec(factory_src, globalns, ns)\n    init = ns["__make_init__"](cls.__init__, **localns)\n    self_param = inspect.Parameter(self_, inspect.Parameter.POSITIONAL_ONLY)\n    init.__signature__ = inspect.Signature(\n        parameters=[self_param] + list(sig.parameters.values()), return_annotation=None\n    )\n    setattr(cls, "__init__", init)\n    return cls
[ "def __init_subclass__(cls, **kwargs):\n if cls.__init__ != object.__init__:\n old_init = cls.__init__\n\n def new_init(self, *args, **kwargs):\n _cls = type(self)\n if getattr(_cls, _mangle(_cls, \"initialized\")) is not True:\n old_init(self, *args, **kwargs)\n setattr(_cls, _mangle(_cls, \"initialized\"), True)\n\n forward_func(new_init, old_init)\n cls.__init__ = new_init", "def kwargs_to_parent(cls):\n original_init = cls.__init__\n\n def new_init(self, *args, **kwargs):\n # pass only those kwargs to the dataclass which are expected\n dataclass_kwargs = {\n key: value\n for key, value in kwargs.items()\n if key in [f.name for f in dataclasses.fields(cls)]\n }\n\n # pass args and kwargs to the dataclasses' __init__\n original_init(self, *args, **dataclass_kwargs)\n\n # update kwargs with default arguments\n kwargs.update(dataclasses.asdict(self))\n\n # Pass only those arguments to solph component's __init__ that\n # are expected.\n init_expected_args = list(\n inspect.signature(super(cls, self).__init__).parameters\n )\n\n kwargs_expected = {\n key: value\n for key, value in kwargs.items()\n if key in init_expected_args\n }\n\n kwargs_unexpected = {\n key: value\n for key, value in kwargs.items()\n if key not in init_expected_args\n }\n\n if \"custom_attributes\" in init_expected_args:\n kwargs_expected[\"custom_attributes\"] = kwargs_unexpected\n\n if kwargs_unexpected and \"custom_attributes\" not in init_expected_args:\n warnings.warn(\n f\"No custom_attributes in parent class {cls.__mro__[1]}\"\n )\n\n super(cls, self).__init__(\n **kwargs_expected,\n )\n\n if not kwargs.get(\"build_solph_components\") is False:\n self.build_solph_components()\n\n cls.__init__ = new_init\n return cls", "def froze_init(cls):\n\n def __setattr__(self, key, value):\n if key[0] != \"_\":\n if self.__frozen:\n if key not in self.__allowed_attributes:\n raise TypeError(\"this can't be setted : %s\" % key)\n else:\n self.__allowed_attributes.add(key)\n\n object.__setattr__(self, key, value)\n\n def init_decorator(func):\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n self.__frozen = False\n self.__allowed_attributes = set()\n func(self, *args, **kwargs)\n self.__frozen = True\n\n return wrapper\n\n cls.__setattr__ = __setattr__\n cls.__init__ = init_decorator(cls.__init__)\n\n return cls", "def _autoargs_decorate(func, # type: Callable\n func_sig, # type: Signature\n att_names # type: Iterable[str]\n ):\n @wraps(func)\n def init_wrapper(self, *args, **kwargs):\n\n # bind arguments with signature: not needed anymore in nominal case since we use `makefun.wraps`\n # bound_values = func_sig.bind(self, *args, **kwargs)\n # apply_defaults(bound_values)\n\n # Assign to self each of the attributes\n need_introspect = False\n i = -1\n for i, att_name in enumerate(att_names):\n try:\n setattr(self, att_name, kwargs[att_name])\n except KeyError:\n # this may happen when the att names are BEFORE a var positional\n # Switch to introspection mode\n need_introspect = True\n break\n if need_introspect and i >= 0:\n bound_values = func_sig.bind(self, *args, **kwargs)\n apply_defaults(bound_values)\n # noinspection PyUnboundLocalVariable\n arg_dict = bound_values.arguments\n for att_name in att_names[i:]:\n setattr(self, att_name, arg_dict[att_name])\n\n # finally execute the constructor function\n return func(self, *args, **kwargs)\n\n # return wrapper\n return init_wrapper", "def __call__(self, cls: object, *args: Any, **kwargs: dict) -> Callable:\n self.__class__.__name__ = cls.__name__\n\n class Wrapped(cls, 
*args, **kwargs):\n \"\"\"\n Wrap Decorated Callable, Get Configurations and Create required\n attributes in the Decorated Callable. Finally Call the Callable\n :param cls: The Class Object to being decorated\n :param args: Additional arguments\n :param kwargs: Additional Keyword arguments\n :return: The Wrapped Callable\n \"\"\"\n config = self.config\n\n def __init__(self, *args: Any, **kwargs: dict):\n \"\"\"\n Override Classes __init__ method. Create the Configuration\n attributes. Call the Decorated class __init__ method.\n :param cls: The Class Object to being decorated\n :param args: Additional arguments\n :param kwargs: Additional Keyword arguments\n \"\"\"\n for key in self.config.keys():\n if isinstance(self.config[key], dict):\n setattr(self, key, Map(self.config[key]))\n else:\n setattr(self, key, self.config[key])\n cls.__init__(self, *args, **kwargs)\n\n return Wrapped", "def patch_class(cls):\n from ..entrypoints import ijit\n\n if '__init__' not in vars(cls):\n names = [name for name, type in cls.layout]\n cls.__init__ = ijit(fabricate_init(names))", "def listener(cls):\n func = cls.__init__\n\n # Wraps the class constructor to automate the subscription of methods to\n # event handlers\n @wraps(cls.__init__)\n def new_init(self, *args, **kwargs):\n _subscribe_marked_events(self)\n func(self, *args, **kwargs)\n\n # Patching the constructor\n cls.__init__ = new_init\n return cls", "def super_borg(cls):\n cls._state = {}\n _new = cls.__new__\n _init = cls.__init__\n\n @functools.wraps(cls.__new__)\n def wrapper_new(cls, *args, **kwargs):\n new_inst = _new(cls, *args, **kwargs)\n new_inst.__dict__ = cls._state\n return new_inst\n\n @functools.wraps(cls.__init__)\n def wrapper_init(self, *args, **kwargs):\n if not hasattr(self,\"_super_borg\"):\n self._super_borg = True\n _init(self, *args, **kwargs)\n if hasattr(self,\"__inscribe__\") and (args or kwargs):\n self.__inscribe__(*args, **kwargs)\n\n cls.__new__ = MethodType(wrapper_new, cls, type(cls))\n cls.__init__ = wrapper_init\n\n return cls", "def __new__(cls, *args, **kwds):\n cls.__init_lock__.acquire()\n try:\n if not cls.__decorated__:\n cls._decorate()\n cls.__decorated__ = True\n \n return object.__new__(cls, *args, **kwds)\n finally:\n cls.__init_lock__.release()", "def patch_class(cls):\n def _wrap_fun(wrapper, original):\n def wrapped(self, *args, **kwargs):\n return wrapper(self, original, *args, **kwargs)\n return wrapped\n def _wrap_cls(patch):\n for func in dir(patch):\n if not func.startswith('__'):\n wrapper = getattr(patch, func)\n original = getattr(cls, func, None)\n if original:\n setattr(cls, func, _wrap_fun(wrapper, original))\n else:\n setattr(cls, func, wrapper)\n return patch\n return _wrap_cls", "def post_init_func(fn):\n fn.__has_run__ = False\n @functools.wraps(fn)\n def wrapper_fn(*args, **kwargs):\n if fn.__has_run__:\n cui.message('Warning: executing post_init_func %s more than once.' 
% fn)\n\n result = fn(*args, **kwargs)\n fn.__has_run__ = True\n return result\n\n Core.__post_init_functions__.append(wrapper_fn)\n return wrapper_fn", "def with_init(attrs, defaults=None):\n if defaults is None:\n defaults = {}\n\n def init(self, *args, **kw):\n for a in attrs:\n try:\n v = kw.pop(a)\n except KeyError:\n try:\n v = defaults[a]\n except KeyError:\n raise ValueError(\"Missing value for '{0}'.\".format(a))\n setattr(self, a, v)\n self.__original_init__(*args, **kw)\n\n def wrap(cl):\n cl.__original_init__ = cl.__init__\n cl.__init__ = init\n return cl\n\n return wrap", "def __init_subclass__(cls, **kwargs):\n super().__init_subclass__(**kwargs)\n cls.invocation_layer_providers.append(cls)", "def nodeclass(cls):\n\n init = cls.__init__\n\n def init_wrapper(self, *args, **kwargs):\n if not hasattr(self, \"_init_run_for_class\"):\n self._init_run_for_class = set()\n if cls not in self._init_run_for_class:\n init(self, *args, **kwargs)\n self._init_run_for_class.add(cls)\n\n cls.__init__ = init_wrapper\n\n # Mark this class as decorated.\n del cls._node_decorator_missing_flag\n\n return cls", "def test_replace_dataloader_init_method():\n\n class DataLoaderSubclass1(DataLoader):\n def __init__(self, attribute1, *args, **kwargs):\n # intentionally not setting this attribute, calling super with different args\n # self.attribute1 = attribute1\n super().__init__(*args, **kwargs)\n\n class DataLoaderSubclass2(DataLoaderSubclass1):\n def __init__(self, attribute1, attribute2, *args, **kwargs):\n # intentionally not setting this attribute, calling super with different args\n # self.attribute2 = attribute2\n super().__init__(attribute1, *args, **kwargs)\n\n with _replace_dataloader_init_method():\n dataloader = DataLoaderSubclass1(\"attribute1\", dataset=range(4), batch_size=2)\n assert dataloader.attribute1 == \"attribute1\"\n\n with _replace_dataloader_init_method():\n dataloader = DataLoaderSubclass2(\"attribute1\", \"attribute2\", dataset=range(4), batch_size=2)\n assert dataloader.attribute1 == \"attribute1\"\n assert dataloader.attribute2 == \"attribute2\"", "def __call__(cls, inst = None, initDict = None, *args, **kwargs):\n if not inst:\n inst = blue.classes.CreateInstance(cls.__cid__)\n inst.__klass__ = cls\n if initDict:\n for k, v in initDict.iteritems():\n setattr(inst, k, v)\n\n try:\n inst.__init__()\n except AttributeError:\n pass\n\n return inst", "def __init__(self,decorated_coffee):\n\t\tAbstract_Coffee_Decorator.__init__(self,decorated_coffee)\n\t\t# Formally equivalent approach not taken here:\n\t\t# self.decorated_coffee = decorated_coffee", "def decorate(self, node, cls):\n # Collect classvars to convert them to attrs.\n if self.args[cls][\"auto_attribs\"]:\n ordering = classgen.Ordering.FIRST_ANNOTATE\n else:\n ordering = classgen.Ordering.LAST_ASSIGN\n ordered_locals = classgen.get_class_locals(\n cls.name, allow_methods=False, ordering=ordering, vm=self.vm)\n own_attrs = []\n for name, local in ordered_locals.items():\n typ, orig = local.get_type(node, name), local.orig\n if is_attrib(orig):\n attrib = orig.data[0]\n if typ and attrib.has_type:\n # We cannot have both a type annotation and a type argument.\n self.vm.errorlog.invalid_annotation(self.vm.frames, typ)\n attr = Attribute(\n name=name,\n typ=self.vm.convert.unsolvable,\n init=attrib.init,\n kw_only=attrib.kw_only,\n default=attrib.default)\n elif not typ:\n # Replace the attrib in the class dict with its type.\n attr = Attribute(\n name=name,\n typ=attrib.typ,\n init=attrib.init,\n 
kw_only=attrib.kw_only,\n default=attrib.default)\n cls.members[name] = classgen.instantiate(node, name, attr.typ)\n else:\n # cls.members[name] has already been set via a typecomment\n attr = Attribute(\n name=name,\n typ=typ,\n init=attrib.init,\n kw_only=attrib.kw_only,\n default=attrib.default)\n self.vm.check_annotation_type_mismatch(\n node, attr.name, attr.typ, attr.default, local.stack,\n allow_none=True)\n own_attrs.append(attr)\n elif self.args[cls][\"auto_attribs\"]:\n if not match_classvar(typ):\n self.vm.check_annotation_type_mismatch(\n node, name, typ, orig, local.stack, allow_none=True)\n attr = Attribute(\n name=name, typ=typ, init=True, kw_only=False, default=orig)\n if not orig:\n cls.members[name] = classgen.instantiate(node, name, typ)\n own_attrs.append(attr)\n\n base_attrs = self.get_base_class_attrs(cls, own_attrs, _ATTRS_METADATA_KEY)\n attrs = base_attrs + own_attrs\n # Stash attributes in class metadata for subclasses.\n cls.metadata[_ATTRS_METADATA_KEY] = attrs\n\n # Add an __init__ method\n if self.args[cls][\"init\"]:\n init_method = self.make_init(node, cls, attrs)\n cls.members[\"__init__\"] = init_method", "def persistent_class(orig_class):\n assert isinstance(orig_class, type)\n if is_persistent(orig_class):\n return orig_class\n\n assert orig_class.__module__ in sys.modules\n orig_module = sys.modules[orig_class.__module__]\n orig_module_src = _module_to_src(orig_module)\n\n class Decorator(orig_class):\n _orig_module_src = orig_module_src\n _orig_class_name = orig_class.__name__\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._init_args = copy.deepcopy(args)\n self._init_kwargs = copy.deepcopy(kwargs)\n assert orig_class.__name__ in orig_module.__dict__\n _check_pickleable(self.__reduce__())\n\n @property\n def init_args(self):\n return copy.deepcopy(self._init_args)\n\n @property\n def init_kwargs(self):\n return dnnlib.EasyDict(copy.deepcopy(self._init_kwargs))\n\n def __reduce__(self):\n fields = list(super().__reduce__())\n fields += [None] * max(3 - len(fields), 0)\n if fields[0] is not _reconstruct_persistent_obj:\n meta = dict(type='class', version=_version, module_src=self._orig_module_src, class_name=self._orig_class_name, state=fields[2])\n fields[0] = _reconstruct_persistent_obj # reconstruct func\n fields[1] = (meta,) # reconstruct args\n fields[2] = None # state dict\n return tuple(fields)\n\n Decorator.__name__ = orig_class.__name__\n _decorators.add(Decorator)\n return Decorator" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
split an iterable based on the truth value of the function for each element Arguments func a callable to apply to each element in the iterable iterable an iterable of elements to split Returns falsy, truthy two tuples, the first with the elements e of the iterable where func(e) returns False, the second with the elements of the iterable where func(e) returns True
def split(func, iterable):
    falsy, truthy = [], []
    for e in iterable:
        if func(e):
            truthy.append(e)
        else:
            falsy.append(e)
    return tuple(falsy), tuple(truthy)
[ "def split_data(iterable, pred):\n yes, no = [], []\n for d in iterable:\n if pred(d):\n yes.append(d)\n else:\n no.append(d)\n return yes, no", "def split_on(\n predicate: Callable[[Any], bool], sequence: Sequence\n) -> (list, list):\n return (\n list(filter(predicate, sequence)),\n list(filter(inverse(predicate), sequence)),\n )", "def split_at(iterable, pred):\n buf = []\n for item in iterable:\n if pred(item):\n yield buf\n buf = []\n else:\n buf.append(item)\n yield buf", "def _split_list(s, predicate):\n\n yes = []\n no = []\n for x in s:\n if predicate(x):\n yes.append(x)\n else:\n no.append(x)\n return yes, no", "def partition(iterable, predicate):\n passes = list()\n fails = list()\n for element in iterable:\n if predicate(element):\n passes.append(element)\n else:\n fails.append(element)\n return passes, fails", "def partition(arr, callback):\n truthy_list = []\n falsey_list = []\n for i in arr:\n if callback(i):\n truthy_list.append(i)\n else:\n falsey_list.append(i)\n return [truthy_list, falsey_list]", "def partition(pred, iterable):\n # type: (Optional[Callable[[T], bool]], Iterable[T]) -> Tuple[Iterator[T], Iterator[T]]\n if pred is None:\n pred = bool\n\n evaluations = ((pred(x), x) for x in iterable)\n t1, t2 = tee(evaluations)\n return (\n (x for (cond, x) in t1 if not cond),\n (x for (cond, x) in t2 if cond),\n )", "def split_on(iterable, predicate):\n it = iter(iterable)\n\n # Initialize the chunk list with an item\n # StopIteration will be thrown if there are no further items in the iterator\n chunk = [it.next()]\n\n while True:\n try:\n item = it.next()\n\n if predicate(item):\n # If the next item should be in a new chunk then return the current chunk\n yield chunk\n # Then rest the chunk list\n chunk = [item]\n else:\n # Simply append the item to current chunk if it doesn't match the predicate\n chunk.append(item)\n\n except StopIteration:\n # If the end of the iterator is reached then simply return the current chunk\n yield chunk\n break", "def split_list(items, pred):\n\n thisresult = []\n results = [thisresult]\n for i in items:\n thisresult.append(i)\n if pred(i):\n thisresult = []\n results.append(thisresult)\n return results", "def partition(iterable : Iterable[T], predicate : Callable[[T], bool]) -> Tuple[Iterable[T], Iterable[T]]:\n\n iter1, iter2 = tee(iterable)\n return filterfalse(predicate, iter1), filter(predicate, iter2)", "def partition(pred, iterable):\n # iterable is walked only once; tee handles the intermediate storage.\n t1, t2 = tee(iterable)\n return filterfalse(pred, t1), filter(pred, t2)", "def test_partition_to_lists():\n nums = [1, 2, 1, 3, 1, 4, 0, None, None]\n not_ones, ones = partition_to_lists(nums, lambda n: n == 1)\n assert not_ones == [2, 3, 4, 0, None, None]\n assert ones == [1, 1, 1]\n # The default predicate is the standard Python bool() function\n falsey, truthy = partition_to_lists(nums)\n assert falsey == [0, None, None]\n assert truthy == [1, 2, 1, 3, 1, 4]", "def partition_bool(pred, seq):\n return partition_groups(pred, seq, (True, False))", "def partition(lst, fn):\n\n # Best solution:\n\n a = []\n b = []\n\n for val in lst:\n if fn(val):\n a.append(val)\n else:\n b.append(val)\n\n return [a, b]\n\n # Clever, but less optimal solution --- this runs fn() twice on each element,\n # not once:\n #\n # return [\n # [val for val in lst if fn(val)],\n # [val for val in lst if not fn(val)]\n # ]", "def partition(is_included_fn, items):\n item_by_exclusion = { True : [], False : [] }\n for item in items:\n # \"not\" to normalise 
all values to either True or False\n item_by_exclusion[not is_included_fn(item)].append(item)\n return (item_by_exclusion[False], item_by_exclusion[True])", "def filter(function, iterable):\n\n if function is bool:\n return [x for x in iterable if x]\n\n return [x for x in iterable if function(x)]", "def partition(pred, iterable):\n stream = list(iterable)\n matched = list(itertools.takewhile(pred, stream))\n unmatched = list(itertools.dropwhile(pred, stream))\n return matched, unmatched", "def partition_strict(function, items):\n left = []\n right = []\n for item in items:\n (left if function(item) else right).append(item)\n return (left, right)", "def every(lst, fn):\n return reduce(lambda acc, elem: acc and fn(elem), lst, True)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Filter multiple iterables at once, selecting values at index i such that func(iterables[0][i], iterables[1][i], ...) is True
def sync_filter(func, *iterables):
    return tuple(zip(*tuple(i for i in zip(*iterables) if func(*i)))) or ((),) * len(
        iterables
    )
[ "def chain_filter(it: Iterable, *filters: Callable) -> Iterator:\n for f in filters:\n it = filter(f, it)\n return it", "def filter(function, iterable):\n\n if function is bool:\n return [x for x in iterable if x]\n\n return [x for x in iterable if function(x)]", "def filter(function: \"Callable[[Any], bool]\", iterable: \"Iterable[T]\") -> \"Iter[T]\":\n return Iter(_filter(function, iterable))", "def xfilter(function, sequence):\n for item in sequence:\n if function:\n if function(item): yield item\n else:\n if item: yield item", "def filter(iterable, filter_func):\n for item in iterable:\n item = filter_func(item)\n if item is not None:\n yield item", "def filter(func, data):\n \n out = []\n for r in data:\n\tif func(r):\n\t out.append(r)\n return out", "def every(lst, fn):\n return reduce(lambda acc, elem: acc and fn(elem), lst, True)", "def chain_apply(it: Iterable, *callables: Callable) -> Iterable:\n for f in callables:\n it = f(it)\n return it", "def filter(self, fn: Callable[[Tuple[K, List[V]]], bool]) -> Iterator[Tuple[K, List[V]]]:\n return (entry for entry in iter(self) if fn(entry))", "def filter(iterable, predicate):\n\n for x in iterable:\n if predicate(x):\n yield x", "def filter(iteratee, seq):\n return _filter(fnc.iteratee(iteratee), seq)", "def __map_and_filter(_input: MutableSequence[T],\n _map: Callable[[T], Any] = lambda x: x,\n _filter: Callable[[T], bool] = lambda x: True) -> MutableSequence[Any]:\n\n return [_map(x) for x in _input if _filter(x)]", "def _filter(lst, func=None):\n\n if func:\n return [l for l in lst if l is not None and func(l)]\n else:\n return [l for l in lst if l is not None]", "def gen_apply_filters(filters):\n def filter_function(value):\n temp = value\n for function in filters:\n temp = function(temp)\n return temp\n return filter_function", "def over_every(funcs):\n def _over_every(*args):\n return all(func(*args) for func in funcs)\n\n return _over_every", "def filter_(func, sequence, *argc):\r\n if isinstance(sequence, (tuple, list)):\r\n return list(filter(lambda i:func(i, *argc), sequence))\r\n return sequence if func(sequence, *argc) else []", "def ifilter_c(func):\n return functools.partial(ifilter, func)", "def filter(func1=None):\r\n temp_seq=[]\r\n try:\r\n if func1==None:\r\n raise TypeError(\"No filter function\")\r\n except TypeError as fltr:\r\n print(\"%s:\"%type(fltr), fltr)\r\n else:\r\n for i in range(len(seq)):\r\n if func1(seq[i]):\r\n temp_seq.append(seq[i])\r\n return tuple(temp_seq)\r\n finally:\r\n return seq", "def map_and_filter(s, map_fn, filter_fn):\n return [map_fn(i) for i in s if filter_fn(i)]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
report a runtime error
def runtime_error(self, error: 'LoxRuntimeError'):
    output = f'{error.get_message()}{os.linesep}[line {error.token.line}]'
    print(output, file=sys.stderr)
    self.had_runtime_error = True
[ "def test_re_raise_runtime_error(self, _mock_pipeline, MockTrial): # noqa: N803\n self._test_re_raise(MockTrial=MockTrial, exception=RuntimeError)", "def reportError(self):\n self.Q['err'].put(sys.exc_info()[:2])", "def serious_error(self, e):\n pass", "def func():\n raise RuntimeError", "def error(self, e):", "def getCompilerError():", "def error(msg):\n print msg\n sys.exit(1)", "def dlinker_error(env, Info, Info1=None):\n env.PrintError(\"Dlinker error found: \" + Info)\n env.PrintError(Info1)\n raise Exception(Info)", "def error_occured(self) -> None:\r\n \r\n warnings.warn(\r\n '''An Error has occured when processing this photo!\r\n The plants are too emerged in some places to analyze.''',\r\n RuntimeWarning)", "def _raise_tcod_error() -> NoReturn:\n raise RuntimeError(ffi.string(lib.TCOD_get_error()).decode(\"utf-8\"))", "def basicerror(self, exc, agent):\n \n sys.stdout.write('Error: ' + str(agent) + ': ' + str(exc) + '\\n')", "def _RaiseFatal(cls, sub, subargs, errorcode, *args):\n ScriptForge.InvokeSimpleScript('ScriptForge.SF_Utils._EnterFunction', sub, subargs)\n cls.RaiseFatal(errorcode, *args)\n raise RuntimeError(\"The execution of the method '\" + sub.split('.')[-1] + \"' failed. Execution stops.\")", "def test_error(self):\n print 'test_error'\n raise Exception", "def raise_error(Err):\n raise Err()", "def error(self, msg, timeout=None):\n raise ModuleErrorException(msg, timeout)", "def inspect_error():\n \n error('Internal Python error in the inspect module.\\n'\n 'Below is the traceback from this internal error.\\n')", "def run(self):\n self.simple_error()\n self.relative_error()", "def error_test():\n checkresult(lib.ErrorTest())", "def ReportError(text):\n raise IOError(text)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Note: whether SMTPTimeoutError or SMTPConnectError is raised here depends on processing time.
async def test_timeout_error_with_no_server(event_loop):
    client = SMTP(hostname="127.0.0.1", port=65534, loop=event_loop)

    with pytest.raises(SMTPTimeoutError):
        await client.connect(timeout=0.000000001)
[ "def send_mail(self, mail_to, mail_subject, mail_body, retries=3, sleep_time=5):\n for i in xrange(retries):\n try:\n self._send_mail(mail_to, mail_subject, mail_body)\n log_info('Successful send mail from %s to %s (Subject: %s) at %i try.' % (self.sender, mail_to, mail_subject, i+1))\n return True\n except smtplib.SMTPRecipientsRefused as e:\n # Avoid this being catched by the SMPTException catcher further down\n raise e \n except smtplib.SMTPSenderRefused as e:\n log_error('The server refused the sender (%s) %s@%s:%i' % (self.sender, self.username, self.host, self.port))\n return False \n except smtplib.SMTPAuthenticationError as e:\n log_error('SMTP authentication problem for connection %s@%s:%i' % (self.username, self.host, self.port))\n return False \n except smtplib.SMTPHeloError as e:\n log_error('The server didn\\'t reply properly to the HELO greeting.')\n return False\n except smtplib.SMTPDataError as e:\n log_warn('The server replied with an unexpected error code.')\n if i < retries:\n log_info('Trying to reconnect to %s@%s:%i ...' % (self.username, self.host, self.port))\n self.close()\n sleep(sleep_time)\n self.connected = False\n except socket.error as e:\n log_warn('Socket error: %s .' % e)\n if i < retries:\n log_info('Trying to reconnect to %s@%s:%i ...' % (self.username, self.host, self.port))\n self.close()\n sleep(sleep_time)\n self.connected = False\n except smtplib.SMTPConnectError as e:\n log_warn('Connection error for %s@%s:%i.' % (self.username, self.host, self.port))\n if i < retries:\n log_info('Trying to reconnect to %s@%s:%i ...' % (self.username, self.host, self.port))\n self.close()\n sleep(sleep_time)\n self.connected = False \n except smtplib.SMTPException as e:\n log_error('Smtplib error: %s .' % e)\n if i < retries:\n log_info('Trying to reconnect to %s@%s:%i ...' % (self.username, self.host, self.port))\n self.close()\n sleep(sleep_time)\n self.connected = False \n log_error('To many retries sending to %s:%i via %s. Giving up.' 
% (self.host, self.port, self.username))\n return False", "def _connect_smtp(self):\n smtp = None\n try:\n smtp = smtplib.SMTP(self.servername, timeout = self.timeout)\n except smtplib.SMTPException as err:\n log.critical('smtp service at {} is not currently available'.format(self.servername))\n log.critical(err)\n except Exception as err:\n log.critical('smtp other error {} is not currently available'.format(self.servername))\n log.critical(err)\n \n if self.auth is not None:\n try:\n smtp.login(self.auth[0], self.auth[1])\n except smtplib.SMTPException as err:\n log.warn('smtp service login error for {}'.format(self.servername))\n log.warn(err)\n return smtp", "def Check_SMTP(name, my_ip):\n\n if nslookup(name)[0] != 0:\n add_info (name, SMTP_SERVER, \"cannot resolve SMTP server\")\n return 1\n if ping_machine(name) != 0:\n add_info(name, SMTP_SERVER, \"cannot ping SMTP server\")\n return 2\n\n status, err = tryconnect(name, SMTP_PORT)\n if status == 1 or status == 2:\n add_info(name, SMTP_SERVER, err)\n if status == 1:\n # if we time'd out, things can still be OK (say reverse DNS problems)\n # so return only an error if no timeout\n return 3\n\n stat, out = port_talker.TCPTalk(name, SMTP_PORT,\n 60, # timeout (>30sec for messed up servers)\n \"HELO \" + my_ip + \"\\r\\nQUIT\\r\\n\",\n None, # terminator\n 1024, # max len\n 1) # use external resolver\n\n # expected answer:\n #220 'mail.forobozz.com' ESMTP\n #250 mail.frobozz.com Hello grue.frobozz.com [192.168.0.21], pleased to meet ya\n #221 mail.frobozz.com closing connection\n\n # Each line can be repeated several times, so we check that all codes appear\n # and that no other codes appear\n codes = map(lambda x: x[:4], string.split(out, '\\n'))\n valid_codes = ('220 ', '250 ', '221 ', '')\n try:\n for code in codes:\n assert(code in valid_codes)\n for valid in valid_codes:\n assert(valid in codes)\n except:\n # If we wanted, we could check whether reverse DNS lookup is not working.\n # This would be the most likely explanation\n add_info(name, SMTP_SERVER, \"cannot HELO SMTP server\")\n return 4\n add_info(name, SMTP_SERVER, \"OK\")\n return 0", "def send_mail_when_failed(self, body):\r\n pass", "def _connect(self):\n smtp = smtplib.SMTP(self.smtp_host, self.smtp_port)\n smtp.ehlo()\n if self.smtp_starttls:\n smtp.starttls()\n smtp.ehlo()\n smtp.login(self.smtp_user, self.smtp_password)\n\n return smtp", "def starttls_scan(domain, smtp_timeout, smtp_localhost, smtp_ports, smtp_cache):\n mail_servers = domain.mail_servers\n if mail_servers is None:\n mail_servers = []\n for mail_server in mail_servers:\n for port in smtp_ports:\n domain.ports_tested.add(port)\n server_and_port = mail_server + ':' + str(port)\n\n if not smtp_cache or (server_and_port not in _SMTP_CACHE):\n domain.starttls_results[server_and_port] = {}\n\n smtp_connection = smtplib.SMTP(timeout=smtp_timeout,\n local_hostname=smtp_localhost)\n # The following line is useful when debugging why an\n # SMTP connection fails. It prints out all the\n # traffic sent to and from the SMTP server.\n smtp_connection.set_debuglevel(1)\n logging.debug('Testing ' + server_and_port + ' for STARTTLS support')\n\n # Look up the IPv4 address for mail_server.\n #\n # By default, smtplib looks for A and AAAA records\n # from DNS and uses the first one that it can connect\n # to. 
What I find when running in Lambda (at least in\n # my VPC that doesn't support IPv6) is that when DNS\n # returns IPv6 an address I get a low level \"errno 97\n # - Address family not supported by protocol\" error\n # and the other addresses returned by DNS are not\n # tried. Therefore the hostname is not scanned at\n # all.\n #\n # To get around this I look up the A record and use\n # that instead of the hostname in DNS when I call\n # smtp_connection.connect().\n try:\n addr_info = socket.getaddrinfo(\n mail_server, port, socket.AF_INET, socket.SOCK_STREAM\n )\n except socket.gaierror:\n # We get this exception if there is no A record\n # for the given mail server. This does happen,\n # since among their MX records some domains do\n # list some IPv6-only mail servers, but this also\n # happens if there is a DNS error or if the mail\n # server does not exist in DNS, so we can't give\n # them credit and we'll just treat them as\n # unreachable instead.\n error_str = f'The mail server {mail_server} does not have an IPv4 address.'\n handle_error('[STARTTLS]', domain, error_str)\n logging.warn(error_str)\n domain.starttls_results[server_and_port]['is_listening'] = False\n domain.starttls_results[server_and_port]['supports_smtp'] = False\n domain.starttls_results[server_and_port]['starttls'] = False\n continue\n\n # Extract the IP address from the socket addrinfo\n socket_address = addr_info[0][4]\n mail_server_ip_address = socket_address[0]\n\n # Try to connect. This will tell us if something is\n # listening.\n try:\n smtp_connection.connect(mail_server_ip_address, port)\n domain.starttls_results[server_and_port]['is_listening'] = True\n except (socket.timeout, smtplib.SMTPConnectError,\n smtplib.SMTPServerDisconnected,\n ConnectionRefusedError, OSError) as error:\n handle_error('[STARTTLS]', domain, error)\n domain.starttls_results[server_and_port]['is_listening'] = False\n domain.starttls_results[server_and_port]['supports_smtp'] = False\n domain.starttls_results[server_and_port]['starttls'] = False\n\n if smtp_cache:\n _SMTP_CACHE[server_and_port] = domain.starttls_results[server_and_port]\n\n continue\n\n # Now try to say hello. 
This will tell us if the\n # thing that is listening is an SMTP server.\n try:\n smtp_connection.ehlo_or_helo_if_needed()\n domain.starttls_results[server_and_port]['supports_smtp'] = True\n logging.debug('\\t Supports SMTP')\n except (smtplib.SMTPHeloError, smtplib.SMTPServerDisconnected) as error:\n handle_error('[STARTTLS]', domain, error)\n domain.starttls_results[server_and_port]['supports_smtp'] = False\n domain.starttls_results[server_and_port]['starttls'] = False\n # smtplib freaks out if you call quit on a non-open\n # connection\n try:\n smtp_connection.quit()\n except smtplib.SMTPServerDisconnected as error2:\n handle_error('[STARTTLS]', domain, error2)\n\n if smtp_cache:\n _SMTP_CACHE[server_and_port] = domain.starttls_results[server_and_port]\n\n continue\n\n # Now check if the server supports STARTTLS.\n has_starttls = smtp_connection.has_extn('STARTTLS')\n domain.starttls_results[server_and_port]['starttls'] = has_starttls\n logging.debug('\\t Supports STARTTLS: ' + str(has_starttls))\n\n # If there is a TLSA record, check and see if the TLSA record matches the STARTTLS cert\n if domain.mx_tlsa_records:\n check_starttls_tlsa(domain, smtp_connection, smtp_timeout, mail_server, port)\n\n # Close the connection\n # smtplib freaks out if you call quit on a non-open\n # connection\n try:\n smtp_connection.quit()\n except smtplib.SMTPServerDisconnected as error:\n handle_error('[STARTTLS]', domain, error)\n\n # Copy the results into the cache, if necessary\n if smtp_cache:\n _SMTP_CACHE[server_and_port] = domain.starttls_results[server_and_port]\n\n else:\n logging.debug('\\tUsing cached results for ' + server_and_port)\n # Copy the cached results into the domain object\n domain.starttls_results[server_and_port] = _SMTP_CACHE[server_and_port]", "def connectToSMTP():\n\t#server=smtplib.SMTP(SMTP_SERVER,SMTP_PORT)\n\tserver=smtplib.SMTP(SMTP_SERVER)\n\tserver.ehlo()\n\t#server.login(SMTP_USER,SMTP_PASSWD)\n\treturn server", "def test_smtp(self):\n self._endpointServerTest(\"smtp\", protocols.SMTPFactory)", "def connect(smtp_url: str, timeout: Optional[float] = None) -> smtplib.SMTP:\n return smtplib.SMTP(smtp_url, timeout=timeout)", "def SendTimeout(self) -> int:", "async def test_expn_error(\n smtp_client: SMTP, smtpd_server: asyncio.AbstractServer\n) -> None:\n async with smtp_client:\n with pytest.raises(SMTPResponseException):\n await smtp_client.expn(\"a-list\")", "def test_conn_err_retry(self, retry, get_conn):\r\n get_conn.return_value.open.side_effect = SMTPConnectError(424, \"Bad Connection\")\r\n\r\n test_email = {\r\n 'action': 'Send email',\r\n 'send_to': 'myself',\r\n 'subject': 'test subject for myself',\r\n 'message': 'test message for myself'\r\n }\r\n response = self.client.post(self.send_mail_url, test_email)\r\n self.assertEquals(json.loads(response.content), self.success_content)\r\n\r\n self.assertTrue(retry.called)\r\n (__, kwargs) = retry.call_args\r\n exc = kwargs['exc']\r\n self.assertIsInstance(exc, SMTPConnectError)", "def test_endpointSMTP(self):\n self._endpointTest(\"smtp\")", "def test_tls_timeout(self):\n if not self.has_tls:\n self.skipTest(\"TLS is not set.\")\n import multiprocessing\n client = LDAPClient(self.url, True)\n client.set_cert_policy(\"ALLOW\")\n client.set_ca_cert(None)\n client.set_ca_cert_dir(None)\n proxy = rpc.ServerProxy(\"http://%s:%d/\" % (self.ipaddr, 8000))\n proxy.set_delay(9.0, 15)\n time.sleep(2.0)\n pool = multiprocessing.Pool(processes=1)\n try:\n result = pool.apply_async(receive_timeout_error, args=(client,))\n 
result.get(timeout=18.0)\n except Exception as exc:\n self.assertIsInstance(exc, bonsai.TimeoutError)\n else:\n self.fail(\"Failed to receive TimeoutError.\")\n finally:\n pool.terminate()\n proxy.remove_delay()", "def postprocess():\n if ERRORS:\n address = 'tamakoshihiroki@gmail.com'\n body = '\\n\\n'.join( ERRORS )\n msg = create_message( body, address )\n send_mail( msg, address )", "def send_error_email(exception_info, from_, subject, body):\n import smtplib\n import traceback\n \n from email.mime.text import MIMEText\n from getpass import getuser\n from socket import gethostname\n\n exceptionType, exceptionValue, exceptionTraceback = exception_info\n\n user, host = getuser(), gethostname()\n\n extra_info = [\"Was running as user '%s' on machine '%s'\" % (user, host),\n #\"Code located at: %s\" % code\n ]\n\n msg = MIMEText(\"\\n\\n\".join(body + extra_info +\n traceback.format_tb(exceptionTraceback)))\n\n msg['Subject'] = subject\n msg['From'] = from_\n \n to = [\n #\"Peter Waller <peter.waller@cern.ch>\",\n \"Peter Onyisi <peter.onyisi@cern.ch>\"\n ]\n \n #operations = (\"Data Quality Operations \"\n # \"<hn-atlas-data-quality-operations@cern.ch>\")\n \n msg['To'] = \", \".join(to)\n #msg['Cc'] = operations; to.append(operations)\n\n s = smtplib.SMTP()\n s.connect()\n s.sendmail(from_, to, msg.as_string())\n s.quit()\n\n log.error(\"Error email sent.\")", "def test_get_imap_smtp_access(self):\n pass", "def test_data_err_retry(self, retry, get_conn):\r\n get_conn.return_value.send_messages.side_effect = SMTPDataError(455, \"Throttling: Sending rate exceeded\")\r\n test_email = {\r\n 'action': 'Send email',\r\n 'send_to': 'myself',\r\n 'subject': 'test subject for myself',\r\n 'message': 'test message for myself'\r\n }\r\n response = self.client.post(self.send_mail_url, test_email)\r\n self.assertEquals(json.loads(response.content), self.success_content)\r\n\r\n # Test that we retry upon hitting a 4xx error\r\n self.assertTrue(retry.called)\r\n (__, kwargs) = retry.call_args\r\n exc = kwargs['exc']\r\n self.assertIsInstance(exc, SMTPDataError)", "def test_rejected(self):\n service = mail.mail.MailService()\n domain = mail.mail.BounceDomain()\n service.addDomain(b\"foo.com\", domain)\n\n factory = mail.protocols.SMTPFactory(service)\n protocol = factory.buildProtocol(None)\n\n deliverer = mail.protocols.SMTPDomainDelivery(service, None, None)\n protocol.delivery = deliverer\n\n transport = StringTransport()\n protocol.makeConnection(transport)\n\n protocol.lineReceived(b\"HELO baz.net\")\n protocol.lineReceived(b\"MAIL FROM:<a@baz.net>\")\n protocol.lineReceived(b\"RCPT TO:<any@foo.com>\")\n protocol.lineReceived(b\"QUIT\")\n\n self.assertTrue(transport.disconnecting)\n protocol.connectionLost(None)\n\n self.assertEqual(\n transport.value().strip().split(b\"\\r\\n\")[-2],\n b\"550 Cannot receive for specified address\",\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Classifies the conditions for an INNER_JOIN
def basis_classifier_inner_join(self, join_left_tables, join_right_tables):
    join_left_tables = set(join_left_tables)
    join_right_tables = set(join_right_tables)
    join_expr_equals = []

    for i, (basis, (flag, new_basis), columns) in enumerate(zip(
        self.base_expressions,
        self.equivalent_basis,
        self.used_columns
    )):
        left_columns, right_columns = columns
        if isinstance(basis, expr.ComparisonPredicate):
            assert left_columns or right_columns
            left_table = {
                column.table
                for column in left_columns
            }
            right_table = {
                column.table
                for column in right_columns
            }
            wrong_tables = (left_table | right_table) - (join_left_tables | join_right_tables)
            if wrong_tables:
                Select.logger.error(
                    'The tables (%s) are not included in the join',
                    ', '.join(sorted(
                        '.'.join(*table.full_name())
                        for table in wrong_tables
                    ))
                )
                continue

            if left_table <= join_right_tables and right_table <= join_left_tables:
                # Normalize the predicate so that its left side
                # refers to the left table and its right side
                # refers to the right table
                basis.reverse()
                left_table, right_table = right_table, left_table
                left_columns, right_columns = right_columns, left_columns

            if len(left_table) == 1 and len(right_table) == 1:
                left_table = left_table.pop()
                right_table = right_table.pop()

                if left_table == right_table:  # Condition on a single table
                    if new_basis:
                        left_table.filters.append(new_basis)
                    self.not_used_expression.append(i)
                    for column in left_columns + right_columns:
                        column.count_used -= 1
                elif (
                    left_table in join_left_tables and
                    right_table in join_right_tables and
                    isinstance(basis.left, st.Column) and
                    isinstance(basis.right, st.Column)
                ):
                    # left.column = right.column
                    if i in self.all_true and basis.op == ss.equals_operator:  # (a.id = b.id) is True
                        join_expr_equals.append((basis.left, basis.right))
                    elif i in self.all_false and basis.op == ss.not_equals_operator:  # (a.id != b.id) is False
                        join_expr_equals.append((basis.left, basis.right))
                    else:
                        continue
                    self.not_used_expression.append(i)
            elif len(left_table) == 1 and len(right_table) == 0:
                # tbl.column <= 10
                table = left_table.pop()
                if new_basis:
                    table.filters.append(new_basis)
                self.not_used_expression.append(i)
                for column in left_columns:
                    column.count_used -= 1
            elif len(left_table) == 0 and len(right_table) == 1:
                # tbl.column <= 10
                table = right_table.pop()
                if new_basis:
                    table.filters.append(new_basis)
                self.not_used_expression.append(i)
                for column in right_columns:
                    column.count_used -= 1
            continue
        else:
            assert len(left_columns)
            table = {
                column.table
                for column in left_columns
            }
            if len(table) == 1:
                table = table.pop()
                if new_basis:
                    table.filters.append(new_basis)
                self.not_used_expression.append(i)
                for column in left_columns:
                    column.count_used -= 1

    self.join_expr_equals = join_expr_equals
    return join_expr_equals
[ "def inner_join(\n cls,\n other: type[Selectable],\n on: Union[OnClause, Iterable[BaseColumn]],\n ) -> type[Join]:\n return cls._join(other, \"INNER\", on)", "def InnerJoin(self, table, *args):\n return self.Join(table, \"INNER\", *args)", "def join(self, table, *conditions):\n self.join_by_type(\"INNER\", table, *conditions)", "def join_where(self, table, one, operator, two, type='inner'):\n return self.join(table, one, operator, two, type, True)", "def perform_inner_join(self, other_table, key_column_names):\n joined_table = []\n is_match = 1\n any_match = 0\n header = []\n # get header for joined table\n for i in self.column_names:\n header.append(i)\n is_in = 0\n for j in other_table.column_names:\n is_in = 0\n for k in header:\n if j == k:\n is_in = 1\n if is_in == 0:\n header.append(j)\n\n # join tables by iterating through rows \n for row in self.data:\n any_match = 0\n for other_row in other_table.data:\n is_match = 1\n # check for matching key\n for name in key_column_names:\n if row[self.column_names.index(name)] == other_row[other_table.column_names.index(name)]:\n any_match = 1\n else:\n is_match = 0\n # if it is a key match, join the rows\n if is_match == 1: # if it is a match\n joined_row = [] \n for column in row:\n joined_row.append(column)\n for i in range(0, len(other_table.column_names)):\n same = 0\n for j in range(0, len(self.column_names)):\n if other_table.column_names[i] == self.column_names[j]:\n same = 1\n if same == 0:\n joined_row.append(other_row[i])\n # append the joined row to the joined table\n if any_match == 1:\n joined_table.append(joined_row)\n\n # create a MyPyTable with the joined table data and the header\n inner_join = MyPyTable(header, joined_table)\n # remove duplicate from the end of the table if it exists\n duplicates = inner_join.find_duplicates(key_column_names)\n inner_join.drop_rows(duplicates)\n for item in duplicates:\n inner_join.data.append(item)\n \n # return the table\n return inner_join", "def perform_inner_join(self, other_table, key_column_names):\n joined_table = []\n header = copy.deepcopy(self.column_names)\n for column in other_table.column_names:\n if column not in header:\n header.append(column)\n \n in_both = True\n for row_1 in range(len(self.data)):\n for row_2 in range(len(other_table.data)):\n in_both = True\n for key in key_column_names:\n index_1 = self.column_names.index(key)\n index_2 = other_table.column_names.index(key)\n if self.data[row_1][index_1] != other_table.data[row_2][index_2]:\n in_both = False\n if in_both:\n added_row = copy.deepcopy(self.data[row_1])\n for column in other_table.column_names:\n if column not in self.column_names:\n adding_index = other_table.column_names.index(column)\n added_row.append(other_table.data[row_2][adding_index])\n joined_table.append(added_row)\n return MyPyTable(header,joined_table)", "def _getSelectClauseMitVerwaltungJoin( self ) -> str:\n return \\\n \"select s.shg_id, s.vwg_id, s.mobj_id, s.von, coalesce(s.bis, '') as bis, s.netto, s.ruezufue, \" \\\n \"(s.netto + s.ruezufue) as brutto, s.bemerkung, \" \\\n \"vwg.vw_id, vwg.weg_name \"\\\n \"from sollhausgeld s \" \\\n \"inner join verwaltung vwg on vwg.vwg_id = s.vwg_id \" \\\n \"inner join mietobjekt mo on mo.mobj_id = s.mobj_id \"", "def Join(self, table, condition, mode = ''):\r\n\t\tself.SetCurrMarker('join')\r\n\t\t# exec cur part\r\n\t\treturn self.AddJoin(table, condition, mode)", "def test_crossJoin(self):\n self.assertEquals(\n Select(From=self.schema.FOO.join(self.schema.BOZ)).toSQL(),\n SQLFragment(\"select 
* from FOO cross join BOZ\")\n )", "def addJoin(joinDef):", "def basis_classifier_full_join(self, join_left_tables, join_right_tables):\n return []", "def relate(self, other_query):\n statement_without_semicolon = self.statement\n if self.statement.endswith(\";\"):\n statement_without_semicolon = self.statement[:-1].strip()\n return self.set_child_and_return(\"SELECT * FROM %s NATURAL INNER JOIN %s\" % (statement_without_semicolon, other_query))", "def test_joinColumnSelection(self):\n self.assertEquals(\n Select(\n [self.schema.FOO.BAZ, self.schema.BOZ.QUX],\n From=self.schema.FOO.join(\n self.schema.BOZ,\n self.schema.FOO.BAR == self.schema.BOZ.QUX\n )\n ).toSQL(),\n SQLFragment(\"select BAZ, QUX from FOO join BOZ on BAR = QUX\")\n )", "def joins(self):\n return self._joins", "def get_from_clause(self):\n result = []\n qn = self.quote_name_unless_alias\n qn2 = self.connection.ops.quote_name\n first = True\n for alias in self.query.tables:\n if not self.query.alias_refcount[alias]:\n continue\n try:\n name, alias, join_type, lhs, _lhs_col, _col, nullable = self.query.alias_map[alias]\n lhs_cols, cols = getattr(_lhs_col, \"columns\", [_lhs_col]), getattr(_col, \"columns\", [_col])\n #assert len(lhs_cols) == len(cols), \"could not join multiple columns with simple column (%s <> %s)\" % (lhs_cols, cols)\n except KeyError:\n # Extra tables can end up in self.tables, but not in the\n # alias_map if they aren't in a join. That's OK. We skip them.\n continue\n alias_str = (alias != name and ' %s' % alias or '')\n if join_type and not first:\n if len(lhs_cols) == len(cols):\n _on_where = \" AND \".join(['%s.%s = %s.%s' %\n (qn(lhs),qn2(lhs_col), qn(alias), qn2(col))\n for lhs_col,col in zip(lhs_cols, cols)\n ])\n result.append('%s %s%s ON (%s)'\n % (join_type, qn(name), alias_str, _on_where))\n else:\n #assert len(lhs_cols) == len(cols), \"could not join multiple columns with simple column (%s <> %s)\" % (lhs_cols, cols)\n c1 = MultipleColumnsIN(lhs_cols, alias=qn(lhs)).inner_sql(qn2, self.connection)\n c2 = MultipleColumnsIN(cols, alias=qn(alias)).inner_sql(qn2, self.connection)\n result.append('%s %s%s ON (%s)'\n % (join_type, qn(name), alias_str, '%s = %s' % (c1, c2)))\n else:\n connector = not first and ', ' or ''\n result.append('%s%s%s' % (connector, qn(name), alias_str))\n first = False\n for t in self.query.extra_tables:\n alias, unused = self.query.table_alias(t)\n # Only add the alias if it's not already present (the table_alias()\n # calls increments the refcount, so an alias refcount of one means\n # this is the only reference.\n if alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1:\n connector = not first and ', ' or ''\n result.append('%s%s' % (connector, qn(alias)))\n first = False\n return result, []", "def basis_classifier_right_join(self, join_left_tables, join_right_tables):\n join_left_tables = set(join_left_tables)\n join_right_tables = set(join_right_tables)\n join_expr_equals = []\n\n for i, (basis, (flag, new_basis), columns) in enumerate(zip(\n self.base_expressions,\n self.equivalent_basis,\n self.used_columns\n )):\n\n left_columns, right_columns = columns\n if isinstance(basis, expr.ComparisonPredicate):\n assert left_columns or right_columns\n left_table = {\n column.table\n for column in left_columns\n }\n right_table = {\n column.table\n for column in right_columns\n }\n wrong_tables = (left_table | right_table) - (join_left_tables | join_right_tables)\n if wrong_tables:\n Select.logger.error(\n 'The tables (%s) is not included in the 
join',\n ', '.join(sorted(\n '.'.join(*table.full_name())\n for table in wrong_tables\n ))\n )\n continue\n\n if left_table <= join_right_tables and right_table <= join_left_tables:\n # Приведение к виду, где левая часть предиката\n # соответствует левой таблице, а правая часть\n # соответствует правой таблице\n basis.reverse()\n left_table, right_table = right_table, left_table\n left_columns, right_columns = right_columns, left_columns\n\n if len(left_table) == 1 and len(right_table) == 1:\n left_table = left_table.pop()\n right_table = right_table.pop()\n\n if left_table == right_table and {right_table} <= join_left_tables: # Условие на одну таблицу\n if new_basis:\n left_table.filters.append(new_basis)\n self.not_used_expression.append(i)\n for column in left_columns + right_columns:\n column.count_used -= 1\n elif (\n left_table in join_left_tables and\n right_table in join_right_tables and\n isinstance(basis.left, st.Column) and\n isinstance(basis.right, st.Column)\n ):\n # left.column = right.column\n if i in self.all_true and basis.op == ss.equals_operator: # (a.id = b.id) is True\n join_expr_equals.append((basis.left, basis.right))\n elif i in self.all_false and basis.op == ss.not_equals_operator: # (a.id != b.id) is False\n join_expr_equals.append((basis.left, basis.right))\n else:\n continue\n self.not_used_expression.append(i)\n elif len(left_table) == 1 and len(right_table) == 0 and left_table <= join_left_tables:\n table = left_table.pop()\n if new_basis:\n table.filters.append(new_basis)\n self.not_used_expression.append(i)\n for column in left_columns:\n column.count_used -= 1\n continue\n else:\n assert len(left_columns)\n table = {\n column.table\n for column in left_columns\n }\n if len(table) == 1 and table <= join_left_tables:\n table = table.pop()\n if new_basis:\n table.filters.append(new_basis)\n self.not_used_expression.append(i)\n for column in left_columns:\n column.count_used -= 1\n\n self.join_expr_equals = join_expr_equals\n return join_expr_equals", "def join_key(table_i, table_j):\n\n j_keys = [column for column in table_j.columns if column.foreign_key or column.is_primary_key()]\n j_foreign_keys = dict((column.foreign_key, column) for column in table_j.columns if column.foreign_key)\n\n\n for column in table_i.columns:\n foreign_key = column.foreign_key\n if foreign_key:\n for key in j_keys:\n if foreign_key is key or (key.foreign_key and foreign_key is key.foreign_key):\n return column, key\n elif column.is_primary_key():\n for key in j_keys:\n if column is key.foreign_key:\n return column, key\n return None", "def join_types():\n return [\"\", \"INNER\", \"LEFT OUTER\", \"RIGHT OUTER\", \"FULL OUTER\", \"CROSS\"]", "def generate_join(\n self,\n left: sql.Selectable,\n right: sql.Selectable,\n condition: typing.Optional[sql.ColumnElement],\n kind: dsl.Join.Kind,\n ) -> sql.Selectable:\n opts = {\n 'onclause': condition if condition is not None else sql.literal(True)\n } # onclause=literal(True) -> CROSS JOIN\n if kind in {dsl.Join.Kind.FULL, dsl.Join.Kind.CROSS}:\n opts['full'] = True\n elif kind is not dsl.Join.Kind.INNER:\n opts['isouter'] = True\n if kind is dsl.Join.Kind.RIGHT:\n left, right = right, left\n return left.join(right, **opts)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Classifies the conditions for a RIGHT_JOIN
def basis_classifier_right_join(self, join_left_tables, join_right_tables):
    join_left_tables = set(join_left_tables)
    join_right_tables = set(join_right_tables)
    join_expr_equals = []

    for i, (basis, (flag, new_basis), columns) in enumerate(zip(
        self.base_expressions,
        self.equivalent_basis,
        self.used_columns
    )):

        left_columns, right_columns = columns
        if isinstance(basis, expr.ComparisonPredicate):
            assert left_columns or right_columns
            left_table = {
                column.table
                for column in left_columns
            }
            right_table = {
                column.table
                for column in right_columns
            }
            wrong_tables = (left_table | right_table) - (join_left_tables | join_right_tables)
            if wrong_tables:
                Select.logger.error(
                    'The tables (%s) are not included in the join',
                    ', '.join(sorted(
                        '.'.join(*table.full_name())
                        for table in wrong_tables
                    ))
                )
                continue

            if left_table <= join_right_tables and right_table <= join_left_tables:
                # Normalize the predicate so that its left side
                # refers to the left table and its right side
                # refers to the right table
                basis.reverse()
                left_table, right_table = right_table, left_table
                left_columns, right_columns = right_columns, left_columns

            if len(left_table) == 1 and len(right_table) == 1:
                left_table = left_table.pop()
                right_table = right_table.pop()

                if left_table == right_table and {right_table} <= join_left_tables:  # Condition on a single table
                    if new_basis:
                        left_table.filters.append(new_basis)
                    self.not_used_expression.append(i)
                    for column in left_columns + right_columns:
                        column.count_used -= 1
                elif (
                    left_table in join_left_tables and
                    right_table in join_right_tables and
                    isinstance(basis.left, st.Column) and
                    isinstance(basis.right, st.Column)
                ):
                    # left.column = right.column
                    if i in self.all_true and basis.op == ss.equals_operator:  # (a.id = b.id) is True
                        join_expr_equals.append((basis.left, basis.right))
                    elif i in self.all_false and basis.op == ss.not_equals_operator:  # (a.id != b.id) is False
                        join_expr_equals.append((basis.left, basis.right))
                    else:
                        continue
                    self.not_used_expression.append(i)
            elif len(left_table) == 1 and len(right_table) == 0 and left_table <= join_left_tables:
                table = left_table.pop()
                if new_basis:
                    table.filters.append(new_basis)
                self.not_used_expression.append(i)
                for column in left_columns:
                    column.count_used -= 1
            continue
        else:
            assert len(left_columns)
            table = {
                column.table
                for column in left_columns
            }
            if len(table) == 1 and table <= join_left_tables:
                table = table.pop()
                if new_basis:
                    table.filters.append(new_basis)
                self.not_used_expression.append(i)
                for column in left_columns:
                    column.count_used -= 1

    self.join_expr_equals = join_expr_equals
    return join_expr_equals
[ "def right_join(self, table, *conditions):\n self.join_by_type(\"RIGHT\", table, *conditions)", "def RightJoin(self, table, *args):\n return self.Join(table, \"RIGHT OUTER\", *args)", "def right_join_where(self, table, one, operator, two):\n return self.join_where(table, one, operator, two, 'right')", "def right_join(\n cls,\n other: type[Selectable],\n on: Union[OnClause, Iterable[BaseColumn]],\n ) -> type[Join]:\n return cls._join(other, \"LEFT\", on)", "def right_join(self, table, one=None, operator=None, two=None):\n if isinstance(table, JoinClause):\n table.type = 'right'\n\n return self.join(table, one, operator, two, 'right')", "def basis_classifier_inner_join(self, join_left_tables, join_right_tables):\n join_left_tables = set(join_left_tables)\n join_right_tables = set(join_right_tables)\n join_expr_equals = []\n\n for i, (basis, (flag, new_basis), columns) in enumerate(zip(\n self.base_expressions,\n self.equivalent_basis,\n self.used_columns\n )):\n left_columns, right_columns = columns\n if isinstance(basis, expr.ComparisonPredicate):\n assert left_columns or right_columns\n left_table = {\n column.table\n for column in left_columns\n }\n right_table = {\n column.table\n for column in right_columns\n }\n wrong_tables = (left_table | right_table) - (join_left_tables | join_right_tables)\n if wrong_tables:\n Select.logger.error(\n 'The tables (%s) is not included in the join',\n ', '.join(sorted(\n '.'.join(*table.full_name())\n for table in wrong_tables\n ))\n )\n continue\n\n if left_table <= join_right_tables and right_table <= join_left_tables:\n # Приведение к виду, где левая часть предиката\n # соответствует левой таблице, а правая часть\n # соответствует правой таблице\n basis.reverse()\n left_table, right_table = right_table, left_table\n left_columns, right_columns = right_columns, left_columns\n\n if len(left_table) == 1 and len(right_table) == 1:\n left_table = left_table.pop()\n right_table = right_table.pop()\n\n if left_table == right_table: # Условие на одну таблицу\n if new_basis:\n left_table.filters.append(new_basis)\n self.not_used_expression.append(i)\n for column in left_columns + right_columns:\n column.count_used -= 1\n elif (\n left_table in join_left_tables and\n right_table in join_right_tables and\n isinstance(basis.left, st.Column) and\n isinstance(basis.right, st.Column)\n ):\n # left.column = right.column\n if i in self.all_true and basis.op == ss.equals_operator: # (a.id = b.id) is True\n join_expr_equals.append((basis.left, basis.right))\n elif i in self.all_false and basis.op == ss.not_equals_operator: # (a.id != b.id) is False\n join_expr_equals.append((basis.left, basis.right))\n else:\n continue\n self.not_used_expression.append(i)\n elif len(left_table) == 1 and len(right_table) == 0:\n # tbl.column <= 10\n table = left_table.pop()\n if new_basis:\n table.filters.append(new_basis)\n self.not_used_expression.append(i)\n for column in left_columns:\n column.count_used -= 1\n elif len(left_table) == 0 and len(right_table) == 1:\n # tbl.column <= 10\n table = right_table.pop()\n if new_basis:\n table.filters.append(new_basis)\n self.not_used_expression.append(i)\n for column in right_columns:\n column.count_used -= 1\n continue\n else:\n assert len(left_columns)\n table = {\n column.table\n for column in left_columns\n }\n if len(table) == 1:\n table = table.pop()\n if new_basis:\n table.filters.append(new_basis)\n self.not_used_expression.append(i)\n for column in left_columns:\n column.count_used -= 1\n\n self.join_expr_equals = 
join_expr_equals\n return join_expr_equals", "def _rewrite_join(self, node: saldag.Join):\n\n left_in_rel = node.get_left_in_rel()\n right_in_rel = node.get_right_in_rel()\n\n left_join_cols = node.left_join_cols\n right_join_cols = node.right_join_cols\n\n num_join_cols = len(left_join_cols)\n\n out_join_cols = node.out_rel.columns[:num_join_cols]\n key_cols_coll_sets = []\n for i in range(len(left_join_cols)):\n key_cols_coll_sets.append(utils.merge_coll_sets(\n left_join_cols[i].coll_sets, right_join_cols[i].coll_sets))\n out_join_cols[i].coll_sets = key_cols_coll_sets[i]\n\n abs_idx = len(left_join_cols)\n for in_col in left_in_rel.columns:\n if in_col not in set(left_join_cols):\n for key_col_coll_sets in key_cols_coll_sets:\n node.out_rel.columns[abs_idx].coll_sets = \\\n utils.merge_coll_sets(key_col_coll_sets, in_col.coll_sets)\n abs_idx += 1\n\n for in_col in right_in_rel.columns:\n if in_col not in set(right_join_cols):\n for key_col_coll_sets in key_cols_coll_sets:\n node.out_rel.columns[abs_idx].coll_sets = \\\n utils.merge_coll_sets(key_col_coll_sets, in_col.coll_sets)\n abs_idx += 1", "def between_join(left, right, left_on=\"DEPTH\", right_on=(\"DEPTH_FROM\", \"DEPTH_TO\"), suffixes=(\"_left\", \"_right\")):\n # TODO: avoid cross join\n right_from, right_to = right_on\n cross = cross_join(left, right, suffixes=suffixes)\n mask = (cross[left_on] >= cross[right_from]) & (cross[left_on] < cross[right_to])\n return cross[mask]", "def test_if_two_tables(table_one, table_two):\n assert left_join(table_one, table_two) == [['fond', 'enamored', 'averse'], ['guide', 'usher', 'follow'], ['diligent', 'employed', 'idle'], ['wrath', 'anger', 'deligth']]", "def join_types():\n return [\"\", \"INNER\", \"LEFT OUTER\", \"RIGHT OUTER\", \"FULL OUTER\", \"CROSS\"]", "def basis_classifier_full_join(self, join_left_tables, join_right_tables):\n return []", "def generate_join(\n self,\n left: sql.Selectable,\n right: sql.Selectable,\n condition: typing.Optional[sql.ColumnElement],\n kind: dsl.Join.Kind,\n ) -> sql.Selectable:\n opts = {\n 'onclause': condition if condition is not None else sql.literal(True)\n } # onclause=literal(True) -> CROSS JOIN\n if kind in {dsl.Join.Kind.FULL, dsl.Join.Kind.CROSS}:\n opts['full'] = True\n elif kind is not dsl.Join.Kind.INNER:\n opts['isouter'] = True\n if kind is dsl.Join.Kind.RIGHT:\n left, right = right, left\n return left.join(right, **opts)", "def _OR(self, left, right):\n return '(%s OR %s)' % (self.render(left), self.render(right, left))", "def generate_join(\n self,\n left: 'parser.Source',\n right: 'parser.Source',\n condition: typing.Optional['parser.Feature'],\n kind: 'dsl.Join.Kind',\n ) -> 'parser.Source':", "def addJoin(joinDef):", "def test_crossJoin(self):\n self.assertEquals(\n Select(From=self.schema.FOO.join(self.schema.BOZ)).toSQL(),\n SQLFragment(\"select * from FOO cross join BOZ\")\n )", "def AddJoinClauses(self, join_pairs, left=False):\n for join, args in join_pairs:\n assert _IsValidJoin(join), join\n assert join.count('%s') == len(args), join\n self.join_clauses.append(\n ' %sJOIN %s' % (('LEFT ' if left else ''), join))\n self.join_args.extend(args)", "def test_merge_to_nary_join(self):\n def in_cond_idx(position, conditions):\n for i, cond in enumerate(conditions):\n if position in cond:\n return i\n raise Exception(\"Cannot find attribute in join conditions\")\n\n # 1. 
triangular join\n triangle_join = testNaryJoin.get_phys_plan_root(\n \"A(x,y,z):-R(x,y),S(y,z),T(z,x)\", 64)\n # test root operator type\n self.assertIsInstance(triangle_join, algebra.NaryJoin)\n # test arity of join conditions\n self.assertEqual(len(triangle_join.conditions), 3)\n # test join conditions\n conds = []\n for cond in triangle_join.conditions:\n conds.append([attribute.position for attribute in cond])\n self.assertEqual(in_cond_idx(0, conds), in_cond_idx(5, conds))\n self.assertEqual(in_cond_idx(1, conds), in_cond_idx(2, conds))\n self.assertEqual(in_cond_idx(3, conds), in_cond_idx(4, conds))\n\n # 2. star join\n star_join = testNaryJoin.get_phys_plan_root(\n \"A(x,y,z,p):-R(x,y),S(x,z),T(x,p)\", 64)\n # test root operator type\n self.assertIsInstance(star_join, algebra.NaryJoin)\n # test arity of join conditions\n self.assertEqual(len(star_join.conditions), 1)\n # test join conditions\n conds = []\n for cond in star_join.conditions:\n conds.append([attribute.position for attribute in cond])\n self.assertEqual(in_cond_idx(0, conds), in_cond_idx(2, conds))\n self.assertEqual(in_cond_idx(2, conds), in_cond_idx(4, conds))", "def _rewrite_column_for_right(column, node):\n\n right_join_cols = [n.name for n in node.right_join_cols]\n right_non_join_cols = [c.name for c in node.right_parent.out_rel.columns if c.name not in right_join_cols]\n\n if column.name in right_join_cols:\n for i in range(len(right_join_cols)):\n if right_join_cols[i].name == column.name:\n # join col names from left rel overwrite\n # right col names in output relation.\n column.name = node.out_rel.columns[i].name\n column.idx = i\n return column\n\n elif column.name in right_non_join_cols:\n for i in range(len(right_join_cols), len(node.out_rel.columns)):\n if node.out_rel.columns[i].name == column.name:\n column.idx = i\n return column\n\n else:\n raise Exception(\"Column from right wasn't present in Join output relation\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Classifies the conditions for FULL_JOIN
def basis_classifier_full_join(self, join_left_tables, join_right_tables): return []
[ "def full_join(self, table, *conditions):\n self.join_by_type(\"FULL\", table, *conditions)", "def full_outer_join(\n cls,\n other: type[Selectable],\n on: Union[OnClause, Iterable[BaseColumn]],\n ) -> type[Join]:\n return cls._join(other, \"FULL OUTER\", on)", "def basis_classifier_inner_join(self, join_left_tables, join_right_tables):\n join_left_tables = set(join_left_tables)\n join_right_tables = set(join_right_tables)\n join_expr_equals = []\n\n for i, (basis, (flag, new_basis), columns) in enumerate(zip(\n self.base_expressions,\n self.equivalent_basis,\n self.used_columns\n )):\n left_columns, right_columns = columns\n if isinstance(basis, expr.ComparisonPredicate):\n assert left_columns or right_columns\n left_table = {\n column.table\n for column in left_columns\n }\n right_table = {\n column.table\n for column in right_columns\n }\n wrong_tables = (left_table | right_table) - (join_left_tables | join_right_tables)\n if wrong_tables:\n Select.logger.error(\n 'The tables (%s) is not included in the join',\n ', '.join(sorted(\n '.'.join(*table.full_name())\n for table in wrong_tables\n ))\n )\n continue\n\n if left_table <= join_right_tables and right_table <= join_left_tables:\n # Приведение к виду, где левая часть предиката\n # соответствует левой таблице, а правая часть\n # соответствует правой таблице\n basis.reverse()\n left_table, right_table = right_table, left_table\n left_columns, right_columns = right_columns, left_columns\n\n if len(left_table) == 1 and len(right_table) == 1:\n left_table = left_table.pop()\n right_table = right_table.pop()\n\n if left_table == right_table: # Условие на одну таблицу\n if new_basis:\n left_table.filters.append(new_basis)\n self.not_used_expression.append(i)\n for column in left_columns + right_columns:\n column.count_used -= 1\n elif (\n left_table in join_left_tables and\n right_table in join_right_tables and\n isinstance(basis.left, st.Column) and\n isinstance(basis.right, st.Column)\n ):\n # left.column = right.column\n if i in self.all_true and basis.op == ss.equals_operator: # (a.id = b.id) is True\n join_expr_equals.append((basis.left, basis.right))\n elif i in self.all_false and basis.op == ss.not_equals_operator: # (a.id != b.id) is False\n join_expr_equals.append((basis.left, basis.right))\n else:\n continue\n self.not_used_expression.append(i)\n elif len(left_table) == 1 and len(right_table) == 0:\n # tbl.column <= 10\n table = left_table.pop()\n if new_basis:\n table.filters.append(new_basis)\n self.not_used_expression.append(i)\n for column in left_columns:\n column.count_used -= 1\n elif len(left_table) == 0 and len(right_table) == 1:\n # tbl.column <= 10\n table = right_table.pop()\n if new_basis:\n table.filters.append(new_basis)\n self.not_used_expression.append(i)\n for column in right_columns:\n column.count_used -= 1\n continue\n else:\n assert len(left_columns)\n table = {\n column.table\n for column in left_columns\n }\n if len(table) == 1:\n table = table.pop()\n if new_basis:\n table.filters.append(new_basis)\n self.not_used_expression.append(i)\n for column in left_columns:\n column.count_used -= 1\n\n self.join_expr_equals = join_expr_equals\n return join_expr_equals", "def basis_classifier_right_join(self, join_left_tables, join_right_tables):\n join_left_tables = set(join_left_tables)\n join_right_tables = set(join_right_tables)\n join_expr_equals = []\n\n for i, (basis, (flag, new_basis), columns) in enumerate(zip(\n self.base_expressions,\n self.equivalent_basis,\n self.used_columns\n )):\n\n left_columns, 
right_columns = columns\n if isinstance(basis, expr.ComparisonPredicate):\n assert left_columns or right_columns\n left_table = {\n column.table\n for column in left_columns\n }\n right_table = {\n column.table\n for column in right_columns\n }\n wrong_tables = (left_table | right_table) - (join_left_tables | join_right_tables)\n if wrong_tables:\n Select.logger.error(\n 'The tables (%s) is not included in the join',\n ', '.join(sorted(\n '.'.join(*table.full_name())\n for table in wrong_tables\n ))\n )\n continue\n\n if left_table <= join_right_tables and right_table <= join_left_tables:\n # Приведение к виду, где левая часть предиката\n # соответствует левой таблице, а правая часть\n # соответствует правой таблице\n basis.reverse()\n left_table, right_table = right_table, left_table\n left_columns, right_columns = right_columns, left_columns\n\n if len(left_table) == 1 and len(right_table) == 1:\n left_table = left_table.pop()\n right_table = right_table.pop()\n\n if left_table == right_table and {right_table} <= join_left_tables: # Условие на одну таблицу\n if new_basis:\n left_table.filters.append(new_basis)\n self.not_used_expression.append(i)\n for column in left_columns + right_columns:\n column.count_used -= 1\n elif (\n left_table in join_left_tables and\n right_table in join_right_tables and\n isinstance(basis.left, st.Column) and\n isinstance(basis.right, st.Column)\n ):\n # left.column = right.column\n if i in self.all_true and basis.op == ss.equals_operator: # (a.id = b.id) is True\n join_expr_equals.append((basis.left, basis.right))\n elif i in self.all_false and basis.op == ss.not_equals_operator: # (a.id != b.id) is False\n join_expr_equals.append((basis.left, basis.right))\n else:\n continue\n self.not_used_expression.append(i)\n elif len(left_table) == 1 and len(right_table) == 0 and left_table <= join_left_tables:\n table = left_table.pop()\n if new_basis:\n table.filters.append(new_basis)\n self.not_used_expression.append(i)\n for column in left_columns:\n column.count_used -= 1\n continue\n else:\n assert len(left_columns)\n table = {\n column.table\n for column in left_columns\n }\n if len(table) == 1 and table <= join_left_tables:\n table = table.pop()\n if new_basis:\n table.filters.append(new_basis)\n self.not_used_expression.append(i)\n for column in left_columns:\n column.count_used -= 1\n\n self.join_expr_equals = join_expr_equals\n return join_expr_equals", "def OuterJoin(self, table, *args):\n return self.Join(table, \"FULL OUTER\", *args)", "def test_if_two_tables(table_one, table_two):\n assert left_join(table_one, table_two) == [['fond', 'enamored', 'averse'], ['guide', 'usher', 'follow'], ['diligent', 'employed', 'idle'], ['wrath', 'anger', 'deligth']]", "def perform_full_outer_join(self, other_table, key_column_names):\n header = copy.deepcopy(self.column_names)\n for column in other_table.column_names:\n if column not in header:\n header.append(column)\n \n joined_table = []\n in_both = True\n for row in range(len(self.data)):\n added_row = [\"NA\" for x in range(len(header))]\n for column in header:\n column_index = header.index(column)\n if column in self.column_names:\n \n added_row[column_index] = self.data[row][self.column_names.index(column)]\n joined_table.append(added_row)\n is_updated = False\n for row_2 in range(len(other_table.data)):\n is_updated = False\n for row_3 in range(len(joined_table)):\n in_both = True\n for key in key_column_names:\n index_1 = header.index(key)\n index_2 = other_table.column_names.index(key)\n if 
joined_table[row_3][index_1] != other_table.data[row_2][index_2]:\n in_both = False\n if in_both:\n is_updated = True \n for column in other_table.column_names:\n adding_index = other_table.column_names.index(column)\n joined_table[row_3][header.index(column)] = other_table.data[row_2][adding_index]\n if is_updated == False:\n added_row_2 = [\"NA\" for x in range(len(header))]\n for column_2 in header:\n column_index = header.index(column_2)\n if column_2 in other_table.column_names:\n added_row_2[column_index] = other_table.data[row_2][other_table.column_names.index(column_2)]\n joined_table.append(added_row_2)\n outer_join_table = MyPyTable(header, joined_table)\n return outer_join_table # TODO: fix this", "def perform_full_outer_join(self, other_table, key_column_names):\n other_table.reorder_key_cols(key_column_names)\n key_col_table_self = [[row[i] for i in range(len(self.column_names)) if self.column_names[i] in key_column_names] for row in self.data]\n key_col_table_other = [[row[i] for i in range(len(other_table.column_names)) if other_table.column_names[i] in key_column_names] for row in other_table.data]\n \n other_cols = [col for col in other_table.column_names if col not in key_column_names]\n overall_cols = self.column_names+other_cols\n\n table = self.data\n for i in range(len(self.data)):\n for j in range(len(other_table.data)):\n if key_col_table_self[i] == key_col_table_other[j]:\n table[i] = self.data[i]+[other_table.data[j][k] for k in range(len(other_table.column_names)) if other_table.column_names[k] not in key_column_names]\n \n for j in range(len(self.data)):\n if key_col_table_self[j] not in key_col_table_other:\n allowed = [item in self.column_names for item in overall_cols]\n replacement = []\n k = 0\n for i in range(len(allowed)):\n if allowed[i]:\n replacement += [self.data[j][k]]\n k += 1\n else:\n replacement += [\"NA\"]\n table[j] = replacement\n \n for j in range(len(other_table.data)):\n if key_col_table_other[j] not in key_col_table_self:\n allowedItems = [item in other_table.column_names for item in overall_cols]\n appendableData = []\n k = 0\n for i in range(len(allowedItems)):\n if allowedItems[i]:\n appendableData.append(other_table.data[j][k])\n k += 1\n else:\n appendableData.append(\"NA\")\n\n table.append(appendableData)\n table = [row for row in table if row != []]\n table = [[float(item) if self.isNumeric(item) else item for item in row] for row in table]\n return MyPyTable(overall_cols, table)", "def test_crossJoin(self):\n self.assertEquals(\n Select(From=self.schema.FOO.join(self.schema.BOZ)).toSQL(),\n SQLFragment(\"select * from FOO cross join BOZ\")\n )", "def outerjoin(left, right, onclause=None, full=False):\n return Join(left, right, onclause, isouter=True, full=full)", "def test_merge_to_nary_join(self):\n def in_cond_idx(position, conditions):\n for i, cond in enumerate(conditions):\n if position in cond:\n return i\n raise Exception(\"Cannot find attribute in join conditions\")\n\n # 1. 
triangular join\n triangle_join = testNaryJoin.get_phys_plan_root(\n \"A(x,y,z):-R(x,y),S(y,z),T(z,x)\", 64)\n # test root operator type\n self.assertIsInstance(triangle_join, algebra.NaryJoin)\n # test arity of join conditions\n self.assertEqual(len(triangle_join.conditions), 3)\n # test join conditions\n conds = []\n for cond in triangle_join.conditions:\n conds.append([attribute.position for attribute in cond])\n self.assertEqual(in_cond_idx(0, conds), in_cond_idx(5, conds))\n self.assertEqual(in_cond_idx(1, conds), in_cond_idx(2, conds))\n self.assertEqual(in_cond_idx(3, conds), in_cond_idx(4, conds))\n\n # 2. star join\n star_join = testNaryJoin.get_phys_plan_root(\n \"A(x,y,z,p):-R(x,y),S(x,z),T(x,p)\", 64)\n # test root operator type\n self.assertIsInstance(star_join, algebra.NaryJoin)\n # test arity of join conditions\n self.assertEqual(len(star_join.conditions), 1)\n # test join conditions\n conds = []\n for cond in star_join.conditions:\n conds.append([attribute.position for attribute in cond])\n self.assertEqual(in_cond_idx(0, conds), in_cond_idx(2, conds))\n self.assertEqual(in_cond_idx(2, conds), in_cond_idx(4, conds))", "def Join(self, table, condition, mode = ''):\r\n\t\tself.SetCurrMarker('join')\r\n\t\t# exec cur part\r\n\t\treturn self.AddJoin(table, condition, mode)", "def outer_su(idadf1, key1, idadf2, key2, target = None, features1 = None, features2 = None):\n target1, features1 = _check_input(idadf1, target, features1)\n target2, features2 = _check_input(idadf2, None, features2)\n \n if key1 not in idadf1.columns:\n raise ValueError(\"%s is not a column in idadf1\")\n if key2 not in idadf2.columns:\n raise ValueError(\"%s is not a column in idadf2\")\n \n condition = \"a.\\\"%s\\\" = b.\\\"%s\\\"\"%(key1,key2)\n \n if key2 in features2:\n features2.remove(key2)\n \n afeaturesas = \", \".join([\"a.\\\"%s\\\" as \\\"a.%s\\\" \"%(feature, feature) for feature in features1])\n bfeaturesas = \", \".join([\"b.\\\"%s\\\" as \\\"b.%s\\\" \"%(feature, feature) for feature in features2])\n \n selectlist = [afeaturesas, bfeaturesas]\n \n if target1 is not None:\n atargetas = \", \".join([\"a.\\\"%s\\\" as \\\"a.%s\\\" \"%(tar, tar) for tar in [target1]])\n selectlist.append(atargetas)\n atarget = \"a.\" + target1\n else:\n atarget = None\n \n abfeatures = [\"a.\" + feature for feature in features1] + [\"b.\" + feature for feature in features2]\n selectstr = \", \".join(selectlist)\n \n expression = \"SELECT %s FROM %s as a FULL OUTER JOIN %s as b ON %s\"%(selectstr, idadf1.name, idadf2.name, condition)\n \n viewname = idadf1._idadb._create_view_from_expression(expression)\n \n try:\n idadf_join = ibmdbpy.IdaDataFrame(idadf1._idadb, viewname)\n return su(idadf_join, target = atarget, features = abfeatures)\n except:\n raise\n finally:\n idadf1._idadb.drop_view(viewname)", "def addJoin(joinDef):", "def test_joinColumnSelection(self):\n self.assertEquals(\n Select(\n [self.schema.FOO.BAZ, self.schema.BOZ.QUX],\n From=self.schema.FOO.join(\n self.schema.BOZ,\n self.schema.FOO.BAR == self.schema.BOZ.QUX\n )\n ).toSQL(),\n SQLFragment(\"select BAZ, QUX from FOO join BOZ on BAR = QUX\")\n )", "def create_join_sql(center, table):\n if center != \"all\":\n return f\"JOIN centers ON {table}.member_id = centers.member_id\"\n return \"\"", "def _rewrite_join(self, node: saldag.Join):\n\n left_in_rel = node.get_left_in_rel()\n right_in_rel = node.get_right_in_rel()\n\n left_join_cols = node.left_join_cols\n right_join_cols = node.right_join_cols\n\n num_join_cols = len(left_join_cols)\n\n 
out_join_cols = node.out_rel.columns[:num_join_cols]\n key_cols_coll_sets = []\n for i in range(len(left_join_cols)):\n key_cols_coll_sets.append(utils.merge_coll_sets(\n left_join_cols[i].coll_sets, right_join_cols[i].coll_sets))\n out_join_cols[i].coll_sets = key_cols_coll_sets[i]\n\n abs_idx = len(left_join_cols)\n for in_col in left_in_rel.columns:\n if in_col not in set(left_join_cols):\n for key_col_coll_sets in key_cols_coll_sets:\n node.out_rel.columns[abs_idx].coll_sets = \\\n utils.merge_coll_sets(key_col_coll_sets, in_col.coll_sets)\n abs_idx += 1\n\n for in_col in right_in_rel.columns:\n if in_col not in set(right_join_cols):\n for key_col_coll_sets in key_cols_coll_sets:\n node.out_rel.columns[abs_idx].coll_sets = \\\n utils.merge_coll_sets(key_col_coll_sets, in_col.coll_sets)\n abs_idx += 1", "def _join():\n df = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'],\n 'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']})\n other = pd.DataFrame({'key': ['K0', 'K1', 'K2'],\n 'B': ['B0', 'B1', 'B2']})\n print(df.join(other, lsuffix='_caller', rsuffix='_other')) # 为重复 column 添加前缀\n print(df.set_index('key').join(other.set_index('key')))\n print(df.join(other.set_index('key'), on='key', how='right')) # left,right表示以哪边的index为准\n print(df.join(other.set_index('key'), on='key', how='inner')) # inner,outer 表示交集、并集", "def SetJoin(self, table, condition, mode = ''):\r\n\t\tself.data[self.mode]['join'] = []\r\n\t\treturn self.AddJoin(table, condition, mode)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Register the device with the provisioning service. This is a synchronous call, meaning that this function will not return until the registration process has completed successfully or the attempt has resulted in a failure. Before returning, the client will also disconnect from the Hub. If a registration attempt is made while a previous registration is in progress, it may throw an error.
def register(self):
    logger.info("Registering with Hub...")
    register_complete = Event()

    def on_register_complete(result=None, error=None):
        # This could be a failed/successful registration result from the Hub
        # or an error from the polling machine. Respond appropriately.
        if result is not None:
            if result.status == "assigned":
                logger.info("Successfully registered with Hub")
            else:  # There may be other statuses
                logger.error("Failed registering with Hub")
        if error is not None:  # This can only happen when the polling machine runs into an error
            logger.info(error)

        register_complete.set()

    self._polling_machine.register(callback=on_register_complete)

    register_complete.wait()
[ "def register_device():\n payload = request.get_json()\n return _register_device(payload)", "def RegisterDeviceAndSendResponse(self, msg, username):\n device_id = self.GetUniqueParam('deviceid')\n if not device_id:\n return (400, 'Missing device identifier')\n\n token_info = self.server.RegisterDevice(\n device_id, msg.machine_id, msg.type, username)\n\n # Send back the reply.\n response = dm.DeviceManagementResponse()\n response.register_response.device_management_token = (\n token_info['device_token'])\n response.register_response.machine_name = token_info['machine_name']\n response.register_response.enrollment_type = token_info['enrollment_mode']\n\n return (200, response)", "def Register(cls, client, user_id, device_dict, is_first=True):\r\n assert 'device_id' in device_dict, device_dict\r\n\r\n device = yield gen.Task(Device.Query,\r\n client,\r\n user_id,\r\n device_dict['device_id'],\r\n None,\r\n must_exist=False)\r\n if device is None:\r\n device = Device.Create(user_id=user_id, timestamp=util.GetCurrentTimestamp(), **device_dict)\r\n else:\r\n device.UpdateFields(**device_dict)\r\n\r\n yield gen.Task(device.Update, client)\r\n\r\n # If this is the first mobile device to be registered, then turn turn off email alerting\r\n # and turn on full push alerting to mobile devices.\r\n if is_first:\r\n settings = AccountSettings.CreateForUser(user_id,\r\n email_alerts=AccountSettings.EMAIL_NONE,\r\n sms_alerts=AccountSettings.SMS_NONE,\r\n push_alerts=AccountSettings.PUSH_ALL)\r\n yield gen.Task(settings.Update, client)\r\n\r\n raise gen.Return(device)", "def register(self):\n if self.hub.is_connected:\n if self._private_key is not None:\n raise SAMPClientError(\"Client already registered\")\n\n result = self.hub.register(self.hub.lockfile[\"samp.secret\"])\n\n if result[\"samp.self-id\"] == \"\":\n raise SAMPClientError(\n \"Registration failed - samp.self-id was not set by the hub.\"\n )\n\n if result[\"samp.private-key\"] == \"\":\n raise SAMPClientError(\n \"Registration failed - samp.private-key was not set by the hub.\"\n )\n\n self._public_id = result[\"samp.self-id\"]\n self._private_key = result[\"samp.private-key\"]\n self._hub_id = result[\"samp.hub-id\"]\n\n if self._callable:\n self._set_xmlrpc_callback()\n self._declare_subscriptions()\n\n if self._metadata != {}:\n self.declare_metadata()\n\n self._is_registered = True\n\n else:\n raise SAMPClientError(\n \"Unable to register to the SAMP Hub. 
Hub proxy not connected.\"\n )", "def registerDevice(self):\n\t\tr = req.post(\"http://localhost:9090/devices?id={}&sensors={}_{}&board={}\".format(\n\t\t\tBOARD_ID,\n\t\t\tSENSOR1,\n\t\t\tSENSOR2,\n\t\t\tBOARD\n\t\t))\n\t\tprint (\"[{}] Device Registered on Room Catalog\".format(\n\t\t\tint(time.time()),\n\t\t))", "async def _perform_register(self):\n data = {\"username\": self.user, \"password\": self.password}\n return await self._perform_request(\"register\", data, lambda r: r.text())", "def RegisterDevice(self, device_id, machine_id, type, username):\n dmtoken_chars = []\n while len(dmtoken_chars) < 32:\n dmtoken_chars.append(random.choice('0123456789abcdef'))\n dmtoken = ''.join(dmtoken_chars)\n allowed_policy_types = {\n dm.DeviceRegisterRequest.BROWSER: [\n 'google/chrome/user',\n 'google/chrome/extension'\n ],\n dm.DeviceRegisterRequest.USER: [\n 'google/chromeos/user',\n 'google/chrome/extension'\n ],\n dm.DeviceRegisterRequest.DEVICE: [\n 'google/chromeos/device',\n 'google/chromeos/publicaccount',\n 'google/chrome/extension',\n 'google/chromeos/signinextension'\n ],\n dm.DeviceRegisterRequest.ANDROID_BROWSER: [\n 'google/android/user'\n ],\n dm.DeviceRegisterRequest.TT: ['google/chromeos/user',\n 'google/chrome/user'],\n }\n if machine_id in KIOSK_MACHINE_IDS:\n enrollment_mode = dm.DeviceRegisterResponse.RETAIL\n else:\n enrollment_mode = dm.DeviceRegisterResponse.ENTERPRISE\n self._registered_tokens[dmtoken] = {\n 'device_id': device_id,\n 'device_token': dmtoken,\n 'allowed_policy_types': allowed_policy_types[type],\n 'machine_name': 'chromeos-' + machine_id,\n 'machine_id': machine_id,\n 'enrollment_mode': enrollment_mode,\n 'username': username,\n }\n self.WriteClientState()\n return self._registered_tokens[dmtoken]", "def handle_registration(self, message):\n\n if 'device_id' not in message:\n send_error('Missing device id in registration request')\n return\n\n device_id = message['device_id']\n if message['device_id'] in devices:\n send_error(f'Device with id {device_id} already exists.')\n elif 'device_info' not in message:\n send_error('Missing device info in registration request.')\n else:\n devices[device_id] = self\n self.device_id = device_id\n self.device_info = message['device_info']\n send_server_config(self.ws)\n print(f'Registered device with id {device_id}')", "def register_device(project_id, credentials, device_model_id, device_id):\n base_url = '/'.join([DEVICE_API_URL, 'projects', project_id, 'devices'])\n device_url = '/'.join([base_url, device_id])\n session = google.auth.transport.requests.AuthorizedSession(credentials)\n r = session.get(device_url)\n print(device_url, r.status_code)\n if r.status_code == 404:\n print('Registering....', end='', flush=True)\n r = session.post(base_url, data=json.dumps({\n 'id': device_id,\n 'model_id': device_model_id,\n 'client_type': 'SDK_LIBRARY'\n }))\n if r.status_code != 200:\n raise Exception('failed to register device: ' + r.text)\n print('\\rDevice registered.')", "async def _async_register(self) -> None: # pragma: no cover\n metadata = aioxmpp.make_security_layer(None, no_verify=not self.verify_security)\n query = ibr.Query(self.jid.localpart, self.password)\n _, stream, features = await aioxmpp.node.connect_xmlstream(self.jid, metadata)\n await ibr.register(stream, query)", "def register_joining_device_async(self, registrant_address, options, key):\n if registrant_address is None:\n raise ValueError(\"Registrant address cannot be ``None``.\")\n if options is None:\n raise ValueError(\"Options cannot be 
``None``.\")\n\n packet_to_send = RegisterJoiningDevicePacket(self.get_next_frame_id(),\n registrant_address,\n options,\n key)\n self.send_packet(packet_to_send, sync=True)", "def registerDevice(device, device_config):\n raise NotImplementedError(\"All inherited classes of DeviceRegisterer must implement registerDevice.\")", "def async_register(\n hass: HomeAssistant, register: system_health.SystemHealthRegistration\n) -> None:\n register.async_register_info(system_health_info)", "async def _async_register(self): # pragma: no cover\n metadata = aioxmpp.make_security_layer(None, no_verify=not self.verify_security)\n query = ibr.Query(self.jid.localpart, self.password)\n _, stream, features = await aioxmpp.node.connect_xmlstream(\n self.jid, metadata, loop=self.loop\n )\n await ibr.register(stream, query)", "def _write_register(self, value):\n try:\n self._hub.write_register(self._slave, self._register, value)\n except ConnectionException:\n self._available = False\n return\n\n self._available = True", "def register_device(self,product_key,device_name=None):\n request_aliyun = RegisterDeviceRequest()\n request_aliyun.set_accept_format('json')\n request_aliyun.set_ProductKey(product_key)\n if device_name is not None:\n request_aliyun.set_DeviceName(device_name)\n response = self.client.do_action_with_exception(request_aliyun)\n response = response.decode('utf8')\n response = json.loads(response)\n if response.get('Success'):\n return response['Data']\n return None", "def register_to_core(self):\n self.channel.basic_publish(exchange='', routing_key='peripheral_register', body=json.dumps({self.name: api}))", "def _process_registration(self):\n # We are registering a new device.\n name = self.data[1]\n\n # First find the start and end index of the input services.\n input_services = []\n input_services_start = 3\n input_services_end = 3 + int(self.data[2])\n for input_service in self.data[input_services_start:input_services_end]:\n # Each service will be an ID that relates to a specific service.\n input_services.append(int(input_service))\n\n # Then find the start and end index of the output services.\n output_services = []\n output_services_start = 3 + int(self.data[2]) + 1\n output_services_end = output_services_start + int(self.data[input_services_end])\n for output_service in self.data[output_services_start:output_services_end]:\n output_services.append(int(output_service))\n\n try:\n # Check to see if the peripheral already exists.\n peripheral = Peripheral.objects.get(address=self.address, queue=self.queue)\n\n # The peripheral already exists so lets make sure the name is correct.\n if peripheral.name != name:\n peripheral.name = name\n peripheral.save()\n\n except Peripheral.DoesNotExist:\n # The peripheral did not exist so we'll create a new one.\n peripheral = Peripheral(queue=self.queue, address=self.address, name=name)\n peripheral.save()\n\n try:\n # Then we will and and remove any services that are necessary.\n Protocol._add_remove_services(peripheral, PeripheralService.INPUT, input_services)\n Protocol._add_remove_services(peripheral, PeripheralService.OUTPUT, output_services)\n except ProtocolException as exception:\n return {'success': False, 'message': exception.args}\n\n # If we made it here then the peripheral was created or updated successfully.\n return {'success': True, 'message': 'Peripheral updated successfully.'}", "def device_registration_push(self,device = None):\n try:\n primary_key = \"aaabbbcccdddeeefffggghhhiiijjjkkklllmmmnnnoo\"\n secondary_key = 
\"111222333444555666777888999000aaabbbcccdddee\"\n device_id = self.redis_obj_register.get(device)\n info = self.redis_obj_register.get(str(device_id)+\"azure_device_info\")\n if device_id and info:\n return device_id\n else:\n gateway = self.config_obj.gateway()\n device_registration = self.config_obj.device_registration()\n user_authentication = self.config_obj.user_authentication()\n iot_hub = self.config_obj.iothub()\n data= {'device_id': device,\n 'gateway_id' : gateway['gateway_id'],\n 'protocol': iot_hub['protocol'],\n 'access_key' : user_authentication['access_key'], \n }\n headers = {\n 'token': user_authentication['token'],\n 'user_id': user_authentication['user_id'],\n 'user_key': user_authentication['user_key']\n }\n #print device_registration['device_registation_url'],data\n response = requests.post(device_registration['device_registation_url'],\\\n data = data,verify=False)\n if response.status_code == 401:\n authData = {'user_name':user_authentication['user_id'],\n 'password':user_authentication['password'],\n 'application_id' : user_authentication['application_id']\n }\n authResponse = requests.post(user_authentication['Auth_URL'],data = authData, headers=headers, verify=False) \n authResponseData = json.loads(authResponse.content)\n if authResponseData['valid']:\n authResponseData = authResponseData['object']\n token = authResponseData['access_token']\n user_key = authResponseData['userKey']\n access_key = authResponseData['access_key']\n self.config_obj.write_user_authentication(token, user_key, access_key)\n# data['token'] = authResponseData['access_token']\n# data['userKey'] = authResponseData['userKey']\n# data['access_key'] = authResponseData['access_key']\n headers['user_key'] = authResponseData['userKey']\n headers['token'] = authResponseData['access_token']\n response = requests.post(device_registration['device_registation_url'],\\\n data = data, headers=headers, verify=False)\n response_data = json.loads(response.content)\n print \"response_data\",response_data\n self.logger.error(\"--- Registration response --- :{0}\".format(str(response_data)))\n if response_data['valid'] and len(response_data['object']) > 0 :\n \n device_id = response_data['object']\n id = device_id[0]['id']\n print \"id\",id\n deviceId = get_device(id)\n print deviceId\n if deviceId:\n print \"device_id\"\n device_info = \"HostName=ttpliot.azure-devices.net\" + \";\" + str(deviceId) +\";\" + str(primary_key)\n self.redis_obj_register.set(device, device_id[0]['id'])\n self.redis_obj_register.set(str(device_id[0]['id'])+ \"azure_device_info\", device_info)\n else:\n print \"device_id[0]['id']\",device_id[0]['id'],primary_key,secondary_key\n device_info = iothub_create_device(device_id[0]['id'],primary_key,secondary_key)\n self.redis_obj_register.set(device, device_id[0]['id'])\n self.redis_obj_register.set(str(device_id)+\"azure_device_info\", device_info)\n else:\n self.logger.error(\"Device Registration fail %s\", response.text)\n return None\n except Exception as e:\n self.logger.exception('Exception in device_registration_push: %s', e)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This is a synchronous call, meaning that this function will not return until the cancellation process has completed successfully or the attempt has resulted in a failure. Before returning, the client will also disconnect from the Hub. If there is no registration in progress, it will throw an error, as there is no registration process to cancel.
def cancel(self):
    logger.info("Cancelling the current registration process")
    cancel_complete = Event()

    def on_cancel_complete():
        cancel_complete.set()
        logger.info("Successfully cancelled the current registration process")

    self._polling_machine.cancel(callback=on_cancel_complete)

    cancel_complete.wait()
[ "def disconnect(self):\n if self.is_connected:\n try:\n self.client.unregister()\n finally:\n if self.client.is_running:\n self.client.stop()\n self.hub.disconnect()", "async def async_cancel(self):\n raise NotImplementedError", "async def test_channel_cancel_tasks_on_disconnect(self):\n await self.http_client_connection.connect()\n request_http_message, _ = self.http_dialogs.create(\n counterparty=self.connection_address,\n performative=HttpMessage.Performative.REQUEST,\n method=\"get\",\n url=\"https://not-a-google.com\",\n headers=\"\",\n version=\"\",\n body=b\"\",\n )\n request_envelope = Envelope(\n to=self.connection_address,\n sender=self.client_skill_id,\n protocol_specification_id=UNKNOWN_PROTOCOL_PUBLIC_ID,\n message=request_http_message,\n )\n\n connection_response_mock = Mock()\n connection_response_mock.status_code = 200\n\n response_mock = Mock()\n response_mock.status = 200\n response_mock.headers = {\"headers\": \"some header\"}\n response_mock.reason = \"OK\"\n response_mock._body = b\"Some content\"\n response_mock.read.return_value = asyncio.Future()\n\n with patch.object(\n aiohttp.ClientSession,\n \"request\",\n return_value=_MockRequest(response_mock),\n ):\n await self.http_client_connection.send(envelope=request_envelope)\n\n assert self.http_client_connection.channel._tasks\n task = list(self.http_client_connection.channel._tasks)[0]\n assert not task.done()\n await self.http_client_connection.disconnect()\n\n assert not self.http_client_connection.channel._tasks\n assert task.done()\n with pytest.raises(CancelledError):\n await task", "def register(self):\n if self.hub.is_connected:\n if self._private_key is not None:\n raise SAMPClientError(\"Client already registered\")\n\n result = self.hub.register(self.hub.lockfile[\"samp.secret\"])\n\n if result[\"samp.self-id\"] == \"\":\n raise SAMPClientError(\n \"Registration failed - samp.self-id was not set by the hub.\"\n )\n\n if result[\"samp.private-key\"] == \"\":\n raise SAMPClientError(\n \"Registration failed - samp.private-key was not set by the hub.\"\n )\n\n self._public_id = result[\"samp.self-id\"]\n self._private_key = result[\"samp.private-key\"]\n self._hub_id = result[\"samp.hub-id\"]\n\n if self._callable:\n self._set_xmlrpc_callback()\n self._declare_subscriptions()\n\n if self._metadata != {}:\n self.declare_metadata()\n\n self._is_registered = True\n\n else:\n raise SAMPClientError(\n \"Unable to register to the SAMP Hub. 
Hub proxy not connected.\"\n )", "def test_cancel_sync_handle_call_during_execution(serve_instance):\n running_signal_actor = SignalActor.remote()\n cancelled_signal_actor = SignalActor.remote()\n\n @serve.deployment\n class Ingress:\n async def __call__(self, *args):\n await running_signal_actor.send.remote()\n await send_signal_on_cancellation(cancelled_signal_actor)\n\n h = serve.run(Ingress.bind()).options(use_new_handle_api=True)\n\n # Send a request and wait for it to start executing.\n r = h.remote()\n ray.get(running_signal_actor.wait.remote(), timeout=10)\n\n # Cancel it and verify that it is cancelled via signal.\n r.cancel()\n ray.get(cancelled_signal_actor.wait.remote(), timeout=10)\n\n with pytest.raises(ray.exceptions.TaskCancelledError):\n r.result()", "def RegisterClientByPhone(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def register(self):\n logger.info(\"Registering with Hub...\")\n register_complete = Event()\n\n def on_register_complete(result=None, error=None):\n # This could be a failed/successful registration result from the HUB\n # or a error from polling machine. Response should be given appropriately\n if result is not None:\n if result.status == \"assigned\":\n logger.info(\"Successfully registered with Hub\")\n else: # There be other statuses\n logger.error(\"Failed registering with Hub\")\n if error is not None: # This can only happen when the polling machine runs into error\n logger.info(error)\n\n register_complete.set()\n\n self._polling_machine.register(callback=on_register_complete)\n\n register_complete.wait()", "def test_cancel_async_handle_call_during_execution(serve_instance):\n running_signal_actor = SignalActor.remote()\n cancelled_signal_actor = SignalActor.remote()\n\n @serve.deployment\n class Downstream:\n async def __call__(self, *args):\n await running_signal_actor.send.remote()\n await send_signal_on_cancellation(cancelled_signal_actor)\n\n @serve.deployment\n class Ingress:\n def __init__(self, handle):\n self._h = handle.options(use_new_handle_api=True)\n\n async def __call__(self, *args):\n # Send a request and wait for it to start executing.\n r = self._h.remote()\n await running_signal_actor.wait.remote()\n\n # Cancel it and verify that it is cancelled via signal.\n r.cancel()\n await cancelled_signal_actor.wait.remote()\n\n with pytest.raises(ray.exceptions.TaskCancelledError):\n await r\n\n h = serve.run(Ingress.bind(Downstream.bind())).options(use_new_handle_api=True)\n h.remote().result() # Would raise if test failed.", "def test_cancel_sync_handle_call_during_assignment(serve_instance):\n signal_actor = SignalActor.remote()\n\n @serve.deployment(max_concurrent_queries=1)\n class Ingress:\n def __init__(self):\n self._num_requests = 0\n\n async def __call__(self, *args):\n self._num_requests += 1\n await signal_actor.wait.remote()\n\n return self._num_requests\n\n h = serve.run(Ingress.bind()).options(use_new_handle_api=True)\n\n # Send a request and wait for it to be ongoing so we know that further requests\n # will block trying to assign a replica.\n initial_response = h.remote()\n wait_for_condition(lambda: ray.get(signal_actor.cur_num_waiters.remote()) == 1)\n\n # Make a second request, cancel it, and verify that it is cancelled.\n second_response = h.remote()\n second_response.cancel()\n with pytest.raises(concurrent.futures.CancelledError):\n second_response.result()\n\n # Now signal the 
initial request to finish and check that the second request\n # never reached the replica.\n ray.get(signal_actor.send.remote())\n assert initial_response.result() == 1\n for i in range(2, 12):\n assert h.remote().result() == i", "def testSynchronousConfirmFailure(self):\n # Subscribe\n sub_key = Subscription.create_key_name(self.callback, self.topic)\n self.assertTrue(Subscription.get_by_key_name(sub_key) is None)\n urlfetch_test_stub.instance.expect('get',\n self.verify_callback_querystring_template % 'subscribe', 500, '')\n self.handle('post',\n ('hub.callback', self.callback),\n ('hub.topic', self.topic),\n ('hub.mode', 'subscribe'),\n ('hub.verify', 'sync'),\n ('hub.verify_token', self.verify_token))\n self.assertTrue(Subscription.get_by_key_name(sub_key) is None)\n self.assertTrue(db.get(KnownFeed.create_key(self.topic)) is None)\n self.assertEquals(409, self.response_code())\n\n # Unsubscribe\n Subscription.insert(self.callback, self.topic, self.verify_token, 'secret')\n urlfetch_test_stub.instance.expect('get',\n self.verify_callback_querystring_template % 'unsubscribe', 500, '')\n self.handle('post',\n ('hub.callback', self.callback),\n ('hub.topic', self.topic),\n ('hub.mode', 'unsubscribe'),\n ('hub.verify', 'sync'),\n ('hub.verify_token', self.verify_token))\n self.assertTrue(Subscription.get_by_key_name(sub_key) is not None)\n self.assertEquals(409, self.response_code())", "def guiding_disconnect():\r\n try:\r\n app.guider.disconnect()\r\n return jsonify({\"status\": True})\r\n except Exception as e:\r\n return jsonify(\r\n {\"status\": False, \"error\": \"Failed disconnecting from guider: %s\" % e}\r\n )", "def testClientAPICancel(self):\n results = []\n\n def handle_canceled(iq, session):\n for item in session['custom_data']:\n results.append(item)\n\n def handle_step1(iq, session):\n session['custom_data'].append('bar')\n session['next'] = handle_canceled\n self.xmpp['xep_0050'].cancel_command(session)\n\n session = {'custom_data': ['foo'],\n 'next': handle_step1}\n\n self.xmpp['xep_0050'].start_command(\n 'foo@example.com',\n 'test_client',\n session)\n\n self.send(\"\"\"\n <iq id=\"1\" to=\"foo@example.com\" type=\"set\">\n <command xmlns=\"http://jabber.org/protocol/commands\"\n node=\"test_client\"\n action=\"execute\" />\n </iq>\n \"\"\")\n\n self.recv(\"\"\"\n <iq id=\"1\" to=\"foo@example.com\" type=\"result\">\n <command xmlns=\"http://jabber.org/protocol/commands\"\n node=\"test_client\"\n sessionid=\"_sessionid_\"\n status=\"executing\">\n <x xmlns=\"jabber:x:data\" type=\"form\">\n <field var=\"foo\" type=\"text-single\" />\n </x>\n </command>\n </iq>\n \"\"\")\n\n self.send(\"\"\"\n <iq id=\"2\" to=\"foo@example.com\" type=\"set\">\n <command xmlns=\"http://jabber.org/protocol/commands\"\n node=\"test_client\"\n sessionid=\"_sessionid_\"\n action=\"cancel\" />\n </iq>\n \"\"\")\n\n self.recv(\"\"\"\n <iq id=\"2\" to=\"foo@example.com\" type=\"result\">\n <command xmlns=\"http://jabber.org/protocol/commands\"\n node=\"test_client\"\n sessionid=\"_sessionid_\"\n status=\"canceled\" />\n </iq>\n \"\"\")\n\n # Give the event queue time to process\n time.sleep(0.3)\n\n self.failUnless(results == ['foo', 'bar'],\n 'Incomplete command workflow: %s' % results)", "async def _connect(self):\n \n try:\n while True:\n ready_state = self.ready_state\n if (ready_state is not None):\n self.ready_state = None\n ready_state.cancel()\n ready_state = None\n \n try:\n await self.gateway.run()\n except (GeneratorExit, CancelledError) as err:\n # For now only here. 
These errors occurred randomly for me since I made the wrapper, only once-once,\n # and it was not the wrapper causing them, so it is time to say STOP.\n # I also know `GeneratorExit` will show up as RuntimeError, but it is already a RuntimeError.\n try:\n await write_exception_async(\n err,\n [\n 'Ignoring unexpected outer Task or coroutine cancellation at ',\n repr(self),\n '._connect:\\n',\n ],\n loop = KOKORO,\n )\n except (GeneratorExit, CancelledError) as err:\n sys.stderr.write(\n f'Ignoring unexpected outer Task or coroutine cancellation at {self!r}._connect as '\n f'{err!r} meanwhile rendering an exception for the same reason.\\n'\n f'The client will reconnect.\\n'\n )\n continue\n \n except DiscordGatewayException as err:\n if err.code in RESHARD_ERROR_CODES:\n sys.stderr.write(\n f'{err.__class__.__name__} occurred, at {self!r}._connect:\\n'\n f'{err!r}\\n'\n f'The client will reshard itself and reconnect.\\n'\n )\n \n await self.client_gateway_reshard(force=True)\n continue\n \n raise\n \n else:\n if not self.running:\n break\n \n while True:\n try:\n await sleep(5.0, KOKORO)\n try:\n # We are down, why not reshard instantly?\n await self.client_gateway_reshard()\n except ConnectionError:\n continue\n else:\n break\n except (GeneratorExit, CancelledError) as err:\n try:\n write_exception_async(\n err,\n [\n 'Ignoring unexpected outer Task or coroutine cancellation at ',\n repr(self),\n '._connect:\\n',\n ],\n loop = KOKORO,\n )\n except (GeneratorExit, CancelledError) as err:\n sys.stderr.write(\n f'Ignoring unexpected outer Task or coroutine cancellation at {self!r}._connect as '\n f'{err!r} meanwhile rendering an exception for the same reason.\\n'\n f'The client will reconnect.\\n'\n )\n continue\n continue\n except BaseException as err:\n if (\n isinstance(err, InvalidToken) or\n (\n isinstance(err, DiscordGatewayException) and\n (err.code in INTENT_ERROR_CODES)\n )\n ):\n sys.stderr.write(\n f'{err.__class__.__name__} occurred, at {self!r}._connect:\\n'\n f'{err!r}\\n'\n )\n else:\n write_exception_async(\n err,\n [\n 'Unexpected exception occurred at ',\n repr(self),\n '._connect\\n',\n ],\n (\n 'If you can reproduce this bug, Please send me a message or open an issue with your code, and '\n 'with every detail how to reproduce it.\\n'\n 'Thanks!\\n'\n ),\n loop = KOKORO,\n )\n \n await ensure_shutdown_event_handlers(self)\n \n finally:\n try:\n await self.gateway.close()\n finally:\n unregister_client(self)\n self.running = False\n \n if not self.guild_profiles:\n return\n \n to_remove = []\n for guild_id in self.guild_profiles.keys():\n try:\n guild = GUILDS[guild_id]\n except KeyError:\n continue\n \n guild._delete(self)\n if not guild.partial:\n continue\n \n to_remove.append(guild_id)\n \n if to_remove:\n for guild_id in to_remove:\n try:\n del self.guild_profiles[guild_id]\n except KeyError:\n pass\n \n # need to delete the references for cleanup\n guild = None\n to_remove = None\n \n ready_state = self.ready_state\n if (ready_state is not None):\n self.ready_state = None\n ready_state.cancel()\n ready_state = None", "def attempt_to_register(self, message: Message):\n\t\tlogger.info(\"Attempting to register client.\")\n\n\t\tsuccessful_parse = re.match(r'\\/regi (.{1,30})', message.body)\n\n\t\tif successful_parse and self.validate_name(successful_parse.group(1)):\n\t\t\tlogger.info(\"Client successfully registered.\")\n\t\t\tself.registry.register(successful_parse.group(1), message.sender)\n\t\telse:\n\t\t\tlogger.info(\"Client not registered\") # Ignore the 
message", "def test_cancel_async_handle_call_during_assignment(serve_instance):\n signal_actor = SignalActor.remote()\n\n @serve.deployment(max_concurrent_queries=1)\n class Downstream:\n def __init__(self):\n self._num_requests = 0\n\n async def __call__(self, *args):\n self._num_requests += 1\n await signal_actor.wait.remote()\n\n return self._num_requests\n\n @serve.deployment\n class Ingress:\n def __init__(self, handle):\n self._h = handle.options(use_new_handle_api=True)\n\n async def __call__(self, *args):\n # Send a request and wait for it to be ongoing so we know that further\n # requests will block trying to assign a replica.\n initial_response = self._h.remote()\n\n async def one_waiter():\n return await signal_actor.cur_num_waiters.remote() == 1\n\n await async_wait_for_condition(one_waiter)\n\n # Make a second request, cancel it, and verify that it is cancelled.\n second_response = self._h.remote()\n second_response.cancel()\n with pytest.raises(asyncio.CancelledError):\n await second_response\n\n # Now signal the initial request to finish and check that the second request\n # never reached the replica.\n await signal_actor.send.remote()\n assert await initial_response == 1\n for i in range(2, 12):\n assert await self._h.remote() == i\n\n h = serve.run(Ingress.bind(Downstream.bind())).options(use_new_handle_api=True)\n h.remote().result() # Would raise if test failed.", "async def shutdown(self) -> None:\n\n # We only need to delete our test channels once, so we'll let the primary test\n # client take care of it.\n if self.is_primary and self.channel_send is not None:\n await self.channel_send.delete(reason=\"Tests complete\")\n await self.channel_bulletin.delete(reason=\"Tests complete\")\n\n await self.client.logout()\n\n logging.info(\"client shut down\")\n\n return", "async def unregister_client(connection):\n if connection.uuid in connections:\n connections.pop(connection.uuid)\n messages_to_clients.pop(connection.uuid)\n\n await connection.notify_disconnected()", "def unregister(self):\n if self.hub.is_connected:\n self._is_registered = False\n self.hub.unregister(self._private_key)\n self._hub_id = None\n self._public_id = None\n self._private_key = None\n else:\n raise SAMPClientError(\n \"Unable to unregister from the SAMP Hub. Hub proxy not connected.\"\n )", "def disconnect(self):\n from ray.util.client.api import _ClientAPI\n\n if self.client_worker is not None:\n self.client_worker.close()\n self.api = _ClientAPI()\n self.client_worker = None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ask a yes/no/quit question via raw_input() and return their answer. "question" is a string that is presented to the user. "default" is the presumed answer if the user just hits Enter. It must be "yes" (the default), "no", "quit" or None (meaning an answer is required of the user). The "answer" return value is one of "yes", "no" or "quit".
def query_yes_no_quit(question, default="yes"):
    valid = {"yes": "yes", "y": "yes", "ye": "yes",
             "no": "no", "n": "no",
             "quit": "quit", "qui": "quit", "qu": "quit", "q": "quit"}
    if default == None:
        prompt = " [y/n/q] "
    elif default == "yes":
        prompt = " [Y/n/q] "
    elif default == "no":
        prompt = " [y/N/q] "
    elif default == "quit":
        prompt = " [y/n/Q] "
    else:
        raise ValueError("invalid default answer: '%s'" % default)
    while 1:
        sys.stdout.write(question + prompt)
        choice = raw_input().lower()
        if default is not None and choice == '':
            return default
        elif choice in valid.keys():
            return valid[choice]
        else:
            sys.stdout.write("Please respond with 'yes', 'no' or 'quit'.\n")
[ "def query_yes_no_quit(question, default=\"yes\"):\n valid = {\"yes\":\"yes\", \"y\":\"yes\", \"ye\":\"yes\",\n \"no\":\"no\", \"n\":\"no\",\n \"quit\":\"quit\", \"qui\":\"quit\", \"qu\":\"quit\", \"q\":\"quit\"}\n if default == None:\n prompt = \" [y/n/q] \"\n elif default == \"yes\":\n prompt = \" [Y/n/q] \"\n elif default == \"no\":\n prompt = \" [y/N/q] \"\n elif default == \"quit\":\n prompt = \" [y/n/Q] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while 1:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return default\n elif choice in valid.keys():\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes', 'no' or 'quit'.\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\":\"yes\", \"y\":\"yes\", \"ye\":\"yes\",\n \"no\":\"no\", \"n\":\"no\"}\n if default == None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n while 1:\n sys.stdout.write(question + prompt)\n if sys.version_info[0]==2:\n choice = raw_input().lower()\n elif sys.version_info[0]>2:\n choice = input().lower()\n if default is not None and choice == '':\n return default\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\\\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\":\"yes\", \"y\":\"yes\", \"ye\":\"yes\",\n \"no\":\"no\", \"n\":\"no\"}\n if default == None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while 1:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return default\n elif choice in valid.keys():\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\\\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n\tvalid = {\"yes\": True, \"y\": True, \"ye\": True,\n\t\t\t \"no\": False, \"n\": False}\n\tif default is None:\n\t\tprompt = \" [y/n] \"\n\telif default == \"yes\":\n\t\tprompt = \" [Y/n] \"\n\telif default == \"no\":\n\t\tprompt = \" [y/N] \"\n\telse:\n\t\traise ValueError(\"invalid default answer: '%s'\" % default)\n\n\twhile True:\n\t\tsys.stdout.write(question + prompt)\n\t\ttry:\n\t\t\tchoice = raw_input().lower()\n\t\texcept:\n\t\t\tchoice = input().lower()\n\n\t\tif default is not None and choice == '':\n\t\t\treturn valid[default]\n\t\telif choice in valid:\n\t\t\treturn valid[choice]\n\t\telse:\n\t\t\tsys.stdout.write(\"Please respond with 'yes' or 'no' \"\n\t\t\t\t\t\t\t \"(or 'y' or 'n').\\n\")", "def query_yes_no(self,question, default=\"yes\"):\n valid = {\"yes\":\"yes\", \"y\":\"yes\", \"ye\":\"yes\", \"no\":\"no\", \"n\":\"no\"}\n\n if default == None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while 1:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return default\n elif choice in valid.keys():\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' (or 'y' or 'n').\\n\")", "def 
query_yes_no(question, default=\"no\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = { \"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False }\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def yes_no_query(question, default=None, interrupt=None):\n valid_answers = {'y': True, 'n': False, 'yes': True, 'no': False}\n default_dict = { # default => prompt default string\n None: \"[y/n]\",\n True: \"[Y/n]\",\n False: \"[y/N]\",\n }\n\n # validate input parameters\n if default not in default_dict:\n raise ValueError(\"Invalid value for parameter 'default': '%s'. Possible values: [%s]\"\n % (default, ','.join(map(str, default_dict.keys()))))\n if interrupt not in default_dict:\n raise ValueError(\"Invalid value for parameter 'interrupt': '%s'. 
Possible values: [%s]\"\n % (interrupt, ','.join(map(str, default_dict.keys()))))\n\n prompt_str = \"%s %s \" % (question, default_dict[default])\n\n # check user input\n answer = None\n while answer not in valid_answers:\n try:\n answer = (raw_input(prompt_str) if PY2 else input(prompt_str)).strip().lower() # noqa\n # response was an empty string and default value is set\n if not answer and isinstance(default, bool):\n return default\n except KeyboardInterrupt:\n if isinstance(interrupt, bool):\n print()\n return interrupt\n except EOFError:\n if isinstance(default, bool):\n print()\n return default\n else:\n raise\n\n return valid_answers[answer]", "def query_user_bool(question, default=True):\n\n valid_yes_ans = [\"yes\", \"y\"]\n valid_no_ans = [\"no\", \"n\"]\n\n if default is None:\n prompt = \" [y/n] \"\n elif default:\n prompt = \" [Y/n] \"\n else:\n prompt = \" [y/N] \"\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n\n if default is not None and choice == '':\n return default\n\n if choice in valid_yes_ans:\n return True\n\n if choice in valid_no_ans:\n return False\n\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\\\n \"(or 'y' or 'n').\\n\")", "def query_input(question, default=None, color=default_color):\n if default is None or default == '':\n prompt = ' '\n elif type(default) == str:\n prompt = flo(' [{default}] ')\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(color(question + prompt))\n choice = raw_input()\n if default is not None and choice == '':\n return default\n if choice != '':\n return choice", "def ask_question(msg, answers=\"[yes/No]\", default=\"no\"):\n if answers[0] != '[' or answers[-1] != ']':\n msg = \"%s wrongly specified, should be in [] separated by /\" % answers\n raise ValueError(msg)\n\n answer_list = answers[1:-1].split('/')\n \n if len(answer_list) < 2:\n raise ValueError(\"Too few possible answers: %s\" % answers)\n \n answer_list = [item.lower() for item in answer_list[:]]\n default = default.lower()\n \n if default not in answer_list:\n raise ValueError(\"Default answer %s not among answers: %s\" % (default,\n answers))\n \n print_out = \"%s %s: \" % (msg, answers)\n print print_out,\n \n inpt = None\n while inpt == None:\n try:\n inpt = raw_input()\n except KeyboardInterrupt:\n print_msg_exit(\" KeyboardInterrupt, exit.\", exit_code=1)\n except Exception, ex:\n print ex\n inpt = None\n print(\" Couldn't recognize the answer, try again.\")\n print print_out,\n else:\n inpt = inpt.lower()\n # finally, check what the user answered \n for i in range(len(answer_list)):\n if inpt == answer_list[i][0] or inpt == answer_list[i]:\n return answer_list[i]\n else:\n if inpt == '':\n return default\n else:\n inpt = None\n print \" Couldn't recognize the answer, try again.\"\n print print_out,", "def query_yes_no(question, default=\"yes\", force=False):\n valid = {\"yes\":True, \"y\":True, \"ye\":True,\n \"no\":False, \"n\":False}\n if default == None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n if not force:\n choice = raw_input().lower()\n else:\n choice = \"yes\"\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\\\n \"(or 
'y' or 'n').\\n\")", "def ask(question, options, default):\n assert default in options\n\n question += \" ({})? \".format(\"/\".join(o.upper() if o == default else o for o in options))\n selected = None\n while selected not in options:\n selected = input(question).strip().lower()\n if selected == \"\":\n selected = default\n else:\n if selected not in options:\n question = \"Please type '{}'{comma} or '{}': \".format(\n \"', '\".join(options[:-1]), options[-1],\n comma=',' if len(options) > 2 else '',\n )\n return selected", "def input_with_default(prompt, default):\n response = raw_input(\"%s (Default %s) \"%(prompt, default))\n if not response:\n return default\n return response", "def getUserInput(self, prompt, default=\"\"):\n accept = \"n\"\n inp = \"\"\n while accept == \"n\" or accept == \"N\":\n inp = raw_input(\"\\n\" + prompt)\n if len(inp.strip()) == 0:\n inp = default\n accept = raw_input(\"Your choice: '%s'. Is this correct? Y/n: \" % inp)\n return inp", "def boolean_input(self, question, default=False):\n if default is None:\n yes_no = \"y/n\"\n default_text = None\n elif default:\n yes_no = \"[Y/n]\"\n default_text = 'y'\n else:\n yes_no = \"[y/N]\"\n default_text = 'n'\n\n prompt = \"{question} {yes_no}? \".format(question=question, yes_no=yes_no)\n\n result = self.selection_input(\n prompt=prompt,\n choices=['y', 'n'],\n default=default_text,\n error_message=\"Please enter Y or N\",\n transform=lambda s: s.lower()[:1],\n )\n if result == 'y':\n return True\n\n return False", "def yes_or_no_or_abort(prompt, default=\"yes\"):\n yes_choices = ['yes', 'y']\n no_choices = ['no', 'n']\n abort_choices = ['abort', 'a']\n\n choice = prompt_with_options(\n prompt, [yes_choices, no_choices, abort_choices], default)\n\n if choice == yes_choices:\n result = True\n elif choice == no_choices:\n result = False\n else:\n assert choice == abort_choices\n result = None\n\n return result", "def prompt_option(self, message, default):\n user_input = raw_input(\" \" + message\n + \" (default = \\\"\"+ default + \"\\\"): \")\n\n if len(user_input) == 0:\n return default\n \n return user_input", "def ask_question(question):\n print('Question: {0}'.format(question))\n return prompt.string('Your answer: ')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the mean absolute error on the test set given X, y, and the model parameter w.
def mean_absolute_error(w, X, y):
    #####################################################
    # TODO 1: Fill in your code here                    #
    #####################################################
    err = None
    temp = np.dot(X, w)
    err = np.mean(np.abs(_error(y, temp)))
    return err
[ "def mean_absolute_error(w, X, y):\n #####################################################\n # TODO 1: Fill in your code here #\n #####################################################\n N = len(X)\n err = np.sum(abs(np.dot(w,X.T) - y))\n err = err / N\n return err", "def mean_absolute_error(w, X, y):\n return np.mean(np.abs(np.subtract(np.matmul(X, w), y)))", "def mean_absolute_error(w, X, y):\n #####################################################\n # TODO 1: Fill in your code here #\n #####################################################\n if w is None:\n return None\n\n err = None\n yhat = np.dot(X , w)\n err = np.abs(np.subtract(yhat,y)).mean()\n return err", "def test_error(w, X, y):\n\n\typred = np.matmul(X,w)\n\terr = np.mean(np.square(ypred-y))\n\n\treturn err", "def errorPer(X_train,y_train,w):\n misclassified_count = 0\n total_points = np.shape(X_train)[0]\n avgError = 0\n for i, x in enumerate(X_train):\n pred_val = pred(X_train[i], w)\n if pred_val != y_train[i]:\n misclassified_count += 1\n avgError = misclassified_count / total_points\n return avgError", "def MeanSquaredError(y_data, y_model):\n\tn = np.size(y_model)\n\tMSE = (1/n)*np.sum((y_data-y_model)**2)\n\n\treturn MSE", "def error_function(w, X, y):\n ##################################################\n # TODO: write code here to compute error correctly\n ##################################################\n X_temp = X.copy()\n X_temp = np.insert(X_temp, 0, 1, axis=1)\n y_out = np.dot(X_temp,w)\n y_hat = np.where(y_out >= 0, 1, -1)\n error = sum(np.where(y_hat == y, 0, 1)) / len(y)\n return(error)", "def mean_absolute_percentage_error(y_true, y_pred, sample_weight=..., multioutput=...):\n ...", "def get_rmse(x,y):\r\n return np.sqrt(mean_squared_error(x,y))", "def my_mse(x, y):\n return np.mean(np.mean((x - y)**2, axis=-1))", "def compute_mean_squared_error(self, X_data, y_data):\n #assert isinstance(X_data, np.ndarray)\n #assert isinstance(y_data, np.ndarray)\n #assert X_data.shape[0] == y_data.shape[0]\n \n return np.square(np.subtract(X_data, y_data)).mean()", "def mean_absolute_error(self):\n print('Mean absolute error regression loss: ' + str(mean_absolute_error(self.model.dataset.get_y_test(),\n self.model.get_predicted())))", "def compute_loss(y, tx, w):\n \n return 1/2 * np.mean((y - tx @ w)**2) #MSE\n #return np.mean(abs(y - tx @ w)) #MAE", "def compute_loss_rmse(y, tx, w):\n return np.sqrt(compute_loss_mse(y,tx,w))", "def avg_abs_err(fitx, std0, target_cov, n):\n err = np.empty(n)\n for i in range(n):\n err[i] = get_abs_err(fitx, std0, target_cov)\n print(fitx, err.mean())\n return err.mean()", "def mae(actual, prediction):\n actual, prediction = np.array(actual), np.array(prediction)\n return metrics.mean_absolute_error(actual, prediction)", "def mean_squared_error(y_true, y_pred):\n mse = np.mean(np.power(y_true - y_pred, 2))\n return mse", "def mean_squared_error(y_true, y_pred) -> np.ndarray:\n # Calculates the mean of the array\n return np.mean((y_pred - y_true)**2)", "def find_error(X, y, w):\n return np.linalg.norm(X@w - y, ord=2)**2" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Iterate over modes. Synchronized iterator to iterate over the modes in an order.
def modes(self):
    try:
        order = self._current_order
    except AttributeError:
        raise AttributeError('Cannot iterate over modes without iterating over orders!') from None
    mode = -order
    while mode <= order:
        yield mode
        mode += 1
[ "def use_modes(self, modes):\n if isinstance(modes, (str, Mode)):\n modes = [modes]\n old_modes = self.active_modes\n try:\n self.active_modes = modes\n yield\n finally:\n self.active_modes = old_modes", "def get_modes(self):\n return [i for i, j in enumerate(self._modemap._map) if j is not None]", "async def _load_modes(self) -> None:\n modes: List[Dict[str, Any]] = await self._api_request(\"modes\")\n _LOGGER.debug(\"Loaded modes\")\n self._modes = [Mode(m) for m in modes]", "def iter_mode(n, obj='ndarray'):\n for mode in cap[obj][MODE]:\n for char in fmtdict[mode]:\n yield randitems(n, obj, mode, char)", "def get_modes(self):\n modes = set()\n for er in self.exercise_recordings:\n if er.mode not in modes:\n modes.add(er.mode)\n return list(modes)", "def get_modes(self):\n return [x for x in self.active if x is not None]", "def modes(self, modes):\n\n self._modes = modes", "def get_all_modes(self, index = False):\n if index:\n out = []\n for i in range(self.mode_num):\n out.append( self.data[i][index] )\n else:\n out = self.data\n return out", "def all_modes(self):\n\n # Find \"post-proj all modes\"\n # Jump to first value, ignoring text.\n # Move through data, adding it to a list\n # continue onto next line.\n # Repeat until the following line is known to be empty.\n\n # output.dat is the psi4 output file.\n with open('output.dat', 'r') as file:\n lines = file.readlines()\n for count, line in enumerate(lines):\n if \"post-proj all modes\" in line:\n start_of_vals = count\n break\n else:\n raise EOFError('Cannot locate modes in output.dat file.')\n\n # Barring the first (and sometimes last) line, dat file has 6 values per row.\n end_of_vals = start_of_vals + (3 * len(self.molecule.molecule['input'])) // 6\n\n structures = lines[start_of_vals][24:].replace(\"'\", \"\").split()\n structures = structures[6:]\n\n for row in range(1, end_of_vals - start_of_vals):\n # Remove double strings and weird formatting.\n structures += lines[start_of_vals + row].replace(\"'\", \"\").replace(\"]\", \"\").split()\n\n all_modes = [float(val) for val in structures]\n\n return array(all_modes)", "def all_modes(self):\n\n # Find \"post-proj all modes\"\n # Jump to first value, ignoring text.\n # Move through data, adding it to a list\n # continue onto next line.\n # Repeat until the following line is known to be empty.\n\n # output.dat is the psi4 output file.\n with open(\"output.dat\", \"r\") as file:\n lines = file.readlines()\n for count, line in enumerate(lines):\n if \"post-proj all modes\" in line:\n start_of_vals = count\n break\n else:\n raise EOFError(\"Cannot locate modes in output.dat file.\")\n\n # Barring the first (and sometimes last) line, dat file has 6 values per row.\n end_of_vals = start_of_vals + (3 * len(self.molecule.atoms)) // 6\n\n structures = lines[start_of_vals][24:].replace(\"'\", \"\").split()\n structures = structures[6:]\n\n for row in range(1, end_of_vals - start_of_vals):\n # Remove double strings and weird formatting.\n structures += (\n lines[start_of_vals + row].replace(\"'\", \"\").replace(\"]\", \"\").split()\n )\n\n all_modes = [float(val) for val in structures]\n\n return np.array(all_modes)", "def _sort_modes(self):\n sort_idx = np.lexsort((self.modes[:, 1], self.modes[:, 0], self.modes[:, 2]))\n self._modes = self.modes[sort_idx]", "def getModes(this):\n\t\tthis.checkInit()\n\t\t\n\t\t# On sauvegarde la config actuelle\n\t\tinit = this.config(get=True)\n\t\t\n\t\t# Ensembles de modes\n\t\tformats = Camera.formats.copy()\n\t\tmodes = set()\n\t\t\n\t\t# On averti du 
départ\n\t\tprint '\\nLooping modes for the camera... (%d modes)' % (len(formats))\n\t\t\t\n\t\t# Pour chaques formats\n\t\twhile formats:\n\t\t\t\n\t\t\t# On récupère le format à tester\n\t\t\tformat = formats.pop()\n\t\t\t\n\t\t\t# Configuration actuelle\n\t\t\tmode = this.config(\n\t\t\t\theight = float(format[1]),\n\t\t\t\twidth = float(format[0])\n\t\t\t)\n\t\t\t\n\t\t\t# On enregistre le mode\n\t\t\tcurrentFormat = (mode['width'], mode['height'])\n\t\t\tmodes.add(currentFormat)\n\t\t\tif currentFormat in formats:\n\t\t\t\tformats.remove(currentFormat)\n\t\t\t\n\t\t\t# On affiche l'itération courante\n\t\t\tprintf('%d%5s\\r' % (len(formats), ''))\n\t\t###\n\t\t\n\t\t# On remet comme avant et on retourne la liste de modes\n\t\tthis.config(params=init); print 'Done, found %d.' % (len(modes))\n\t\treturn [(int(mode[0]), int(mode[1])) for mode in modes]", "def generate_modes(self):\n # Generate the dynamic decomposition\n results = dynamic_decomposition(self)\n\n # Add regular dynamic mode info\n self._modes = mode_grp = self._file.require_group('modes')\n items = {\n 'eigenvalues': results.eigenvalues,\n 'eigenvectors': results.eigenvectors,\n 'modes': results.modes,\n 'amplitudes': results.amplitudes\n }\n for key, values in items:\n dset = mode_grp.require_dataset(\n name=self.snapshot_dataset_key + '_' + key,\n shape=values.shape,\n dtype=values.dtype)\n dset[...] = values\n\n # Add POD modes (from SVD) to data\n pod_data = zip(results.pod_modes,\n ('spatial', 'pod_coeffs', 'temporal'))\n for values, name in pod_data:\n dset = mode_grp.require_dataset(\n name=self.snapshot_dataset_key + '_' + name,\n shape=values.shape,\n dtype=values.dtype)\n dset[...] = values", "def modes(self):\n return np.hstack(tuple(self.operator.modes))", "def get_modes(self, code_block):\r\n # FUCK YOU INDEX ERRORS, LIST COMPS, AND EVEN YOU LAMBDAS I DON'T NEED PRETTY\r\n # 0 = pos mode\r\n # 1 = imm mode\r\n modes, mode_codes = [0, 0], list(reversed(str(code_block[0])))[2:]\r\n x = 0\r\n for mode in mode_codes:\r\n modes[x] = int(mode)\r\n x += 1\r\n print('Get modes: ')\r\n print(modes)\r\n return modes", "def _sort_modes_(self) -> None:\n sort_idx = np.lexsort((self.__modes[:, 0], self.__modes[:, 1],\n self.__modes[:, 2]))\n self.__modes = self.__modes[sort_idx]", "def loop_algos():\n for name, infos in CIPHER_LIST.items():\n if infos[\"type\"] == \"block\":\n for mode in MODES.keys():\n algo = get_cipher(infos[\"algo\"], mode)\n yield algo\n else:\n algo = get_cipher(infos[\"algo\"])\n yield algo", "def test_mode(self):\n self.assertEqual(utils.mode([1, 2, 3, 4]), 1)\n self.assertEqual(utils.mode([1, 1, 3, 4]), 1)\n self.assertEqual(utils.mode([1, 2, 2, 4]), 2)\n self.assertEqual(utils.mode([1, 1, 2, 2]), 1)", "def advanceMode(self):\n # If the modeSwitch method is random, shuffle the list each time\n if config.modeSwitch == 1:\n random.shuffle(self.modes)\n\n # Loop through the modes by priority to find an activated one\n for i in range(len(self.modes)):\n if self.modes[i].active:\n self.modes[i].deactivate()\n if i == len(self.modes)-1:\n self.currentMode = self.modes[0]\n break\n else:\n self.currentMode = self.modes[i+1]\n break\n else:\n # No currently active modes so just activate first mode\n self.currentMode = self.modes[0]\n\n # Activate the current mode\n self.currentMode.activate()\n\n print 'mode activated: ', self.currentMode.name\n\n # Initate the first picture and set the chunk drawing underway\n self.addPictureToMode()\n self.currentChunkNumber = None\n self.advanceChunk()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r"""Find the approximate location of a levitation trap. Find an approximate position of a acoustic levitation trap close to a starting point. This is done by following the radiation force in the sound field using an differential equation solver. The differential equation is the unphysical equation
def find_trap(array, start_position, complex_transducer_amplitudes, tolerance=10e-6, time_interval=50, path_points=1, **kwargs):
    from scipy.integrate import solve_ivp
    from numpy.linalg import lstsq
    if 'radius' in kwargs:
        from .fields import SphericalHarmonicsForce as Force, SphericalHarmonicsForceGradient as ForceGradient
    else:
        from .fields import RadiationForce as Force, RadiationForceGradient as ForceGradient
    evaluator = Force(array, **kwargs) + ForceGradient(array, **kwargs)
    mg = evaluator.fields[0].field.mg

    def f(t, x):
        F = evaluator(complex_transducer_amplitudes, x)[0]
        F[2] -= mg
        return F

    def bead_close(t, x):
        F, dF = evaluator(complex_transducer_amplitudes, x)
        F[2] -= mg
        dx = lstsq(dF, F, rcond=None)[0]
        distance = np.sum(dx**2, axis=0)**0.5
        return np.clip(distance - tolerance, 0, None)
    bead_close.terminal = True

    outs = solve_ivp(f, (0, time_interval), np.asarray(start_position), events=bead_close, vectorized=True, dense_output=path_points > 1)
    if outs.message != 'A termination event occurred.':
        print('End criterion not met. Final path position might not be close to trap location.')
    if path_points > 1:
        return outs.sol(np.linspace(0, outs.sol.t_max, path_points))
    else:
        return outs.y[:, -1]
[ "def anl_solution(self):\r\n\r\n m = float(self.mass) / self.nu_m\r\n qe = 1 / self.nu_m * (self.nu_t * self.nu_t / self.nu_x) * 1.0 \\\r\n / float(self.size_tick * self.size_tick)\r\n print 'qE=', qe\r\n c = self.light_vel\r\n for i in range(0, len(self.obs.obt_g)):\r\n ddt = float(self.obs.obt[i] - self.obs.obt[i - 1])\r\n x = m * c ** 2 / qe * (math.sqrt(1.0 + (qe * self.t[i] / (m\r\n * c)) ** 2) - 1.0)\r\n self.xa_track.append(x)\r\n p = qe * self.t[i]\r\n self.pa.append(p)\r\n v = p / math.sqrt(m ** 2 + (p / c) ** 2)\r\n jv = self.t[i] * qe / (m * c)\r\n v = math.sqrt(jv * jv / (1 + jv * jv)) * c\r\n self.va.append(v)\r\n print 'Analytical solution of the differential equation of motion'", "def find_lower_tangent(l_x_as, l_y_as, r_x_as, r_y_as, r_yp_as):\n # logg = logging.getLogger(f\"c.{__name__}.find_lower_tangent\")\n # logg.debug(f\"Start find_lower_tangent\")\n\n # compute the second derivative\n r_ypp = r_yp_as[1:] - r_yp_as[:-1]\n mean_r_ypp = np.mean(r_ypp)\n\n # logg.debug(f\"r_yp_as: {r_yp_as}\")\n # logg.debug(f\"r_ypp: {r_ypp}\")\n\n if mean_r_ypp >= 0:\n # logg.debug(f\"ypp positive\")\n range_xid = range(r_x_as.shape[0])\n else:\n # logg.debug(f\"ypp negative\")\n range_xid = range(r_x_as.shape[0])[::-1]\n\n tangent_start = timer()\n for xid in range_xid:\n # point tangent to the *right* segment\n tang_op = OrientedPoint(r_x_as[xid], r_y_as[xid], slope2deg(r_yp_as[xid]))\n tang_coeff = tang_op.to_ab_line()\n\n # sample it on the *left* segment sample\n l_tang_y_as = poly_model(l_x_as, tang_coeff, flip_coeff=True)\n # ax.plot(l_x_as, l_tang_y_as, color=\"b\", ls=\"-\", marker=\"\")\n # ax.plot(l_x_as, l_tang_y_as, color=\"b\", ls=\"\", marker=\".\")\n\n # find if the left segment has some points lower than the tangent\n lower = l_y_as < l_tang_y_as\n # logg.debug(f\"lower: {lower} {np.sum(lower)}\")\n if np.sum(lower) == 0:\n # logg.debug(f\"Breaking at xid: {xid}\")\n break\n\n tangent_end = timer()\n tangent_time = tangent_end - tangent_start\n # logg.debug(f\"Time to find tangent: {tangent_end - tangent_start:.6f}\")\n\n # find distance from left segment to tangent\n dist_left_tangent = l_y_as - l_tang_y_as\n min_dist_left_tangent = np.min(dist_left_tangent)\n argmin_dist_left_tangent = np.argmin(dist_left_tangent)\n recap = f\"min_dist_left_tangent: {min_dist_left_tangent:.6f}\"\n recap += \" argmin_dist_left_tangent: {argmin_dist_left_tangent}\"\n # logg.debug(recap)\n\n if min_dist_left_tangent < 0:\n # logg.debug(f\"Tangent not found\")\n return -1, -1, None, tangent_time\n\n l_xid = argmin_dist_left_tangent\n r_xid = xid\n\n return l_xid, r_xid, l_tang_y_as, tangent_time", "def startingPoint(G, c, A, b, guess):\n m,n = A.shape\n x0, y0, l0 = guess\n\n N = np.zeros((n+m+m, n+m+m))\n N[:n, :n] = G\n N[:n, n+m:] = -A.T\n N[n:n+m, :n] = A\n N[n:n+m, n:n+m] = -np.eye(m)\n N[n+m:, n:n+m] = np.diag(l0)\n N[n+m:, n+m:] = np.diag(y0)\n\n\n rhs = np.empty(n+m+m)\n rhs[:n] = -(G.dot(x0) - A.T.dot(l0)+c)\n rhs[n:n+m] = -(A.dot(x0) - y0 - b)\n rhs[n+m:] = -(y0*l0)\n\n sol = la.solve(N, rhs)\n\n dx = sol[:n]\n dy = sol[n:n+m]\n dl = sol[n+m:]\n\n y0 = np.maximum(1, np.abs(y0 + dy))\n l0 = np.maximum(1, np.abs(l0+dl))\n\n return x0, y0, l0", "def _relative_accel_mod(self):\n a_ego = self.car_data.a_ego\n a_lead = self.lead_data.a_lead\n min_consider_time = 0.75 # minimum amount of time required to consider calculation\n if len(self.df_data.v_rels) > 0: # if not empty\n elapsed_time = self.df_data.v_rels[-1]['time'] - self.df_data.v_rels[0]['time']\n if elapsed_time > 
min_consider_time:\n a_ego = (self.df_data.v_rels[-1]['v_ego'] - self.df_data.v_rels[0]['v_ego']) / elapsed_time\n a_lead = (self.df_data.v_rels[-1]['v_lead'] - self.df_data.v_rels[0]['v_lead']) / elapsed_time\n\n mods_x = [-1.5, -.75, 0]\n mods_y = [1, 1.25, 1.3]\n if a_lead < 0: # more weight to slight lead decel\n a_lead *= interp(a_lead, mods_x, mods_y)\n\n if a_lead - a_ego > 0: # return only if adding distance\n return 0\n\n rel_x = [-2.6822, -1.7882, -0.8941, -0.447, -0.2235, 0.0, 0.2235, 0.447, 0.8941, 1.7882, 2.6822]\n mod_y = [0.3245 * 1.1, 0.277 * 1.08, 0.11075 * 1.06, 0.08106 * 1.045, 0.06325 * 1.035, 0.0, -0.09, -0.09375, -0.125, -0.3, -0.35]\n return interp(a_lead - a_ego, rel_x, mod_y)", "def estimate_prior_solar_longitude(cls, lam, tee):\n rate = cls.MEAN_TROPICAL_YEAR / 360.0\n tau = tee - (rate * mod(cls.solar_longitude(tee) - lam, 360))\n cap_Delta = mod(cls.solar_longitude(tau) - lam + 180, 360) - 180\n return min(tee, tau - (rate * cap_Delta))", "def get_closest_loc(self):\n raise NotImplementedError()", "def GetClosestPoint(self):\n ...", "def determine_hand_location(self):\n if self.orientation == 'normal':\n xloc,yloc,zloc,f1prox,f2prox,f3prox=0,0,0,0,0,0\n elif self.orientation == 'top':\n size=self._get_obj_size()\n\n if self.obj_size=='b':\n Z=0.15\n elif self.obj_size=='m':\n Z=0.14\n elif self.obj_size=='s':\n Z=0.13\n stuff=np.matmul(self.Tfw[0:3,0:3],[-0.005,-0.155,Z+0.06])\n #stuff=np.matmul(self.Tfw[0:3,0:3],[0,-0.15,0.1+size[-1]*1.8])\n xloc,yloc,zloc,f1prox,f2prox,f3prox=-stuff[0],-stuff[1],stuff[2],0,0,0\n else:\n temp=np.matmul(self.Tfw[0:3,0:3],np.array([0.051,-0.075,0.06]))\n #print('temp',temp)\n xloc,yloc,zloc,f1prox,f2prox,f3prox=-temp[0],-temp[1],temp[2],0,0,0\n\n return xloc,yloc,zloc,f1prox,f2prox,f3prox", "def _get_closest_light_ahead(self, pose):\n #\n # we have very few lights in either the simulation or the live test,\n # so it is easiest just to loop thru them rather than use KDTree\n #\n pos = pose.position\n x = pos.x\n y = pos.y\n closest_idx = -1\n closest_dist2 = None\n idx = 0\n for light in self.lights:\n xl = light.pose.pose.position.x\n yl = light.pose.pose.position.y\n\n #\n # make sure light is ahead, otherwise ignore it\n # we can only do this if the car velocity is nonzero\n #\n skip_light = False\n if self.velocity_unit_vector:\n dx = xl - x\n dy = yl - y\n car_to_light = [dx,dy]\n val = self.dot2d(car_to_light,self.velocity_unit_vector)\n if val < 0.0:\n #\n # light is behind us so continue\n #\n skip_light = True\n\n if not skip_light:\n if closest_dist2 is None:\n closest_idx = idx\n closest_dist2 = (x-xl)*(x-xl) + (y-yl)*(y-yl)\n else:\n dist2 = (x-xl)*(x-xl) + (y-yl)*(y-yl)\n if dist2 < closest_dist2:\n closest_idx = idx\n closest_dist2 = dist2\n idx+=1\n \n return closest_idx", "def lidar_relative(self):\n return self.distance", "def calc_measurement_loc(self):\n\n from pysat.utils import coords\n\n az_keys = [kk[5:] for kk in list(self.data.keys())\n if kk.find('azdir') == 0]\n el_keys = [kk[5:] for kk in list(self.data.keys())\n if kk.find('eldir') == 0]\n good_dir = list()\n\n for i, kk in enumerate(az_keys):\n if kk in el_keys:\n try:\n good_dir.append(int(kk))\n except ValueError:\n logger.warning(\"unknown direction number [{:}]\".format(kk))\n\n # Calculate the geodetic latitude and longitude for each direction\n if len(good_dir) == 0:\n raise ValueError(\"No matching azimuth and elevation data included\")\n\n for dd in good_dir:\n # Format the direction location keys\n az_key = 'azdir{:d}'.format(dd)\n el_key = 
'eldir{:d}'.format(dd)\n lat_key = 'gdlat{:d}'.format(dd)\n lon_key = 'gdlon{:d}'.format(dd)\n # JRO is located 520 m above sea level (jro.igp.gob.pe./english/)\n # Also, altitude has already been calculated\n gdaltr = np.ones(shape=self['gdlonr'].shape) * 0.52\n gdlat, gdlon, _ = coords.local_horizontal_to_global_geo(self[az_key],\n self[el_key],\n self['range'],\n self['gdlatr'],\n self['gdlonr'],\n gdaltr,\n geodetic=True)\n\n # Assigning as data, to ensure that the number of coordinates match\n # the number of data dimensions\n self.data = self.data.assign({lat_key: gdlat, lon_key: gdlon})\n\n # Add metadata for the new data values\n bm_label = \"Beam {:d} \".format(dd)\n self.meta[lat_key] = {self.meta.units_label: 'degrees',\n self.meta.name_label: bm_label + 'latitude',\n self.meta.desc_label: bm_label + 'latitude',\n self.meta.plot_label: bm_label + 'Latitude',\n self.meta.axis_label: bm_label + 'Latitude',\n self.meta.scale_label: 'linear',\n self.meta.min_label: -90.0,\n self.meta.max_label: 90.0,\n self.meta.fill_label: np.nan}\n self.meta[lon_key] = {self.meta.units_label: 'degrees',\n self.meta.name_label: bm_label + 'longitude',\n self.meta.desc_label: bm_label + 'longitude',\n self.meta.plot_label: bm_label + 'Longitude',\n self.meta.axis_label: bm_label + 'Longitude',\n self.meta.scale_label: 'linear',\n self.meta.fill_label: np.nan}\n\n return", "def FindClosestPoint(self, ):\n ...", "def closest_cruising_altitude(altitude):\n return 1000 * ((altitude + 500) // 1000)", "def findPathToClosestDot(self, gameState):\n # Here are some useful elements of the startState\n startPosition = gameState.getPacmanPosition()\n food = gameState.getFood()\n walls = gameState.getWalls()\n problem = AnyFoodSearchProblem(gameState)\n\n \"*** YOUR CODE HERE ***\"\n return breadthFirstSearch(problem)\n # util.raiseNotDefined()", "def esw(self, t):\n\n\n es0 = 6.1078\n\n # ES0 = SATURATION VAPOR RESSURE OVER LIQUID WATER AT 0C \n pol = t * (t * (t * (t * (t * (t * (t * (t * (t * \n - 3.0994571e-20 + 1.1112018e-17) - 1.7892321e-15) + \n 2.1874425e-13) - 2.9883885e-11) + 4.3884187e-9) - \n 6.1117958e-7) + 7.8736169e-5) - 0.0090826951) + 0.99999683\n\n # Computing 8th power\n r1 = pol\n r1 *= r1\n r1 *= r1\n ret_val = es0 / (r1 * r1)\n return ret_val\n \n \n def tcon(self, t, d):\n \"\"\" THIS FUNCTION RETURNS THE TEMPERATURE TCON (CELSIUS) AT THE LIFTING */\n CONDENSATION LEVEL, GIVEN THE TEMPERATURE T (CELSIUS) AND THE\n DEW POINT D (CELSIUS).\n\n BAKER,SCHLATTER 17-MAY-1982 Original version \"\"\"\n\n # COMPUTE THE DEW POINT DEPRESSION S.\n\n s = t - d;\n\n # THE APPROXIMATION BELOW, A THIRD ORDER POLYNOMIAL IN S AND T,\n # IS DUE TO HERMAN WOBUS. THE SOURCE OF DATA FOR FITTING THE\n # POLYNOMIAL IS UNKNOWN.\n\n dlt = s * (t * 0.001278 + 1.2185 + s * (s * 1.173e-5\n - 0.00219 - t * 5.2e-6))\n ret_val = t - dlt\n return ret_val\n \n def tsa(self, os, pres):\n \"\"\" Very little documentation on these following routines, so unsure \n of the origin and derivation of these algorithms. \"\"\"\n\n rocp = 0.28571482\n\n a = os # +273.16\n tq = 253.16\n d = 120.0\n \n i = 0\n for i in range(12):\n tqk = tq - 273.16\n d /= 2\n x = a * exp(- 2.6518986 * self.w(tqk, pres) / tq) - tq * pow((1000.0 / pres), rocp) \n if (fabs(x) <= 0.0):\n break\n if x < 0.0:\n sign = - 1\n else:\n sign = 1 \n tq += (d * sign)\n\n return tq # -273.16\n \n def w(self, temp, pres):\n \"\"\" Very little documentation on these following routines, so unsure \n of the origin and derivation of these algorithms. 
\"\"\"\n \n x = self.esat(temp)\n return (622.0 * x / (pres - x))\n \n def temp_of_te(self, te, press):\n import Temp_of_te\n return Temp_of_te.temp_of_te(te,press)\n\n def capeFunc(self, usetv, p_dat_PPointer, tve_dat_PPointer, p0, th0, sh0):\n import CapeFunc\n return CapeFunc.capeFunc(usetv, p_dat_PPointer, tve_dat_PPointer, p0, th0, sh0)\n \n def lfcpar(self, eptpar, pcb, tcb, hcb, t1, t2, p1, ht1):\n \"\"\" his routine computes the level of free convection of a rising parcel.\n History.\n -------- \n Don Baker 01 Jun 85 Original version.\n Dale Perry Oct 96 Adapted code to work with WFO\n\n Description of input and output.\n --------------------------------\n On input:\n --------- \n EPTPAR Real Moist adiabat along which parcel rises above\n the LCL (K).\n PCB Real LCL pressure (mb).\n TCB Real LCL temperature (K).\n HCB Real LCL height (m asl).\n T1 Real Array Parcel temperatures at lifted parcel levels (K).\n T2 Real Array Sounding temperatures at parcel levels (K).\n P1 Real Array Lifted parcel pressure levels (mb).\n HT1 Real Array Lifted parcel level heights (m asl).\n NPAR Integer Number of lifted parcel levels passed.\n\n On output:\n ---------- \n PLFC1 Real Level of free convection pressure (mb).\n HLFC1 Real Level of free convection height (m asl).\n TLFC1 Real Level of free convection temperature (K). \"\"\"\n \n lfcReturn = zeros((1, 6), 'float32')\n TOLER = 0.05\n npar = p.shape[0]\n print \"npar=\", npar\n # Find the location in the parcel arrays that corresponds to the LCL\n i = 0\n for ii in range(npar) :\n i = ii\n if math.fabs(p1[i] - pcb) < 0.1 :\n break\n else :\n continue\n print \"found pressure at \", i\n # Initially assign flag values to the LFC in case no buoyancy exists.\n plfc1 = meteo.TOP_FLG\n hlfc1 = meteo.TOP_FLG\n tlfc1 = meteo.TOP_FLG\n plfc2 = meteo.TOP_FLG\n hlfc2 = meteo.TOP_FLG\n tlfc2 = meteo.TOP_FLG\n \n if i == npar :\n lfcReturn[0][0] = plfc1\n lfcReturn[0][1] = hlfc1\n lfcReturn[0][2] = tlfc1\n lfcReturn[0][3] = plfc2\n lfcReturn[0][4] = hlfc2\n lfcReturn[0][5] = tlfc2\n return lfcReturn\n \n # Check and see if parcel is positively buoyant at the LCL already. If\n # this is true, then the LFC is coincident with the LCL. This may be\n # the case in 00Z soundings when a super-adiabatic layer exists near\n # the surface.\n \n if t1[i] >= t2[i] :\n plfc1 = pcb\n hlfc1 = hcb\n tlfc1 = tcb\n lfcReturn[0][0] = plfc1\n lfcReturn[0][1] = hlfc1\n lfcReturn[0][2] = tlfc1\n lfcReturn[0][3] = plfc2\n lfcReturn[0][4] = hlfc2\n lfcReturn[0][5] = tlfc2\n return lfcReturn\n \n # Loop upward from the LCL until the parcel temperature becomes warmer\n # than the environment. 
If this does not occur, no positive buoyancy\n # exists and the routine exits with whatever flag value was assigned to\n # the level of free convection.\n # To prevent a stack out of bounds error when I=1, set it equal to the\n # next level if I=1.\n \n if i == 0 : \n i = 1\n \n runLoop = True\n print \"entering loop1 with i=\", i\n for j in range(i, npar) :\n if t1[j] >= t2[j] :\n pt = p1[j]\n pb = p1[j - 1]\n plog1 = math.log(p1[j])\n plog3 = math.log(p1[j - 1])\n \n print \"entering inner loop1 j=\", j\n for count in range(100) :\n pm = 0.5 * (pb + pt)\n plog2 = math.log(pm)\n etpar = eptpar * math.pow((pm / 1000.0), 0.286)\n t1m = self.temp_of_te(etpar, pm)\n t2m = self.interp1(t2[j], t2[j - 1], plog1, plog2, plog3)\n if math.fabs(t1m - t2m) <= TOLER :\n plfc1 = pm\n hlfc1 = self.interp1(ht1[j], ht1[j - 1], plog1, math.log(plfc1), plog3)\n tlfc1 = t1m\n runLoop = False;\n print \"attempting to break out of loop 1\"\n break\n if (t1m - t2m) > TOLER :\n pt = pm\n if (t2m - t1m) > TOLER :\n pb = pm\n if runLoop != True :\n break\n else :\n continue\n\n # Continue looping to find a possible second LFC per conditions\n # above rules.\n j = j + 1\n print \"entering loop2 with j=\", j\n for k in range(j, npar) :\n if t1[k] >= t2[k] :\n pt = p1[k]\n pb = p1[k - 1]\n plog1 = math.log(p1[k])\n plog3 = math.log(p1[k - 1])\n \n print \"entering inner loop2 k=\", k\n for count in range(100) :\n pm = 0.5 * (pb + pt)\n plog2 = math.log(pm)\n etpar = eptpar * math.pow(pm / 1000.0, 0.286)\n t1m = self.temp_of_te(etpar, pm)\n t2m = self.interp1(t2[k], t2[k - 1], plog1, plog2, plog3)\n if math.fabs(t1m - t2m) <= TOLER :\n plfc2 = pm\n hlfc2 = self.interp1(ht1[k], ht1[k - 1], plog1, math.log(plfc2, plog3))\n tlfc2 = t1m\n lfcReturn[0][0] = plfc1\n lfcReturn[0][1] = hlfc1\n lfcReturn[0][2] = tlfc1\n lfcReturn[0][3] = plfc2\n lfcReturn[0][4] = hlfc2\n lfcReturn[0][5] = tlfc2\n print \"exiting loop2 k=\", k\n return lfcReturn\n if (t1m - t2m) > TOLER :\n pt = pm\n if (t2m - t1m) > TOLER :\n pb = pm\n \n lfcReturn[0][0] = plfc1\n lfcReturn[0][1] = hlfc1\n lfcReturn[0][2] = tlfc1\n lfcReturn[0][3] = plfc2\n lfcReturn[0][4] = hlfc2\n lfcReturn[0][5] = tlfc2\n return lfcReturn\n \n def richno(self, ht, hw, uw, vw, rho, buoy):\n \"\"\" Statement of purpose.\n Compute the dimensionless bulk Richardson number as defined by\n Weisman and Klemp (1982).\n History.\n -------- \n Tom Schlatter Late 1982 Original code based on MWR article by \n Weisman and Klemp (1982).\n D. Baker 01 Jun 84 Removed computation of positive energy...\n made it an input argument.\n D. Baker 01 Jul 85 Updated code for documentation.\n J. Ramer 16 Jun 92 Added divide-by-zero prevention.\n D. Perry 10 Oct 96 Adapted code for WFO \n\n Description of input and output.\n --------------------------------\n On input:\n --------- \n HT Sounding heights (m asl).\n HW Heights of wind reports (m asl).\n UW Wind u-components (m/s).\n VW Wind v-components (m/s).\n RHO Air density at each sounding level (kg/m**3).\n BUOY Positive buoyant energy (J/kg).\n\n On output:\n ---------- \n RICHNUM Dimensionless bulk Richardson number. 
\"\"\"\n \n \n mnl = 500\n nlvls = ht.shape[0]\n nw = uw.shape[0]\n HALFKM = 500.0\n SIXKM = 6000.0\n richnum = meteo.MISSING\n rhow = rho\n # Interpolate an air density value to each reported wind level\n if nlvls != nw :\n rhow = self.wndrho(rho, ht, hw)\n else :\n for i in range(nlvls) :\n rhow[i] = rho[i]\n \n # QC\n qc = 1\n for i in range (2, nw) :\n if uw[i] != uw[0] and vw[i] != vw[0] :\n qc = 0\n \n if nlvls < 3 or nlvls > 500 :\n qc = 1\n \n for i in range(nw) :\n if rhow[i] <= 0.0 : \n qc = 1\n break\n \n for i in range(2, nw) :\n if (hw[i] - hw[i - 1]) <= 0.0 :\n qc = 1\n break\n \n for i in range(2, nlvls) :\n if (ht[i] - ht[i - 1]) <= 0.0 :\n qc = 1\n \n if qc == 1 :\n return richnum\n \n # initialize sums\n \n sumu = 0\n sumv = 0\n sumr = 0\n sumul = 0\n sumvl = 0\n sumrl = 0\n \n # define shear layer bounds (above ground level)\n hbl = hw[0] + HALFKM\n htop = hw[0] + SIXKM\n \n if hw[nw] < htop or hw[1] > htop :\n return richnum\n \n # Loop to calculate shear terms\n \n i = 0\n rulay = 0.5 * (rhow[i] * uw[i])\n rvlay = 0.5 * (rhow[i] * vw[i])\n rlay = 0.5 * rhow[i]\n dz = hw[i]\n \n for i in range(1, nw) :\n rulay = 0.5 * (rhow[i] * uw[i] + rhow[i - 1] * uw[i - 1])\n rvlay = 0.5 * (rhow[i] * vw[i] + rhow[i - 1] * vw[i - 1])\n rlay = 0.5 * (rhow[i] + rhow[i - 1])\n dz = hw[i] - hw[i - 1]\n if hw[i] > htop :\n break\n sumu = sumu + rulay * dz\n sumv = sumv + rvlay * dz\n sumr = sumr + rlay * dz\n if hw[i] > hbl and i > 1 :\n sumul = sumul + rulay * dz\n sumvl = sumvl + rvlay * dz\n sumrl = sumrl + rlay * dz\n \n sumu = sumu + rulay * dz\n sumv = sumv + rvlay * dz\n sumr = sumr + rlay * dz\n \n if sumr <= 0.0 :\n u6 = 0.0\n v6 = 0.0\n else : \n u6 = sumu / sumr\n v6 = sumv / sumr\n \n if sumrl <= 0.0 :\n ul = 0.0\n vl = 0.0\n else :\n ul = sumul / sumrl\n vl = sumvl / sumrl\n \n # calculate one half the square of the shear vector in the lowest 6 km\n u6 = u6 - ul\n v6 = v6 - vl\n ske = 0.5 * (u6 * u6 + v6 * v6)\n \n # compute the bulk richardson number\n \n if ske > 0 :\n richnum = buoy / ske\n \n return richnum\n \n def wndrho(self, rho, ht, hw):\n \"\"\" PURPOSE:\n --------\n INTERPOLATE TO DETERMINE DENSITY AT WIND LEVELS GIVEN DENSITY AT\n PRESSURE LEVELS IN A SOUNDING. INTERPOLATION IS LINEAR BY HEIGHT.\n\n T. Schlatter late 82 Probable original author.\n D. Baker 17 Dec 85 Added doc and indentation (?)\n D. Baker (?) after Dec 85 Replaced 100 loop with 300 loop. It\n appears that the interpolation is out.\n J. Wakefield 17 Nov 92 Added parameter list documentation.\n D. Perry Sep 96 Adapted code to work with WFO.\n\n Argument I/O Description\n -------- --- -----------------------------------------------\n Rho I Density (kg m-3) at sounding levels.\n Ht I Heights (m) at sounding levels.\n NLvls I Number of sounding levels.\n HW I Heights (m) of wind obs.\n NW I Number of wind obs.\n RhoW O Density interpolated to wind obs heights. 
\"\"\"\n \n \n # Interpolate to derive density at wind heights\n j = 0\n nw = len(hw)\n skip = False\n for i in range(nw) :\n if skip == True :\n break\n k = j\n for j in range(k, nlvls - 1) :\n if hw[i] >= ht[j] and hw[i] <= ht[j + 1] :\n rhow[i] = self.interp1(rho[j], rho[j + 1], ht[j], hw[i], ht[j + 1])\n skip = True\n break\n \n rhow[0] = rho[0]\n k1 = 0\n k2 = 1\n \n for i in range(1, nw) :\n if ht[k2] < hw[i] :\n k1 = k2\n k2 = k2 + 1\n if k2 > nlvls :\n for j in range(i, nw) :\n rhow[j] = rho[k1]\n return rhow\n \n rhow[i] = self.interp1(rho[k1], rho[k2], ht[k1], hw[i], ht[k2])\n \n return rhow\n \n def lclpar(self, meanmix, ts, p, ht, t, td):\n \"\"\" Statement of purpose.\n ---------------------\n This routine computes the pressure, height, and temperature of the\n lifting condensation level (LCL) from a sounding.\n \n History.\n -------- \n Dale Perry 20 Sep 96 Bootlegged version of cclpar.f modified for\n determining the LCL.\n \n Description of input and output.\n --------------------------------\n On input:\n --------- \n MEANMIX Mixing ratio used to intersect the sounding (g/kg).\n TS Surface temp (12Z-forecast max temp;00Z-sfc temp) (K). \n P Sounding pressures (mb).\n HT Sounding heights (m asl).\n T Sounding temperatures (K).\n TD Sounding dewpoint temperatures (K).\n \n On output:\n ---------- \n PLCL Pressure of the lifting condensation level (mb).\n TLCL Temperature of the lifting condensation level (K).\n HTLCL Height of the lifting condensation level (m asl).\n \n User notes:\n -----------\n The low level mean mixing ratio is input to this routine...\n computed outside. \"\"\"\n\n TOLER = 0.5\n nlvls = len(p)\n lfcReturn = zeros((1, 3), 'float32')\n \n # Loop up through sounding until mixing ratio line corsses the dry \n # adiabat through the surface temperature. Initially set the LCL\n # parameters to MISSING values in case no LCL is found\n \n plcl = meteo.TOP_FLG\n hlcl = meteo.TOP_FLG\n tlcl = meteo.TOP_FLG\n t2 = ts * math.pow(1000.0 / p[0], 0.286)\n \n for i in range(nlvls) :\n t1 = self.temp_mixratio(p[i], meanmix)\n t1 = t1 * math.pow(1000.0 / p[i], 0.286)\n if t1 >= t2 :\n break\n \n if i == 1 : #LCL at the surface\n plcl = p[0]\n hlcl = ht[0]\n tlcl = t[0]\n lfcReturn[0][0] = plcl\n lfcReturn[0][1] = hlcl\n lfcReturn[0][2] = tlcl\n return lfcReturn\n \n # We were at the top of the sounding, but 'I' got incremented one more\n # beyond. Reset it to the top of the sounding index 'NLVLS'\n if i > nlvls :\n i = nlvls - 1\n \n pt = p[i]\n pb = p[i - 1]\n plog1 = math.log(p[i])\n plog3 = math.log(p[i - 1])\n \n # Iterate to find the LCL. 
Keep cutting level in half until the point\n # of intersection is found\n \n for count in range(100) :\n pm = 0.5 * (pt + pb)\n plog2 = math.log(pm)\n t1 = self.temp_mixratio(pm, meanmix)\n t1 = t1 * math.pow(1000.0 / pm, 0.286)\n if math.fabs(t1 - t2) <= TOLER :\n plcl = pm\n tlcl = t1 * math.pow(plcl / 1000.0, 0.286) \n hlcl = self.interp1(ht[i], ht[i - 1], plog1, math.log(plcl), plog3)\n lfcReturn[0][0] = plcl\n lfcReturn[0][1] = hlcl\n lfcReturn[0][2] = tlcl\n return lfcReturn \n if (t1 - t2) > TOLER :\n pt = pm\n if (t2 - t1) > TOLER :\n pb = pm\n \n lfcReturn[0][0] = plcl\n lfcReturn[0][1] = hlcl\n lfcReturn[0][2] = tlcl\n return lfcReturn", "def calculate_position(c, t):\n return c[0] * t**5 + c[1] * t**4 + c[2] * t**3 + c[3] * t**2 + c[4] * t + c[5]", "def calc_nearest_ind(self, robot_pose):\n pass", "def __getDewpoint(self, temp, rh):\n \n retVal = 243.04 * (math.log(rh / 100) + ((17.625 * temp) / (243.04 + temp))) / \\\n (17.625 - math.log(rh / 100) - ((17.625 * temp) / (243.04 + temp))) \n \n return round(retVal, 1)", "def get_lookahead_point(self):\n lookahead_target_dist = self.lookahead_dist #+ (1 + self.curr_v)\n\n if self.path_point_idx == len(self.current_path) - 1 or self.path_point_idx == -1:\n #End of path, no more lookahead\n return self.path_point\n\n prev_pt = self.current_path[self.path_point_idx]\n curr_pt = self.current_path[self.path_point_idx + 1]\n pt_dist = np.hypot((prev_pt - curr_pt)[0], (prev_pt - curr_pt)[1])\n curr_dist = pt_dist\n c = self.path_point_idx\n while curr_dist < lookahead_target_dist and c < len(self.current_path) - 1:\n prev_pt = self.current_path[c]\n curr_pt = self.current_path[c + 1]\n pt_dist = np.hypot((prev_pt - curr_pt)[0], (prev_pt - curr_pt)[1])\n curr_dist += pt_dist\n c += 1\n\n if curr_dist < lookahead_target_dist:\n return self.current_path[-1]\n else:\n #Interpolate to get the actual lookahead point\n frac = (curr_dist - lookahead_target_dist) / pt_dist\n pt = frac * prev_pt + (1-frac) * curr_pt\n return pt" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Play a song based on its path.
def play_song(self):
    path = input('Give path to wanted song: ')  # Request path to song
    path = path.replace('\\', '/')
    if not self.path_storage_re.match(path):  # Check if the wanted song is from the storage directory
        print("Give a valid path")
    else:
        p = vlc.MediaPlayer(path)  # Create VLC instance and play the song
        p.play()
        self.playSong.append(p)
        self.isPlaying = True
[ "def play(self, song):\n pass", "def play_music(path, play_cnt):\n\tcanonicalized_path = path.replace('/', os.sep).replace('\\\\', os.sep)\n\tmixer.music.load(canonicalized_path)\n\tmixer.music.play(play_cnt)\n\treturn 1", "async def play(self, ctx, *, song: str):\r\n #await self.queue_music(ctx, song)\r\n await self.bot.delete_message(ctx.message)\r\n await self.play_url(ctx, song)", "def play(self, songpos=None):\n # TODO: implement songpos !\n if songpos is None:\n resp = yield from self.command('play')\n return True", "def play_song(self):\n # stop if already playing\n if self.music:\n self.music.stop()\n\n # play next song\n self.music = arcade.Sound(self.music_list[self.current_song], streaming=True)\n self.music.play(MUSIC_VOLUME)\n # this is a delay for update, if we don't do this,\n # update will think the music is over and advance us to next song\n time.sleep(0.03)", "def play(song):\n # Show the metadata\n if (verbose==True):\n for s in song.keys():\n print s, \":\", \n print song[s]\n else:\n print \"Title:\", song[\"title\"]\n print \"Artisit:\", song[\"artist\"]\n print \"Album:\", song[\"albumtitle\"]\n print \"Year\", song[\"public_time\"]\n print \"Company:\", song[\"company\"]\n print \"Length\", song[\"length\"]\n print \"Playing...\"\n mp3_url = song[\"url\"]\n song_length = song[\"length\"]\n p = subprocess.Popen([\"mplayer\", \"-msglevel\", \"all=0\", mp3_url])\n\n # At the same time, download the song:\n u = urllib2.urlopen(mp3_url)\n local_mp3 = open(song[\"title\"] + \"-\" + song[\"artist\"] + \".mp3\", \"w\")\n local_mp3.write(u.read())\n local_mp3.close()\n # time.sleep(song_length)\n i = 0\n while(True):\n time.sleep(1)\n i += 1\n if i == song_length:\n # Kill the process when the song is finished.\n p.terminate()\n print \"#\" * 80\n break", "def play(sound):\n if SOUNDDIR != \"\":\n call([\"aplay\", SOUNDDIR + sound])", "def _play_audio(self, path_or_location):\n url = path_or_location.replace('https', 'http')\n audi_commd = self._vlc_audio_command + [url]\n logger.info('VLC command: {}'.format(audi_commd))\n process = subprocess.Popen(audi_commd)\n self._player_pid = process.pid\n logger.info(\"vlc pid \" + str(process.pid))\n\n # add pid to child_pids\n self._child_pids[process.pid] = True", "def media_player_select_song(self, device_song_path):\n media_cmd = ['am', 'start', '-a', 'android.intent.action.VIEW', '-d', '\"file://{}\"'.format(device_song_path),\n '-t', 'audio/wav', '--user', '0']\n if self.activity is not None:\n media_cmd.extend(['-n', self.activity])\n self.connection.shell(media_cmd)\n # delay script (by sleeping) in an attempt to avoid media player crashes / red screens\n if self.delay:\n time.sleep(self.delay)", "def _play(self, uri, loop):\n assert self.instance is None\n self.proto._uglix.ensure_file('music/' + uri, uri, static=True)\n e = threading.Event()\n def SongFinished(event):\n \"\"\"\n Callback for VLC's event manager.\n \"\"\"\n e.set()\n options = ['--quiet']\n if loop:\n options.append('--input-repeat=9999')\n self.instance = self.vlc.Instance(options)\n self.player = self.instance.media_player_new()\n self.event_manager = self.player.event_manager()\n self.media = self.instance.media_new(self.proto._uglix.local_path(uri))\n self.player.set_media(self.media)\n self.event_manager.event_attach(self.vlc.EventType.MediaPlayerEndReached, SongFinished)\n self.player.play()\n e.wait()\n self.event_manager.event_detach(self.vlc.EventType.MediaPlayerEndReached)", "def play_sound(self):\n # 
http://soundbible.com/2103-1-Person-Cheering.html\n my_path = os.path.dirname(__file__)\n sound_path = os.path.join(my_path, 'yay.mp3')\n sound = SoundLoader.load(sound_path)\n sound.play()", "def load(self, song):\n self.currentSongName = song\n self.currentSong = pygame.mixer.music.load(song)", "def play_music(self):\n song_index = -1\n if self.num_songs == 0:\n sys.stdout.write(\"No songs found\\n\")\n sys.exit(0)\n \n # FIXME: spacebar/pause is an mplayer-specific command\n sys.stdout.write(\"Press spacebar to pause songs\\n\")\n sys.stdout.write(\"Press ctrl+c once to skip a song\\n\")\n sys.stdout.write(\"Hold ctrl+c to exit\\n\")\n sys.stdout.write(\"%d files found.\\n\" % self.num_songs)\n while True:\n try:\n song_index = self._get_song_index(song_index)\n if song_index == None:\n sys.exit(0)\n song = self.songs[song_index]\n sys.stdout.write(\"%s\\n\" % song)\n \n # Disabled the following as it got pretty annoying seeing a \n # torrent of notifications for non-music files (mplayer \n # gracefully skips these). \n #try:\n # notify_cmd=\"notify-send -t 1000 '%s'\" % \\\n # song.split(\"/\")[-1]\n # subprocess.check_call(notify_cmd, shell=True)\n #except:\n # pass\n #FIXME: escape quotes in songs\n play_cmd = '\"%s\" \"%s\" > /dev/null 2>&1 ' % \\\n (self.music_client, song) \n subprocess.check_call(play_cmd, shell=True)\n except KeyboardInterrupt:\n try:\n # HACK to allow repeated ctrl+c to exit outright\n time.sleep(0.1) \n except KeyboardInterrupt:\n sys.stderr.write(\"\\nExiting...\\n\")\n sys.exit(0)", "def play_button_pressed(self):\n \n song = self.songbox.get(tk.ACTIVE)\n song = os.path.join(f\"{self.directory}\", f\"{song}\")\n \n\n pygame.mixer.music.load(song)\n pygame.mixer.music.play()\n self.get_playtime()\n slide_position = int(song_length)\n self.song_slider.config(to = slide_position)\n self.song_slider.set(0)\n self.check_event()", "def play(self):\n try:\n self.playing = self.explorer.get(self.explorer.curselection())\n self.set_status_label('PLAYING ' + self.playing)\n self.status_label.update_idletasks()\n ffplay = [\n \"ffplay\", self.downloads + self.playing,\n \"-nodisp\", \"-autoexit\"\n ]\n Popen(ffplay, stdout=PIPE, stderr=STDOUT)\n except Exception:\n self.set_status_label(\"Please select a track to play\")", "async def play(self, ctx, *, song: str):\n state = self.get_voice_state(ctx.message.server)\n opts = {\n 'default_search': 'ytsearch',\n 'quiet': True,\n }\n\n if state.voice is None:\n success = await ctx.invoke(self.summon)\n if not success:\n return\n if state.voice.channel != ctx.message.author.voice_channel:\n await self.bot.say('You can only modify the queue if you\\'re in the same channel as me!')\n return\n if len(state.songs._queue) >= 6:\n await self.bot.say('There can only be up to 6 items in queue!')\n return\n\n status = await self.bot.say('Loading... 
🌚')\n pg_task = self.loop.create_task(self.progress(status, 'Loading'))\n state.voice.encoder_options(sample_rate=48000, channels=2)\n try:\n player = await state.voice.create_ytdl_player(song, ytdl_options=opts, after=state.toggle_next)\n except Exception as e:\n if type(e).__name__.endswith('DownloadError') or type(e).__name__.endswith('IndexError'):\n pg_task.cancel()\n await self.bot.delete_message(status)\n await self.bot.say('**That video couldn\\'t be found!**')\n return False\n else:\n raise e\n\n player.volume = 0.7\n entry = VoiceEntry(ctx.message, player, False)\n was_empty = state.songs.empty()\n await state.songs.put(entry)\n if state.current:\n await self.bot.say('Queued ' + str(entry))\n pg_task.cancel()\n await self.bot.delete_message(status)", "def play(filename):\n SoundClient(blocking=True).playWave(filename)", "def play_music(sid):\n # Get the parameters for the get_song_id request\n artist = None\n album = None\n title = None\n if not request.json:\n # If no JSON parameters were given, just resume playing the song\n db = sqlite3.connect('/home/tropius/TROPIUS/TROPIUS.db')\n host = hosts.get_detail(db, sid)\n spotify.resume(host['ip'])\n return jsonify({})\n else:\n try:\n # Get the host data from the database\n db = sqlite3.connect('/home/tropius/TROPIUS/TROPIUS.db')\n host = hosts.get_detail(db, sid)\n artist = None\n album = None\n track = None\n if request.json.has_key('track') and request.json.get('track'):\n track = request.json.get('track')\n elif request.json.has_key('album') and request.json.get('album'):\n album = request.json.get('album')\n elif request.json.has_key('artist') and request.json.get('artist'):\n artist = request.json.get('artist')\n else:\n spotify.resume(host['ip'])\n return jsonify({})\n spotify.compound_play(host['ip'], artist=artist, album=album, song=track)\n return jsonify({})\n except:\n abort(400)", "async def play(self, ctx, *, url): #joins the vc automatically if not in one and searchs the song using youtube_dl then plays it.\n\t\ttry: #it will try to play the song \n\t\t\tasync with ctx.typing():\n\t\t\t\tplayer = await YTDLSource.from_url(url, loop=self.bot.loop, stream=True)\n\t\t\t\tctx.voice_client.play(player, after=lambda e: print(f'Player error: {e}') if e else None)\n\n\t\t\tawait ctx.send(f'Now playing: {player.title}')\n\n\t\texcept: #if it cant play the song \n\t\t\tawait ctx.send(\"Something went wrong.\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Stop the currently playing/paused song.
def stop_song(self):
    if self.isPlaying:
        self.playSong[0].stop()
        self.playSong.clear()
        self.isPlaying = False
        print("Music stopped")
    else:
        print("Play a song first...")
[ "async def stop(self):\n # Are we already stopped?\n if self._current_track is None:\n return\n \n await self._stop()\n \n # do not change `self._paused`\n self._paused_track = None\n self._current_track = None", "def stop(self):\r\n pygame.mixer.music.stop()\r\n self.musicPlaying = False\r\n self.musicPaused = False", "def stop(self):\n self.playing = False", "def stop(self):\r\n self._video_is_playing = False\r\n self._audio.stop()", "async def stop(self) -> None:\n if self._play_task is not None:\n # If we stop during a song, add it to the front of the queue to be resumed later\n if self._now_playing is not None:\n if self._play_start_time is not None:\n # Add the time spent playing this track to the starting offset, so it resumes\n # where it left off\n self._now_playing.offset += max(\n time.perf_counter() - self._play_start_time,\n 0.0\n )\n self._play_start_time = None\n self._queue.appendleft(self._now_playing)\n self._now_playing = None\n self._play_task.cancel()\n await self._play_task\n self._end_stream()", "def stop(self):\n if self.is_playing:\n self.is_playing = False\n self.tstamp_play = None\n self.thread.stop()", "def stop_soundtrack(self):\n SoundManager.remove_music(self.source, self)", "async def _stop(self, ctx: commands.Context):\n\n ctx.voice_state.songs.clear()\n\n if not ctx.voice_state.is_playing:\n ctx.voice_state.voice.stop()\n await ctx.message.add_reaction('⏹')", "async def stop(self):\n if len(self.misc_audio) > 0:\n self.misc_audio.popleft().player.stop()\n elif self.cur_player is not None:\n self.cur_player.stop()\n self.cur_player = None\n # await self.clean_up()", "def Music_Stop(self):\n pygame.mixer.music.stop()", "def stopMusic(self, music):\n pygame.mixer.music.stop()", "def stop():\n\n app._media_backend.stop_playing()\n return ''", "async def stop(self, ctx): #stops playing the music in vc (if there is any playing)\n\t\tawait ctx.voice_client.disconnect()", "def stop_sound(self):\n if self._audio:\n self._audio.kill()\n self._audio = None", "def unpause_music(self):\n\n self.my_pygame.mixer.music.unpause() # unpause the music after pause", "def unPause(self):\r\n if self.musicPlaying and self.musicPaused:\r\n pygame.mixer.music.unpause()\r\n self.musicPaused = False", "def stop(self):\n\n self.playing = False\n self.book.reset()\n\n self.status_light.action = 'on'\n\n with self.mpd_client:\n self.mpd_client.stop()\n self.mpd_client.clear()", "def stop(self):\n if logging.getLogger().getEffectiveLevel() != 10:\n try:\n self._player.terminate()\n except AttributeError as e: # Make things a bit more user friendly and allow a stop command even if not playing\n if str(e) == \"'Player' object has no attribute '_player'\":\n return\n else:\n raise AttributeError(str(e)) # Only catch the known error and raise any others to pass them through\n logging.debug(\"Stopping Playback\")", "def stop_audio(self):\n # LOCK\n with self._audio_lock: \n self._prompt_queue.clear()\n if self._channel:\n self._channel.stop()\n self._channel = None\n self._current_prompt = None\n # UNLOCK" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Pause the currently playing song.
def pause_song(self):
    if self.isPlaying:
        self.playSong[0].pause()
        print("Song paused. To continue type Play.")
    else:
        print("Play a song first...")
[ "def pause(self):\n self.paused = True\n # FIXME?: Why is this not doing anything? Shouldn't it be calling into the player API?", "def pause_music(self):\n\n self.my_pygame.mixer.music.pause() # pause the music", "def pause(self):\r\n if self.musicPlaying:\r\n pygame.mixer.music.pause()\r\n self.musicPaused = True", "def pause(self):\n if not self.paused:\n pygame.mixer.music.pause()\n self.paused = True\n else:\n pygame.mixer.music.unpause()\n self.paused = False", "def pause_play(self):\n\n if self.estado == gst.STATE_PAUSED \\\n or self.estado == gst.STATE_NULL \\\n or self.estado == gst.STATE_READY:\n self.__play()\n\n elif self.estado == gst.STATE_PLAYING:\n self.__pause()", "def media_play_pause(self) -> None:\n if self.state == MediaPlayerState.PLAYING:\n self.media_pause()\n else:\n self.media_play()", "def pause(self):\n if self.status()['state'] == \"playing\":\n self.toggle_pause()", "def pause(self,event=None):\r\n # If pause -> pause or stop -> pause, ignore, or if no video\r\n if not self.isPlaying():\r\n return\r\n # If play -> pause\r\n self.progress = time.time() - self.startTimestamp\r\n if self.hasAudio:\r\n mixer.music.pause()\r\n self.state = VideoPlayer.State.PAUSED", "def _control_pause(self):\n self.player.pause()", "def media_pause(self):\n self.send_gpmdp_msg(\"playback\", \"playPause\", False)\n self._status = STATE_PAUSED\n self.schedule_update_ha_state()", "def media_pause(self) -> None:\n self._attr_state = MediaPlayerState.PAUSED\n self._client.pause()", "def __pause(self):\n\n self.set_state(gst.STATE_PAUSED)", "def pause_play(self):\n\n try:\n if self.entrada:\n if self.estado == \"playing\": # pausa\n self.__pause()\n\n elif self.estado == \"paused\":\n self.__pause(True)\n self.estado = \"playing\"\n self.emit(\"estado\", \"playing\")\n\n else:\n #if self.uri: self.load(self.uri)\n pass\n\n except Exception, e:\n print \"HA OCURRIDO UN ERROR EN PAUSE_PLAY DEL REPRODUCTOR\", e", "async def toggle_play_pause(self):\n _LOGGER.debug(\"[Foobar2k] In Play / Pause\")\n if (self._power == POWER_ON):\n if (self._state == STATE_STOPPED):\n await self.prep_fetch(HTTP_POST, POST_PLAYER_PLAY_PLAYLIST.format(self._current_playlist_id, self._current_index), data=None)\n else: \n await self.prep_fetch(HTTP_POST, POST_PLAYER_PAUSE_TOGGLE, data=None)", "def pausePlaying(self):\n # FIXME Perhaps pause-playing should actually use value 1 here?\n self.clock.callAfterMeasures(0, self.pause)", "def media_pause(self):\n self.send_key(Keys.pause)", "def do_some_play_pause(player):\n player.play_song(\"sample.mp3\")\n sleep(5)\n player.pause()\n sleep(3)\n player.resume()\n sleep(5)\n player.stop()\n sleep(2)\n player.play()\n sleep(2)\n player.mute()\n sleep(1)\n player.unmute()\n sleep(20)\n player.quit()", "def pause(self, what):\n if what:\n self.session.pause()\n self.status = 'paused'\n else:\n self.session.resume()\n self.status = 'running'", "def play(self):\n self.paused = False\n event = self._paused_event\n self._paused_event = None\n self._advance(self.last, self.schedule, event=event)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }