diff --git a/openet/core/common.py b/openet/core/common.py
index e2f68c2..07494bd 100644
--- a/openet/core/common.py
+++ b/openet/core/common.py
@@ -78,10 +78,12 @@ def landsat_c1_toa_cloud_mask(
         .Or(qa_img.rightShift(7).bitwiseAnd(3).gte(shadow_confidence))
     if snow_flag:
         cloud_mask = cloud_mask.Or(
-            qa_img.rightShift(9).bitwiseAnd(3).gte(snow_confidence))
+            qa_img.rightShift(9).bitwiseAnd(3).gte(snow_confidence)
+        )
     if cirrus_flag:
         cloud_mask = cloud_mask.Or(
-            qa_img.rightShift(11).bitwiseAnd(3).gte(cirrus_confidence))
+            qa_img.rightShift(11).bitwiseAnd(3).gte(cirrus_confidence)
+        )

     # Set cloudy pixels to 0 and clear to 1
     return cloud_mask.Not()
diff --git a/openet/core/interpolate.py b/openet/core/interpolate.py
index 0af8795..1a05b72 100644
--- a/openet/core/interpolate.py
+++ b/openet/core/interpolate.py
@@ -180,10 +180,8 @@ def _linear(image):
         # The closest image in time should be on "top"
         # CGM - Is the previous collection already sorted?
         # prev_qm_img = prev_qm_coll.mosaic()
-        prev_qm_img = prev_qm_coll.sort('system:time_start', True)\
-            .mosaic()
-        next_qm_img = next_qm_coll.sort('system:time_start', False)\
-            .mosaic()
+        prev_qm_img = prev_qm_coll.sort('system:time_start', True).mosaic()
+        next_qm_img = next_qm_coll.sort('system:time_start', False).mosaic()

         # DEADBEEF - It might be easier to interpolate all bands instead of
         #   separating the value and time bands
@@ -247,8 +245,7 @@ def _linear(image):


 # @deprecated
-def aggregate_daily(image_coll, start_date=None, end_date=None,
-                    agg_type='mean'):
+def aggregate_daily(image_coll, start_date=None, end_date=None, agg_type='mean'):
     return aggregate_to_daily(image_coll, start_date, end_date, agg_type)


@@ -401,8 +398,7 @@ def from_scene_et_fraction(scene_coll, start_date, end_date, variables,
     if t_interval.lower() not in ['daily', 'monthly', 'annual', 'custom']:
         raise ValueError('unsupported t_interval: {}'.format(t_interval))
     elif interp_method.lower() not in ['linear']:
-        raise ValueError('unsupported interp_method: {}'.format(
-            interp_method))
+        raise ValueError('unsupported interp_method: {}'.format(interp_method))

     if ((type(interp_days) is str or type(interp_days) is float) and
             utils.is_number(interp_days)):
@@ -472,8 +468,8 @@ def from_scene_et_fraction(scene_coll, start_date, end_date, variables,
     #     # raise ValueError('et_reference_resample was not set')

     if type(et_reference_source) is str:
-        # Assume a string source is an single image collection ID
-        #   not an list of collection IDs or ee.ImageCollection
+        # Assume a string source is a single image collection ID
+        #   not a list of collection IDs or ee.ImageCollection
         daily_et_ref_coll = ee.ImageCollection(et_reference_source) \
             .filterDate(start_date, end_date) \
             .select([et_reference_band], ['et_reference'])
@@ -488,7 +484,7 @@ def from_scene_et_fraction(scene_coll, start_date, end_date, variables,

     # Scale reference ET images (if necessary)
     # CGM - Resampling is not working correctly so not including for now
-    if (et_reference_factor and et_reference_factor != 1):
+    if et_reference_factor and et_reference_factor != 1:
         def et_reference_adjust(input_img):
             return input_img.multiply(et_reference_factor) \
                 .copyProperties(input_img) \
@@ -521,8 +517,9 @@ def et_reference_adjust(input_img):
     # For count, compute the composite/mosaic image for the mask band only
     if 'count' in variables:
         aggregate_coll = aggregate_to_daily(
-            image_coll = scene_coll.select(['mask']),
-            start_date=start_date, end_date=end_date)
+            image_coll=scene_coll.select(['mask']),
+            start_date=start_date, end_date=end_date,
+        )

         # The following is needed because the aggregate collection can be
         #   empty if there are no scenes in the target date range but there
@@ -531,20 +528,22 @@
         #   bands will be which causes a non-homogeneous image collection.
         aggregate_coll = aggregate_coll.merge(
             ee.Image.constant(0).rename(['mask'])
-                .set({'system:time_start': ee.Date(start_date).millis()}))
+                .set({'system:time_start': ee.Date(start_date).millis()})
+        )

     # Interpolate to a daily time step
     daily_coll = daily(
         target_coll=daily_et_ref_coll,
         source_coll=scene_coll.select(interp_vars),
-        interp_method=interp_method, interp_days=interp_days,
+        interp_method=interp_method,
+        interp_days=interp_days,
         use_joins=use_joins,
         compute_product=False,
     )

     # The interpolate.daily() function can/will return the product of
     #   the source and target image named as "{source_band}_1".
-    # The problem with this approach is that is will drop any other bands
+    # The problem with this approach is that it will drop any other bands
     #   that are being interpolated (such as the ndvi).
     # daily_coll = daily_coll.select(['et_fraction_1'], ['et'])

@@ -554,8 +553,7 @@
     # if 'et' in variables or 'et_fraction' in variables:
     def compute_et(img):
         """This function assumes ETr and ETf are present"""
-        et_img = img.select(['et_fraction']) \
-            .multiply(img.select(['et_reference']))
+        et_img = img.select(['et_fraction']).multiply(img.select(['et_reference']))
         return img.addBands(et_img.double().rename('et'))

     daily_coll = daily_coll.map(compute_et)
@@ -598,8 +596,8 @@ def aggregate_image(agg_start_date, agg_end_date, date_format):
         if 'et_fraction' in variables:
             # Compute average et fraction over the aggregation period
             image_list.append(
-                et_img.divide(et_reference_img).rename(
-                    ['et_fraction']).float())
+                et_img.divide(et_reference_img).rename(['et_fraction']).float()
+            )
         if 'ndvi' in variables:
             # Compute average ndvi over the aggregation period
             ndvi_img = daily_coll \
@@ -629,7 +627,8 @@ def agg_daily(daily_img):
             return aggregate_image(
                 agg_start_date=agg_start_date,
                 agg_end_date=ee.Date(agg_start_date).advance(1, 'day'),
-                date_format='YYYYMMdd')
+                date_format='YYYYMMdd',
+            )

         return ee.ImageCollection(daily_coll.map(agg_daily))

@@ -647,7 +646,8 @@ def agg_monthly(agg_start_date):
             return aggregate_image(
                 agg_start_date=agg_start_date,
                 agg_end_date=ee.Date(agg_start_date).advance(1, 'month'),
-                date_format='YYYYMM')
+                date_format='YYYYMM',
+            )

         return ee.ImageCollection(month_list.map(agg_monthly))

@@ -664,27 +664,32 @@ def agg_annual(agg_start_date):
             return aggregate_image(
                 agg_start_date=agg_start_date,
                 agg_end_date=ee.Date(agg_start_date).advance(1, 'year'),
-                date_format='YYYY')
+                date_format='YYYY',
+            )

         return ee.ImageCollection(year_list.map(agg_annual))

     elif t_interval.lower() == 'custom':
         # Returning an ImageCollection to be consistent
         return ee.ImageCollection(aggregate_image(
-            agg_start_date=start_date, agg_end_date=end_date,
-            date_format='YYYYMMdd'))
+            agg_start_date=start_date,
+            agg_end_date=end_date,
+            date_format='YYYYMMdd',
+        ))


 def from_scene_et_actual(scene_coll, start_date, end_date, variables,
                          interp_args, model_args, t_interval,
-                         use_joins=False,
+                         use_joins=False, et_band_name='et',
                          ):
     """Interpolate from a precomputed collection of Landsat actual ET scenes

     Parameters
     ----------
     scene_coll : ee.ImageCollection
-        Non-daily 'et' images that will be interpolated.
+        Non-daily actual ET source images that will be interpolated from.
+        If the band name is different than 'et', it will need to be set using
+        the 'et_band_name' function parameter.
     start_date : str
         ISO format start date.
     end_date : str
@@ -719,6 +724,8 @@ def from_scene_et_actual(scene_coll, start_date, end_date, variables,
         If True, use joins to link the target and source collections.
         If False, the source collection will be filtered for each target image.
         This parameter is passed through to interpolate.daily().
+    et_band_name : str, optional
+        The actual ET band name.  The default is "et".

     Returns
     -------
@@ -760,8 +767,7 @@ def from_scene_et_actual(scene_coll, start_date, end_date, variables,
     if t_interval.lower() not in ['daily', 'monthly', 'annual', 'custom']:
         raise ValueError('unsupported t_interval: {}'.format(t_interval))
     elif interp_method.lower() not in ['linear']:
-        raise ValueError('unsupported interp_method: {}'.format(
-            interp_method))
+        raise ValueError('unsupported interp_method: {}'.format(interp_method))

     if ((type(interp_days) is str or type(interp_days) is float) and
             utils.is_number(interp_days)):
@@ -825,8 +831,8 @@ def from_scene_et_actual(scene_coll, start_date, end_date, variables,
     #         'et_reference_resample was not set, default to nearest')
     #     # raise ValueError('et_reference_resample was not set')

-    # Assume a string source is an single image collection ID
-    #   not an list of collection IDs or ee.ImageCollection
+    # Assume a string source is a single image collection ID
+    #   not a list of collection IDs or ee.ImageCollection
     daily_et_ref_coll_id = model_args['et_reference_source']
     daily_et_ref_coll = ee.ImageCollection(daily_et_ref_coll_id) \
         .filterDate(start_date, end_date) \
@@ -834,7 +840,7 @@ def from_scene_et_actual(scene_coll, start_date, end_date, variables,

     # Scale reference ET images (if necessary)
     # CGM - Resampling is not working correctly so not including for now
-    if (et_reference_factor and et_reference_factor != 1):
+    if et_reference_factor and et_reference_factor != 1:
         def et_reference_adjust(input_img):
             return input_img.multiply(et_reference_factor) \
                 .copyProperties(input_img) \
@@ -869,13 +875,13 @@ def et_reference_adjust(input_img):
         .filterDate(interp_start_date, interp_end_date) \
         .select([interp_args['interp_band']])

-    interp_vars = ['et'] + ['mask', 'time']
-
     # For count, compute the composite/mosaic image for the mask band only
     if 'count' in variables:
         aggregate_coll = aggregate_to_daily(
             image_coll=scene_coll.select(['mask']),
-            start_date=start_date, end_date=end_date)
+            start_date=start_date,
+            end_date=end_date,
+        )
         # The following is needed because the aggregate collection can be
         #   empty if there are no scenes in the target date range but there
         #   are scenes in the interpolation date range.
@@ -883,21 +889,23 @@
         #   bands will be which causes a non-homogeneous image collection.
         aggregate_coll = aggregate_coll.merge(
             ee.Image.constant(0).rename(['mask'])
-                .set({'system:time_start': ee.Date(start_date).millis()}))
+                .set({'system:time_start': ee.Date(start_date).millis()})
+        )

     # It might be more efficient to join the target collection to the scenes
     def normalize_et(img):
         img_date = ee.Date(img.get('system:time_start')) \
             .update(hour=0, minute=0, second=0)
         img_date = ee.Date(img_date.millis().divide(1000).floor().multiply(1000))
-        target_img = ee.Image(daily_target_coll \
-            .filterDate(img_date, img_date.advance(1, 'day')).first())
+        target_img = ee.Image(
+            daily_target_coll.filterDate(img_date, img_date.advance(1, 'day')).first()
+        )

         # CGM - This is causing weird artifacts in the output images
         # if interp_args['interp_resample'].lower() in ['bilinear', 'bicubic']:
         #     target_img = target_img.resample(interp_args['interp_resample'])

-        et_norm_img = img.select(['et']).divide(target_img).rename(['et_norm'])
+        et_norm_img = img.select([et_band_name]).divide(target_img).rename(['et_norm'])

         # Clamp the normalized ET image (et_fraction)
         if 'et_fraction_max' in interp_args.keys():
@@ -910,13 +918,12 @@ def normalize_et(img):
         #             float(interp_args['et_fraction_min']),
         #             float(interp_args['et_fraction_max']))

-        return img.addBands([
-            et_norm_img.double(), target_img.rename(['norm'])])
+        return img.addBands([et_norm_img.double(), target_img.rename(['norm'])])

     # The time band is always needed for interpolation
     scene_coll = scene_coll \
         .filterDate(interp_start_date, interp_end_date) \
-        .select(interp_vars) \
+        .select([et_band_name, 'mask', 'time']) \
         .map(normalize_et)

     # # Join the target (normalization) image to the scene images
@@ -939,7 +946,8 @@ def normalize_et(img):
     daily_coll = daily(
         target_coll=daily_target_coll.filterDate(start_date, end_date),
         source_coll=scene_coll.select(['et_norm', 'time']),
-        interp_method=interp_method, interp_days=interp_days,
+        interp_method=interp_method,
+        interp_days=interp_days,
         use_joins=use_joins,
         compute_product=True,
     )
@@ -1001,8 +1009,8 @@ def aggregate_image(agg_start_date, agg_end_date, date_format):
         if 'et_fraction' in variables:
             # Compute average et fraction over the aggregation period
             image_list.append(
-                et_img.divide(et_reference_img)
-                .rename(['et_fraction']).float())
+                et_img.divide(et_reference_img).rename(['et_fraction']).float()
+            )
         # if 'ndvi' in variables:
         #     # Compute average ndvi over the aggregation period
         #     ndvi_img = daily_coll \
@@ -1032,7 +1040,8 @@ def agg_daily(daily_img):
             return aggregate_image(
                 agg_start_date=agg_start_date,
                 agg_end_date=ee.Date(agg_start_date).advance(1, 'day'),
-                date_format='YYYYMMdd')
+                date_format='YYYYMMdd',
+            )

         return ee.ImageCollection(daily_coll.map(agg_daily))

@@ -1050,7 +1059,8 @@ def agg_monthly(agg_start_date):
             return aggregate_image(
                 agg_start_date=agg_start_date,
                 agg_end_date=ee.Date(agg_start_date).advance(1, 'month'),
-                date_format='YYYYMM')
+                date_format='YYYYMM',
+            )

         return ee.ImageCollection(month_list.map(agg_monthly))

@@ -1067,15 +1077,18 @@ def agg_annual(agg_start_date):
             return aggregate_image(
                 agg_start_date=agg_start_date,
                 agg_end_date=ee.Date(agg_start_date).advance(1, 'year'),
-                date_format='YYYY')
+                date_format='YYYY',
+            )

         return ee.ImageCollection(year_list.map(agg_annual))

     elif t_interval.lower() == 'custom':
         # Returning an ImageCollection to be consistent
         return ee.ImageCollection(aggregate_image(
-            agg_start_date=start_date, agg_end_date=end_date,
-            date_format='YYYYMMdd'))
+            agg_start_date=start_date,
+            agg_end_date=end_date,
+            date_format='YYYYMMdd',
+        ))


 # @deprecated
diff --git a/openet/core/tests/test_interpolate.py b/openet/core/tests/test_interpolate.py
index bd8ae93..9836bab 100644
--- a/openet/core/tests/test_interpolate.py
+++ b/openet/core/tests/test_interpolate.py
@@ -23,6 +23,7 @@ def tgt_image(tgt_value, tgt_time):
               'system:index': datetime.datetime.utcfromtimestamp(
                   tgt_time / 1000.0).strftime('%Y%m%d')})

+
 def src_images(src_values, src_times):
     """Build constant source images from values and times"""
     src_images = []
@@ -165,8 +166,7 @@ def test_daily_collection(tgt_value, tgt_time, src_values, src_times,
 @pytest.mark.parametrize(
     "tgt_value, tgt_time, src_values, src_times, expected",
     [
-        [10, 1439704800000, [0.0, 1.6], [1439660268614, 1441042674222],
-         0.1],
+        [10, 1439704800000, [0.0, 1.6], [1439660268614, 1441042674222], 0.1],
     ]
 )
 def test_daily_compute_product_true(tgt_value, tgt_time, src_values, src_times,
@@ -725,6 +725,37 @@ def test_from_scene_et_actual_daily_et_fraction_max(tol=0.0001):
     assert abs(output['et_fraction']['2017-07-10'] - 1.4) <= tol


+def test_from_scene_et_actual_daily_custom_et_band_name(tol=0.0001):
+    output_coll = interpolate.from_scene_et_actual(
+        scene_coll(['et', 'time', 'mask'])
+            .select(['et', 'time', 'mask'], ['et_actual', 'time', 'mask']),
+        start_date='2017-07-01', end_date='2017-08-01',
+        variables=['et', 'et_reference', 'et_fraction'],
+        interp_args={'interp_method': 'linear', 'interp_days': 32,
+                     'interp_source': 'IDAHO_EPSCOR/GRIDMET',
+                     'interp_band': 'etr',
+                     'interp_resample': 'nearest',
+                     },
+        model_args={'et_reference_source': 'IDAHO_EPSCOR/GRIDMET',
+                    'et_reference_band': 'etr',
+                    'et_reference_resample': 'nearest',
+                    'et_reference_factor': 1.0,
+                    },
+        t_interval='daily',
+        et_band_name='et_actual',
+    )
+
+    TEST_POINT = (-121.5265, 38.7399)
+    output = utils.point_coll_value(output_coll, TEST_POINT, scale=10)
+    assert abs(output['et_fraction']['2017-07-10'] - 0.449444979429245) <= tol
+    assert abs(output['et_reference']['2017-07-10'] - 10.5) <= tol
+    assert abs(output['et']['2017-07-10'] - 4.71917200088501) <= tol
+    assert abs(output['et']['2017-07-01'] - 3.6936933994293213) <= tol
+    assert abs(output['et']['2017-07-31'] - 4.951923370361328) <= tol
+    assert '2017-08-01' not in output['et'].keys()
+    # assert output['count']['2017-07-01'] == 3
+
+
 def test_from_scene_et_fraction_t_interval_bad_value():
     # Function should raise a ValueError if t_interval is not supported
     with pytest.raises(ValueError):
diff --git a/openet/core/tests/test_utils.py b/openet/core/tests/test_utils.py
index 496b55b..8d59af3 100644
--- a/openet/core/tests/test_utils.py
+++ b/openet/core/tests/test_utils.py
@@ -31,7 +31,8 @@ def test_date_0utc(date='2015-07-13'):

 def test_date_range_type():
     output = utils.date_range(
-        datetime.datetime(2020, 1, 1), datetime.datetime(2020, 1, 3))
+        datetime.datetime(2020, 1, 1), datetime.datetime(2020, 1, 3)
+    )
     assert isinstance(output, types.GeneratorType)

@@ -208,8 +209,7 @@ def test_constant_image_value_multiband(tol=0.000001):
 def test_constant_image_value_multiband_bands(tol=0.000001):
     """Test that the band names are carried through on a multiband image"""
     expected = 10.123456789
-    input_img = ee.Image.constant([expected, expected + 1])\
-        .rename(['foo', 'bar'])
+    input_img = ee.Image.constant([expected, expected + 1]).rename(['foo', 'bar'])
     output = utils.constant_image_value(input_img)
     assert abs(output['foo'] - expected) <= tol
     assert abs(output['bar'] - (expected + 1)) <= tol
@@ -217,13 +217,13 @@

 def test_point_image_value(tol=0.001):
     expected = 2364.351
-    output = utils.point_image_value(
-        ee.Image('USGS/NED'), [-106.03249, 37.17777])
+    output = utils.point_image_value(ee.Image('USGS/NED'), [-106.03249, 37.17777])
     assert abs(output['elevation'] - expected) <= tol


 def test_point_coll_value(tol=0.001):
     expected = 2364.351
     output = utils.point_coll_value(
-        ee.ImageCollection([ee.Image('USGS/NED')]), [-106.03249, 37.17777])
+        ee.ImageCollection([ee.Image('USGS/NED')]), [-106.03249, 37.17777]
+    )
     assert abs(output['elevation']['2012-04-04'] - expected) <= tol
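
Usage note: with the new et_band_name parameter, a precomputed actual ET collection whose band is not named 'et' can be passed to from_scene_et_actual() without renaming the band first. A minimal sketch, modeled on the new test above and assuming an authenticated Earth Engine session; the scene collection asset ID is a placeholder, not part of this change:

    import ee
    import openet.core.interpolate as interpolate

    ee.Initialize()

    # Placeholder: any precomputed scene collection with 'et_actual',
    # 'time', and 'mask' bands (ID is illustrative only).
    scene_coll = ee.ImageCollection('projects/example/assets/eta_scenes')

    daily_coll = interpolate.from_scene_et_actual(
        scene_coll,
        start_date='2017-07-01', end_date='2017-08-01',
        variables=['et', 'et_reference', 'et_fraction'],
        interp_args={'interp_method': 'linear', 'interp_days': 32,
                     'interp_source': 'IDAHO_EPSCOR/GRIDMET',
                     'interp_band': 'etr',
                     'interp_resample': 'nearest'},
        model_args={'et_reference_source': 'IDAHO_EPSCOR/GRIDMET',
                    'et_reference_band': 'etr',
                    'et_reference_factor': 1.0,
                    'et_reference_resample': 'nearest'},
        t_interval='daily',
        et_band_name='et_actual',
    )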