# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2014-2023 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.

import os
import re
import ast
import sys
import json
import inspect
import logging
import functools
import collections
import numpy
import pandas
import itertools

from openquake.baselib import __version__, hdf5, python3compat, config
from openquake.baselib.parallel import Starmap
from openquake.baselib.general import DictArray, AccumDict, cached_property
from openquake.hazardlib.imt import from_string, sort_by_imt
from openquake.hazardlib import shakemap
from openquake.hazardlib import correlation, cross_correlation, stats, calc
from openquake.hazardlib import valid, InvalidFile, site
from openquake.sep.classes import SecondaryPeril
from openquake.hazardlib.gsim_lt import GsimLogicTree
from openquake.risklib import asset, scientific
from openquake.risklib.riskmodels import get_risk_files

__doc__ = """\
Full list of configuration parameters
=====================================

Engine Version: %s

Some parameters have a default that is used when the parameter is
not specified in the job.ini file. Some other parameters have no default,
which means that not specifying them will raise an error when running
a calculation for which they are required.

override_vs30:
  Optional Vs30 parameter to override the site model Vs30.
  Example: *override_vs30 = 800*.
  Default: None

aggregate_by:
  Used to compute aggregate losses and aggregate loss curves in risk
  calculations. Takes in input one or more exposure tags.
  Example: *aggregate_by = region, taxonomy*.
  Default: empty list

aggregate_loss_curves_types:
  Used for event-based risk and damage calculations, to estimate the
  aggregated loss Exceedance Probability (EP) only or to also calculate
  (if possible) the Occurrence Exceedance Probability (OEP) and/or the
  Aggregate Exceedance Probability (AEP).
  Example: *aggregate_loss_curves_types = aep, oep*.
  Default: ep

reaggregate_by:
  Used to perform additional aggregations in risk calculations. Takes in
  input a proper subset of the tags in the aggregate_by option.
  Example: *reaggregate_by = region*.
  Default: empty list

amplification_method:
  Used in classical PSHA calculations to amplify the hazard curves with
  the convolution or kernel method.
  Example: *amplification_method = kernel*.
  Default: "convolution"

asce_version:
  ASCE version used in AELO mode.
  Example: *asce_version = asce7-22*.
  Default: "asce7-16"

area_source_discretization:
  Discretization parameters (in km) for area sources.
  Example: *area_source_discretization = 10*.
  Default: 10

ash_wet_amplification_factor:
  Used in volcanic risk calculations.
  Example: *ash_wet_amplification_factor = 1.0*.
  Default: 1.0

asset_correlation:
  Used in risk calculations to take into account asset correlation.
  Accepts only the values 1 (full correlation) and 0 (no correlation).
  Example: *asset_correlation = 1*.
  Default: 0

asset_hazard_distance:
  In km, used in risk calculations to print a warning when there are
  assets too distant from the hazard sites. In multi_risk calculations
  it can be a dictionary: asset_hazard_distance = {'ASH': 50, 'LAVA': 10, ...}
  Example: *asset_hazard_distance = 5*.
  Default: 15

asset_life_expectancy:
  Used in the classical_bcr calculator.
  Example: *asset_life_expectancy = 50*.
  Default: no default

assets_per_site_limit:
  INTERNAL

gmf_max_gb:
  If the size (in GB) of the GMFs is below this value, then compute avg_gmf.
  Example: *gmf_max_gb = 1.*
  Default: 0.1

avg_losses:
  Used in risk calculations to compute average losses.
  Example: *avg_losses = false*.
  Default: True

base_path:
  INTERNAL

cachedir:
  INTERNAL

cache_distances:
  Useful in UCERF calculations.
  Example: *cache_distances = true*.
  Default: False

calculation_mode:
  One of classical, disaggregation, event_based, scenario, scenario_risk,
  scenario_damage, event_based_risk, classical_risk, classical_bcr.
  Example: *calculation_mode = classical*.
  Default: no default

collapse_gsim_logic_tree:
  INTERNAL

collapse_level:
  INTERNAL

collect_rlzs:
  Collect all realizations into a single effective realization. If not
  given it is true for sampling and false for full enumeration.
  Example: *collect_rlzs = true*.
  Default: None

correlation_cutoff:
  Used in conditioned GMF calculation to avoid small negative eigenvalues
  wreaking havoc with the numerics.
  Example: *correlation_cutoff = 1E-11*
  Default: 1E-12

compare_with_classical:
  Used in event based calculations to also perform a classical calculation,
  so that the hazard curves can be compared.
  Example: *compare_with_classical = true*.
  Default: False

complex_fault_mesh_spacing:
  In km, used to discretize complex faults.
  Example: *complex_fault_mesh_spacing = 15*.
  Default: 5

concurrent_tasks:
  A hint to the engine for the number of tasks to generate. Do not set
  it unless you know what you are doing.
  Example: *concurrent_tasks = 100*.
  Default: twice the number of cores

conditional_loss_poes:
  Used in classical_risk calculations to compute loss curves.
  Example: *conditional_loss_poes = 0.01 0.02*.
  Default: empty list

cholesky_limit:
  When generating the GMFs from a ShakeMap the engine needs to perform a
  Cholesky decomposition of a matrix of size (M x N)^2, being M the number
  of intensity measure types and N the number of sites. The decomposition
  can become ultra-slow, run out of memory, or produce bogus negative
  eigenvalues, therefore there is a limit on the maximum size of M x N.
  Example: *cholesky_limit = 1000*.
  Default: 10,000

continuous_fragility_discretization:
  Used when discretizing continuous fragility functions.
  Example: *continuous_fragility_discretization = 10*.
  Default: 20

coordinate_bin_width:
  Used in disaggregation calculations.
  Example: *coordinate_bin_width = 1.0*.
  Default: 100 degrees, meaning don't disaggregate by lon, lat

countries:
  Used to restrict the exposure to a single country in Aristotle mode.
  Example: *countries = ITA*.
  Default: ()

cross_correlation:
  When used in Conditional Spectrum calculations it is the name of a cross
  correlation class (i.e. "BakerJayaram2008"). When used in ShakeMap
  calculations the valid choices are "yes", "no" and "full", same as for
  *spatial_correlation*.
  Example: *cross_correlation = no*.
  Default: "yes"

description:
  A string describing the calculation.
  Example: *description = Test calculation*.
  Default: "no description"

disagg_bin_edges:
  A dictionary where the keys can be: mag, dist, lon, lat, eps and the
  values are lists of floats indicating the edges of the bins used to
  perform the disaggregation.
  Example: *disagg_bin_edges = {'mag': [5.0, 5.5, 6.0, 6.5]}*.
  Default: empty dictionary

disagg_by_src:
  Flag used to enable disaggregation by source when possible.
  Example: *disagg_by_src = true*.
  Default: False

disagg_outputs:
  Used in disaggregation calculations to restrict the number of exported
  outputs.
  Example: *disagg_outputs = Mag_Dist*
  Default: list of all possible outputs

discard_assets:
  Flag used in risk calculations to discard assets from the exposure.
  Example: *discard_assets = true*.
  Default: False

discard_trts:
  Used to discard tectonic region types that do not contribute to the
  hazard.
  Example: *discard_trts = Volcanic*.
  Default: empty list

discrete_damage_distribution:
  Make sure the damage distribution contains only integers (requires the
  "number" field in the exposure to be integer).
  Example: *discrete_damage_distribution = true*
  Default: False

distance_bin_width:
  In km, used in disaggregation calculations to specify the distance bins.
  Example: *distance_bin_width = 20*.
  Default: no default

ebrisk_maxsize:
  INTERNAL

epsilon_star:
  A boolean controlling the typology of disaggregation output to be
  provided. When True, disaggregation is performed in terms of epsilon*
  rather than epsilon (see Bazzurro and Cornell, 1999).
  Example: *epsilon_star = true*
  Default: False

extreme_gmv:
  A scalar or an IMT-keyed dictionary specifying when a ground motion value
  is extreme and the engine has to treat it specially.
  Example: *extreme_gmv = 5.0*
  Default: {'default': numpy.inf}, i.e. no values are extreme

floating_x_step:
  Float, used in rupture generation for kite faults. Indicates the fraction
  of fault length used to float ruptures along strike by the given float
  (i.e. "0.5" floats the ruptures at half the rupture length). Uniform
  distribution of the ruptures is maintained, such that if the mesh spacing
  and rupture dimensions prohibit the defined overlap fraction, the fraction
  is increased until uniform distribution is achieved. The minimum possible
  value depends on the rupture dimensions and the mesh spacing.
  If 0, standard rupture floating is used along-strike (i.e. no mesh nodes
  are skipped).
  Example: *floating_x_step = 0.5*
  Default: 0

floating_y_step:
  Float, used in rupture generation for kite faults. Indicates the fraction
  of fault width used to float ruptures down dip (i.e. "0.5" floats the
  ruptures at half the rupture width). Uniform distribution of the ruptures
  is maintained, such that if the mesh spacing and rupture dimensions
  prohibit the defined overlap fraction, the fraction is increased until
  uniform distribution is achieved. The minimum possible value depends on
  the rupture dimensions and the mesh spacing.
  If 0, standard rupture floating is used (i.e. no mesh nodes are skipped).
  Example: *floating_y_step = 0.5*
  Default: 0

ignore_encoding_errors:
  If set, skip characters with non-UTF8 encoding.
  Example: *ignore_encoding_errors = true*.
  Default: False

ignore_master_seed:
  If set, estimate analytically the uncertainty on the losses due to the
  uncertainty on the vulnerability functions.
  Example: *ignore_master_seed = vulnerability*.
  Default: None

export_dir:
  Set the export directory.
  Example: *export_dir = /tmp*.
  Default: the current directory, "."

exports:
  Specify what kind of outputs to export by default.
  Example: *exports = csv, rst*.
  Default: empty list

ground_motion_correlation_model:
  Enable ground motion correlation.
  Example: *ground_motion_correlation_model = JB2009*.
  Default: None

ground_motion_correlation_params:
  To be used together with ground_motion_correlation_model.
  Example: *ground_motion_correlation_params = {"vs30_clustering": False}*.
  Default: empty dictionary

ground_motion_fields:
  Flag to turn on/off the calculation of ground motion fields.
  Example: *ground_motion_fields = false*.
  Default: True

gsim:
  Used to specify a GSIM in scenario or event based calculations.
  Example: *gsim = BooreAtkinson2008*.
  Default: "[FromFile]"

hazard_calculation_id:
  Used to specify a previous calculation from which the hazard is read.
  Example: *hazard_calculation_id = 42*.
  Default: None

hazard_curves_from_gmfs:
  Used in scenario/event based calculations. If set, generates hazard
  curves from the ground motion fields.
  Example: *hazard_curves_from_gmfs = true*.
  Default: False

hazard_maps:
  Set it to true to export the hazard maps.
  Example: *hazard_maps = true*.
  Default: False

horiz_comp_to_geom_mean:
  Apply the correction to the geometric mean when possible, depending on
  the GMPE and the Intensity Measure Component.
  Example: *horiz_comp_to_geom_mean = true*.
  Default: False

ignore_covs:
  Used in risk calculations to set all the coefficients of variation of
  the vulnerability functions to zero.
  Example: *ignore_covs = true*
  Default: False

ignore_missing_costs:
  Accepts exposures with missing costs (by discarding such assets).
  Example: *ignore_missing_costs = nonstructural, business_interruption*.
  Default: False

iml_disagg:
  Used in disaggregation calculations to specify an intensity measure type
  and level.
  Example: *iml_disagg = {'PGA': 0.02}*.
  Default: no default

imt_ref:
  Reference intensity measure type used to compute the conditional
  spectrum. The imt_ref must belong to the list of IMTs of the calculation.
  Example: *imt_ref = SA(0.15)*.
  Default: empty string

individual_rlzs:
  When set, store the individual hazard curves and/or individual risk
  curves for each realization.
  Example: *individual_rlzs = true*.
  Default: False

individual_curves:
  Legacy name for `individual_rlzs`, it should not be used.
  Example: *individual_curves = true*.
  Default: False

infer_occur_rates:
  If set, infer the occurrence rates from the first probs_occur in
  nonparametric sources.
  Example: *infer_occur_rates = true*
  Default: False

infrastructure_connectivity_analysis:
  If set, run the infrastructure connectivity analysis.
  Example: *infrastructure_connectivity_analysis = true*
  Default: False

inputs:
  INTERNAL. Dictionary with the input files paths.

intensity_measure_types:
  List of intensity measure types in an event based calculation.
  Example: *intensity_measure_types = PGA SA(0.1)*.
  Default: empty list

intensity_measure_types_and_levels:
  List of intensity measure types and levels in a classical calculation.
  Example: *intensity_measure_types_and_levels = {"PGA": logscale(0.1, 1, 20)}*.
  Default: empty dictionary

interest_rate:
  Used in classical_bcr calculations.
  Example: *interest_rate = 0.05*.
  Default: no default

investigation_time:
  Hazard investigation time in years, used in classical and event based
  calculations.
  Example: *investigation_time = 50*.
  Default: no default

job_id:
  ID of a job in the database.
  Example: *job_id = 42*.
  Default: 0 (meaning create a new job)

limit_states:
  Limit states used in damage calculations.
  Example: *limit_states = moderate, complete*
  Default: no default

local_timestamp:
  Timestamp that includes the date, the time and the time zone information.
  Example: *local_timestamp = 2023-02-06 04:17:34+03:00*
  Default: None

lrem_steps_per_interval:
  Used in the vulnerability functions.
  Example: *lrem_steps_per_interval = 1*.
  Default: 0

mag_bin_width:
  Width of the magnitude bin used in disaggregation calculations.
  Example: *mag_bin_width = 0.5*.
  Default: 1.

master_seed:
  Seed used to control the generation of the epsilons, relevant for risk
  calculations with vulnerability functions with nonzero coefficients of
  variation.
  Example: *master_seed = 1234*.
  Default: 123456789

max:
  Compute the maximum across realizations. Akin to mean and quantiles.
  Example: *max = true*.
  Default: False

max_aggregations:
  Maximum number of aggregation keys.
  Example: *max_aggregations = 200_000*
  Default: 100_000

max_blocks:
  INTERNAL. Used in classical calculations.

max_data_transfer:
  INTERNAL. Restrict the maximum data transfer in disaggregation
  calculations.

max_gmvs_chunk:
  Maximum number of rows of the gmf_data table per task.
  Example: *max_gmvs_chunk = 200_000*
  Default: 100_000

max_potential_gmfs:
  Restrict the product *num_sites * num_events*.
  Example: *max_potential_gmfs = 1E9*.
  Default: 2E11

max_potential_paths:
  Restrict the maximum number of realizations.
  Example: *max_potential_paths = 200*.
  Default: 15_000

max_sites_disagg:
  Maximum number of sites for which to store rupture information.
  In disaggregation calculations with many sites you may be forced to
  raise *max_sites_disagg*, which must be greater than or equal to the
  number of sites.
  Example: *max_sites_disagg = 100*
  Default: 10

max_weight:
  INTERNAL

maximum_distance:
  Integration distance. Can be given as a scalar, as a dictionary
  TRT -> scalar or as a dictionary TRT -> [(mag, dist), ...]
  Example: *maximum_distance = 200*.
  Default: no default

maximum_distance_stations:
  Applies only to scenario calculations with conditioned GMFs to discard
  stations.
  Example: *maximum_distance_stations = 100*.
  Default: None

mean:
  Flag to enable/disable the calculation of mean curves.
  Example: *mean = false*.
  Default: True

minimum_asset_loss:
  Used in risk calculations. If set, losses smaller than the
  *minimum_asset_loss* are considered zeros.
  Example: *minimum_asset_loss = {"structural": 1000}*.
  Default: empty dictionary

minimum_distance:
  If set, distances below the minimum are rounded up.
  Example: *minimum_distance = 5*
  Default: 0

minimum_intensity:
  If set, ground motion values below the *minimum_intensity* are
  considered zeros.
  Example: *minimum_intensity = {'PGA': .01}*.
  Default: empty dictionary

minimum_magnitude:
  If set, ruptures below the *minimum_magnitude* are discarded.
  Example: *minimum_magnitude = 5.0*.
  Default: 0

modal_damage_state:
  Used in scenario_damage calculations to export only the damage state
  with the highest probability.
  Example: *modal_damage_state = true*.
  Default: false

mosaic_model:
  Used to restrict the ruptures to a given model.
  Example: *mosaic_model = ZAF*
  Default: empty string

num_epsilon_bins:
  Number of epsilon bins in disaggregation calculations.
  Example: *num_epsilon_bins = 3*.
  Default: 1

num_rlzs_disagg:
  Used in disaggregation calculations to specify how many outputs will be
  generated. `0` means all realizations, `n` means the n closest to the
  mean hazard curve.
  Example: *num_rlzs_disagg = 1*.
  Default: 0

number_of_ground_motion_fields:
  Used in scenario calculations to specify how many random ground motion
  fields to generate.
  Example: *number_of_ground_motion_fields = 100*.
  Default: no default

number_of_logic_tree_samples:
  Used to specify the number of realizations to generate when using logic
  tree sampling. If zero, full enumeration is performed.
  Example: *number_of_logic_tree_samples = 0*.
  Default: 0

oversampling:
  When equal to "forbid" raise an error if tot_samples > num_paths in
  classical calculations; when equal to "tolerate" do not raise the error
  (the default).
  Example: *oversampling = forbid*
  Default: tolerate

poes:
  Probabilities of Exceedance used to specify the hazard maps or hazard
  spectra to compute.
  Example: *poes = 0.01 0.02*.
  Default: empty list

poes_disagg:
  Alias for poes.

pointsource_distance:
  Used in classical calculations to collapse the point sources. Can also
  be used in conjunction with *ps_grid_spacing*.
  Example: *pointsource_distance = 50*.
  Default: {'default': 100}

postproc_func:
  Specify a postprocessing function in calculators/postproc.
  Example: *postproc_func = compute_mrd.main*
  Default: 'dummy.main' (no postprocessing)

postproc_args:
  Specify the arguments to be passed to the postprocessing function.
  Example: *postproc_args = {'imt': 'PGA'}*
  Default: {} (no arguments)

prefer_global_site_params:
  INTERNAL. Automatically set by the engine.

ps_grid_spacing:
  Used in classical calculations to grid the point sources. Requires the
  *pointsource_distance* to be set too.
  Example: *ps_grid_spacing = 50*.
  Default: 0, meaning no grid

quantiles:
  List of probabilities used to compute the quantiles across realizations.
  Example: *quantiles = 0.15 0.50 0.85*
  Default: empty list

random_seed:
  Seed used in the sampling of the logic tree.
  Example: *random_seed = 1234*.
  Default: 42

reference_backarc:
  Used when there is no site model to specify a global backarc parameter,
  used in some GMPEs. Can be True or False.
  Example: *reference_backarc = true*.
  Default: False

reference_depth_to_1pt0km_per_sec:
  Used when there is no site model to specify a global z1pt0 parameter,
  used in some GMPEs.
  Example: *reference_depth_to_1pt0km_per_sec = 100*.
  Default: no default

reference_depth_to_2pt5km_per_sec:
  Used when there is no site model to specify a global z2pt5 parameter,
  used in some GMPEs.
  Example: *reference_depth_to_2pt5km_per_sec = 5*.
  Default: no default

reference_vs30_type:
  Used when there is no site model to specify a global vs30 type. The
  choices are "inferred" or "measured".
  Example: *reference_vs30_type = measured*.
  Default: "inferred"

reference_vs30_value:
  Used when there is no site model to specify a global vs30 value.
  Example: *reference_vs30_value = 760*.
  Default: no default

region:
  A list of lon/lat pairs used to specify a region of interest.
  Example: *region = 10.0 43.0, 12.0 43.0, 12.0 46.0, 10.0 46.0*
  Default: None

region_grid_spacing:
  Used together with the *region* option to generate the hazard sites.
  Example: *region_grid_spacing = 10*.
  Default: None

return_periods:
  Used in the computation of the loss curves.
  Example: *return_periods = 200 500 1000*.
  Default: empty list

reqv_ignore_sources:
  Used when some sources in a TRT that uses the equivalent distance term
  should not be collapsed.
  Example: *reqv_ignore_sources = src1 src2 src3*
  Default: empty list

risk_imtls:
  INTERNAL. Automatically set by the engine.

risk_investigation_time:
  Used in risk calculations. If not specified, the (hazard)
  investigation_time is used instead.
  Example: *risk_investigation_time = 50*.
  Default: None

rlz_index:
  Used in disaggregation calculations to specify the realization from
  which to start the disaggregation.
  Example: *rlz_index = 0*.
  Default: None

rupture_dict:
  Dictionary with rupture parameters lon, lat, dep, mag, rake, strike, dip.
  Example: *rupture_dict = {'lon': 10, 'lat': 20, 'dep': 10, 'mag': 6, 'rake': 0}*
  Default: {}

rupture_mesh_spacing:
  Set the discretization parameter (in km) for rupture geometries.
  Example: *rupture_mesh_spacing = 2.0*.
  Default: 5.0

sampling_method:
  One of early_weights, late_weights, early_latin, late_latin.
  Example: *sampling_method = early_latin*.
  Default: 'early_weights'

mea_tau_phi:
  Save the mean and standard deviations computed by the GMPEs.
  Example: *mea_tau_phi = true*
  Default: False

sec_peril_params:
  INTERNAL

secondary_perils:
  INTERNAL

secondary_simulations:
  INTERNAL

ses_per_logic_tree_path:
  Set the number of stochastic event sets per logic tree realization in
  event based calculations.
  Example: *ses_per_logic_tree_path = 100*.
  Default: 1

ses_seed:
  Seed governing the generation of the ground motion field.
  Example: *ses_seed = 123*.
  Default: 42

shakemap_id:
  Used in ShakeMap calculations to download a ShakeMap from the USGS site.
  Example: *shakemap_id = usp000fjta*.
  Default: no default

shakemap_uri:
  Dictionary used in ShakeMap calculations to specify a ShakeMap. Must
  contain a key named "kind" with values "usgs_id", "usgs_xml" or
  "file_npy".
  Example: *shakemap_uri = {
  "kind": "usgs_xml",
  "grid_url": "file:///home/michele/usp000fjta/grid.xml",
  "uncertainty_url": "file:///home/michele/usp000fjta/uncertainty.xml"}*
  Default: empty dictionary

shift_hypo:
  Used in classical calculations to shift the rupture hypocenter.
  Example: *shift_hypo = true*.
  Default: false

site_effects:
  Used in ShakeMap calculations to turn on GMF amplification based on the
  vs30 values in the ShakeMap (site_effects='shakemap') or in the site
  collection (site_effects='sitemodel').
  Example: *site_effects = 'shakemap'*.
  Default: 'no'

sites:
  Used to specify a list of sites.
  Example: *sites = 10.1 45, 10.2 45*.

tile_spec:
  INTERNAL

tiling:
  Used to force the tiling or non-tiling strategy in classical
  calculations.
  Example: *tiling = true*.
  Default: None, meaning the engine will decide what to do

smlt_branch:
  Used to restrict the source model logic tree to a specific branch.
  Example: *smlt_branch = b1*
  Default: empty string, meaning all branches

soil_intensities:
  Used in classical calculations with amplification_method = convolution

source_id:
  Used for debugging purposes. When given, restricts the source model to
  the given source IDs.
  Example: *source_id = src001 src002*.
  Default: empty list

source_nodes:
  INTERNAL

spatial_correlation:
  Used in the ShakeMap calculator. The choices are "yes", "no" and "full".
  Example: *spatial_correlation = full*.
  Default: "yes"

specific_assets:
  INTERNAL

split_sources:
  INTERNAL

split_by_gsim:
  INTERNAL

outs_per_task:
  How many outputs per task to generate (honored in some calculators).
  Example: *outs_per_task = 3*
  Default: 4

std:
  Compute the standard deviation across realizations. Akin to mean and max.
  Example: *std = true*.
  Default: False

steps_per_interval:
  Used in the fragility functions when building the intensity levels.
  Example: *steps_per_interval = 4*.
  Default: 1

tectonic_region_type:
  Used to specify a tectonic region type.
  Example: *tectonic_region_type = Active Shallow Crust*.
  Default: '*'

time_event:
  Used in scenario_risk calculations when the occupancy depends on the
  time. Valid choices are "avg", "day", "night", "transit".
  Example: *time_event = day*.
  Default: "avg"

time_per_task:
  Used in calculations with task splitting. If a task slice takes longer
  than *time_per_task* seconds, then spawn subtasks for the other slices.
  Example: *time_per_task = 1000*
  Default: 600

total_losses:
  Used in event based risk calculations to compute total losses and total
  curves by summing across different loss types. Possible values are
  "structural+nonstructural", "structural+contents",
  "nonstructural+contents", "structural+nonstructural+contents".
  Example: *total_losses = structural+nonstructural*
  Default: None

truncation_level:
  Truncation level used in the GMPEs.
  Example: *truncation_level = 0* to compute median GMFs.
  Default: no default

uniform_hazard_spectra:
  Flag used to generate uniform hazard spectra for the given poes.
  Example: *uniform_hazard_spectra = true*.
  Default: False

use_rates:
  When set, convert to rates before computing the statistical hazard
  curves.
  Example: *use_rates = true*.
  Default: False

vs30_tolerance:
  Used when amplification_method = convolution.
  Example: *vs30_tolerance = 20*.
  Default: 0

width_of_mfd_bin:
  Used to specify the width of the Magnitude Frequency Distribution.
  Example: *width_of_mfd_bin = 0.2*.
  Default: None
""" % __version__

PSDIST = float(config.performance.pointsource_distance)
GROUND_MOTION_CORRELATION_MODELS = ['JB2009', 'HM2018']
TWO16 = 2 ** 16  # 65536
TWO32 = 2 ** 32
U16 = numpy.uint16
U32 = numpy.uint32
U64 = numpy.uint64
F32 = numpy.float32
F64 = numpy.float64

ALL_CALCULATORS = ['aftershock', 'classical_risk', 'classical_damage',
                   'classical', 'custom', 'event_based', 'scenario',
                   'post_risk', 'ebrisk', 'scenario_risk',
                   'event_based_risk', 'disaggregation', 'multi_risk',
                   'classical_bcr', 'preclassical', 'event_based_damage',
                   'scenario_damage']

COST_TYPES = ['structural', 'nonstructural', 'contents',
              'business_interruption']

ALL_COST_TYPES = ['+'.join(s) for l_idx in range(len(COST_TYPES))
                  for s in itertools.combinations(COST_TYPES, l_idx + 1)]
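
# NB: by construction ALL_COST_TYPES contains the 15 non-empty '+'-joined
# combinations of the four COST_TYPES, e.g. 'structural',
# 'structural+contents', ..., up to
# 'structural+nonstructural+contents+business_interruption'; this is the
# full set of values accepted by the `total_losses` parameter defined below.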


def check_same_levels(imtls):
    """
    :param imtls: a dictionary (or dict-like) imt -> imls
    :returns: the periods and the levels
    :raises: a ValueError if the levels are not the same across all IMTs
    """
    if not imtls:
        raise ValueError('There are no intensity_measure_types_and_levels!')
    imls = imtls[next(iter(imtls))]
    for imt in imtls:
        if not imt.startswith(('PGA', 'SA')):
            raise ValueError('Site amplification works only with '
                             'PGA and SA, got %s' % imt)
        if (imtls[imt] == 0).all():
            raise ValueError(
                'You forgot to set intensity_measure_types_and_levels!')
        elif len(imtls[imt]) != len(imls) or any(
                l1 != l2 for l1, l2 in zip(imtls[imt], imls)):
            raise ValueError('Site amplification works only if the '
                             'levels are the same across all IMTs')
    periods = [from_string(imt).period for imt in imtls]
    return periods, imls
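
# Illustrative usage sketch (hypothetical values, not taken from the engine
# test suite): every IMT must share the same levels, e.g.
#   check_same_levels({'PGA': numpy.array([0.1, 0.2, 0.4]),
#                      'SA(0.1)': numpy.array([0.1, 0.2, 0.4])})
# returns roughly ([0.0, 0.1], array([0.1, 0.2, 0.4])), while passing
# different levels per IMT, or an IMT other than PGA/SA such as 'PGV',
# raises a ValueError.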


def check_increasing(dframe, *columns):
    """
    Make sure the passed columns of the dataframe exist and correspond
    to increasing numbers
    """
    for col in columns:
        arr = dframe[col].to_numpy()
        assert (numpy.diff(arr) >= 0).all(), arr
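
# Minimal usage sketch (hypothetical data): check_increasing is used below to
# validate the post_loss_amplification input, where both columns must be
# non-decreasing:
#   df = pandas.DataFrame({'return_period': [10, 50, 100],
#                          'pla_factor': [1.0, 1.2, 1.5]})
#   check_increasing(df, 'return_period', 'pla_factor')  # passes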


class OqParam(valid.ParamSet):
    _input_files = ()  # set in get_oqparam
    KNOWN_INPUTS = {
        'rupture_model', 'exposure', 'site_model', 'delta_rates',
        'source_model', 'shakemap', 'gmfs', 'gsim_logic_tree',
        'source_model_logic_tree', 'geometry', 'hazard_curves',
        'insurance', 'reinsurance', 'ins_loss', 'job_ini',
        'multi_peril', 'taxonomy_mapping', 'fragility', 'consequence',
        'reqv', 'input_zip', 'reqv_ignore_sources', 'amplification',
        'station_data', 'nonstructural_vulnerability',
        'nonstructural_fragility', 'nonstructural_consequence',
        'structural_vulnerability', 'structural_fragility',
        'structural_consequence', 'contents_vulnerability',
        'contents_fragility', 'contents_consequence',
        'business_interruption_vulnerability',
        'business_interruption_fragility',
        'business_interruption_consequence',
        'structural_vulnerability_retrofitted',
        'occupants_vulnerability', 'residents_vulnerability',
        'area_vulnerability', 'number_vulnerability',
        'post_loss_amplification',
    }
    # old name => new name
    ALIASES = {'individual_curves': 'individual_rlzs',
               'quantile_hazard_curves': 'quantiles',
               'mean_hazard_curves': 'mean',
               'max_hazard_curves': 'max'}

    hazard_imtls = {}
    override_vs30 = valid.Param(valid.positivefloat, None)
    aggregate_by = valid.Param(valid.namelists, [])
    aggregate_loss_curves_types = valid.Param(
        # accepting all comma-separated permutations of 1, 2 or 3 elements
        # of the list ['ep', 'aep', 'oep']
        valid.Choice('ep', 'aep', 'oep',
                     'ep, aep', 'ep, oep', 'aep, ep', 'aep, oep',
                     'oep, ep', 'oep, aep',
                     'ep, aep, oep', 'ep, oep, aep',
                     'aep, ep, oep', 'aep, oep, ep',
                     'oep, ep, aep', 'oep, aep, ep'), 'ep')
    reaggregate_by = valid.Param(valid.namelist, [])
    amplification_method = valid.Param(
        valid.Choice('convolution', 'kernel'), 'convolution')
    asce_version = valid.Param(
        valid.Choice('ASCE7-16', 'ASCE7-22'), 'ASCE7-16')
    minimum_asset_loss = valid.Param(valid.floatdict, {'default': 0})
    area_source_discretization = valid.Param(
        valid.NoneOr(valid.positivefloat), None)
    asset_correlation = valid.Param(valid.Choice('0', '1'), 0)
    asset_life_expectancy = valid.Param(valid.positivefloat)
    assets_per_site_limit = valid.Param(valid.positivefloat, 1000)
    avg_losses = valid.Param(valid.boolean, True)
    base_path = valid.Param(valid.utf8, '.')
    calculation_mode = valid.Param(valid.Choice(*ALL_CALCULATORS))
    collapse_gsim_logic_tree = valid.Param(valid.namelist, [])
    collapse_level = valid.Param(int, -1)
    collect_rlzs = valid.Param(valid.boolean, None)
    coordinate_bin_width = valid.Param(valid.positivefloat, 100.)
    compare_with_classical = valid.Param(valid.boolean, False)
    concurrent_tasks = valid.Param(valid.positiveint, Starmap.CT)
    conditional_loss_poes = valid.Param(valid.probabilities, [])
    continuous_fragility_discretization = valid.Param(valid.positiveint, 20)
    countries = valid.Param(valid.namelist, ())
    cross_correlation = valid.Param(valid.utf8_not_empty, 'yes')
    cholesky_limit = valid.Param(valid.positiveint, 10_000)
    correlation_cutoff = valid.Param(valid.positivefloat, 1E-12)
    cachedir = valid.Param(valid.utf8, '')
    cache_distances = valid.Param(valid.boolean, False)
    description = valid.Param(valid.utf8_not_empty, "no description")
    disagg_by_src = valid.Param(valid.boolean, False)
    disagg_outputs = valid.Param(valid.disagg_outputs, list(valid.pmf_map))
    disagg_bin_edges = valid.Param(valid.dictionary, {})
    discard_assets = valid.Param(valid.boolean, False)
    discard_trts = valid.Param(str, '')  # tested in the cariboo example
    discrete_damage_distribution = valid.Param(valid.boolean, False)
    distance_bin_width = valid.Param(valid.positivefloat)
    mag_bin_width = valid.Param(valid.positivefloat, 1.)
    floating_x_step = valid.Param(valid.positivefloat, 0)
    floating_y_step = valid.Param(valid.positivefloat, 0)
    ignore_encoding_errors = valid.Param(valid.boolean, False)
    ignore_master_seed = valid.Param(valid.boolean, False)
    epsilon_star = valid.Param(valid.boolean, False)
    export_dir = valid.Param(valid.utf8, '.')
    exports = valid.Param(valid.export_formats, ())
    extreme_gmv = valid.Param(valid.floatdict, {'default': numpy.inf})
    gmf_max_gb = valid.Param(valid.positivefloat, .1)
    ground_motion_correlation_model = valid.Param(
        valid.NoneOr(valid.Choice(*GROUND_MOTION_CORRELATION_MODELS)), None)
    ground_motion_correlation_params = valid.Param(valid.dictionary, {})
    ground_motion_fields = valid.Param(valid.boolean, True)
    gsim = valid.Param(valid.utf8, '[FromFile]')
    hazard_calculation_id = valid.Param(valid.NoneOr(valid.positiveint), None)
    hazard_curves_from_gmfs = valid.Param(valid.boolean, False)
    hazard_maps = valid.Param(valid.boolean, False)
    horiz_comp_to_geom_mean = valid.Param(valid.boolean, False)
    ignore_missing_costs = valid.Param(valid.namelist, [])
    ignore_covs = valid.Param(valid.boolean, False)
    iml_disagg = valid.Param(valid.floatdict, {})  # IMT -> IML
    imt_ref = valid.Param(valid.intensity_measure_type, '')
    individual_rlzs = valid.Param(valid.boolean, None)
    inputs = valid.Param(dict, {})
    ash_wet_amplification_factor = valid.Param(valid.positivefloat, 1.0)
    infer_occur_rates = valid.Param(valid.boolean, False)
    infrastructure_connectivity_analysis = valid.Param(valid.boolean, False)
    intensity_measure_types = valid.Param(valid.intensity_measure_types, '')
    intensity_measure_types_and_levels = valid.Param(
        valid.intensity_measure_types_and_levels, None)
    interest_rate = valid.Param(valid.positivefloat)
    investigation_time = valid.Param(valid.positivefloat, None)
    job_id = valid.Param(valid.positiveint, 0)
    limit_states = valid.Param(valid.namelist, [])
    local_timestamp = valid.Param(valid.local_timestamp, None)
    lrem_steps_per_interval = valid.Param(valid.positiveint, 0)
    steps_per_interval = valid.Param(valid.positiveint, 1)
    master_seed = valid.Param(valid.positiveint, 123456789)
    maximum_distance = valid.Param(valid.IntegrationDistance.new)  # km
    maximum_distance_stations = valid.Param(valid.positivefloat, None)  # km
    asset_hazard_distance = valid.Param(valid.floatdict, {'default': 15})  # km
    max = valid.Param(valid.boolean, False)
    max_aggregations = valid.Param(valid.positivefloat, 1E5)
    max_blocks = valid.Param(valid.positiveint, 100)
    max_data_transfer = valid.Param(valid.positivefloat, 2E11)
    max_gmvs_chunk = valid.Param(valid.positiveint, 100_000)  # for 2GB limit
    max_potential_gmfs = valid.Param(valid.positiveint, 1E12)
    max_potential_paths = valid.Param(valid.positiveint, 15_000)
    max_sites_disagg = valid.Param(valid.positiveint, 10)
    mean_hazard_curves = mean = valid.Param(valid.boolean, True)
    mosaic_model = valid.Param(valid.three_letters, '')
    std = valid.Param(valid.boolean, False)
    minimum_distance = valid.Param(valid.positivefloat, 0)
    minimum_intensity = valid.Param(valid.floatdict, {})  # IMT -> minIML
    minimum_magnitude = valid.Param(valid.floatdict, {'default': 0})  # by TRT
    modal_damage_state = valid.Param(valid.boolean, False)
    number_of_ground_motion_fields = valid.Param(valid.positiveint)
    number_of_logic_tree_samples = valid.Param(valid.positiveint, 0)
    num_epsilon_bins = valid.Param(valid.positiveint, 1)
    num_rlzs_disagg = valid.Param(valid.positiveint, 0)
    oversampling = valid.Param(
        valid.Choice('forbid', 'tolerate'), 'tolerate')
    poes = valid.Param(valid.probabilities, [])
    poes_disagg = valid.Param(valid.probabilities, [])
    pointsource_distance = valid.Param(valid.floatdict, {'default': PSDIST})
    postproc_func = valid.Param(valid.mod_func, 'dummy.main')
    postproc_args = valid.Param(valid.dictionary, {})
    prefer_global_site_params = valid.Param(valid.boolean, None)
    ps_grid_spacing = valid.Param(valid.positivefloat, 0)
    quantile_hazard_curves = quantiles = valid.Param(valid.probabilities, [])
    random_seed = valid.Param(valid.positiveint, 42)
    reference_depth_to_1pt0km_per_sec = valid.Param(
        valid.positivefloat, numpy.nan)
    reference_depth_to_2pt5km_per_sec = valid.Param(
        valid.positivefloat, numpy.nan)
    reference_vs30_type = valid.Param(
        valid.Choice('measured', 'inferred'), 'inferred')
    reference_vs30_value = valid.Param(valid.positivefloat, numpy.nan)
    reference_backarc = valid.Param(valid.boolean, False)
    region = valid.Param(valid.wkt_polygon, None)
    region_grid_spacing = valid.Param(valid.positivefloat, None)
    reqv_ignore_sources = valid.Param(valid.namelist, [])
    risk_imtls = valid.Param(valid.intensity_measure_types_and_levels, {})
    risk_investigation_time = valid.Param(valid.positivefloat, None)
    rlz_index = valid.Param(valid.positiveints, None)
    rupture_mesh_spacing = valid.Param(valid.positivefloat, 5.0)
    rupture_dict = valid.Param(valid.dictionary, {})
    complex_fault_mesh_spacing = valid.Param(
        valid.NoneOr(valid.positivefloat), None)
    return_periods = valid.Param(valid.positiveints, [])
    sampling_method = valid.Param(
        valid.Choice('early_weights', 'late_weights',
                     'early_latin', 'late_latin'), 'early_weights')
    mea_tau_phi = valid.Param(valid.boolean, False)
    secondary_perils = valid.Param(valid.namelist, [])
    sec_peril_params = valid.Param(valid.dictionary, {})
    secondary_simulations = valid.Param(valid.dictionary, {})
    ses_per_logic_tree_path = valid.Param(
        valid.compose(valid.nonzero, valid.positiveint), 1)
    ses_seed = valid.Param(valid.positiveint, 42)
    shakemap_id = valid.Param(valid.nice_string, None)
    shakemap_uri = valid.Param(valid.dictionary, {})
    shift_hypo = valid.Param(valid.boolean, False)
    site_effects = valid.Param(
        valid.Choice('no', 'shakemap', 'sitemodel'), 'no')  # shakemap amplif.
    sites = valid.Param(valid.NoneOr(valid.coordinates), None)
    tile_spec = valid.Param(valid.tile_spec, None)
    tiling = valid.Param(valid.boolean, None)
    smlt_branch = valid.Param(valid.simple_id, '')
    soil_intensities = valid.Param(valid.positivefloats, None)
    source_id = valid.Param(valid.namelist, [])
    source_nodes = valid.Param(valid.namelist, [])
    spatial_correlation = valid.Param(valid.Choice('yes', 'no', 'full'), 'yes')
    specific_assets = valid.Param(valid.namelist, [])
    split_sources = valid.Param(valid.boolean, True)
    split_by_gsim = valid.Param(valid.positiveint, 0)
    outs_per_task = valid.Param(valid.positiveint, 4)
    ebrisk_maxsize = valid.Param(valid.positivefloat, 2E10)  # used in ebrisk
    tectonic_region_type = valid.Param(valid.utf8, '*')
    time_event = valid.Param(
        valid.Choice('avg', 'day', 'night', 'transit'), 'avg')
    time_per_task = valid.Param(valid.positivefloat, 600)
    total_losses = valid.Param(valid.Choice(*ALL_COST_TYPES), None)
    truncation_level = valid.Param(
        lambda s: valid.positivefloat(s) or 1E-9)
    uniform_hazard_spectra = valid.Param(valid.boolean, False)
    use_rates = valid.Param(valid.boolean, False)
    vs30_tolerance = valid.Param(int, 0)
    width_of_mfd_bin = valid.Param(valid.positivefloat, None)

    @property
    def no_pointsource_distance(self):
        """
        :returns: True if the pointsource_distance is 1000 km
        """
        return set(self.pointsource_distance.values()) == {1000}

    @property
    def risk_files(self):
        try:
            return self._risk_files
        except AttributeError:
            self._risk_files = get_risk_files(self.inputs)
            return self._risk_files

    @property
    def input_dir(self):
        """
        :returns: absolute path to where the job.ini is
        """
        return os.path.abspath(os.path.dirname(self.inputs['job_ini']))

    def get_input_size(self):
        """
        :returns: the total size in bytes of the input files

        NB: this will fail if the files are not available, so it should be
        called only before starting the calculation. The same information
        is stored in the datastore.
        """
        # NB: when the OqParam object is instantiated from a dictionary and
        # not from a job.ini file the key 'job_ini' has value '<in-memory>'
        return sum(os.path.getsize(f) for f in self._input_files
                   if f != '<in-memory>')

    def get_reqv(self):
        """
        :returns: an instance of class:`RjbEquivalent` if reqv_hdf5 is set
        """
        if 'reqv' not in self.inputs:
            return
        return {key: valid.RjbEquivalent(value)
                for key, value in self.inputs['reqv'].items()}

    def fix_legacy_names(self, dic):
        for name in list(dic):
            if name in self.ALIASES:
                if self.ALIASES[name] in dic:
                    # passed both the new (self.ALIASES[name]) and the old name
                    raise NameError(
                        'Please remove %s, you should use only %s' %
                        (name, self.ALIASES[name]))
                # use the new name instead of the old one
                dic[self.ALIASES[name]] = dic.pop(name)
        inp = dic.get('inputs', {})
        if 'sites' in inp:
            if inp.get('site_model'):
                raise NameError('Please remove sites, you should use '
                                'only site_model')
            inp['site_model'] = [inp.pop('sites')]
            self.prefer_global_site_params = True

    def __init__(self, **names_vals):
        if '_log' in names_vals:  # called from engine
            del names_vals['_log']
        self.fix_legacy_names(names_vals)
        super().__init__(**names_vals)
        if 'job_ini' not in self.inputs:
            self.inputs['job_ini'] = '<in-memory>'
        if 'calculation_mode' not in names_vals:
            self.raise_invalid('Missing calculation_mode')
        if 'region_constraint' in names_vals:
            if 'region' in names_vals:
                self.raise_invalid('You cannot have both region and '
                                   'region_constraint')
            logging.warning(
                'region_constraint is obsolete, use region instead')
            self.region = valid.wkt_polygon(
                names_vals.pop('region_constraint'))
        if ('intensity_measure_types_and_levels' in names_vals and
                'intensity_measure_types' in names_vals):
            logging.warning('Ignoring intensity_measure_types since '
                            'intensity_measure_types_and_levels is set')
        if 'iml_disagg' in names_vals:
            # normalize things like SA(0.10) -> SA(0.1)
            self.iml_disagg = {str(from_string(imt)): [iml]
                               for imt, iml in self.iml_disagg.items()}
            self.hazard_imtls = self.iml_disagg
            if 'intensity_measure_types_and_levels' in names_vals:
                self.raise_invalid(
                    'Please remove the intensity_measure_types_and_levels'
                    ': they will be inferred from the iml_disagg '
                    'dictionary')
        elif 'intensity_measure_types_and_levels' in names_vals:
            self.hazard_imtls = self.intensity_measure_types_and_levels
            delattr(self, 'intensity_measure_types_and_levels')
            lens = set(map(len, self.hazard_imtls.values()))
            if len(lens) > 1:
                dic = {imt: len(ls) for imt, ls in self.hazard_imtls.items()}
                raise ValueError(
                    'Each IMT must have the same number of levels, instead '
                    'you have %s' % dic)
        elif 'intensity_measure_types' in names_vals:
            self.hazard_imtls = dict.fromkeys(
                self.intensity_measure_types, [0])
            delattr(self, 'intensity_measure_types')
        if 'minimum_intensity' in names_vals:
            dic = {}
            for imt, iml in self.minimum_intensity.items():
                if imt == 'default':
                    dic[imt] = iml
                else:
                    # normalize IMT, for instance SA(1.) => SA(1.0)
                    dic[from_string(imt).string] = iml
            self.minimum_intensity = dic
        if ('ps_grid_spacing' in names_vals and
                float(names_vals['ps_grid_spacing']) and
                'pointsource_distance' not in names_vals):
            self.pointsource_distance = dict(default=40.)
        if self.collapse_level >= 0:
            self.time_per_task = 1_000_000  # disable task_splitting
        # cut maximum_distance with minimum_magnitude
        if hasattr(self, 'maximum_distance'):
            # can be missing in post-calculations
            self.maximum_distance.cut(self.minimum_magnitude)
        self.check_hazard()
        self.check_gsim_lt()
        self.check_risk()
        self.check_ebrisk()

    def raise_invalid(self, msg):
        """
        Raise an InvalidFile error
        """
        raise InvalidFile('%s: %s' % (self.inputs['job_ini'], msg))

    def check_gsim_lt(self):
        # check the gsim_logic_tree and set req_site_params
        self.req_site_params = set()
        if self.inputs.get('gsim_logic_tree'):
            if self.gsim != '[FromFile]':
                self.raise_invalid('if `gsim_logic_tree_file` is set, there'
                                   ' must be no `gsim` key')
            path = os.path.join(
                self.base_path, self.inputs['gsim_logic_tree'])
            gsim_lt = GsimLogicTree(path, ['*'])
            # check the GSIMs
            self._trts = set()
            discard = {trt.strip() for trt in self.discard_trts.split(',')}
            for trt, gsims in gsim_lt.values.items():
                if trt not in discard:
                    self.check_gsims(gsims)
                    self._trts.add(trt)
        elif self.gsim:
            self.check_gsims([valid.gsim(self.gsim, self.base_path)])
        else:
            self.raise_invalid('Missing gsim or gsim_logic_tree_file')
        if 'amplification' in self.inputs:
            self.req_site_params.add('ampcode')
        self.req_site_params = sorted(self.req_site_params)

    def check_risk(self):
        # checks for risk
        self._risk_files = get_risk_files(self.inputs)
        if (self.job_type == 'risk' and not self.shakemap_uri
                and not self.aristotle):
            # check the risk_files
            hc = self.hazard_calculation_id
            if 'damage' in self.calculation_mode and not hc:
                ok = any('fragility' in key for key in self._risk_files)
                if not ok:
                    self.raise_invalid('Missing fragility files')
            elif ('risk' in self.calculation_mode and
                  self.calculation_mode != 'multi_risk' and not hc):
                ok = any('vulnerability' in key for key in self._risk_files)
                if not ok:
                    self.raise_invalid('missing vulnerability files')

        if self.hazard_precomputed() and self.job_type == 'risk':
            self.check_missing('site_model', 'debug')
            self.check_missing('gsim_logic_tree', 'debug')
            self.check_missing('source_model_logic_tree', 'debug')

        if self.job_type == 'risk':
            self.check_aggregate_by()

        if ('hazard_curves' not in self.inputs and
                'gmfs' not in self.inputs and
                'multi_peril' not in self.inputs and
                self.inputs['job_ini'] != '<in-memory>' and
                self.calculation_mode != 'scenario' and
                not self.hazard_calculation_id):
            if not hasattr(self, 'truncation_level'):
                self.raise_invalid("Missing truncation_level")

        if 'reinsurance' in self.inputs:
            self.check_reinsurance()

        # check investigation_time
        if (self.investigation_time and
                self.calculation_mode.startswith('scenario')):
            raise ValueError('%s: there cannot be investigation_time in %s'
                             % (self.inputs['job_ini'], self.calculation_mode))

        # check inputs
        unknown = set(self.inputs) - self.KNOWN_INPUTS
        if unknown:
            raise ValueError('Unknown key %s_file in %s' %
                             (unknown.pop(), self.inputs['job_ini']))

        # check return_periods vs poes
        if self.return_periods and not self.poes and self.investigation_time:
            self.poes = 1 - numpy.exp(
                - self.investigation_time / numpy.array(self.return_periods))

        # checks for classical_damage
        if self.calculation_mode == 'classical_damage':
            if self.conditional_loss_poes:
                self.raise_invalid('conditional_loss_poes are not defined '
                                   'for classical_damage calculations')
            if (not self.investigation_time and
                    not self.hazard_calculation_id):
                self.raise_invalid('missing investigation_time')

    def check_ebrisk(self):
        # check specific to ebrisk
        if self.calculation_mode == 'ebrisk':
            if self.ground_motion_fields:
                print('ground_motion_fields overridden to false',
                      file=sys.stderr)
                self.ground_motion_fields = False
            if self.hazard_curves_from_gmfs:
                self.raise_invalid(
                    'hazard_curves_from_gmfs=true is invalid in ebrisk')

    def check_hazard(self):
        # check for GMFs from file
        if (self.inputs.get('gmfs', '').endswith('.csv')
                and 'site_model' not in self.inputs and self.sites is None):
            self.raise_invalid('You forgot to specify a site_model')
        elif self.inputs.get('gmfs', '').endswith('.xml'):
            self.raise_invalid('GMFs in XML are not supported anymore')

        # checks for event_based
        if 'event_based' in self.calculation_mode:
            if self.ps_grid_spacing:
                logging.warning('ps_grid_spacing is ignored in event_based '
                                'calculations')
            if self.ses_per_logic_tree_path >= TWO32:
                self.raise_invalid('ses_per_logic_tree_path too big: %d' %
                                   self.ses_per_logic_tree_path)
            if self.number_of_logic_tree_samples >= TWO16:
                self.raise_invalid('number_of_logic_tree_samples too big: %d'
                                   % self.number_of_logic_tree_samples)

        # check for amplification
        if ('amplification' in self.inputs and self.imtls and
                self.calculation_mode in ['classical', 'classical_risk',
                                          'disaggregation']):
            check_same_levels(self.imtls)

        # checks for disaggregation
        if self.calculation_mode == 'disaggregation':
            if not self.poes_disagg and self.poes:
                self.poes_disagg = self.poes
            elif not self.poes and self.poes_disagg:
                self.poes = self.poes_disagg
            elif self.poes != self.poes_disagg:
                self.raise_invalid('poes_disagg != poes: %s!=%s' %
                                   (self.poes_disagg, self.poes))
            if not self.poes_disagg and not self.iml_disagg:
                self.raise_invalid('poes_disagg or iml_disagg must be set')
            elif self.poes_disagg and self.iml_disagg:
                self.raise_invalid(
                    'iml_disagg and poes_disagg cannot be set '
                    'at the same time')
            if not self.disagg_bin_edges:
                for k in ('mag_bin_width', 'distance_bin_width',
                          'coordinate_bin_width', 'num_epsilon_bins'):
                    if k not in vars(self):
                        self.raise_invalid('%s must be set' % k)
            if self.disagg_outputs and not any(
                    'Eps' in out for out in self.disagg_outputs):
                self.num_epsilon_bins = 1
            if self.rlz_index is not None and self.num_rlzs_disagg != 1:
                self.raise_invalid('you cannot set rlzs_index and '
                                   'num_rlzs_disagg at the same time')

        # check compute_rtgm will run
        if 'rtgm' in self.postproc_func:
            # all of PGA, SA(0.2) and SA(1.0) must be among the IMTs
            if not {'PGA', 'SA(0.2)', 'SA(1.0)'} <= set(self.imtls):
                self.raise_invalid('the IMTs PGA, SA(0.2), and SA(1.0)'
                                   ' are required to use compute_rtgm')

    def validate(self):
        """
        Set self.loss_types
        """
        from openquake.commonlib import datastore  # avoid circular import
        if self.hazard_calculation_id:
            with datastore.read(self.hazard_calculation_id) as ds:
                self._parent = ds['oqparam']
        else:
            self._parent = None
        # set all_cost_types
        # rt has the form 'vulnerability/structural', 'fragility/...', ...
        costtypes = set(rt.rsplit('/')[1] for rt in self.risk_files)
        if not costtypes and self.hazard_calculation_id:
            try:
                self._risk_files = rfs = get_risk_files(self._parent.inputs)
                costtypes = set(rt.rsplit('/')[1] for rt in rfs)
            except OSError:  # FileNotFound for wrong hazard_calculation_id
                pass
        self.all_cost_types = sorted(costtypes)  # including occupants

        # fix minimum_asset_loss
        self.minimum_asset_loss = {
            ln: calc.filters.getdefault(self.minimum_asset_loss, ln)
            for ln in self.loss_types}

        super().validate()
        self.check_source_model()

        if 'post_loss_amplification' in self.inputs:
            df = pandas.read_csv(self.inputs['post_loss_amplification'])
            check_increasing(df, 'return_period', 'pla_factor')
            if self.avg_losses:
                self.raise_invalid("you must set avg_losses=false with "
                                   "post_loss_amplification")

    def check_gsims(self, gsims):
        """
        :param gsims: a sequence of GSIM instances
        """
        for gsim in gsims:
            self.req_site_params.update(gsim.REQUIRES_SITES_PARAMETERS)
        has_sites = self.sites is not None or 'site_model' in self.inputs
        if not has_sites:
            return

        imts = set()
        for imt in self.imtls:
            im = from_string(imt)
            if imt.startswith("SA"):
                imts.add("SA")
            elif imt.startswith("SDi"):
                imts.add("SDi")
            elif imt.startswith("EAS"):
                imts.add("EAS")
            elif imt.startswith("FAS"):
                imts.add("FAS")
            elif imt.startswith("DRVT"):
                imts.add("DRVT")
            elif imt.startswith("AvgSA"):
                imts.add("AvgSA")
            else:
                imts.add(im.string)

        for gsim in gsims:
            if (hasattr(gsim, 'weight') or
                    self.calculation_mode == 'aftershock'):
                continue  # disable the check
            restrict_imts = gsim.DEFINED_FOR_INTENSITY_MEASURE_TYPES
            if restrict_imts:
                names = set(cls.__name__ for cls in restrict_imts)
                invalid_imts = ', '.join(imts - names)
                if invalid_imts:
                    raise ValueError(
                        'The IMT %s is not accepted by the GSIM %s' %
                        (invalid_imts, gsim))

            if (self.hazard_calculation_id is None
                    and 'site_model' not in self.inputs):
                # look at the required sites parameters: they must have
                # a valid value; the other parameters can keep a NaN
                # value since they are not used by the calculator
                for param in gsim.REQUIRES_SITES_PARAMETERS:
                    if param in ('lon', 'lat'):  # no check
                        continue
                    elif param in site.param:  # mandatory params
                        param_name = site.param[param]
                        param_value = getattr(self, param_name)
                        if (isinstance(param_value, float) and
                                numpy.isnan(param_value)):
                            raise ValueError(
                                'Please set a value for %r, this is required '
                                'by the GSIM %s' % (param_name, gsim))

    @property
    def tses(self):
        """
        Return the total time as investigation_time * ses_per_logic_tree_path
        * (number_of_logic_tree_samples or 1)
        """
        return (self.investigation_time * self.ses_per_logic_tree_path *
                (self.number_of_logic_tree_samples or 1))

    @property
    def time_ratio(self):
        """
        The ratio risk_investigation_time / eff_investigation_time per rlz
        """
        if self.investigation_time is None:
            raise ValueError('Missing investigation_time in the .ini file')
        return (self.risk_investigation_time or self.investigation_time) / (
            self.investigation_time * self.ses_per_logic_tree_path)

    def risk_event_rates(self, num_events, num_haz_rlzs):
        """
        :param num_events: the number of events per risk realization
        :param num_haz_rlzs: the number of hazard realizations

        If risk_investigation_time is 1, returns the annual event rates for
        each realization as a list, possibly of 1 element.
        """
        if self.investigation_time is None:
            # for scenarios there is no effective_time
            return numpy.full_like(num_events, len(num_events))
        else:
            # for event based compute the time_ratio
            time_ratio = self.time_ratio
            if self.collect_rlzs:
                time_ratio /= num_haz_rlzs
            return time_ratio * num_events
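
    # Worked example (hypothetical numbers): with investigation_time = 50,
    # ses_per_logic_tree_path = 10 and risk_investigation_time unset,
    # time_ratio = 50 / (50 * 10) = 0.1; with collect_rlzs and 2 hazard
    # realizations it becomes 0.05, so
    # risk_event_rates(numpy.array([1000, 1200]), 2) would return
    # array([50., 60.]).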

    @property
    def imtls(self):
        """
        Returns a DictArray with the hazard intensity measure types and
        levels, if given, or the risk ones.
        """
        imtls = self.hazard_imtls or self.risk_imtls
        return DictArray(sort_by_imt(imtls)) if imtls else {}

    @property
    def min_iml(self):
        """
        :returns: a vector of minimum intensities, one per IMT
        """
        # if 'scenario' in self.calculation_mode:  # disable min_iml
        #     return numpy.full(len(self.imtls), 1E-10)
        mini = self.minimum_intensity
        if mini:
            for imt in self.imtls:
                try:
                    mini[imt] = calc.filters.getdefault(mini, imt)
                except KeyError:
                    mini[imt] = 0
        if 'default' in mini:
            del mini['default']
        min_iml = F64([mini.get(imt) or 1E-10 for imt in self.imtls])
        return min_iml

    def get_max_iml(self):
        """
        :returns: a vector of extreme intensities, one per IMT
        """
        max_iml = numpy.zeros(len(self.imtls))
        for m, imt in enumerate(self.imtls):
            max_iml[m] = calc.filters.getdefault(self.extreme_gmv, imt)
        return max_iml

    def levels_per_imt(self):
        """
        :returns: the number of levels per IMT (a.k.a. L1)
        """
        return self.imtls.size // len(self.imtls)

    def set_risk_imts(self, risklist):
        """
        :param risklist:
            a list of risk functions with attributes .id, .loss_type, .kind

        Set the attribute risk_imtls.
        """
        risk_imtls = AccumDict(accum=[])  # imt -> imls
        for i, rf in enumerate(risklist):
            if not hasattr(rf, 'imt') or rf.kind.endswith('_retrofitted'):
                # for consequence or retrofitted
                continue
            if hasattr(rf, 'build'):  # FragilityFunctionList
                rf = rf.build(risklist.limit_states,
                              self.continuous_fragility_discretization,
                              self.steps_per_interval)
                risklist[i] = rf
            from_string(rf.imt)  # make sure it is a valid IMT
            risk_imtls[rf.imt].extend(iml for iml in rf.imls if iml > 0)
        suggested = ['\nintensity_measure_types_and_levels = {']
        for imt, imls in risk_imtls.items():
            risk_imtls[imt] = list(valid.logscale(min(imls), max(imls), 20))
            suggested.append(' %r: logscale(%s, %s, 20),' %
                             (imt, min(imls), max(imls)))
        suggested[-1] += '}'
        self.risk_imtls = {imt: [min(ls)] for imt, ls in risk_imtls.items()}

        if self.uniform_hazard_spectra:
            self.check_uniform_hazard_spectra()
        if not self.hazard_imtls:
            if (self.calculation_mode.startswith('classical') or
                    self.hazard_curves_from_gmfs):
                self.raise_invalid(
                    'You must provide the intensity measure levels '
                    'explicitly. Suggestion:' + '\n '.join(suggested))
        if (len(self.imtls) == 0 and 'event_based' in self.calculation_mode
                and 'gmfs' not in self.inputs and
                not self.hazard_calculation_id and self.ground_motion_fields):
            raise ValueError('Please define intensity_measure_types in %s' %
                             self.inputs['job_ini'])
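
    # For reference, the "Suggestion" built above has the shape of a job.ini
    # fragment such as (hypothetical numbers taken from a vulnerability file):
    #   intensity_measure_types_and_levels = {
    #    'PGA': logscale(0.05, 2.0, 20),
    #    'SA(0.3)': logscale(0.05, 2.5, 20)}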

    def get_primary_imtls(self):
        """
        :returns: IMTs and levels which are not secondary
        """
        sec_imts = set(self.sec_imts)
        return {imt: imls for imt, imls in self.imtls.items()
                if imt not in sec_imts}

    def hmap_dt(self):  # used for CSV export
        """
        :returns: a composite dtype (imt, poe)
        """
        return numpy.dtype([('%s-%s' % (imt, poe), F32)
                            for imt in self.imtls for poe in self.poes])
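
    # Illustrative sketch (hypothetical values): with IMTs
    # ['PGA', 'SA(0.5)'] and poes = [0.02, 0.1], hmap_dt() builds a dtype
    # with the float32 fields 'PGA-0.02', 'PGA-0.1', 'SA(0.5)-0.02' and
    # 'SA(0.5)-0.1', which become the column headers of the exported CSV.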

    def uhs_dt(self):  # used for CSV and NPZ export
        """
        :returns: a composite dtype (poe, imt)
        """
        imts_dt = numpy.dtype([(imt, F32) for imt in self.imtls
                               if imt.startswith(('PGA', 'SA'))])
        return numpy.dtype([('%.6f' % poe, imts_dt) for poe in self.poes])

    def imt_periods(self):
        """
        :returns: the IMTs with a period, to be used in a UHS calculation
        """
        imts = []
        for im in self.imtls:
            imt = from_string(im)
            if imt.period or imt.string == 'PGA':
                imts.append(imt)
        return imts

    def imt_dt(self, dtype=F64):
        """
        :returns: a numpy dtype {imt: float}
        """
        return numpy.dtype([(imt, dtype) for imt in sort_by_imt(self.imtls)])

    @property
    def lti(self):
        """
        Dictionary extended_loss_type -> extended_loss_type index
        """
        return {lt: i for i, lt in enumerate(self.ext_loss_types)}

    @property
    def loss_types(self):
        """
        :returns: list of loss types (empty for hazard)
        """
        if not hasattr(self, "all_cost_types"):  # for hazard
            return []
        names = []
        for lt in self.all_cost_types:
            names.append(lt)
        return names

    @property
    def ext_loss_types(self):
        """
        :returns: list of loss types + secondary loss types
        """
        etypes = self.loss_types
        if self.total_losses:
            etypes = self.loss_types + [self.total_losses]
        if 'insurance' in self.inputs:
            itypes = [lt + '_ins' for lt in self.inputs['insurance']]
            etypes = self.loss_types + itypes
        return etypes
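
    # Illustrative sketch (hypothetical configuration): with loss_types
    # ['contents', 'nonstructural', 'structural'] and
    # total_losses = 'structural+nonstructural', ext_loss_types becomes
    # ['contents', 'nonstructural', 'structural', 'structural+nonstructural']
    # and lti maps each of those names to its index 0..3; note that an
    # 'insurance' input replaces the total-losses extension with the
    # '<loss_type>_ins' entries instead.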

    def loss_dt(self, dtype=F64):
        """
        :returns: a composite dtype based on the loss types including
                  occupants
        """
        return numpy.dtype(self.loss_dt_list(dtype))

    def loss_dt_list(self, dtype=F64):
        """
        :returns: a data type list [(loss_name, dtype), ...]
        """
        dts = [(str(lt), dtype) for lt in self.loss_types]
        return dts

    def loss_maps_dt(self, dtype=F32):
        """
        Return a composite data type for loss maps
        """
        ltypes = self.loss_dt(dtype).names
        lst = [('poe-%s' % poe, dtype) for poe in self.conditional_loss_poes]
        return numpy.dtype([(lt, lst) for lt in ltypes])

    def gmf_data_dt(self):
        """
        :returns: a composite data type for the GMFs
        """
        lst = [('sid', U32), ('eid', U32)]
        for m, imt in enumerate(self.get_primary_imtls()):
            lst.append((f'gmv_{m}', F32))
        for out in self.sec_imts:
            lst.append((out, F32))
        return numpy.dtype(lst)

    def get_sec_perils(self):
        """
        :returns: a list of secondary perils
        """
        return SecondaryPeril.instantiate(
            self.secondary_perils, self.sec_peril_params)

    @cached_property
    def sec_imts(self):
        """
        :returns: a list of secondary outputs
        """
        outs = []
        for sp in self.get_sec_perils():
            outs.extend(sp.outputs)
        return outs

    def no_imls(self):
        """
        Return True if there are no intensity measure levels
        """
        return sum(sum(imls) for imls in self.imtls.values()) == 0

    @property
    def correl_model(self):
        """
        Return a correlation object. See
        :mod:`openquake.hazardlib.correlation` for more info.
        """
        correl_name = self.ground_motion_correlation_model
        if correl_name is None:  # no correlation model
            return
        correl_model_cls = getattr(
            correlation, '%sCorrelationModel' % correl_name)
        return correl_model_cls(**self.ground_motion_correlation_params)

    @property
    def cross_correl(self):
        """
        Return a cross correlation object (or None). See
        :mod:`openquake.hazardlib.cross_correlation` for more info.
        """
        try:
            cls = getattr(cross_correlation, self.cross_correlation)
        except AttributeError:
            return None
        return cls()

    @property
    def rupture_xml(self):
        return ('rupture_model' in self.inputs and
                self.inputs['rupture_model'].endswith('.xml'))

    @property
    def aristotle(self):
        """
        Return True if we are in Aristotle mode, i.e. there is an HDF5
        exposure with a known structure
        """
        exposures = self.inputs.get('exposure', [])
        return exposures and exposures[0].endswith('.hdf5')

    @property
    def fastmean(self):
        """
        Return True if it is possible to use the fast mean algorithm
        """
        return (not self.individual_rlzs and
                self.soil_intensities is None and
                list(self.hazard_stats()) == ['mean'] and self.use_rates)

    def hazard_stats(self):
        """
        Return a dictionary stat_name -> stat_func
        """
        names = []  # name of statistical functions
        funcs = []  # statistical functions of kind func(values, weights)
        if self.mean:
            names.append('mean')
            funcs.append(stats.mean_curve)
        if self.std:
            names.append('std')
            funcs.append(stats.std_curve)
        for q in self.quantiles:
            names.append('quantile-%s' % q)
            funcs.append(functools.partial(stats.quantile_curve, q))
        if self.max:
            names.append('max')
            funcs.append(stats.max_curve)
        return dict(zip(names, funcs))
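
    # Illustrative sketch (hypothetical job.ini settings): with the default
    # mean = true plus quantiles = 0.15 0.85 and std = true, hazard_stats()
    # returns a dict with the keys
    #   ['mean', 'std', 'quantile-0.15', 'quantile-0.85']
    # mapped to the corresponding curve functions from
    # openquake.hazardlib.stats (the quantiles via functools.partial).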

    @property
    def job_type(self):
        """
        'hazard' or 'risk'
        """
        return 'risk' if ('risk' in self.calculation_mode or
                          'damage' in self.calculation_mode or
                          'bcr' in self.calculation_mode) else 'hazard'

    def is_event_based(self):
        """
        The calculation mode is event_based, event_based_risk or ebrisk
        """
        return (self.calculation_mode in
                'event_based_risk ebrisk event_based_damage')

    def is_valid_disagg_by_src(self):
        """
        disagg_by_src can be set only if ps_grid_spacing = 0
        """
        if self.disagg_by_src:
            return self.ps_grid_spacing == 0
        return True

    def is_valid_concurrent_tasks(self):
        """
        At most you can use 30_000 tasks
        """
        return self.concurrent_tasks <= 30_000
    def is_valid_shakemap(self):
        """
        hazard_calculation_id must be set if shakemap_id is set
        """
        if self.shakemap_uri:
            kind = self.shakemap_uri['kind']
            get_array = getattr(shakemap.parsers, 'get_array_' + kind)
            sig = inspect.signature(get_array)
            # parameters without default value
            params = [p.name for p in list(sig.parameters.values())
                      if p.default is p.empty]
            all_params = list(sig.parameters)
            if not all(p in list(self.shakemap_uri) for p in params) or \
                    not all(p in all_params for p in list(self.shakemap_uri)):
                raise ValueError(
                    'Error in shakemap_uri: Expected parameters %s, '
                    'valid parameters %s, got %s' %
                    (params, all_params, list(self.shakemap_uri)))
        return self.hazard_calculation_id if (
            self.shakemap_id or self.shakemap_uri) else True

    def is_valid_truncation_level(self):
        """
        In presence of a correlation model the truncation level must be nonzero
        """
        if self.ground_motion_correlation_model:
            return self.truncation_level != 0
        else:
            return True

    def is_valid_geometry(self):
        """
        It is possible to infer the geometry only if exactly
        one of sites, sites_csv, hazard_curves_csv, region is set.
        You did set more than one, or nothing.
        """
        if self.calculation_mode in ('preclassical', 'aftershock'):
            return True  # disable the check
        if 'hazard_curves' in self.inputs and (
                self.sites is not None or 'site_model' in self.inputs):
            return False
        has_sites = self.sites is not None or 'site_model' in self.inputs
        if not has_sites and not self.ground_motion_fields:
            # when generating only the ruptures you do not need the sites
            return True
        if ('risk' in self.calculation_mode or
                'damage' in self.calculation_mode or
                'bcr' in self.calculation_mode):
            return True  # no check on the sites for risk
        flags = dict(
            sites=bool(self.sites),
            site_model_csv=self.inputs.get('site_model', 0),
            hazard_curves_csv=self.inputs.get('hazard_curves', 0),
            gmfs_csv=self.inputs.get('gmfs', 0),
            region=bool(self.region and self.region_grid_spacing))
        # NB: below we check that all the flags are mutually exclusive
        return sum(bool(v) for v in flags.values()) == 1 or self.inputs.get(
            'exposure') or self.inputs.get('site_model')

    def is_valid_poes(self):
        """
        When computing hazard maps and/or uniform hazard spectra,
        the poes list must be non-empty.
        """
        if self.hazard_maps or self.uniform_hazard_spectra:
            return bool(self.poes)
        else:
            return True

    def is_valid_maximum_distance(self):
        """
        Invalid maximum_distance={maximum_distance}: {error}
        """
        if 'gsim_logic_tree' not in self.inputs:
            return True  # disable the check
        gsim_lt = self.inputs['gsim_logic_tree']  # set self._trts
        trts = set(self.maximum_distance)
        unknown = ', '.join(
            trts - self._trts - set(self.minimum_magnitude) - {'default'})
        if unknown:
            self.error = ('setting the maximum_distance for %s which is '
                          'not in %s' % (unknown, gsim_lt))
            return False
        for trt, val in self.maximum_distance.items():
            if trt not in self._trts and trt != 'default':
                # not a problem, the associated maxdist will simply be ignored
                logging.warning('tectonic region %r not in %s', trt, gsim_lt)
        if 'default' not in trts and trts < self._trts:
            missing = ', '.join(self._trts - trts)
            self.error = 'missing distance for %s and no default' % missing
            return False
        return True

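    # Illustrative sketch (assumed job.ini values): with
    #   maximum_distance = {'Active Shallow Crust': 200, 'default': 300}
    # a tectonic region type missing from the dictionary falls back to the
    # 'default' entry, so the "missing distance" branch above is not taken.
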
    def is_valid_intensity_measure_types(self):
        """
        If the IMTs and levels are extracted from the risk models,
        they must not be set directly. Moreover, if
        `intensity_measure_types_and_levels` is set directly,
        `intensity_measure_types` must not be set.
        """
        if self.ground_motion_correlation_model:
            for imt in self.imtls:
                if not (imt.startswith('SA') or imt in ['PGA', 'PGV']):
                    raise ValueError(
                        'Correlation model %s does not accept IMT=%s' % (
                            self.ground_motion_correlation_model, imt))
        if self.risk_files:  # IMTLs extracted from the risk files
            return (self.intensity_measure_types == '' and
                    self.intensity_measure_types_and_levels is None)
        elif not self.hazard_imtls and not hasattr(self, 'risk_imtls'):
            return False
        return True

    def is_valid_intensity_measure_levels(self):
        """
        In order to compute hazard curves, `intensity_measure_types_and_levels`
        must be set or extracted from the risk models.
        """
        invalid = self.no_imls() and not self.risk_files and (
            self.hazard_curves_from_gmfs or self.calculation_mode in
            ('classical', 'disaggregation'))
        return not invalid

    def is_valid_soil_intensities(self):
        """
        soil_intensities must be defined only in classical calculations
        with amplification_method=convolution
        """
        classical = ('classical' in self.calculation_mode or
                     'disaggregation' in self.calculation_mode)
        if (classical and 'amplification' in self.inputs and
                self.amplification_method == 'convolution'):
            return len(self.soil_intensities) > 1
        else:
            return self.soil_intensities is None

    def is_valid_specific_assets(self):
        """
        Read the special assets from the parameters `specific_assets` or
        `specific_assets_csv`, if present. You cannot have both. The concept
        is meaningful only for risk calculators.
        """
        if self.specific_assets and 'specific_assets' in self.inputs:
            return False
        else:
            return True

    def is_valid_export_dir(self):
        """
        export_dir={export_dir} must refer to a directory,
        and the user must have the permission to write on it.
        """
        if self.export_dir and not os.path.isabs(self.export_dir):
            self.export_dir = os.path.normpath(
                os.path.join(self.input_dir, self.export_dir))
        if not self.export_dir:
            self.export_dir = os.path.expanduser('~')  # home directory
            logging.info('export_dir not specified. Using export_dir=%s'
                         % self.export_dir)
            return True
        if not os.path.exists(self.export_dir):
            try:
                os.makedirs(self.export_dir)
            except PermissionError:
                return False
            return True
        return os.path.isdir(self.export_dir) and os.access(
            self.export_dir, os.W_OK)

    def is_valid_complex_fault_mesh_spacing(self):
        """
        The `complex_fault_mesh_spacing` parameter can be None only if
        `rupture_mesh_spacing` is set. In that case it is identified with it.
        """
        rms = getattr(self, 'rupture_mesh_spacing', None)
        if rms and not getattr(self, 'complex_fault_mesh_spacing', None):
            self.complex_fault_mesh_spacing = self.rupture_mesh_spacing
        return True

    def is_valid_collect_rlzs(self):
        """
        sampling_method must be early_weights with collect_rlzs=true
        """
        if self.collect_rlzs is None:
            self.collect_rlzs = self.number_of_logic_tree_samples > 1
        if self.job_type == 'hazard':
            return True  # there are more checks for risk calculations
        if self.collect_rlzs and self.individual_rlzs:
            self.raise_invalid("you cannot have individual_rlzs=true with "
                               "collect_rlzs=true")
        if self.calculation_mode == 'event_based_damage':
            if not self.investigation_time:
                self.raise_invalid('Missing investigation_time')
            return True
        elif self.collect_rlzs is False:
            return True
        elif self.hazard_calculation_id:
            n = self._parent.number_of_logic_tree_samples
            if n and n != self.number_of_logic_tree_samples:
                raise ValueError('Please specify number_of_logic_tree_samples'
                                 '=%d' % n)
        hstats = list(self.hazard_stats())
        if hstats and hstats != ['mean']:
            self.raise_invalid(
                'quantiles are not supported with collect_rlzs=true')
        if self.number_of_logic_tree_samples == 0:
            raise ValueError('collect_rlzs=true is inconsistent with '
                             'full enumeration')
        return self.sampling_method == 'early_weights'

    def check_aggregate_by(self):
        tagset = asset.tagset(self.aggregate_by)
        if 'id' in tagset and len(tagset) > 1:
            raise ValueError('aggregate_by = id must contain a single tag')
        elif 'site_id' in tagset and len(tagset) > 1:
            raise ValueError(
                'aggregate_by = site_id must contain a single tag')
        elif 'reinsurance' in self.inputs:
            if not any(['policy'] == aggby for aggby in self.aggregate_by):
                err_msg = ('The field `aggregate_by = policy`'
                           ' is required for reinsurance calculations.')
                if self.aggregate_by:
                    err_msg += (' Got `aggregate_by = %s` instead.'
                                % self.aggregate_by)
                self.raise_invalid(err_msg)
        return True

    def check_reinsurance(self):
        # there must be a 'treaty' and a loss type (possibly a total type)
        dic = self.inputs['reinsurance'].copy()
        try:
            [lt] = dic
        except ValueError:
            self.raise_invalid(
                'too many loss types in reinsurance %s' % list(dic))
        if lt not in scientific.LOSSID:
            self.raise_invalid('unknown loss type %s in reinsurance' % lt)
        if '+' in lt and not self.total_losses:
            self.raise_invalid('you forgot to set total_losses=%s' % lt)

    def check_uniform_hazard_spectra(self):
        ok_imts = [imt for imt in self.imtls
                   if imt == 'PGA' or imt.startswith('SA')]
        if not ok_imts:
            raise ValueError('The `uniform_hazard_spectra` can be True only '
                             'if the IMT set contains SA(...) or PGA, got %s'
                             % list(self.imtls))
        elif len(ok_imts) == 1:
            logging.warning(
                'There is a single IMT, the uniform_hazard_spectra plot will '
                'contain a single point')

    def check_source_model(self):
        if ('hazard_curves' in self.inputs or 'gmfs' in self.inputs or
                'multi_peril' in self.inputs or
                'rupture_model' in self.inputs or
                'scenario' in self.calculation_mode or
                'ins_loss' in self.inputs):
            return
        if ('source_model_logic_tree' not in self.inputs and
                self.inputs['job_ini'] != '<in-memory>' and
                not self.hazard_calculation_id):
            raise ValueError('Missing source_model_logic_tree in %s '
                             'or missing --hc option' %
                             self.inputs.get('job_ini', 'job_ini'))

    def check_missing(self, param, action):
        """
        Make sure the given parameter is missing in the job.ini file
        """
        assert action in ('debug', 'info', 'warn', 'error'), action
        if self.inputs.get(param):
            msg = '%s_file is ignored in %s' % (param, self.calculation_mode)
            if action == 'error':
                self.raise_invalid(msg)
            else:
                getattr(logging, action)(msg)

    def hazard_precomputed(self):
        """
        :returns: True if the hazard is precomputed
        """
        if 'gmfs' in self.inputs or 'hazard_curves' in self.inputs:
            return True
        return self.hazard_calculation_id

    @classmethod
    def docs(cls):
        """
        :returns: a dictionary parameter name -> parameter documentation
        """
        dic = {}
        lst = re.split(r'\n([\w_]+):\n', __doc__)
        for name, doc in zip(lst[1::2], lst[2::2]):
            name = name.split()[-1]
            dic[name] = doc
        return dic

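    # Illustrative sketch (hypothetical usage): the regular expression splits
    # the module docstring into alternating parameter names and descriptions,
    # so lst[1::2] holds names such as 'aggregate_by' and lst[2::2] the
    # corresponding text; docs()['aggregate_by'] would then return the block
    # following "aggregate_by:" in __doc__.
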
    # tested in geese; expected to work for the hazard mosaic
    def to_ini(self):
        """
        Converts the parameters into a string in .ini format
        """
        dic = {k: v for k, v in vars(self).items() if not k.startswith('_')}
        del dic['base_path']
        del dic['req_site_params']
        del dic['export_dir']
        return '[general]\n' + '\n'.join(
            to_ini(k, v) for k, v in dic.items())

    def __toh5__(self):
        return hdf5.dumps(vars(self)), {}

    def __fromh5__(self, array, attrs):
        if isinstance(array, numpy.ndarray):
            # old format <= 3.11, tested in read_old_data,
            # used to read old GMFs
            dd = collections.defaultdict(dict)
            for (name_, literal_) in array:
                name = python3compat.decode(name_)
                literal = python3compat.decode(literal_)
                if '.' in name:
                    k1, k2 = name.split('.', 1)
                    dd[k1][k2] = ast.literal_eval(literal)
                else:
                    dd[name] = ast.literal_eval(literal)
            vars(self).update(dd)
        else:  # for version >= 3.12
            vars(self).update(json.loads(python3compat.decode(array)))
        Idist = calc.filters.IntegrationDistance
        if hasattr(self, 'maximum_distance') and not isinstance(
                self.maximum_distance, Idist):
            self.maximum_distance = Idist(**self.maximum_distance)

def _rel_fnames(obj, P):
    # strip the first P characters and convert to relative paths
    if isinstance(obj, str):
        return obj[P:]
    elif isinstance(obj, list):
        return '\n '.join(s[P:] for s in obj)
    else:  # assume dict
        dic = {k: v[P:] for k, v in obj.items()}
        return str(dic)

def to_ini(key, val):
    """
    Converts key, val into .ini format
    """
    if key == 'inputs':
        fnames = []
        for v in val.values():
            if isinstance(v, str):
                fnames.append(v)
            elif isinstance(v, list):
                fnames.extend(v)
            elif isinstance(v, dict):
                fnames.extend(v.values())
        del val['job_ini']
        P = len(os.path.commonprefix(fnames))
        return '\n'.join(f'{k}_file = {_rel_fnames(v, P)}'
                         for k, v in val.items() if not k.startswith('_'))
    elif key == 'sites':
        sites = ', '.join(f'{lon} {lat}' for lon, lat, dep in val)
        return f"sites = {sites}"
    elif key == 'region':
        coords = val[9:-2].split(',')  # strip POLYGON((...))
        return f'{key} = {", ".join(c for c in coords[:-1])}'
    elif key == 'hazard_imtls':
        return f"intensity_measure_types_and_levels = {val}"
    elif key in ('reqv_ignore_sources', 'poes', 'quantiles',
                 'disagg_outputs', 'source_id', 'source_nodes',
                 'soil_intensities'):
        return f"{key} = {' '.join(map(str, val))}"
    else:
        if val is None:
            val = ''
        return f'{key} = {val}'

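# Minimal usage sketch (hypothetical helper, not part of the engine API):
# shows how the module-level to_ini() above serializes a few kinds of
# parameters; the parameter values are made up for illustration.
def _to_ini_example():  # pragma: no cover
    lines = [
        to_ini('description', 'Test calculation'),  # description = Test calculation
        to_ini('poes', [0.02, 0.1]),                # poes = 0.02 0.1
        to_ini('sites', [(9.15, 45.18, 0.0)]),      # sites = 9.15 45.18
        to_ini('truncation_level', None),           # truncation_level =
    ]
    return '\n'.join(lines)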