# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2015-2023 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.

import io
import ast
import html
import os.path
import numbers
import operator
import functools
import itertools
import collections
import logging
import numpy
import pandas

from openquake.baselib.general import (
    humansize, countby, AccumDict, CallableDict, get_array, group_array,
    fast_agg)
from openquake.baselib.hdf5 import FLOAT, INT, get_shape_descr, vstr
from openquake.baselib.performance import performance_view, Monitor
from openquake.baselib.python3compat import encode, decode
from openquake.hazardlib import logictree, calc, source, geo
from openquake.hazardlib.shakemap.parsers import download_rupture_dict
from openquake.hazardlib.contexts import (
    KNOWN_DISTANCES, ContextMaker, Collapser)
from openquake.commonlib import util
from openquake.risklib import riskmodels
from openquake.risklib.scientific import (
    losses_by_period, return_periods, LOSSID, LOSSTYPE)
from openquake.baselib.writers import build_header, scientificformat
from openquake.calculators.classical import get_pmaps_gb
from openquake.calculators.getters import get_ebrupture
from openquake.calculators.extract import extract

F32 = numpy.float32
F64 = numpy.float64
U32 = numpy.uint32
U8 = numpy.uint8

# a dictionary of views datastore -> array
view = CallableDict(keyfunc=lambda s: s.split(':', 1)[0])

code2cls = source.rupture.code2cls

# ########################## utility functions ############################## #

def form(value):
    """
    Format numbers in a nice way.

    >>> form(0)
    '0'
    >>> form(0.0)
    '0.0'
    >>> form(0.0001)
    '1.000E-04'
    >>> form(1003.4)
    '1_003'
    >>> form(103.41)
    '103.4'
    >>> form(9.3)
    '9.30000'
    >>> form(-1.2)
    '-1.2'
    """
    if isinstance(value, FLOAT + INT):
        if value <= 0:
            return str(value)
        elif value < .001:
            return '%.3E' % value
        elif value < 10 and isinstance(value, FLOAT):
            return '%.5f' % value
        elif value > 1000:
            return '{:_d}'.format(int(round(value)))
        elif numpy.isnan(value):
            return 'NaN'
        else:  # in the range 10-1000
            return str(round(value, 1))
    elif isinstance(value, bytes):
        return decode(value)
    elif isinstance(value, str):
        return value
    elif isinstance(value, numpy.object_):
        return str(value)
    elif hasattr(value, '__len__') and len(value) > 1:
        return ' '.join(map(form, value))
    return str(value)

def dt(names):
    """
    :param names: list or a string with space-separated names
    :returns: a numpy structured dtype
    """
    if isinstance(names, str):
        names = names.split()
    return numpy.dtype([(name, object) for name in names])

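# Illustrative usage (a sketch, not part of the original module): ``dt``
# builds an all-object structured dtype, which is how most views in this
# module create the arrays they return for display, e.g.
#
#   >>> dt('mag num_events')
#   dtype([('mag', 'O'), ('num_events', 'O')])
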
class HtmlTable(object):
    """
    Convert a sequence header+body into a HTML table.
    """
    css = """\
    tr.evenRow { background-color: lightgreen }
    tr.oddRow { }
    th { background-color: lightblue }
    """
    maxrows = 5000
    border = "1"
    summary = ""

    def __init__(self, header_plus_body, name='noname',
                 empty_table='Empty table'):
        header, body = header_plus_body[0], header_plus_body[1:]
        self.name = name
        self.empty_table = empty_table
        rows = []  # rows is a finite sequence of tuples
        for i, row in enumerate(body):
            if i == self.maxrows:
                rows.append(
                    ["Table truncated because too big: more than %s rows" % i])
                break
            rows.append(row)
        self.rows = rows
        self.header = tuple(header)  # horizontal header

def text_table(data, header=None, fmt=None, ext='rst'):
    """
    Build a .rst (or .org) table from a matrix or a DataFrame

    >>> tbl = [['a', 1], ['b', 2]]
    >>> print(text_table(tbl, header=['Name', 'Value']))
    +------+-------+
    | Name | Value |
    +------+-------+
    | a    | 1     |
    +------+-------+
    | b    | 2     |
    +------+-------+
    """
    assert ext in 'csv rst org html', ext
    if isinstance(data, pandas.DataFrame):
        if data.index.name:
            data = data.reset_index()
        header = header or list(data.columns)
        data = zip(*[data[col].to_numpy() for col in data.columns])
    if header is None and hasattr(data, '_fields'):
        header = data._fields
    try:
        # see if data is a composite numpy array
        data.dtype.fields
    except AttributeError:
        # not a composite array
        header = header or ()
    else:
        if not header:
            header = [col.split(':')[0] for col in build_header(data.dtype)]
    if header:
        col_sizes = [len(col) for col in header]
    else:
        col_sizes = [len(str(col)) for col in data[0]]
    body = []
    fmt = functools.partial(scientificformat, fmt=fmt) if fmt else form
    for row in data:
        tup = tuple(fmt(c) for c in row)
        for (i, col) in enumerate(tup):
            col_sizes[i] = max(col_sizes[i], len(col))
        if len(tup) != len(col_sizes):
            raise ValueError(
                'The header has %d fields but the row %d fields!'
                % (len(col_sizes), len(tup)))
        body.append(tup)
    if ext == 'html':
        return HtmlTable([header] + body).render()

    wrap = '+-%s-+' if ext == 'rst' else '|-%s-|'
    sepline = wrap % '-+-'.join('-' * size for size in col_sizes)
    sep = ',' if ext == 'csv' else ' | '
    templ = sep.join('%-{}s'.format(size) for size in col_sizes)
    if ext != 'csv':
        templ = '| %s |' % templ
    if header and ext == 'rst':
        lines = [sepline, templ % tuple(header), sepline]
    elif header and ext == 'org':
        lines = [templ % tuple(header), sepline]
    elif header and ext == 'csv':
        lines = [','.join(header)]
    else:
        lines = [sepline]
    for row in body:
        lines.append(templ % row)
    if ext == 'rst':
        lines.append(sepline)
    return '\n'.join(lines)

@view.add('worst_sources')
def view_worst_sources(token, dstore):
    """
    Returns the sources with worst weights
    """
    if ':' in token:
        step = int(token.split(':')[1])
    else:
        step = 1
    data = dstore.read_df('source_data', 'src_id')
    del data['impact']
    ser = data.groupby('taskno').ctimes.sum().sort_values().tail(1)
    [[taskno, maxtime]] = ser.to_dict().items()
    data = data[data.taskno == taskno]
    print('Sources in the slowest task (%d seconds, weight=%d, taskno=%d)'
          % (maxtime, data['weight'].sum(), taskno))
    data['slow_rate'] = data.ctimes / data.weight
    del data['taskno']
    df = data.sort_values('ctimes', ascending=False)
    return df[slice(None, None, step)]

@view.add('slow_sources')
def view_slow_sources(token, dstore, maxrows=20):
    """
    Returns the slowest sources
    """
    info = dstore['source_info']['source_id', 'code', 'calc_time',
                                 'num_sites', 'num_ruptures']
    info.sort(order='calc_time')
    data = numpy.zeros(len(info), dt(info.dtype.names))
    for name in info.dtype.names:
        data[name] = info[name]
    return data[::-1][:maxrows]

@view.add('rup_info')
def view_rup_info(token, dstore, maxrows=25):
    """
    Show the slowest ruptures
    """
    if not code2cls:
        code2cls.update(source.rupture.BaseRupture.init())
    fields = ['code', 'n_occ', 'nsites', 'mag']
    rups = dstore.read_df('ruptures', 'id')[fields]
    info = dstore.read_df('gmf_data/rup_info', 'rup_id')
    df = rups.join(info).sort_values('time', ascending=False)
    df['surface'] = [code2cls[code][1].__name__ for code in df.code]
    del df['task_no']
    del df['code']
    return df[:maxrows]

@view.add('contents')
def view_contents(token, dstore):
    """
    Returns the size of the contents of the datastore and its total size
    """
    tot = (dstore.filename, humansize(os.path.getsize(dstore.filename)))
    data = sorted((dstore.getsize(key), key) for key in dstore)
    rows = [(key, humansize(nbytes)) for nbytes, key in data] + [tot]
    return numpy.array(rows, dt('dataset size'))

@view.add('full_lt')
def view_full_lt(token, dstore):
    full_lt = dstore['full_lt'].init()
    num_paths = full_lt.get_num_potential_paths()
    if not full_lt.num_samples and num_paths > 15000:
        return '<%d realizations>' % num_paths
    full_lt.get_trt_rlzs(dstore['trt_smrs'][:])  # set _rlzs_by
    header = ['trt_smr', 'gsim', 'rlzs']
    rows = []
    for trt_smr, rbg in full_lt._rlzs_by.items():
        for gsim, rlzs in rbg.items():
            rows.append((trt_smr, repr(str(gsim)), short_repr(rlzs)))
    return numpy.array(rows, dt(header))

@view.add('job_info')
def view_job_info(token, dstore):
    """
    Determine the amount of data transferred from the controller node
    to the workers and back in a classical calculation.
    """
    data = []
    task_info = dstore['task_info'][()]
    task_sent = ast.literal_eval(decode(dstore['task_sent'][()]))
    for task, dic in task_sent.items():
        sent = sorted(dic.items(), key=operator.itemgetter(1), reverse=True)
        sent = ['%s=%s' % (k, humansize(v)) for k, v in sent[:3]]
        recv = get_array(task_info, taskname=encode(task))['received']
        data.append((task, ' '.join(sent), humansize(recv.sum()),
                     humansize(recv.mean())))
    return numpy.array(data, dt('task sent received mean_recv'))

@view.add('avglosses_data_transfer')
def avglosses_data_transfer(token, dstore):
    """
    Determine the amount of average losses transferred from the workers to
    the controller node in a risk calculation.
    """
    oq = dstore['oqparam']
    N = len(dstore['assetcol'])
    R = dstore['full_lt'].get_num_paths()
    L = len(dstore.get_attr('crm', 'loss_types'))
    ct = oq.concurrent_tasks
    size_bytes = N * R * L * 8 * ct  # 8 byte floats
    return (
        '%d asset(s) x %d realization(s) x %d loss type(s) losses x '
        '8 bytes x %d tasks = %s' % (N, R, L, ct, humansize(size_bytes)))

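# Illustrative sketch (hypothetical numbers): with 10_000 assets, 100
# realizations, 2 loss types and concurrent_tasks=512, the estimate above is
# 10_000 * 100 * 2 * 8 * 512 bytes, i.e. roughly 8.2e9 bytes of transferred
# average losses.
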
# for scenario_risk and classical_risk
@view.add('totlosses')
def view_totlosses(token, dstore):
    """
    This is a debugging view. You can use it to check that the total losses,
    i.e. the losses obtained by summing the average losses on all assets,
    are indeed equal to the aggregate losses. This is a sanity check for the
    correctness of the implementation.
    """
    oq = dstore['oqparam']
    tot_losses = 0
    for ltype in oq.loss_types:
        name = 'avg_losses-stats/' + ltype
        try:
            tot = dstore[name][()].sum(axis=0)
        except KeyError:
            name = 'avg_losses-rlzs/' + ltype
            tot = dstore[name][()].sum(axis=0)
        tot_losses += tot
    return text_table(tot_losses.view(oq.loss_dt(F32)), fmt='%.6E')

def alt_to_many_columns(alt, loss_types):
    # convert a risk_by_event dataframe from the format
    # (event_id, agg_id, loss_id, loss) =>
    # (event_id, agg_id, structural, nonstructural, ...)
    dic = dict(event_id=[])
    for ln in loss_types:
        dic[ln] = []
    for (eid, kid), df in alt.groupby(['event_id', 'agg_id']):
        dic['event_id'].append(eid)
        for ln in loss_types:
            arr = df[df.loss_id == LOSSID[ln]].loss.to_numpy()
            loss = 0 if len(arr) == 0 else arr[0]  # arr has size 0 or 1
            dic[ln].append(loss)
    return pandas.DataFrame(dic)

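# Illustrative sketch (hypothetical data; assumes 'structural' and
# 'nonstructural' are keys of LOSSID): a long-format frame like
#
#   alt = pandas.DataFrame(dict(event_id=[0, 0], agg_id=[0, 0],
#                               loss_id=[LOSSID['structural'],
#                                        LOSSID['nonstructural']],
#                               loss=[10., 3.]))
#
# is pivoted by alt_to_many_columns(alt, ['structural', 'nonstructural'])
# into a single row with columns (event_id, structural, nonstructural)
# containing (0, 10.0, 3.0).
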
@view.add('portfolio_losses')
def view_portfolio_losses(token, dstore):
    """
    The losses for the full portfolio, for each realization and loss type,
    extracted from the event loss table.
    """
    oq = dstore['oqparam']
    loss_dt = oq.loss_dt()
    data = _portfolio_loss(dstore).view(loss_dt)[:, 0]  # shape R
    rlzids = [str(r) for r in range(len(data))]
    array = util.compose_arrays(numpy.array(rlzids), data, 'rlz_id')
    # this is very sensitive to rounding errors, so I am using a low precision
    return text_table(array, fmt='%.5E')

@view.add('portfolio_loss')
def view_portfolio_loss(token, dstore):
    """
    The mean portfolio loss for each loss type,
    extracted from the event loss table.
    """
    oq = dstore['oqparam']
    K = dstore['risk_by_event'].attrs.get('K', 0)
    alt_df = dstore.read_df('risk_by_event', 'agg_id', dict(agg_id=K))
    weights = dstore['weights'][:]
    rlzs = dstore['events']['rlz_id']
    E = len(rlzs)
    R = len(weights)
    ws = weights[rlzs]
    avgs = []
    attrs = dstore['gmf_data'].attrs
    itime = attrs['investigation_time']
    etime = attrs['effective_time']
    if itime:
        freq = (oq.risk_investigation_time or itime) * E / etime
    else:
        freq = 1 / R
    for ln in oq.loss_types:
        df = alt_df[alt_df.loss_id == LOSSID[ln]]
        eids = df.pop('event_id').to_numpy()
        if (eids >= E).any():  # reduced events
            assert len(set(ws)) == 1, 'Weights must be all equal'
            weights = ws[:len(eids)]
        else:
            weights = ws[eids]
        avgs.append(weights @ df.loss.to_numpy() / ws.sum() * freq)
    return text_table([['avg'] + avgs], ['loss'] + oq.loss_types)

@view.add('portfolio_dmgdist')
def portfolio_dmgdist(token, dstore):
    """
    The portfolio damages extracted from the first realization of damages-rlzs
    """
    oq = dstore['oqparam']
    dstates = ['no_damage'] + oq.limit_states
    D = len(dstates)
    arr = dstore['damages-rlzs'][:, 0, :, :D].sum(axis=0)  # shape (L, D)
    tbl = numpy.zeros(len(arr), dt(['loss_type', 'total'] + dstates))
    tbl['loss_type'] = oq.loss_types
    tbl['total'] = arr.sum(axis=1)
    for dsi, ds in enumerate(dstates):
        tbl[ds] = arr[:, dsi]
    return tbl

@view.add('portfolio_damage')
def view_portfolio_damage(token, dstore):
    """
    The mean full portfolio damage for each loss type,
    extracted from the average damages
    """
    if 'aggcurves' in dstore:  # event_based_damage
        K = dstore.get_attr('risk_by_event', 'K')
        df = dstore.read_df('aggcurves', sel=dict(agg_id=K, return_period=0))
        lnames = numpy.array(dstore['oqparam'].loss_types)
        df['loss_type'] = lnames[df.loss_id.to_numpy()]
        del df['loss_id']
        del df['agg_id']
        del df['return_period']
        return df.set_index('loss_type')
    # dimensions assets, stat, loss_types, dmg_state
    if 'damages-stats' in dstore:
        attrs = get_shape_descr(dstore['damages-stats'].attrs['json'])
        arr = dstore.sel('damages-stats', stat='mean').sum(axis=(0, 1))
    else:
        attrs = get_shape_descr(dstore['damages-rlzs'].attrs['json'])
        arr = dstore.sel('damages-rlzs', rlz=0).sum(axis=(0, 1))
    rows = [(lt,) + tuple(row) for lt, row in zip(attrs['loss_type'], arr)]
    return numpy.array(rows, dt(['loss_type'] + list(attrs['dmg_state'])))

def sum_table(records):
    """
    Used to compute summaries. The records are assumed to have numeric
    fields, except the first field which is ignored, since it typically
    contains a label. Here is an example:

    >>> sum_table([('a', 1), ('b', 2)])
    ['total', 3]
    """
    size = len(records[0])
    result = [None] * size
    firstrec = records[0]
    for i in range(size):
        if isinstance(firstrec[i], (numbers.Number, numpy.ndarray)):
            result[i] = sum(rec[i] for rec in records)
        else:
            result[i] = 'total'
    return result

@view.add('exposure_info')
def view_exposure_info(token, dstore):
    """
    Display info about the exposure model
    """
    assetcol = dstore['assetcol/array'][:]
    taxonomies = sorted(set(dstore['assetcol'].taxonomies))
    data = [('#assets', len(assetcol)),
            ('#taxonomies', len(taxonomies))]
    return text_table(data) + '\n\n' + view_assets_by_site(token, dstore)

@view.add('ruptures_events')
def view_ruptures_events(token, dstore):
    num_ruptures = len(dstore['ruptures'])
    num_events = len(dstore['events'])
    events_by_rlz = countby(dstore['events'][()], 'rlz_id')
    mult = round(num_events / num_ruptures, 3)
    lst = [('Total number of ruptures', num_ruptures),
           ('Total number of events', num_events),
           ('Rupture multiplicity', mult),
           ('Events by rlz', events_by_rlz.values())]
    return text_table(lst)

@view.add('fullreport')
def view_fullreport(token, dstore):
    """
    Display an .rst report about the computation
    """
    # avoid circular imports
    from openquake.calculators.reportwriter import ReportWriter
    return ReportWriter(dstore).make_report(show_inputs=False)

@view.add('performance')
def view_performance(token, dstore):
    """
    Display performance information
    """
    return performance_view(dstore)

def stats(name, array, *extras):
    """
    Returns statistics from an array of numbers.

    :param name: a descriptive string
    :returns: (name, counts, mean, rel_std, min, max) + extras
    """
    avg = numpy.mean(array)
    std = 'nan' if len(array) == 1 else '%02.0f%%' % (
        numpy.std(array) / avg * 100)
    max_ = numpy.max(array)
    return (name, len(array), avg, std, numpy.min(array), max_) + extras

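# Illustrative sketch: stats('mag', numpy.array([5., 6., 7.])) returns
# ('mag', 3, 6.0, '14%', 5.0, 7.0), i.e. name, counts, mean, relative
# stddev (as a percentage of the mean), min and max.
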
@view.add('num_units')
def view_num_units(token, dstore):
    """
    Display the number of units by taxonomy
    """
    taxo = dstore['assetcol/tagcol/taxonomy'][()]
    counts = collections.Counter()
    for asset in dstore['assetcol']:
        counts[taxo[asset['taxonomy']]] += asset['value-number']
    data = sorted(counts.items())
    data.append(('*ALL*', sum(d[1] for d in data)))
    return numpy.array(data, dt('taxonomy num_units'))

@view.add('assets_by_site')
def view_assets_by_site(token, dstore):
    """
    Display statistical information about the distribution of the assets
    """
    taxonomies = dstore['assetcol/tagcol/taxonomy'][()]
    assets_by = group_array(dstore['assetcol'].array, 'site_id')
    data = ['taxonomy num_sites mean stddev min max num_assets'.split()]
    num_assets = AccumDict()
    for assets in assets_by.values():
        num_assets += {k: [len(v)] for k, v in group_array(
            assets, 'taxonomy').items()}
    for taxo in sorted(num_assets):
        val = numpy.array(num_assets[taxo])
        data.append(stats(taxonomies[taxo], val, val.sum()))
    if len(num_assets) > 1:  # more than one taxonomy, add a summary
        n_assets = numpy.array([len(assets) for assets in assets_by.values()])
        data.append(stats('*ALL*', n_assets, n_assets.sum()))
    return text_table(data)

@view.add('required_params_per_trt')
def view_required_params_per_trt(token, dstore):
    """
    Display the parameters needed by each tectonic region type
    """
    full_lt = dstore['full_lt']
    tbl = []
    for trt in full_lt.trts:
        gsims = full_lt.gsim_lt.values[trt]
        # adding fake mags to the ContextMaker, needed for table-based GMPEs
        cmaker = ContextMaker(trt, gsims, {'imtls': {}, 'mags': ['7.00']})
        req = set()
        for gsim in cmaker.gsims:
            req.update(gsim.requires())
        req_params = sorted(req - {'mag'})
        gsim_str = ' '.join(map(repr, gsims)).replace('\n', '\\n')
        if len(gsim_str) > 160:
            gsim_str = ', '.join(repr(gsim).split('\n')[0] for gsim in gsims)
        tbl.append((trt, gsim_str, req_params))
    return text_table(tbl, header='trt gsims req_params'.split(),
                      fmt=scientificformat)

@view.add('task_info')
def view_task_info(token, dstore):
    """
    Display statistical information about the tasks performance.
    It is possible to get full information about a specific task
    with a command like this one, for a classical calculation::

      $ oq show task_info:classical
    """
    task_info = dstore['task_info']
    task_info.refresh()
    args = token.split(':')[1:]  # called as task_info:task_name
    if args:
        [task] = args
        array = get_array(task_info[()], taskname=task.encode('utf8'))
        rduration = array['duration'] / array['weight']
        data = util.compose_arrays(rduration, array, 'rduration')
        data.sort(order='duration')
        return data

    data = []
    for task, arr in group_array(task_info[()], 'taskname').items():
        val = arr['duration']
        if len(val):
            data.append(stats(task, val, val.max() / val.mean()))
    if not data:
        return 'Not available'
    return numpy.array(
        data, dt('operation-duration counts mean stddev min max slowfac'))

@view.add('task_durations')
def view_task_durations(token, dstore):
    """
    Display the raw task durations. Here is an example of usage::

      $ oq show task_durations
    """
    df = dstore.read_df('source_data')
    out = []
    for taskno, rows in df.groupby('taskno'):
        srcids = reduce_srcids(rows.src_id.to_numpy())
        out.append((taskno, rows.ctimes.sum(), rows.weight.sum(), srcids))
    arr = numpy.array(out, dt('taskno duration weight srcids'))
    arr.sort(order='duration')
    return arr

@view.add('task')
def view_task_hazard(token, dstore):
    """
    Display info about a given task. Here are a few examples of usage::

     $ oq show task:classical:0  # the fastest task
     $ oq show task:classical:-1  # the slowest task
    """
    _, name, index = token.split(':')
    if 'source_data' not in dstore:
        return 'Missing source_data'
    data = get_array(dstore['task_info'][()], taskname=encode(name))
    if len(data) == 0:
        raise RuntimeError('No task_info for %s' % name)
    data.sort(order='duration')
    rec = data[int(index)]
    taskno = rec['task_no']
    sdata = dstore.read_df('source_data', 'taskno').loc[taskno]
    num_ruptures = sdata.nrupts.sum()
    eff_sites = sdata.nsites.sum()
    msg = ('taskno={:_d}, fragments={:_d}, num_ruptures={:_d}, '
           'eff_sites={:_d}, weight={:.1f}, duration={:.1f}s').format(
               taskno, len(sdata), num_ruptures, eff_sites,
               rec['weight'], rec['duration'])
    return msg

@view.add('source_data')
def view_source_data(token, dstore):
    """
    Display info about a given task. Here is an example::

       $ oq show source_data:42
    """
    if ':' not in token:
        return dstore.read_df(token, 'src_id')
    _, taskno = token.split(':')
    taskno = int(taskno)
    if 'source_data' not in dstore:
        return 'Missing source_data'
    df = dstore.read_df('source_data', 'src_id', sel={'taskno': taskno})
    del df['taskno']
    df['slowrate'] = df['ctimes'] / df['weight']
    return df.sort_values('ctimes')

@view.add('task_ebrisk')
def view_task_ebrisk(token, dstore):
    """
    Display info about ebrisk tasks:

    $ oq show task_ebrisk:-1  # the slowest task
    """
    idx = int(token.split(':')[1])
    task_info = get_array(dstore['task_info'][()], taskname=b'ebrisk')
    task_info.sort(order='duration')
    info = task_info[idx]
    times = get_array(dstore['gmf_info'][()], task_no=info['task_no'])
    extra = times[['nsites', 'gmfbytes', 'dt']]
    ds = dstore.parent if dstore.parent else dstore
    rups = ds['ruptures']['id', 'code', 'n_occ', 'mag'][times['rup_id']]
    codeset = set('code_%d' % code for code in numpy.unique(rups['code']))
    tbl = text_table(util.compose_arrays(rups, extra))
    codes = ['%s: %s' % it for it in ds.getitem('ruptures').attrs.items()
             if it[0] in codeset]
    msg = '%s\n%s\nHazard time for task %d: %d of %d s, ' % (
        tbl, '\n'.join(codes), info['task_no'], extra['dt'].sum(),
        info['duration'])
    msg += 'gmfbytes=%s, w=%d' % (
        humansize(extra['gmfbytes'].sum()),
        (rups['n_occ'] * extra['nsites']).sum())
    return msg

@view.add('global_hazard')
def view_global_hazard(token, dstore):
    """
    Display the global hazard for the calculation. This is used for
    debugging purposes when comparing the results of two calculations.
    """
    imtls = dstore['oqparam'].imtls
    arr = dstore.sel('hcurves-stats', stat='mean')  # shape N, S, M, L
    res = tuple(arr.mean(axis=(0, 1, 3)))  # length M
    return numpy.array([res], dt(imtls))

@view.add('global_hmaps')
def view_global_hmaps(token, dstore):
    """
    Display the global hazard maps for the calculation. They are
    used for debugging purposes when comparing the results of two
    calculations. They are the mean over the sites of the mean hazard maps.
    """
    oq = dstore['oqparam']
    dt = numpy.dtype([('%s-%s' % (imt, poe), F32)
                      for imt in oq.imtls for poe in oq.poes])
    hmaps = dstore.sel('hmaps-stats', stat='mean')
    means = hmaps.mean(axis=(0, 1))  # shape M, P
    return numpy.array([tuple(means.flatten())], dt)

@view.add('global_gmfs')
def view_global_gmfs(token, dstore):
    """
    Display GMFs on the first IMT averaged on everything
    for debugging purposes
    """
    imtls = dstore['oqparam'].imtls
    row = [dstore[f'gmf_data/gmv_{m}'][:].mean(axis=0)
           for m in range(len(imtls))]
    return text_table([row], header=imtls)

@view.add('gmf')
def view_gmf(token, dstore):
    """
    Display a mean gmf for debugging purposes
    """
    df = dstore.read_df('gmf_data', 'sid')
    gmf = df.groupby(df.index).mean()
    return str(gmf)

def binning_error(values, eids, nbins=10):
    """
    :param values: E values
    :param eids: E integer event indices
    :returns: std/mean for the sums of the values

    Group the values in nbins depending on the eids and returns
    the variability of the sums relative to the mean.
    """
    df = pandas.DataFrame({'val': values}, eids)
    res = df.groupby(eids % nbins).val.sum()
    return res.std() / res.mean()

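# Illustrative sketch (hypothetical values): with six events split in two
# bins, binning_error(numpy.array([1., 1., 1., 1., 1., 7.]),
# numpy.arange(6), nbins=2) sums the values by eid % 2 into (3., 9.) and
# returns std/mean = 4.24.../6 ~ 0.71, signalling a strong seed dependency.
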
@view.add('extreme_gmvs')
def view_extreme_gmvs(token, dstore):
    """
    Display table of extreme GMVs with fields (eid, gmv_0, sid, rlz, rup)
    """
    if ':' in token:
        maxgmv = float(token.split(':')[1])
    else:
        maxgmv = 5  # PGA=5g is the default value defining extreme GMVs
    imt0 = list(dstore['oqparam'].imtls)[0]
    eids = dstore['gmf_data/eid'][:]
    gmvs = dstore['gmf_data/gmv_0'][:]
    sids = dstore['gmf_data/sid'][:]
    msg = ''
    err = binning_error(gmvs, eids)
    if err > .05:
        msg += ('Your results are expected to have a large dependency '
                'from the rupture seed: %d%%' % (err * 100))
    if imt0 == 'PGA':
        rups = dstore['ruptures'][:]
        rupdict = dict(zip(rups['id'], rups))
        gmpe = GmpeExtractor(dstore)
        df = pandas.DataFrame({'gmv_0': gmvs, 'sid': sids}, eids)
        extreme_df = df[df.gmv_0 > maxgmv].rename(columns={'gmv_0': imt0})
        if len(extreme_df) == 0:
            return 'No PGAs over %s g found' % maxgmv
        ev = dstore['events'][()][extreme_df.index]
        # extreme_df['rlz'] = ev['rlz_id']
        extreme_df['rup'] = ev['rup_id']
        extreme_df['mag'] = [rupdict[rupid]['mag'] for rupid in ev['rup_id']]
        hypos = numpy.array([rupdict[rupid]['hypo']
                             for rupid in ev['rup_id']])
        # extreme_df['lon'] = numpy.round(hypos[:, 0])
        # extreme_df['lat'] = numpy.round(hypos[:, 1])
        extreme_df['dep'] = numpy.round(hypos[:, 2])
        trt_smrs = [rupdict[rupid]['trt_smr'] for rupid in ev['rup_id']]
        extreme_df['gmpe'] = gmpe.extract(trt_smrs, ev['rlz_id'])
        exdf = extreme_df.sort_values(imt0).groupby('sid').head(1)
        if len(exdf):
            msg += ('\nThere are extreme GMVs, run `oq show extreme_gmvs:%s`'
                    ' to see them' % maxgmv)
            if ':' in token:
                msg = str(exdf.set_index('rup'))
        return msg
    return msg + '\nCould not extract extreme GMVs for ' + imt0

@view.add('mean_rates')
def view_mean_rates(token, dstore):
    """
    Display mean hazard rates for the first site
    """
    oq = dstore['oqparam']
    assert oq.use_rates
    poes = dstore.sel('hcurves-stats', site_id=0, stat='mean')[0, 0]  # NRML1
    rates = numpy.zeros(poes.shape[1], dt(oq.imtls))
    for m, imt in enumerate(oq.imtls):
        rates[imt] = calc.disagg.to_rates(poes[m])
    return rates

@view.add('mean_disagg')
def view_mean_disagg(token, dstore):
    """
    Display mean quantities for the disaggregation. Useful for checking
    differences between two calculations.
    """
    N, M, P = dstore['hmap3'].shape
    tbl = []
    kd = {key: dset[:] for key, dset in sorted(dstore['disagg-rlzs'].items())}
    oq = dstore['oqparam']
    for s in range(N):
        for m, imt in enumerate(oq.imtls):
            for p in range(P):
                row = ['%s-sid-%d-poe-%s' % (imt, s, p)]
                for k, d in kd.items():
                    row.append(d[s, ..., m, p, :].mean())
                tbl.append(tuple(row))
    return numpy.array(sorted(tbl), dt(['key'] + list(kd)))

@view.add('disagg')
def view_disagg(token, dstore):
    """
    Example: $ oq show disagg:Mag
    Returns a table poe, imt, mag, contribution for the first site
    """
    kind = token.split(':')[1]
    assert kind in ('Mag', 'Dist', 'TRT'), kind
    site_id = 0
    if 'disagg-stats' in dstore:
        data = dstore['disagg-stats/' + kind][site_id, ..., 0]  # (:, M, P)
    else:
        data = dstore['disagg-rlzs/' + kind][site_id, ..., 0]  # (:, M, P)
    Ma, M, P = data.shape
    oq = dstore['oqparam']
    imts = list(oq.imtls)
    dtlist = [('poe', float), ('imt', (numpy.bytes_, 10)),
              (kind.lower() + 'bin', int), ('prob', float)]
    lst = []
    for p, m, ma in itertools.product(range(P), range(M), range(Ma)):
        lst.append((oq.poes[p], imts[m], ma, data[ma, m, p]))
    return numpy.array(lst, dtlist)

@view.add('bad_ruptures')
def view_bad_ruptures(token, dstore):
    """
    Display the ruptures degenerating to a point
    """
    data = dstore['ruptures']['id', 'code', 'mag',
                              'minlon', 'maxlon', 'minlat', 'maxlat']
    bad = data[numpy.logical_and(data['minlon'] == data['maxlon'],
                                 data['minlat'] == data['maxlat'])]
    return bad

@view.add('gmvs_to_hazard')
def view_gmvs_to_hazard(token, dstore):
    """
    Show the number of GMFs over the highest IML
    """
    args = token.split(':')[1:]  # called as view_gmvs_to_hazard:sid
    if not args:
        sid = 0
    elif len(args) == 1:  # only sid specified
        sid = int(args[0])
    assert sid in dstore['sitecol'].sids
    oq = dstore['oqparam']
    num_ses = oq.ses_per_logic_tree_path
    data = dstore.read_df('gmf_data', 'sid').loc[sid]
    tbl = []
    for imti, (imt, imls) in enumerate(oq.imtls.items()):
        gmv = data['gmv_%d' % imti].to_numpy()
        for iml in imls:
            # same algorithm as in _gmvs_to_haz_curve
            exceeding = numpy.sum(gmv >= iml)
            poe = 1 - numpy.exp(-exceeding / num_ses)
            tbl.append((sid, imt, iml, exceeding, poe))
    return numpy.array(tbl, dt('sid imt iml num_exceeding poe'))

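# Illustrative sketch (hypothetical numbers): with ses_per_logic_tree_path
# equal to 100 and 5 ground motion values exceeding a given IML, the
# probability of exceedance computed above is 1 - exp(-5/100) ~ 0.0488.
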
@view.add('gmvs')
def view_gmvs(token, dstore):
    """
    Show the GMVs on a given site ID
    """
    sid = int(token.split(':')[1])  # called as view_gmvs:sid
    assert sid in dstore['sitecol'].sids
    data = dstore.read_df('gmf_data', 'sid')
    return data.loc[sid]

@view.add('events_by_mag')
def view_events_by_mag(token, dstore):
    """
    Show how many events there are for each magnitude
    """
    rups = dstore['ruptures'][()]
    num_evs = fast_agg(dstore['events']['rup_id'])
    counts = {}
    for mag, grp in group_array(rups, 'mag').items():
        counts[mag] = sum(num_evs[rup_id] for rup_id in grp['id'])
    return numpy.array(list(counts.items()), dt('mag num_events'))

@view.add('ebrups_by_mag')
def view_ebrups_by_mag(token, dstore):
    """
    Show how many event based ruptures there are for each magnitude
    """
    mags = dstore['ruptures']['mag']
    uniq, counts = numpy.unique(mags, return_counts=True)
    return text_table(zip(uniq, counts), ['mag', 'num_ruptures'])

@view.add('maximum_intensity')
def view_maximum_intensity(token, dstore):
    """
    Show intensities at minimum and maximum distance for the highest magnitude
    """
    effect = extract(dstore, 'effect')
    data = zip(dstore['full_lt'].trts, effect[-1, -1], effect[-1, 0])
    return text_table(data, ['trt', 'intensity1', 'intensity2'])

@view.add('extreme_sites')
def view_extreme(token, dstore):
    """
    Show sites where the mean hazard map reaches maximum values
    """
    mean = dstore.sel('hmaps-stats', stat='mean')[:, 0, 0, -1]  # shape N1MP
    site_ids, = numpy.where(mean == mean.max())
    return dstore['sitecol'][site_ids]

@view.add('zero_losses')
def view_zero_losses(token, dstore):
    """
    Sanity check on avg_losses and avg_gmf
    """
    R = len(dstore['weights'])
    oq = dstore['oqparam']
    avg_gmf = dstore['avg_gmf'][0]
    asset_df = dstore.read_df('assetcol/array', 'site_id')
    for col in asset_df.columns:
        if not col.startswith('value-'):
            del asset_df[col]
    values_df = asset_df.groupby(asset_df.index).sum()
    avglosses = dstore['avg_losses-rlzs'][:].sum(axis=1) / R  # shape (A, L)
    dic = dict(site_id=dstore['assetcol']['site_id'])
    for lti, lname in enumerate(oq.loss_types):
        dic[lname] = avglosses[:, lti]
    losses_df = pandas.DataFrame(dic).groupby('site_id').sum()
    sids = losses_df.index.to_numpy()
    avg_gmf = avg_gmf[sids]
    nonzero_gmf = (avg_gmf > oq.min_iml).any(axis=1)
    nonzero_losses = (losses_df > 0).to_numpy().any(axis=1)
    bad, = numpy.where(nonzero_gmf != nonzero_losses)
    # this happens in scenario_risk/case_shakemap and case_3
    msg = 'Site #%d is suspicious:\navg_gmf=%s\navg_loss=%s\nvalues=%s'
    for idx in bad:
        sid = sids[idx]
        logging.warning(msg, sid, dict(zip(oq.all_imts(), avg_gmf[sid])),
                        _get(losses_df, sid), _get(values_df, sid))
    return bad

def _get(df, sid):
    return df.loc[sid].to_dict()

@view.add('gsim_for_event')
def view_gsim_for_event(token, dstore):
    """
    Display the GSIM used when computing the GMF for the given event:

    $ oq show gsim_for_event:123 -1
    [BooreAtkinson2008]
    """
    eid = int(token.split(':')[1])
    full_lt = dstore['full_lt']
    rup_id, rlz_id = dstore['events'][eid][['rup_id', 'rlz_id']]
    trt_smr = dict(dstore['ruptures'][:][['id', 'trt_smr']])
    trti = trt_smr[rup_id] // 2 ** 24
    gsim = full_lt.get_realizations()[rlz_id].gsim_rlz.value[trti]
    return gsim

@view.add('calc_risk')
def view_calc_risk(token, dstore):
    """
    Compute the risk_by_event table starting from GMFs
    """
    # avoid a mysterious circular import only on macos
    from openquake.calculators import event_based_risk as ebr
    _, event_id = token.split(':')
    oq = dstore['oqparam']
    assetcol = dstore['assetcol']
    ebr.set_oqparam(oq, assetcol, dstore)
    crmodel = riskmodels.CompositeRiskModel.read(dstore, oq)
    gmf_df = dstore.read_df('gmf_data')
    gmf_df = gmf_df[gmf_df.eid == int(event_id)]
    ws = dstore['weights']
    rlz_id = dstore['events']['rlz_id']
    aggids, _ = assetcol.build_aggids(oq.aggregate_by, oq.max_aggregations)
    agg_keys = numpy.concatenate(
        [dstore['agg_keys'][:], numpy.array([b'total'])])
    ARK = (oq.A, len(ws), oq.K)
    if oq.ignore_master_seed or oq.ignore_covs:
        rng = None
    else:
        rng = ebr.MultiEventRNG(oq.master_seed, gmf_df.eid.unique(),
                                int(oq.asset_correlation))
    mon = Monitor()
    outs = []  # ebr.gen_outputs(gmf_df, crmodel, rng, mon)
    sids = gmf_df.sid.to_numpy()
    assets = assetcol.to_dframe()
    for taxo in assets.taxonomy.unique():
        adf = assets[assets.taxonomy == taxo]
        df = gmf_df[numpy.isin(sids, adf.site_id.unique())]
        if len(df) == 0:  # common enough
            continue
        out = crmodel.get_output(adf, df, oq._sec_losses, rng)
        outs.append(out)
    avg, alt = ebr.aggreg(outs, crmodel, ARK, aggids, rlz_id, oq.ideduc, mon)
    del alt['event_id']
    del alt['variance']
    alt['type'] = LOSSTYPE[alt.loss_id]
    del alt['loss_id']
    alt['agg_keys'] = decode(agg_keys[alt.agg_id])
    del alt['agg_id']
    return alt

@view.add('event_loss_table')
def view_event_loss_table(token, dstore):
    """
    Display the top 20 losses of the event loss table for the first loss type

    $ oq show event_loss_table
    """
    K = dstore['risk_by_event'].attrs.get('K', 0)
    df = dstore.read_df('risk_by_event', 'event_id',
                        dict(agg_id=K, loss_id=0))
    df['std'] = numpy.sqrt(df.variance)
    df.sort_values('loss', ascending=False, inplace=True)
    del df['agg_id']
    del df['loss_id']
    del df['variance']
    return df[:20]

@view.add('risk_by_event')
def view_risk_by_event(token, dstore):
    """
    There are two possibilities:

    $ oq show risk_by_event:<loss_type>
    $ oq show risk_by_event:<event_id>

    In both cases displays the top 30 losses of the aggregate loss
    table as a TSV, for all events or only the given event.
    """
    _, ltype = token.split(':')
    try:
        loss_id = LOSSID[ltype]
        df = dstore.read_df('risk_by_event', sel=dict(loss_id=loss_id))
        del df['loss_id']
    except KeyError:
        event_id = int(ltype)
        df = dstore.read_df('risk_by_event', sel=dict(event_id=event_id))
        df['ltype'] = LOSSTYPE[df.loss_id.to_numpy()]
        del df['loss_id']
        del df['event_id']
    del df['variance']
    df = df[df.agg_id == df.agg_id.max()].sort_values(
        'loss', ascending=False)
    del df['agg_id']
    out = io.StringIO()
    df[:30].to_csv(out, sep='\t', index=False, float_format='%.1f',
                   lineterminator='\r\n')
    return out.getvalue()

@view.add('risk_by_rup')
def view_risk_by_rup(token, dstore):
    """
    Display the top 30 aggregate losses by rupture ID. Usage:

    $ oq show risk_by_rup
    """
    rbr = dstore.read_df('loss_by_rupture', 'rup_id')
    info = dstore.read_df('gmf_data/rup_info', 'rup_id')
    rdf = dstore.read_df('ruptures', 'id')
    df = rbr.join(rdf).join(info)[
        ['loss', 'mag', 'n_occ', 'hypo_0', 'hypo_1', 'hypo_2', 'rrup']]
    for field in df.columns:
        if field not in ('mag', 'n_occ'):
            df[field] = numpy.round(F64(df[field]), 1)
    return df[:30]

@view.add('loss_ids')
def view_loss_ids(token, dstore):
    """
    Displays the loss IDs corresponding to nonzero losses
    """
    K = dstore['risk_by_event'].attrs.get('K', 0)
    df = dstore.read_df('risk_by_event', 'event_id', dict(agg_id=K))
    loss_ids = df.loss_id.unique()
    arr = numpy.zeros(len(loss_ids), dt('loss_type loss_id'))
    for i, loss_id in enumerate(loss_ids):
        arr[i]['loss_type'] = LOSSTYPE[loss_id]
        arr[i]['loss_id'] = loss_id
    return arr

@view.add('delta_loss')
def view_delta_loss(token, dstore):
    """
    Estimate the stochastic error on the loss curve by splitting the events
    in odd and even. Example:

    $ oq show delta_loss  # default structural
    """
    if ':' in token:
        _, li = token.split(':')
        li = int(li)
    else:
        li = 2  # structural
    oq = dstore['oqparam']
    loss_type = LOSSTYPE[li]
    K = dstore['risk_by_event'].attrs.get('K', 0)
    df = dstore.read_df('risk_by_event', 'event_id',
                        dict(agg_id=K, loss_id=li))
    if len(df) == 0:  # for instance no fatalities
        return {'delta': numpy.zeros(1),
                'loss_types': view_loss_ids(token, dstore),
                'error': f"There are no relevant events for {loss_type=}"}
    if oq.calculation_mode == 'scenario_risk':
        if oq.number_of_ground_motion_fields == 1:
            return {'delta': numpy.zeros(1),
                    'error': 'number_of_ground_motion_fields=1'}
        R = len(dstore['weights'])
        df['rlz_id'] = dstore['events']['rlz_id'][df.index]
        c0, c1 = numpy.zeros(R), numpy.zeros(R)
        for r in range(R):
            loss = df.loss[df.rlz_id == r]
            c0[r] = loss[::2].mean()  # even
            c1[r] = loss[1::2].mean()  # odd
        dic = dict(even=c0, odd=c1, delta=numpy.abs(c0 - c1) / (c0 + c1))
        return pandas.DataFrame(dic)
    num_events = len(dstore['events'])
    efftime = oq.investigation_time * oq.ses_per_logic_tree_path * len(
        dstore['weights'])
    periods = return_periods(efftime, num_events)[1:-1]
    losses0 = df.loss[df.index % 2 == 0]
    losses1 = df.loss.loc[df.index % 2 == 1]
    c0 = losses_by_period(losses1, periods, len(losses1),
                          efftime / 2)['curve']
    c1 = losses_by_period(losses0, periods, len(losses0),
                          efftime / 2)['curve']
    ok = (c0 != 0) & (c1 != 0)
    c0 = c0[ok]
    c1 = c1[ok]
    res = losses_by_period(df['loss'], periods, num_events, efftime)
    losses = res['curve'][ok]
    dic = dict(loss=losses, even=c0, odd=c1,
               delta=numpy.abs(c0 - c1) / (c0 + c1))
    return pandas.DataFrame(dic, periods[ok])

@view.add('composite_source_model')
def view_composite_source_model(token, dstore):
    """
    Show the structure of the CompositeSourceModel in terms of grp_id
    """
    lst = []
    full_lt = dstore['full_lt'].init()
    for grp_id, df in dstore.read_df('source_info').groupby('grp_id'):
        lst.append((str(grp_id), full_lt.trts[df.trti.unique()[0]], len(df)))
    return numpy.array(lst, dt('grp_id trt num_sources'))

@view.add('branches')
def view_branches(token, dstore):
    """
    Show info about the branches in the logic tree
    """
    full_lt = dstore['full_lt']
    smlt = full_lt.source_model_lt
    gslt = full_lt.gsim_lt
    tbl = []
    for k, v in full_lt.source_model_lt.shortener.items():
        tbl.append((k, v, smlt.branches[k].value))
    gsims = sum(gslt.values.values(), [])
    if len(gslt.shortener) < len(gsims):  # possible for engine < 3.13
        raise ValueError(
            'There were duplicated branch IDs in the gsim logic tree %s'
            % gslt.filename)
    for g, (k, v) in enumerate(gslt.shortener.items()):
        tbl.append((k, v, str(gsims[g]).replace('\n', r'\n')))
    return numpy.array(tbl, dt('branch_id abbrev uvalue'))

@view.add('rlz')
def view_rlz(token, dstore):
    """
    Show info about a given realization in the logic tree
    Example:

    $ oq show rlz:0 -1
    """
    _, rlz_id = token.split(':')
    full_lt = dstore['full_lt']
    rlz = full_lt.get_realizations()[int(rlz_id)]
    smlt = full_lt.source_model_lt
    gslt = full_lt.gsim_lt
    tbl = []
    for bset, brid in zip(smlt.branchsets, rlz.sm_lt_path):
        tbl.append((bset.uncertainty_type, smlt.branches[brid].value))
    for trt, value in zip(sorted(gslt.bsetdict), rlz.gsim_rlz.value):
        tbl.append((trt, value))
    return numpy.array(tbl, dt('uncertainty_type uvalue'))

@view.add('branchsets')
def view_branchsets(token, dstore):
    """
    Show the branchsets in the logic tree
    """
    flt = dstore['full_lt']
    clt = logictree.compose(flt.source_model_lt, flt.gsim_lt)
    return text_table(enumerate(map(repr, clt.branchsets)),
                      header=['bsno', 'bset'], ext='org')

@view.add('rupture')
def view_rupture(token, dstore):
    """
    Show a rupture with its geometry
    """
    rup_id = int(token.split(':')[1])
    return get_ebrupture(dstore, rup_id)

@view.add('event_rates')
def view_event_rates(token, dstore):
    """
    Show the number of events per realization multiplied by
    risk_time/eff_time
    """
    oq = dstore['oqparam']
    R = dstore['full_lt'].get_num_paths()
    if oq.calculation_mode != 'event_based_damage':
        return numpy.ones(R)
    time_ratio = (oq.risk_investigation_time or oq.investigation_time) / (
        oq.ses_per_logic_tree_path * oq.investigation_time)
    if oq.collect_rlzs:
        return numpy.array([len(dstore['events']) * time_ratio / R])
    else:
        rlzs = dstore['events']['rlz_id']
        return numpy.bincount(rlzs, minlength=R) * time_ratio

@view.add('sum')
def view_sum(token, dstore):
    """
    Show the sum of an array of shape (A, R, L, ...) on the first axis
    """
    _, arrayname = token.split(':')  # called as sum:damages-rlzs
    dset = dstore[arrayname]
    A, R, L, *D = dset.shape
    cols = ['RL'] + tup2str(itertools.product(*[range(d) for d in D]))
    arr = dset[:].sum(axis=0)  # shape R, L, *D
    z = numpy.zeros(R * L, dt(cols))
    for r, ar in enumerate(arr):
        for li, a in enumerate(ar):
            a = a.flatten()
            for c, col in enumerate(cols):
                z[r * L + li][col] = a[c - 1] if c > 0 else (r, li)
    return z

@view.add('agg_id')
def view_agg_id(token, dstore):
    """
    Show the available aggregations
    """
    [aggby] = dstore['oqparam'].aggregate_by
    keys = [key.decode('utf8').split(',') for key in dstore['agg_keys'][:]]
    keys = numpy.array(keys)  # shape (N, A)
    dic = {aggkey: keys[:, a] for a, aggkey in enumerate(aggby)}
    df = pandas.DataFrame(dic)
    df.index.name = 'agg_id'
    return df

@view.add('mean_perils')
def view_mean_perils(token, dstore):
    """
    For instance `oq show mean_perils`
    """
    oq = dstore['oqparam']
    pdcols = dstore.get_attr('gmf_data', '__pdcolumns__').split()
    perils = [col for col in pdcols[2:] if not col.startswith('gmv_')]
    N = len(dstore['sitecol/sids'])
    sid = dstore['gmf_data/sid'][:]
    out = numpy.zeros(N, [(per, float) for per in perils])
    if oq.number_of_logic_tree_samples:
        E = len(dstore['events'])
        for peril in perils:
            out[peril] = fast_agg(sid, dstore['gmf_data/' + peril][:]) / E
    else:
        rlz_weights = dstore['weights'][:]
        ev_weights = rlz_weights[dstore['events']['rlz_id']]
        totw = ev_weights.sum()  # num_gmfs
        for peril in perils:
            data = dstore['gmf_data/' + peril][:]
            weights = ev_weights[dstore['gmf_data/eid'][:]]
            out[peril] = fast_agg(sid, data * weights) / totw
    return out

@view.add('rup_stats')
def view_rup_stats(token, dstore):
    """
    Show the statistics of event based ruptures
    """
    rups = dstore['ruptures'][:]
    out = [stats(f, rups[f]) for f in 'mag n_occ'.split()]
    return numpy.array(out, dt('kind counts mean stddev min max'))

@view.add('rup')
def view_rup(token, dstore):
    """
    Show the ruptures (contexts) generated by a given source
    """
    try:
        source_id = token.split(':', 1)[1]
    except IndexError:
        print('Use the syntax oq show rup:<source_id>')
        return []
    source_ids = decode(dstore['source_info']['source_id'])
    src_id = source_ids.index(source_id)
    df = dstore.read_df('rup', sel=dict(src_id=src_id))
    df = df.sort_values(['rup_id', 'sids']).set_index('rup_id')
    return df

@view.add('usgs_rupture')
def view_usgs_rupture(token, dstore):
    """
    Show the parameters of a rupture downloaded from the USGS site.

    $ oq show usgs_rupture:us70006sj8
    {'lon': 74.628, 'lat': 35.5909, 'dep': 13.8, 'mag': 5.6, 'rake': 0.0}
    """
    try:
        usgs_id = token.split(':', 1)[1]
    except IndexError:
        return 'Example: oq show usgs_rupture:us70006sj8'
    return download_rupture_dict(usgs_id)

@view.add('collapsible')
def view_collapsible(token, dstore):
    """
    Show how much the ruptures are collapsed for each site
    """
    if ':' in token:
        collapse_level = int(token.split(':')[1])
    else:
        collapse_level = 0
    dist_types = [dt for dt in dstore['rup'] if dt in KNOWN_DISTANCES]
    vs30 = dstore['sitecol'].vs30
    ctx_df = dstore.read_df('rup')
    ctx_df['vs30'] = vs30[ctx_df.sids]
    has_vs30 = len(numpy.unique(vs30)) > 1
    c = Collapser(collapse_level, dist_types, has_vs30)
    ctx_df['mdbin'] = c.calc_mdbin(ctx_df)
    print('cfactor = %d/%d' % (len(ctx_df), len(ctx_df['mdbin'].unique())))
    out = []
    for sid, df in ctx_df.groupby('sids'):
        n, u = len(df), len(df.mdbin.unique())
        out.append((sid, u, n, n / u))
    return numpy.array(out, dt('site_id eff_rups num_rups cfactor'))

# tested in oq-risk-tests etna
@view.add('event_based_mfd')
def view_event_based_mfd(token, dstore):
    """
    Compare n_occ/eff_time with occurrence_rate
    """
    dic = extract(dstore, 'event_based_mfd?').to_dict()
    del dic['extra']
    return pandas.DataFrame(dic).set_index('mag')

# used in the AELO project
@view.add('relevant_sources')
def view_relevant_sources(token, dstore):
    """
    Returns a table with the sources contributing more than 10%
    of the highest source.
    """
    imt = token.split(':')[1]
    kw = dstore['oqparam'].postproc_args
    [iml] = kw['imls_by_sid']['0']
    aw = extract(dstore, f'mean_rates_by_src?imt={imt}&iml={iml}')
    rates = aw.array['rate']  # for each source in decreasing order
    return aw.array[rates > .1 * rates[0]]

def shorten(lst):
    """
    Shorten a list of strings
    """
    if len(lst) <= 7:
        return lst
    return lst[:3] + ['...'] + lst[-3:]

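# Illustrative sketch: shorten(['s%d' % i for i in range(10)]) returns
# ['s0', 's1', 's2', '...', 's7', 's8', 's9']; lists with at most 7
# elements are returned unchanged.
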
@view.add('sources_branches')
def view_sources_branches(token, dstore):
    """
    Returns a table with the sources in the logic tree by branches
    """
    sd = dstore['full_lt/source_data'][:]
    acc = AccumDict(accum=[])
    for src, trt in numpy.unique(sd[['source', 'trt']]):
        brs = b' '.join(sorted(sd[sd['source'] == src]['branch']))
        acc[brs, trt].append(src.decode('utf8'))
    out = [(t, ' '.join(shorten(s)), b)
           for ((b, t), s) in sorted(acc.items())]
    return numpy.array(sorted(out), dt('trt sources branches'))

@view.add('MPL')
def view_MPL(token, dstore):
    """
    Maximum Probable Loss at a given return period
    """
    rp = int(token.split(':')[1])
    K = dstore['risk_by_event'].attrs['K']
    ltypes = list(dstore['agg_curves-stats'])
    out = numpy.zeros(1, [(lt, float) for lt in ltypes])
    for ltype in ltypes:
        # shape (K+1, S, P)
        arr = dstore.sel(f'agg_curves-stats/{ltype}', stat='mean',
                         agg_id=K, return_period=rp)
        out[ltype] = arr
    return out

def _drate(df, imt, src):
    return df[(df.imt == imt) & (df.source_id == src)].value.sum()


def _irate(df, imt, src, iml, imls):
    subdf = df[(df.imt == imt) & (df.src_id == src)]
    interp = numpy.interp(numpy.log(iml), numpy.log(imls[subdf.lvl]),
                          numpy.log(subdf.value))
    return numpy.exp(interp)

# used only in AELO calculations
# NB: in presence of !-sources the comparison makes no sense

@view.add('gh3')
def view_gh3(token, dstore):
    sitecol = dstore['sitecol']
    gh3 = geo.utils.geohash3(sitecol.lons, sitecol.lats)
    uni, cnt = numpy.unique(gh3, return_counts=True)
    # print(sorted(cnt))
    return numpy.array([stats('gh3', cnt)],
                       dt('kind counts mean stddev min max'))

@view.add('sites_by_country')
def view_sites_by_country(token, dstore):
    """
    Returns a table with the number of sites per country.
    The countries are defined as in the file geoBoundariesCGAZ_ADM0.shp
    """
    return dstore['sitecol'].by_country()

@view.add('aggrisk')
def view_aggrisk(token, dstore):
    """
    Returns a table with the aggregate risk by realization and loss type
    """
    gsim_lt = dstore['full_lt/gsim_lt']
    df = dstore.read_df('aggrisk', sel={'agg_id': 0})
    dt = [('gsim', vstr), ('weight', float)] + [
        (lt, float) for lt in LOSSTYPE[df.loss_id.unique()]]
    rlzs = list(gsim_lt)
    AVG = len(rlzs)
    arr = numpy.zeros(AVG + 1, dt)
    for r, rlz in enumerate(rlzs):
        arr[r]['gsim'] = repr(repr(rlz.value[0]))
        arr[r]['weight'] = rlz.weight
    for r, loss_id, loss in zip(df.rlz_id, df.loss_id, df.loss):
        rlz = rlzs[r]
        lt = LOSSTYPE[loss_id]
        arr[r][lt] = loss
        arr[AVG][lt] += loss * rlz.weight
    arr[AVG]['gsim'] = 'Average'
    arr[AVG]['weight'] = 1
    return arr