repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/preprocessing/CreateMergedCatalogNGAWest3CA.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 29 13:58:20 2021
@author: glavrent
"""
# %% Required Packages
# ======================================
#load libraries
import os
import sys
import pathlib
import glob
import re #regular expression package
import warnings
#arithmetic libraries
import numpy as np
import numpy.linalg
import scipy as sp
import scipy.linalg
import pandas as pd
#geometric libraries
from shapely.geometry import Point as shp_pt, Polygon as shp_poly
#geographic libraries
import pyproj
#plotting libraries
from matplotlib import pyplot as plt
import matplotlib.ticker as mticker
#user-defined functions
sys.path.insert(0,'../../Python_lib/catalog')
sys.path.insert(0,'../../Python_lib/plotting')
import pylib_catalog as pylib_catalog
import pylib_contour_plots as pylib_cplt
# %% Define Input Data
# ======================================
# threshold distances
# distance for collocated stations
thres_dist = 0.01
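#note (assumption): the station coordinates used for collocation (staX, staY) are UTM values in km
#(the projection below is defined with +units=km), so a threshold of 0.01 corresponds to roughly 10 m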
#flatfiles
dir_flatfiles = '../../../Data/Verification/preprocessing/flatfiles/'
# fname_flatfiles = ['NGAWest2_CA/CatalogNGAWest2CA_ASK14.csv',
# 'CA_NV_2011-2020/CatalogNewRecords_2011-2021_CA_NV.csv']
fname_flatfiles = ['NGAWest2_CA/CatalogNGAWest2CA_ASK14.csv',
'CA_NV_2011-2021Lite/CatalogNewRecordsLite_2011-2021_CA_NV.csv']
#output flatfile name
# fname_flatfile_out = 'CatalogNGAWest3CA'
fname_flatfile_out = 'CatalogNGAWest3CALite'
#output directory
dir_out = '../../../Data/Verification/preprocessing/flatfiles/merged/'
dir_fig = dir_out + 'figures/'
flag_col = True
#North and South CA polygons
sreg_NCA_latlon = np.array([[42,-124.5],[35,-124.5],[35,-120],[36,-120],[36,-119.5],[37,-119.5],[37,-119],[38,-119],[38,-114],[42,-114],[42,-124.5]])
sreg_SCA_latlon = np.array([[31,-124.5],[35,-124.5],[35,-120],[36,-120],[36,-119.5],[37,-119.5],[37,-119],[38,-119],[38,-114],[31,-114],[31,-124.5]])
#earthquake and station info columns for averaging
eq_col_idx = 'eqid'
sta_col_idx = 'ssn'
eq_col_info = ['mag', 'eqX', 'eqY', 'eqZ']
sta_col_info = ['Vs30', 'staX', 'staY']
# %% Load Data
# ======================================
#load individual flatfiles
df_flatfile = [pd.read_csv(dir_flatfiles + fn_fltfile) for fn_fltfile in fname_flatfiles]
# %% Process Data
# ======================================
#compute number of events and stations per dataframe
n_eq_df_orig = [len(np.unique(df_fltf.eqid)) for df_fltf in df_flatfile]
n_sta_df_orig = [len(np.unique(df_fltf.ssn)) for df_fltf in df_flatfile]
# Merge Data-sets
#---- ---- ---- ---- ----
#define data-set id, copy original rsn, eqid, and ssn
for ds_id in range(len(df_flatfile)):
df_flatfile[ds_id].loc[:,'dsid'] = ds_id
#copy original columns
df_flatfile[ds_id].loc[:,'rsn_orig'] = df_flatfile[ds_id].loc[:,'rsn']
df_flatfile[ds_id].loc[:,'eqid_orig'] = df_flatfile[ds_id].loc[:,'eqid']
df_flatfile[ds_id].loc[:,'ssn_orig'] = df_flatfile[ds_id].loc[:,'ssn']
#merge datasets
df_flatfile = pd.concat(df_flatfile).reset_index(drop=True)
#define projection system
assert(len(np.unique(df_flatfile.UTMzone))==1),'Error. Multiple UTM zones defined.'
utm_zone = df_flatfile.UTMzone[0]
utmProj = pyproj.Proj("+proj=utm +zone="+utm_zone+", +ellps=WGS84 +datum=WGS84 +units=km +no_defs")
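#illustrative usage of the projection (values not from the catalog): utmProj(lon, lat) returns the
#easting/northing in km for the catalog's UTM zone, and utmProj(x, y, inverse=True) maps back to
#(lon, lat); the coordinate conversions further below rely on this forward/inverse convention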
#reset rsn
df_flatfile.rsn = np.arange(len(df_flatfile))+1
# Original info data-frame
#---- ---- ---- ---- ----
df_flatfile_orig = df_flatfile.copy()
# New Earthquake IDs
#---- ---- ---- ---- ----
#define new earthquake id
_, eq_inv = np.unique( df_flatfile[['dsid','eqid']], axis=0, return_inverse=True )
df_flatfile.eqid = eq_inv + 1
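#how the re-indexing works (illustrative values, not from the catalog):
#  _, inv = np.unique(np.array([[0,3],[0,3],[1,3]]), axis=0, return_inverse=True)
#  inv + 1 -> array([1, 1, 2]), i.e. consecutive merged event ids across data-sets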
#unique eqids
eqid_array = np.unique(df_flatfile.eqid)
#total number of events
n_eq_orig = len(eqid_array)
assert(n_eq_orig == np.sum(n_eq_df_orig)),'Error. Total number of events is not equal to sum of number of events from individual data-sets'
# New Station IDs
#---- ---- ---- ---- ----
#define new station id; initially, stations from separate data-sets are treated as distinct
_, sta_idx, sta_inv = np.unique(df_flatfile[['dsid','ssn']], axis=0, return_index=True, return_inverse=True )
df_flatfile.ssn = sta_inv + 1
#total number of stations before collocation
n_sta_orig = len(np.unique(df_flatfile.ssn))
assert(n_sta_orig == np.sum(n_sta_df_orig)),'Error. Total number of stations, before collocation, is not equal to sum of number of stations from individual data-sets'
# Collocate Stations
#---- ---- ---- ---- ----
#update ssn for colocated stations
df_flatfile = pylib_catalog.ColocatePt(df_flatfile, 'ssn', ['staX','staY'], thres_dist=thres_dist)
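#pylib_catalog.ColocatePt is a user-library routine; based on its use here it is assumed to assign
#a common 'ssn' to stations whose ('staX','staY') coordinates fall within thres_dist of each other,
#so collocated stations from different data-sets share a single station id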
#keep single record from each event
i_unq_eq_sta = np.unique(df_flatfile[['eqid','ssn']].values, return_index=True, axis=0)[1]
df_flatfile = df_flatfile.iloc[i_unq_eq_sta, :].sort_index()
# Average GM Parameters
# ---- ---- ---- ---- ----
df_flatfile = pylib_catalog.IndexAvgColumns(df_flatfile, 'eqid', ['mag','eqX','eqY','eqZ'])
df_flatfile = pylib_catalog.IndexAvgColumns(df_flatfile, 'ssn', ['Vs30','staX','staY','staElev'])
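#pylib_catalog.IndexAvgColumns is a user-library routine; from its use here it is assumed to replace
#the listed columns with their average over all records sharing the same index column ('eqid' or 'ssn'),
#giving merged events and stations a single consistent set of source/site parameters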
#verify no station has multiple records at the same event
for eqid in eqid_array:
sta_eq = df_flatfile.loc[df_flatfile.eqid == eqid,'ssn'].values
assert(len(sta_eq) == len(np.unique(sta_eq))),'Error. Event %i has multiple collocated stations'%eqid
#recalculate lat/lon coordinates
_, eq_idx, eq_inv = np.unique(df_flatfile.loc[:,'eqid'], axis=0, return_index=True, return_inverse=True)
_, sta_idx, sta_inv = np.unique(df_flatfile.loc[:,'ssn'], axis=0, return_index=True, return_inverse=True)
n_eq = len(eq_idx)
n_sta = len(sta_idx)
eq_latlon = np.flip([utmProj(e.eqX, e.eqY, inverse=True) for _, e in df_flatfile.iloc[eq_idx,:].iterrows()], axis=1)
sta_latlon = np.flip([utmProj(s.staX, s.staY, inverse=True) for _, s in df_flatfile.iloc[sta_idx,:].iterrows()], axis=1)
df_flatfile.loc[:,['eqLat','eqLon']] = eq_latlon[eq_inv,:]
df_flatfile.loc[:,['staLat','staLon']] = sta_latlon[sta_inv,:]
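#note on the pattern above: utmProj(..., inverse=True) returns (lon, lat) pairs, np.flip(..., axis=1)
#reorders each row to (lat, lon), and indexing with eq_inv/sta_inv broadcasts the per-event and
#per-station coordinates back onto every record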
# Midpoint Coordinates
# ---- ---- ---- ---- ----
df_flatfile.loc[:,['mptX','mptY']] = (df_flatfile.loc[:,['eqX','eqY']].values + df_flatfile.loc[:,['staX','staY']].values) / 2
df_flatfile.loc[:,['mptLat','mptLon']] = np.flip( np.array([utmProj(pt.mptX, pt.mptY, inverse=True) for _, pt in df_flatfile.iterrows()]), axis=1 )
#recompute rupture distance
rrup_array = np.sqrt( np.linalg.norm(df_flatfile[['eqX','eqY']].values-df_flatfile[['staX','staY']].values, axis=1)**2 +
df_flatfile['eqZ'].values**2 )
df_flatfile.Rrup = rrup_array
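#i.e. Rrup is recomputed as sqrt(R_epi^2 + eqZ^2), a hypocentral (point-source) distance built from
#the averaged epicentral offset and event depth, matching the expression above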
# Difference between original and processed catalog
#---- ---- ---- ---- ----
eq_corr_diff = np.linalg.norm(df_flatfile[['eqLat','eqLon']].values - df_flatfile_orig[['eqLat','eqLon']].values, axis=1)
sta_corr_diff = np.linalg.norm(df_flatfile[['staLat','staLon']].values - df_flatfile_orig[['staLat','staLon']].values, axis=1)
eq_loc_diff = np.linalg.norm(df_flatfile[['eqX','eqY']].values - df_flatfile_orig[['eqX','eqY']].values, axis=1)
sta_loc_diff = np.linalg.norm(df_flatfile[['staX','staY']].values - df_flatfile_orig[['staX','staY']].values, axis=1)
mag_diff = np.abs(df_flatfile['mag'].values - df_flatfile_orig['mag'].values)
rrup_diff = np.abs(df_flatfile['Rrup'].values - df_flatfile_orig['Rrup'].values)
vs30_diff = np.abs(df_flatfile['Vs30'].values - df_flatfile_orig['Vs30'].values)
#North and South CA regions
#---- ---- ---- ---- ----
#UTM coordinates of the Northern and Southern CA polygon vertices
sreg_NCA_X = np.array([utmProj(pt_lon, pt_lat) for pt_lat, pt_lon in zip(sreg_NCA_latlon[:,0], sreg_NCA_latlon[:,1])])
sreg_SCA_X = np.array([utmProj(pt_lon, pt_lat) for pt_lat, pt_lon in zip(sreg_SCA_latlon[:,0], sreg_SCA_latlon[:,1])])
#shapely polygons for Northern and Southern CA
sreg_NCA_poly = shp_poly(sreg_NCA_X)
sreg_SCA_poly = shp_poly(sreg_SCA_X)
#indices for earthquakes belonging to Northern and Southern CA
i_sregNCA = np.array([ shp_pt(eq_x).within(sreg_NCA_poly) for _, eq_x in df_flatfile[['eqX','eqY']].iterrows() ])
i_sregSCA = np.array([ shp_pt(eq_x).within(sreg_SCA_poly) for _, eq_x in df_flatfile[['eqX','eqY']].iterrows() ])
assert( (i_sregNCA + i_sregSCA <= 1).all() ),'Error. Overlapping sub-regions'
#add region info to catalog
df_flatfile.loc[:,'sreg'] = 0
df_flatfile.loc[i_sregNCA,'sreg'] = 1
df_flatfile.loc[i_sregSCA,'sreg'] = 2
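#sreg coding: 0 = outside both sub-regions, 1 = Northern CA polygon, 2 = Southern CA polygon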
# Clean Records
#---- ---- ---- ---- ----
#remove records with unknown earthquake and source parameters
i_val_rec = ~np.isnan(df_flatfile[eq_col_info+sta_col_info]).any(axis=1)
df_flatfile = df_flatfile.loc[i_val_rec,:]
# %% Save data
# ======================================
#create output directories
if not os.path.isdir(dir_out): pathlib.Path(dir_out).mkdir(parents=True, exist_ok=True)
if not os.path.isdir(dir_fig): pathlib.Path(dir_fig).mkdir(parents=True, exist_ok=True)
#rearrange columns
df_flatfile = df_flatfile[['rsn', 'eqid', 'ssn', 'dsid', 'rsn_orig', 'eqid_orig', 'ssn_orig',
'mag', 'Rrup', 'Vs30', 'year',
'eqLat', 'eqLon', 'staLat', 'staLon', 'mptLat', 'mptLon',
'UTMzone', 'eqX', 'eqY', 'eqZ', 'staX', 'staY', 'mptX', 'mptY', 'sreg']]
# #create individual North and South CA catalogs
# df_flatfileN = df_flatfile.loc[df_flatfile.sreg==1]
# df_flatfileS = df_flatfile.loc[df_flatfile.sreg==2]
#save processed dataframe
fullname_flatfile_out = '%s%s.csv'%(dir_out, fname_flatfile_out)
df_flatfile.to_csv(fullname_flatfile_out, index=False)
# fullname_flatfile_out = '%s%sNCA.csv'%(dir_out, fname_flatfile_out)
# df_flatfileN.to_csv(fullname_flatfile_out, index=False)
# fullname_flatfile_out = '%s%sSCA.csv'%(dir_out, fname_flatfile_out)
# df_flatfileS.to_csv(fullname_flatfile_out, index=False)
# Print data info
# ---------------------------
print(f'NGAWest3:')
print(f'\tGeneral Info:')
print(f'\t\tnumber of rec: %.i'%len(df_flatfile))
print(f'\t\tnumber of rec (R<200km): %.i'%np.sum(df_flatfile.Rrup<=200))
print(f'\t\tnumber of rec (R<300km): %.i'%np.sum(df_flatfile.Rrup<=300))
print(f'\t\tnumber of eq: %.i'%len(df_flatfile.eqid.unique()))
print(f'\t\tnumber of sta: %.i'%len(df_flatfile.ssn.unique()))
print(f'\t\tcoverage: %.i to %i'%(df_flatfile.year.min(), df_flatfile.year.max()))
print(f'\tMerging Info:')
print(f'\t\tnumber of merged stations: %.i'%(n_sta_orig-n_sta))
print(f'\t\tmax EQ latlon difference: %.2f'%eq_corr_diff[i_val_rec].max())
print(f'\t\tmax EQ UTM difference: %.2f'%eq_loc_diff[i_val_rec].max())
print(f'\t\tmax Sta latlon difference: %.2f'%sta_corr_diff[i_val_rec].max())
print(f'\t\tmax Sta UTM difference: %.2f'%sta_loc_diff[i_val_rec].max())
print(f'\t\tmax M difference: %.2f'%mag_diff[i_val_rec].max())
print(f'\t\tmax Rrup difference: %.2fkm'%rrup_diff[i_val_rec].max())
print(f'\t\tmax Vs30 difference: %.2fm/sec'%vs30_diff[i_val_rec].max())
print(f'\t\tnumber of invalid records: %.i'%np.sum(~i_val_rec))
#write out summary
# ---- ---- ---- ---- ----
f = open(dir_out + 'summary_data' + '.txt', 'w')
f.write(f'NGAWest3:\n')
f.write(f'\tGeneral Info:\n')
f.write(f'\t\tnumber of rec: %.i\n'%len(df_flatfile))
f.write(f'\t\tnumber of rec (R<200km): %.i\n'%np.sum(df_flatfile.Rrup<=200))
f.write(f'\t\tnumber of rec (R<300km): %.i\n'%np.sum(df_flatfile.Rrup<=300))
f.write(f'\t\tnumber of eq: %.i\n'%len(df_flatfile.eqid.unique()))
f.write(f'\t\tnumber of sta: %.i\n'%len(df_flatfile.ssn.unique()))
f.write(f'\t\tcoverage: %.i to %i\n'%(df_flatfile.year.min(), df_flatfile.year.max()))
f.write(f'\tMerging Info:\n')
f.write(f'\t\tnumber of merged stations: %.i\n'%(n_sta_orig-n_sta))
f.write(f'\t\tmax EQ latlon difference: %.2f\n'%eq_corr_diff[i_val_rec].max())
f.write(f'\t\tmax EQ UTM difference: %.2f\n'%eq_loc_diff[i_val_rec].max())
f.write(f'\t\tmax Sta latlon difference: %.2f\n'%sta_corr_diff[i_val_rec].max())
f.write(f'\t\tmax Sta UTM difference: %.2f\n'%sta_loc_diff[i_val_rec].max())
f.write(f'\t\tmax M difference: %.2f\n'%mag_diff[i_val_rec].max())
f.write(f'\t\tmax Rrup difference: %.2fkm\n'%rrup_diff[i_val_rec].max())
f.write(f'\t\tmax Vs30 difference: %.2fm/sec\n'%vs30_diff[i_val_rec].max())
f.write(f'\t\tnumber of invalid records: %.i\n'%np.sum(~i_val_rec))
# %% Plotting
# ======================================
df_flt = df_flatfile.copy().reset_index(drop=True)
# Mag-Dist distribution
fname_fig = 'M-R_dist_log'
#create figure
fig, ax = plt.subplots(figsize = (10,9))
pl1 = ax.scatter(df_flt.Rrup, df_flt.mag)
#edit figure properties
ax.set_xlabel(r'Distance ($km$)', fontsize=30)
ax.set_ylabel(r'Magnitude', fontsize=30)
ax.grid(which='both')
ax.set_xscale('log')
ax.set_xlim([0.1, 2000])
ax.set_ylim([2, 8])
ax.tick_params(axis='x', labelsize=25)
ax.tick_params(axis='y', labelsize=25)
ax.xaxis.set_tick_params(which='major', size=10, width=2, direction='in', top='on')
ax.xaxis.set_tick_params(which='minor', size=7, width=2, direction='in', top='on')
ax.yaxis.set_tick_params(which='major', size=10, width=2, direction='in', right='on')
ax.yaxis.set_tick_params(which='minor', size=7, width=2, direction='in', right='on')
fig.tight_layout()
#save figure
fig.savefig( dir_fig + fname_fig + '.png' )
# Mag-Dist distribution
fname_fig = 'M-R_dist_linear'
#create figure
fig, ax = plt.subplots(figsize = (10,9))
pl1 = ax.scatter(df_flt.Rrup, df_flt.mag)
#edit figure properties
ax.set_xlabel(r'Distance ($km$)', fontsize=30)
ax.set_ylabel(r'Magnitude', fontsize=30)
ax.grid(which='both')
ax.set_xlim([0.1, 500])
ax.set_ylim([2, 8])
ax.tick_params(axis='x', labelsize=25)
ax.tick_params(axis='y', labelsize=25)
ax.xaxis.set_tick_params(which='major', size=10, width=2, direction='in', top='on')
ax.xaxis.set_tick_params(which='minor', size=7, width=2, direction='in', top='on')
ax.yaxis.set_tick_params(which='major', size=10, width=2, direction='in', right='on')
ax.yaxis.set_tick_params(which='minor', size=7, width=2, direction='in', right='on')
fig.tight_layout()
#save figure
fig.savefig( dir_fig + fname_fig + '.png' )
# Source depth distribution
fname_fig = 'eqZ_dist'
#create figure
fig, ax = plt.subplots(figsize = (10,9))
pl1 = ax.hist(-df_flt.eqZ)
#edit figure properties
ax.set_xlabel(r'Source depth (km)', fontsize=30)
ax.set_ylabel(r'Count', fontsize=30)
ax.grid(which='both')
# ax.set_xscale('log')
# ax.set_xlim([0.1, 2000])
# ax.set_ylim([2, 8])
ax.tick_params(axis='x', labelsize=25)
ax.tick_params(axis='y', labelsize=25)
ax.xaxis.set_tick_params(which='major', size=10, width=2, direction='in', top='on')
ax.xaxis.set_tick_params(which='minor', size=7, width=2, direction='in', top='on')
ax.yaxis.set_tick_params(which='major', size=10, width=2, direction='in', right='on')
ax.yaxis.set_tick_params(which='minor', size=7, width=2, direction='in', right='on')
fig.tight_layout()
#save figure
fig.savefig( dir_fig + fname_fig + '.png' )
# eq and sta location
#get unique earthquake indices
_, eq_idx = np.unique(df_flt['eqid'], axis=0, return_index=True )
#get unique station indices
_, sta_idx = np.unique(df_flt['ssn'], axis=0, return_index=True)
# eq and sta location
fname_fig = 'eq_sta_locations'
fig, ax, data_crs, gl = pylib_cplt.PlotMap(flag_grid=True)
#plot earthquake and station locations
ax.plot(df_flt.loc[eq_idx,'eqLon'].values, df_flt.loc[eq_idx,'eqLat'].values,
'*', transform = data_crs, markersize = 10, zorder=13, label='Events')
ax.plot(df_flt.loc[sta_idx,'staLon'].values, df_flt.loc[sta_idx,'staLat'].values,
'o', transform = data_crs, markersize = 6, zorder=13, label='Stations')
#edit figure properties
gl.xlabel_style = {'size': 25}
gl.ylabel_style = {'size': 25}
# gl.xlocator = mticker.FixedLocator([-124, -122, -120, -118, -116, -114])
# gl.ylocator = mticker.FixedLocator([32, 34, 36, 38, 40])
ax.legend(fontsize=25, loc='lower left')
# ax.set_xlim([-125, -113.5])
# ax.set_ylim([30.5, 42.5])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
| 15,940 | 42.673973 | 165 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/preprocessing/CreateCatalogNewEvents2021Lite.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 27 16:12:57 2021
@author: glavrent
"""
# Required Packages
# ======================================
#load libraries
import os
import sys
import pathlib
import glob
import re #regular expression package
#arithmetic libraries
import numpy as np
import pandas as pd
#geographic coordinates
import pyproj
#plotting libraries
from matplotlib import pyplot as plt
import matplotlib.ticker as mticker
#user-defined functions
sys.path.insert(0,'../../Python_lib/catalog')
sys.path.insert(0,'../../Python_lib/plotting')
import pylib_catalog as pylib_catalog
import pylib_contour_plots as pylib_cplt
# %% Define Input Data
# ======================================
#thresholds
#number of events to keep
n_eq2keep = 1000
# n_eq2keep = 900
#mag range
eq_mag_min = 3.0
eq_mag_max = np.inf
eq_mag2keep = 6
#maximum depth
eq_mag_depth = 20
#distance range
# rrup_thres = 700
rrup_thres = 300
#year range
year_min = 2011
year_max = 2021
#colocation threshold
thres_dist_col = 0.01
#input flatfiles
fname_flatfile_newrec = '../../../Data/Verification/preprocessing/flatfiles/CA_NV_2011-2021/CatalogNewRecords_2011-2021_CA_NV.csv'
#output flatfile name
fname_flatfile = 'CatalogNewRecordsLite_%.i-%.i_CA_NV'%(year_min, year_max )
#output directory
dir_out = '../../../Data/Verification/preprocessing/flatfiles/CA_NV_%.i-%.iLite/'%(year_min, year_max)
dir_fig = dir_out + 'figures/'
#latlon window
# win_latlon = np.array([[30, 43],[-125, -110]])
win_latlon = np.array([[32, 42.5],[-125, -114]])
#set random seed number
np.random.seed(1)
# %% Load Data
# ======================================
#read event and station info
df_flatfile_newrec = pd.read_csv(fname_flatfile_newrec)
# %% Process Data
# ======================================
# projection system
# ---- ---- ---- ---- ----
utm_zone = np.unique(df_flatfile_newrec.UTMzone)
assert(len(utm_zone)==1),'Error. Multiple UTM zones'
utmProj = pyproj.Proj("+proj=utm +zone="+utm_zone[0]+", +ellps=WGS84 +datum=WGS84 +units=km +no_defs")
# cleaning files
# ---- ---- ---- ---- ----
#set -999 to nan
df_flatfile_newrec.replace(-999, np.nan, inplace=True)
#remove data with unknown mag
df_flatfile_newrec = df_flatfile_newrec[ ~np.isnan(df_flatfile_newrec['mag']) ]
#remove data with unknown coordinates
df_flatfile_newrec = df_flatfile_newrec[ ~np.isnan(df_flatfile_newrec[['eqLat', 'eqLon']]).any(axis=1) ]
df_flatfile_newrec = df_flatfile_newrec[ ~np.isnan(df_flatfile_newrec[['staLat', 'staLon']]).any(axis=1) ]
#remove earthquakes outside mag range
i_mag = np.logical_and(df_flatfile_newrec['mag'] >= eq_mag_min, df_flatfile_newrec['mag'] <= eq_mag_max)
df_flatfile_newrec = df_flatfile_newrec.loc[i_mag,:]
# keep only data in spatio-temporal window
# ---- ---- ---- ---- ----
#earthquakes
i_space_win_eq = np.all(np.array([df_flatfile_newrec.eqLat >= win_latlon[0,0],
df_flatfile_newrec.eqLat < win_latlon[0,1],
df_flatfile_newrec.eqLon >= win_latlon[1,0],
df_flatfile_newrec.eqLon < win_latlon[1,1]]),axis=0)
#stations
i_space_win_sta = np.all(np.array([df_flatfile_newrec.staLat >= win_latlon[0,0],
df_flatfile_newrec.staLat < win_latlon[0,1],
df_flatfile_newrec.staLon >= win_latlon[1,0],
df_flatfile_newrec.staLon < win_latlon[1,1]]),axis=0)
#depth limit
i_eq_depth = -df_flatfile_newrec.eqZ <= eq_mag_depth
#time
i_time_win = np.logical_and(df_flatfile_newrec.year >= year_min, df_flatfile_newrec.year <= year_max)
#records to keep
i_win = np.all(np.array([i_space_win_eq, i_space_win_sta, i_eq_depth, i_time_win]),axis=0)
df_flatfile_newrec = df_flatfile_newrec[i_win]
# keep only subset of events
# ---- ---- ---- ---- ----
if ~np.isnan(n_eq2keep):
#unique indices
eqid, eq_idx = np.unique(df_flatfile_newrec.eventid.values, return_index=True)
#magnitude array
mag_array = df_flatfile_newrec.mag.values[eq_idx]
#earthquakes to keep that exceed eq_mag2keep
eqid2keep = eqid[mag_array > eq_mag2keep]
#number of additional earthquakes to randomly sample
n_eq2keep = n_eq2keep - len(eqid2keep)
if n_eq2keep > 0:
eqid2keep = np.append(eqid2keep,
np.random.choice(eqid[~np.isin(eqid, eqid2keep)], size=n_eq2keep, replace=False) )
#keep only records of selected earthquakes
df_flatfile_newrec = df_flatfile_newrec.loc[df_flatfile_newrec.eventid.isin(eqid2keep),:]
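#subsetting logic above: all events with mag > eq_mag2keep are always retained, and additional event
#ids are drawn at random (without replacement) from the remaining ones until n_eq2keep events are
#selected in total; no extra sampling is done if the magnitude criterion alone already exceeds n_eq2keep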
# rupture distance
# ---- ---- ---- ---- ----
#remove records based on rupture distance
i_rrup = df_flatfile_newrec['Rrup'] < rrup_thres
df_flatfile_newrec = df_flatfile_newrec.loc[i_rrup,:]
# compute unique rsn eqid and ssn
# ---- ---- ---- ---- ----
#set rsn as the dataframe index
df_flatfile_newrec.set_index('rsn', inplace=True)
#updated earthquake and station ids
_, eq_idx, eq_inv = np.unique(df_flatfile_newrec.loc[:,'eqid'], axis=0, return_index=True, return_inverse=True)
_, sta_idx, sta_inv = np.unique(df_flatfile_newrec.loc[:,'ssn'], axis=0, return_index=True, return_inverse=True)
n_eq_orig = len(eq_idx)
n_sta_orig = len(sta_idx)
# average gm parameters
# ---- ---- ---- ---- ----
df_flatfile_newrec = pylib_catalog.IndexAvgColumns(df_flatfile_newrec, 'eqid', ['mag','eqX','eqY','eqZ'])
df_flatfile_newrec = pylib_catalog.IndexAvgColumns(df_flatfile_newrec, 'ssn', ['Vs30','staX','staY','staElev'])
#recalculate lat/lon coordinates
_, eq_idx, eq_inv = np.unique(df_flatfile_newrec.loc[:,'eqid'], axis=0, return_index=True, return_inverse=True)
_, sta_idx, sta_inv = np.unique(df_flatfile_newrec.loc[:,'ssn'], axis=0, return_index=True, return_inverse=True)
n_eq = len(eq_idx)
n_sta = len(sta_idx)
eq_latlon = np.flip([utmProj(e.eqX, e.eqY, inverse=True) for _, e in df_flatfile_newrec.iloc[eq_idx,:].iterrows()], axis=1)
sta_latlon = np.flip([utmProj(s.staX, s.staY, inverse=True) for _, s in df_flatfile_newrec.iloc[sta_idx,:].iterrows()], axis=1)
df_flatfile_newrec.loc[:,['eqLat','eqLon']] = eq_latlon[eq_inv,:]
df_flatfile_newrec.loc[:,['staLat','staLon']] = sta_latlon[sta_inv,:]
# midpoint coordinates
# ---- ---- ---- ---- ----
df_flatfile_newrec.loc[:,['mptX','mptY']] = (df_flatfile_newrec.loc[:,['eqX','eqY']].values + df_flatfile_newrec.loc[:,['staX','staY']].values) / 2
df_flatfile_newrec.loc[:,['mptLat','mptLon']] = np.flip( np.array([utmProj(pt.mptX, pt.mptY, inverse=True) for _, pt in df_flatfile_newrec.iterrows()]), axis=1 )
#recalculate rupture distance after averaging
df_flatfile_newrec.loc[:,'Rrup'] = np.sqrt(np.linalg.norm(df_flatfile_newrec[['eqX','eqY']].values-df_flatfile_newrec[['staX','staY']].values, axis=1)**2 +
df_flatfile_newrec['eqZ']**2)
# %% Save Data
# ======================================
# create output directories
if not os.path.isdir(dir_out): pathlib.Path(dir_out).mkdir(parents=True, exist_ok=True)
if not os.path.isdir(dir_fig): pathlib.Path(dir_fig).mkdir(parents=True, exist_ok=True)
#full dataframe
df_flatfile_full = df_flatfile_newrec[['eqid','ssn','eventid','staid','netid','station','network',
'mag','mag_type','mag_author','Rrup','Vs30','time','year',
'eqLat','eqLon','staLat','staLon','mptLat','mptLon',
'UTMzone','eqX','eqY','eqZ','staX','staY','staElev','mptX','mptY',
'author','cat','contributor','contributor_id','eq_loc']]
#event dataframe
df_flatfile_event = df_flatfile_newrec.iloc[eq_idx,:][['eqid','eventid','mag','mag_type','mag_author','year',
'eqLat','eqLon','UTMzone','eqX','eqY','eqZ',
'author','cat','contributor','contributor_id','eq_loc']].reset_index(drop=True)
#station dataframe
df_flatfile_station = df_flatfile_newrec.iloc[sta_idx,:][['ssn','Vs30',
'staLat','staLon','UTMzone','staX','staY','staElev']].reset_index(drop=True)
# save dataframe
# ---- ---- ---- ---- ----
#save processed dataframes
fname_flatfile_full= '%s%s'%(dir_out, fname_flatfile)
df_flatfile_full.to_csv(fname_flatfile_full + '.csv', index=True)
df_flatfile_event.to_csv(fname_flatfile_full + '_event.csv', index=False)
df_flatfile_station.to_csv(fname_flatfile_full + '_station.csv', index=False)
# create figures
# ---- ---- ---- ---- ----
# Mag-Dist distribution
fname_fig = 'M-R_dist'
#create figure
fig, ax = plt.subplots(figsize = (10,9))
pl1 = ax.scatter(df_flatfile_full.Rrup, df_flatfile_full.mag)
#edit figure properties
ax.set_xlabel(r'Distance ($km$)', fontsize=30)
ax.set_ylabel(r'Magnitude', fontsize=30)
ax.grid(which='both')
ax.set_xscale('log')
# ax.set_xlim([0.1, 2000])
ax.set_ylim([2, 8])
ax.tick_params(axis='x', labelsize=25)
ax.tick_params(axis='y', labelsize=25)
ax.legend(fontsize=25, loc='upper left')
ax.xaxis.set_tick_params(which='major', size=10, width=2, direction='in', top='on')
ax.xaxis.set_tick_params(which='minor', size=7, width=2, direction='in', top='on')
ax.yaxis.set_tick_params(which='major', size=10, width=2, direction='in', right='on')
ax.yaxis.set_tick_params(which='minor', size=7, width=2, direction='in', right='on')
fig.tight_layout()
#save figure
fig.savefig( dir_fig + fname_fig + '.png' )
# Mag-Year distribution
fname_fig = 'M-date_dist'
#create figure
fig, ax = plt.subplots(figsize = (10,9))
pl1 = ax.scatter(df_flatfile_event['year'].values, df_flatfile_event['mag'].values)
#edit figure properties
ax.set_xlabel(r'time ($year$)', fontsize=30)
ax.set_ylabel(r'Magnitude', fontsize=30)
ax.grid(which='both')
# ax.set_xscale('log')
ax.set_xlim([1965, 2025])
ax.set_ylim([2, 8])
ax.tick_params(axis='x', labelsize=25)
ax.tick_params(axis='y', labelsize=25)
ax.legend(fontsize=25, loc='upper left')
ax.xaxis.set_tick_params(which='major', size=10, width=2, direction='in', top='on')
ax.xaxis.set_tick_params(which='minor', size=7, width=2, direction='in', top='on')
ax.yaxis.set_tick_params(which='major', size=10, width=2, direction='in', right='on')
ax.yaxis.set_tick_params(which='minor', size=7, width=2, direction='in', right='on')
fig.tight_layout()
#save figure
fig.savefig( dir_fig + fname_fig + '.png' )
#eq and sta location
fname_fig = 'eq_sta_locations'
fig, ax, data_crs, gl = pylib_cplt.PlotMap(flag_grid=True)
#plot earthquake and station locations
ax.plot(df_flatfile_event['eqLon'].values, df_flatfile_event['eqLat'].values, '*', transform = data_crs, markersize = 10, zorder=13, label='Events')
ax.plot(df_flatfile_station['staLon'].values, df_flatfile_station['staLat'].values, 'o', transform = data_crs, markersize = 6, zorder=12, label='Stations')
#edit figure properties
gl.ylabel_style = {'size': 25}
gl.xlabel_style = {'size': 25}
# gl.xlocator = mticker.FixedLocator([-124, -122, -120, -118, -116, -114])
gl.ylocator = mticker.FixedLocator([32, 34, 36, 38, 40, 42])
ax.legend(fontsize=25, loc='lower left')
# ax.set_xlim(plt_latlon_win[:,1])
# ax.set_ylim(plt_latlon_win[:,0])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# print data info
# ---- ---- ---- ---- ----
print(r'New Records:')
print(f'\tnumber of rec: %.i'%len(df_flatfile_newrec))
print(f'\tnumber of rec (R<200km): %.i'%np.sum(df_flatfile_newrec.Rrup<=200))
print(f'\tnumber of rec (R<%.1f): %.i'%(rrup_thres, np.sum(df_flatfile_newrec.Rrup<=rrup_thres)))
print(f'\tnumber of eq: %.i'%n_eq)
print(f'\tnumber of sta: %.i'%n_sta)
print(f'\tmin magnitude: %.1f'%df_flatfile_newrec.mag.min())
print(f'\tmax magnitude: %.1f'%df_flatfile_newrec.mag.max())
print(f'\tcoverage: %.i to %i'%(df_flatfile_newrec.year.min(), df_flatfile_newrec.year.max()))
#write out summary
# ---- ---- ---- ---- ----
f = open(dir_out + 'summary_data' + '.txt', 'w')
f.write(f'New Records:\n')
f.write(f'\tnumber of rec: %.i\n'%len(df_flatfile_newrec))
f.write(f'\tnumber of rec (R<200km): %.i\n'%np.sum(df_flatfile_newrec.Rrup<=200))
f.write(f'\tnumber of rec (R<%.1f): %.i\n'%(rrup_thres, np.sum(df_flatfile_newrec.Rrup<=rrup_thres)))
f.write(f'\tnumber of eq: %.i\n'%n_eq)
f.write(f'\tnumber of sta: %.i\n'%n_sta)
f.write(f'\tmin magnitude: %.1f\n'%df_flatfile_newrec.mag.min())
f.write(f'\tmax magnitude: %.1f\n'%df_flatfile_newrec.mag.max())
f.write(f'\tcoverage: %.i to %i\n'%(df_flatfile_newrec.year.min(), df_flatfile_newrec.year.max()))
f.close()
| 12,697 | 41.610738 | 162 | py |
ngmm_tools | ngmm_tools-master/Examples/example1/regression_inla_postprocessing.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 27 12:20:36 2022
@author: glavrent
"""
# Working directory and Packages
# ---------------------------
#load packages
import sys
import pathlib
import glob
import re #regular expression package
import pickle
from joblib import cpu_count
#arithmetic libraries
import numpy as np
#statistics libraries
import pandas as pd
#plot libraries
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoLocator as plt_autotick
# Define Problem
# ---------------------------
#data filename
fname_data = 'data/examp_obs.csv'
#inla regression filename
fname_inla_reg = 'data/inla_regression/inla_regression.csv'
#output directory
dir_out = 'data/inla_regression/'
# Read Data
# ---------------------------
#observation data
df_data = pd.read_csv(fname_data, index_col=0)
#inla regression results
df_reg_summary = pd.read_csv(fname_inla_reg, index_col=0)
# Summary figures
# ---------------------------
#color bar (mean)
cbar_levs_mean = np.linspace(-2, 2, 101).tolist()
cbar_ticks_mean = np.arange(-2, 2.01, 0.8).tolist()
#color bar (sigma)
cbar_levs_sig = np.linspace(0.0, 0.5, 101).tolist()
cbar_ticks_sig = np.arange(0, 0.501, 0.1).tolist()
# scatter comparison
fname_fig = 'inla_gp_scatter'
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#observations scatter
hl = ax.plot(df_data.tot, df_reg_summary.tot_mean, 'o')
ax.axline((0,0), slope=1, color="black", linestyle="--")
#figure properties
ax.grid(which='both')
#tick size
ax.tick_params(axis='x', labelsize=32)
ax.tick_params(axis='y', labelsize=32)
#figure limits
ax.set_xticks([-2,-1,0,1,2])
ax.set_yticks([-2,-1,0,1,2])
ax.set_xlim([-2.0, 2.0])
ax.set_ylim([-2.0, 2.0])
#labels
ax.set_xlabel('Data', fontsize=35)
ax.set_ylabel('Estimated', fontsize=35)
#save figure
fig.tight_layout()
fig.savefig( dir_out + fname_fig + '.png' )
#field mean
fname_fig = 'inla_gp_field_mean'
#create figure
fig, ax = plt.subplots(figsize = (10,11))
#observations map
hl = ax.scatter(df_reg_summary.X, df_reg_summary.Y, c=df_reg_summary.tot_mean, marker='D', vmin=-2, vmax=2, s=100)
#figure properties
ax.grid(which='both')
#color bar
cbar = fig.colorbar(hl, orientation="horizontal", pad=0.15, boundaries=cbar_levs_mean, ticks=cbar_ticks_mean)
#tick size
ax.tick_params(axis='x', labelsize=30)
ax.tick_params(axis='y', labelsize=30)
#labels
ax.set_xlabel(r'$t_1$', fontsize=35)
ax.set_ylabel(r'$t_2$', fontsize=35)
#figure limits
ax.set_xlim([0, 100])
ax.set_ylim([0, 100])
#update colorbar
cbar.ax.tick_params(tick1On=1, labelsize=30)
cbar.set_label(r'$\mu(c_0 + c_1(\vec{t}))$', size=35)
#save figure
fig.tight_layout()
fig.savefig( dir_out + fname_fig + '.png' )
#field std
fname_fig = 'inla_gp_field_std'
#create figure
fig, ax = plt.subplots(figsize = (10,11))
#observations map
hl = ax.scatter(df_reg_summary.X, df_reg_summary.Y, c=df_reg_summary.tot_sig, marker='D', vmin=0, vmax=0.5, s=100, cmap='Oranges')
#figure properties
ax.grid(which='both')
#color bar
cbar = fig.colorbar(hl, orientation="horizontal", pad=0.15, boundaries=cbar_levs_sig, ticks=cbar_ticks_sig)
#tick size
ax.tick_params(axis='x', labelsize=30)
ax.tick_params(axis='y', labelsize=30)
#labels
ax.set_xlabel(r'$t_1$', fontsize=35)
ax.set_ylabel(r'$t_2$', fontsize=35)
#figure limits
ax.set_xlim([0, 100])
ax.set_ylim([0, 100])
#update colorbar
cbar.ax.tick_params(tick1On=1, labelsize=30)
cbar.set_label(r'$\psi(c_0 + c_1(\vec{t}))$', size=35)
#save figure
fig.tight_layout()
fig.savefig( dir_out + fname_fig + '.png' )
| 3,583 | 27 | 130 | py |
ngmm_tools | ngmm_tools-master/Examples/example1/create_examp_data.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 26 16:01:54 2022
@author: glavrent
"""
# Working directory and Packages
# ---------------------------
#load packages
import os
import sys
import pathlib
import numpy as np
import pandas as pd
from scipy import sparse
from scipy import linalg as scipylinalg
#geographic libraries
import pyproj
import geopy.distance
#ground-motion models
import pygmm
#plotting libraries
import matplotlib as mpl
from matplotlib import pyplot as plt
import matplotlib.ticker as mticker
#user libraries
sys.path.insert(0,'../../Analyses/Python_lib/ground_motions')
import pylib_NGMM_prediction as pyNGMM
# Define Problem
# ---------------------------
#hyper-parameters
ell = 25
omega = 0.4
sig = 0.6
#grid
grid_win = np.array([[ 0, 0], [100, 100]])
grid_dxdy = [1, 1]
#number of samples
n_samp = 150
n_rep = 10
#output directory
dir_out = 'data/'
# Grid
# ---------------------------
#create coordinate grid
grid_x_edge = np.arange(grid_win[0,0],grid_win[1,0]+1e-9,grid_dxdy[0])
grid_y_edge = np.arange(grid_win[0,1],grid_win[1,1]+1e-9,grid_dxdy[0])
grid_x, grid_y = np.meshgrid(grid_x_edge, grid_y_edge, indexing='ij')
#create coordinate array with all grid nodes
grid_X = np.vstack([grid_x.T.flatten(), grid_y.T.flatten()]).T
#number of grid points
n_pt_g = grid_X.shape[0]
n_pt_x = len(grid_x_edge)
n_pt_y = len(grid_y_edge)
#grid point ids
grid_ids = np.arange(n_pt_g)
del grid_x, grid_y
# Create Dataset
# ---------------------------
# Underling process
# --- --- --- --- ---
#grid covariance matrix
grid_cov = pyNGMM.KernelNegExp(grid_X, grid_X, hyp_ell=ell, hyp_omega=omega, delta=1e-9)
#grid GP
grid_gp = np.linalg.cholesky(grid_cov) @ np.random.normal(size=n_pt_g)
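#sampling note: if grid_cov = L L^T is the Cholesky factorization, then L @ z with z ~ N(0, I) has
#covariance grid_cov, so grid_gp is a single realization of a zero-mean Gaussian process with the
#negative-exponential kernel evaluated on the grid nodes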
#constant offset
c0 = np.random.normal(0, 0.1)
#GP dataframe
df_gp = pd.DataFrame({'g_id':grid_ids , 'X':grid_X[:,0], 'Y':grid_X[:,1], 'c0':c0, 'gp':grid_gp}).set_index('g_id')
#total effect
df_gp.loc[:,'tot'] = df_gp[['c0','gp']].sum(axis=1)
# Samples
# --- --- --- --- ---
#random samples
samp_ids_orig = np.random.randint(n_pt_g, size=n_samp)
samp_ids = np.hstack([np.full(np.random.randint(low=1, high=n_rep, size=1), s) for s in samp_ids_orig])
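#each sampled grid node is repeated a random number of times (between 1 and n_rep-1), mimicking
#locations with multiple repeated observations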
#samples data frame
df_samp = df_gp.loc[samp_ids,:].reset_index()
df_samp.index.name = 'samp_id'
#noise term
df_samp.loc[:,'eps'] = np.random.normal(0, sig, len(df_samp))
#response variable
df_samp.loc[:,'y'] = df_samp[['tot','eps']].sum(axis=1)
# Save Dataset
# ---------------------------
pathlib.Path(dir_out).mkdir(parents=True, exist_ok=True)
df_gp.to_csv( dir_out + 'examp_grid_gp.csv' )
df_samp.to_csv( dir_out + 'examp_obs.csv' )
# Summary Figures
# ---------------------------
#color bar
cbar_levs = np.linspace(-2, 2, 101).tolist()
cbar_ticks = np.arange(-2, 2.01, 0.8).tolist()
#figure title
fname_fig = 'examp_data_gp_field'
#create figure
fig, ax = plt.subplots(figsize = (10,11))
#contour plot
cs = ax.contourf(grid_x_edge, grid_y_edge, df_gp.tot.values.reshape(n_pt_x,n_pt_y), vmin=-2, vmax=2, levels = cbar_levs)
#observations
hl = ax.plot(df_samp.X, df_samp.Y, 'o', color='black',markersize=12, markerfacecolor='none', markeredgewidth=2)
#figure properties
ax.grid(which='both')
#color bar
cbar = fig.colorbar(cs, orientation="horizontal", pad=0.15, boundaries=cbar_levs, ticks=cbar_ticks)
#tick size
ax.tick_params(axis='x', labelsize=30)
ax.tick_params(axis='y', labelsize=30)
#labels
ax.set_xlabel(r'$t_1$', fontsize=35)
ax.set_ylabel(r'$t_2$', fontsize=35)
#update colorbar
cbar.ax.tick_params(tick1On=1, labelsize=30)
cbar.set_label(r'$c_0 + c_1(\vec{t})$', size=35)
#save figure
fig.tight_layout()
fig.savefig( dir_out + fname_fig + '.png' )
#figure title
fname_fig = 'examp_obs_gp_field'
#create figure
fig, ax = plt.subplots(figsize = (10,11))
#observations
hl = ax.scatter(df_samp.X, df_samp.Y, c=df_samp.tot, vmin=-2, vmax=2, s=100)
#figure properties
ax.grid(which='both')
#color bar
cbar = fig.colorbar(hl, orientation="horizontal", pad=0.15, boundaries=cbar_levs, ticks=cbar_ticks)
#tick size
ax.tick_params(axis='x', labelsize=30)
ax.tick_params(axis='y', labelsize=30)
#labels
ax.set_xlabel(r'$t_1$', fontsize=35)
ax.set_ylabel(r'$t_2$', fontsize=35)
#figure limits
ax.set_xlim([0, 100])
ax.set_ylim([0, 100])
#update colorbar
cbar.ax.tick_params(tick1On=1, labelsize=30)
cbar.set_label(r'$c_0 + c_1(\vec{t})$', size=35)
#save figure
fig.tight_layout()
fig.savefig( dir_out + fname_fig + '.png' )
#figure title
fname_fig = 'examp_obs_noise'
#create figure
fig, ax = plt.subplots(figsize = (10,11))
#observations
hl = ax.scatter(df_samp.X, df_samp.Y, c=df_samp.y, vmin=-2, vmax=2, s=100)
#figure properties
ax.grid(which='both')
#color bar
cbar = fig.colorbar(hl, orientation="horizontal", pad=0.15, boundaries=cbar_levs, ticks=cbar_ticks)
#tick size
ax.tick_params(axis='x', labelsize=30)
ax.tick_params(axis='y', labelsize=30)
#labels
ax.set_xlabel(r'$t_1$', fontsize=35)
ax.set_ylabel(r'$t_2$', fontsize=35)
#figure limits
ax.set_xlim([0, 100])
ax.set_ylim([0, 100])
#update colorbar
cbar.ax.tick_params(tick1On=1, labelsize=30)
cbar.set_label(r'$y=c_0 + c_1(\vec{t}) + \epsilon$', size=35)
#save figure
fig.tight_layout()
fig.savefig( dir_out + fname_fig + '.png' )
| 5,227 | 27.259459 | 120 | py |
ngmm_tools | ngmm_tools-master/Examples/example1/regression_stan.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 27 12:20:36 2022
@author: glavrent
"""
# Working directory and Packages
# ---------------------------
#load packages
import os
import sys
import pathlib
import glob
import re #regular expression package
import pickle
from joblib import cpu_count
#arithmetic libraries
import numpy as np
#statistics libraries
import pandas as pd
#plot libraries
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoLocator as plt_autotick
import arviz as az
# Define Problem
# ---------------------------
#data filename
fname_data = 'data/examp_obs.csv'
#stan parameters
pystan_ver = 2
n_iter = 10000
n_chains = 4
adapt_delta = 0.8
max_treedepth = 10
fname_stan_model = 'regression_stan_model.stan'
#output directory
dir_out = 'data/stan_regression/'
# Read Data
# ---------------------------
df_data = pd.read_csv(fname_data, index_col=0)
#read stan model
with open(fname_stan_model, "r") as f:
stan_model_code = f.read()
# Preprocess Data
# ---------------------------
n_data = len(df_data)
#grid data
data_grid_all = df_data[['g_id','X','Y']].values
_, g_idx, g_inv = np.unique(df_data[['g_id']].values, axis=0, return_inverse=True, return_index=True)
data_grid = data_grid_all[g_idx,:]
X_g = data_grid[:,[1,2]] #grid coordinates
#create grid ids for all data (1 to n_g)
g_id = g_inv + 1
n_g = len(data_grid)
#observations
y_data = df_data['y'].to_numpy().copy()
#stan data
stan_data = {'N': n_data,
'NG': n_g,
'gid': g_id, #grid id
'X_g': X_g, #grid coordinates
'Y': y_data
}
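#the dictionary keys above must match the data block of 'regression_stan_model.stan'; the model file
#is not shown here, but from this interface it is assumed to declare N, NG, gid, X_g and Y, and to
#estimate c_0, ell, omega, sigma and the latent field c_1 that are extracted in the post-processing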
# Run Stan
# ---------------------------
if pystan_ver == 2:
import pystan
#control paramters
control_stan = {'adapt_delta':adapt_delta, 'max_treedepth':max_treedepth}
#compile
stan_model = pystan.StanModel(model_code=stan_model_code)
#full Bayesian statistics
stan_fit = stan_model.sampling(data=stan_data, iter=n_iter, chains = n_chains, refresh=10, control = control_stan)
elif pystan_ver == 3:
import nest_asyncio
import stan
#compile stan
nest_asyncio.apply()
stan_model = stan.build(stan_model_code, data=stan_data, random_seed=1)
#run stan
stan_fit = stan_model.sample(num_chains=n_chains, num_samples=n_iter, max_depth=max_treedepth, delta=adapt_delta)
# Post-processing
# ---------------------------
#hyper-parameters
col_names_hyp = ['c_0','ell', 'omega', 'sigma']
#spatially varying term
col_names_c1 = ['c_1.%i'%(k) for k in range(n_g)]
col_names_all = col_names_hyp + col_names_c1
#stan posterior
stan_posterior = np.stack([stan_fit[c_n].flatten() for c_n in col_names_hyp], axis=1)
if pystan_ver == 2:
stan_posterior = np.concatenate((stan_posterior, stan_fit['c_1']), axis=1)
elif pystan_ver == 3:
stan_posterior = np.concatenate((stan_posterior, stan_fit['c_1'].T), axis=1)
#save raw-posterior distribution
df_stan_posterior_raw = pd.DataFrame(stan_posterior, columns = col_names_all)
#summarize posterior distributions of hyper-parameters
perc_array = np.array([0.05,0.25,0.5,0.75,0.95])
df_stan_hyp = df_stan_posterior_raw[col_names_hyp].quantile(perc_array)
df_stan_hyp = df_stan_hyp.append(df_stan_posterior_raw[col_names_hyp].mean(axis = 0), ignore_index=True)
df_stan_hyp.index = ['prc_%.2f'%(prc) for prc in perc_array]+['mean']
# model coefficients
#--- --- --- --- --- --- --- ---
#constant shift coefficient
coeff_0_mu = df_stan_posterior_raw.loc[:,'c_0'].mean() * np.ones(n_data)
coeff_0_med = df_stan_posterior_raw.loc[:,'c_0'].median() * np.ones(n_data)
coeff_0_sig = df_stan_posterior_raw.loc[:,'c_0'].std() * np.ones(n_data)
#spatially varying earthquake constant coefficient
coeff_1_mu = np.array([df_stan_posterior_raw.loc[:,f'c_1.{k}'].mean() for k in range(n_g)])[g_inv]
coeff_1_med = np.array([df_stan_posterior_raw.loc[:,f'c_1.{k}'].median() for k in range(n_g)])[g_inv]
coeff_1_sig = np.array([df_stan_posterior_raw.loc[:,f'c_1.{k}'].std() for k in range(n_g)])[g_inv]
# model prediction and residuals
#--- --- --- --- --- --- --- ---
#mean prediction
y_mu = (coeff_0_mu + coeff_1_mu)
#std of prediction
y_sig = np.sqrt(coeff_0_sig**2 + coeff_1_sig**2)
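#note: combining the coefficient standard deviations in quadrature ignores any posterior correlation
#between c_0 and c_1, so y_sig should be read as an approximate epistemic uncertainty of the mean
#prediction rather than an exact posterior standard deviation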
# residuals
res = y_data - y_mu
# summarize regression results
#--- --- --- --- --- --- --- ---
#initialize flat-file for summary of coefficients and residuals
df_info = df_data[['g_id','X','Y']]
#summarize coeff and predictions
reg_summary = np.vstack((coeff_0_mu, coeff_0_sig,
coeff_1_mu, coeff_1_sig,
y_mu, y_sig, res)).T
columns_names = ['c_0_mean', 'c_0_sig',
'c_1_mean', 'c_1_sig',
'tot_mean', 'tot_sig', 'res']
df_reg_summary = pd.DataFrame(reg_summary, columns = columns_names, index=df_data.index)
df_reg_summary = pd.merge(df_info, df_reg_summary, how='right', left_index=True, right_index=True)
# Output directory
# ---------------------------
pathlib.Path(dir_out).mkdir(parents=True, exist_ok=True)
#regression results
df_reg_summary.to_csv( dir_out + 'stan_regression.csv' )
# Summary figures
# ---------------------------
#color bar (mean)
cbar_levs_mean = np.linspace(-2, 2, 101).tolist()
cbar_ticks_mean = np.arange(-2, 2.01, 0.8).tolist()
#color bar (sigma)
cbar_levs_sig = np.linspace(0.0, 0.5, 101).tolist()
cbar_ticks_sig = np.arange(0, 0.501, 0.1).tolist()
# scatter comparison
fname_fig = 'stan_gp_scatter'
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#observations scatter
hl = ax.plot(df_data.tot, df_reg_summary.tot_mean, 'o')
ax.axline((0,0), slope=1, color="black", linestyle="--")
#figure properties
ax.grid(which='both')
#tick size
ax.tick_params(axis='x', labelsize=32)
ax.tick_params(axis='y', labelsize=32)
#figure limits
ax.set_xticks([-2,-1,0,1,2])
ax.set_yticks([-2,-1,0,1,2])
ax.set_xlim([-2.0, 2.0])
ax.set_ylim([-2.0, 2.0])
#labels
ax.set_xlabel('Data', fontsize=35)
ax.set_ylabel('Estimated', fontsize=35)
#save figure
fig.tight_layout()
fig.savefig( dir_out + fname_fig + '.png' )
#field mean
fname_fig = 'stan_gp_field_mean'
#create figure
fig, ax = plt.subplots(figsize = (10,11))
#observations map
hl = ax.scatter(df_reg_summary.X, df_reg_summary.Y, c=df_reg_summary.tot_mean, marker='s', vmin=-2, vmax=2, s=100)
#figure properties
ax.grid(which='both')
#color bar
cbar = fig.colorbar(hl, orientation="horizontal", pad=0.15, boundaries=cbar_levs_mean, ticks=cbar_ticks_mean)
#tick size
ax.tick_params(axis='x', labelsize=30)
ax.tick_params(axis='y', labelsize=30)
#labels
ax.set_xlabel(r'$t_1$', fontsize=35)
ax.set_ylabel(r'$t_2$', fontsize=35)
#figure limits
ax.set_xlim([0, 100])
ax.set_ylim([0, 100])
#update colorbar
cbar.ax.tick_params(tick1On=1, labelsize=30)
cbar.set_label(r'$\mu(c_0 + c_1(\vec{t}))$', size=35)
#save figure
fig.tight_layout()
fig.savefig( dir_out + fname_fig + '.png' )
#field std
fname_fig = 'stan_gp_field_std'
#create figure
fig, ax = plt.subplots(figsize = (10,11))
#observations map
hl = ax.scatter(df_reg_summary.X, df_reg_summary.Y, c=df_reg_summary.tot_sig, marker='s', vmin=0, vmax=0.5, s=100, cmap='Oranges')
#figure properties
ax.grid(which='both')
#color bar
cbar = fig.colorbar(hl, orientation="horizontal", pad=0.15, boundaries=cbar_levs_sig, ticks=cbar_ticks_sig)
#tick size
ax.tick_params(axis='x', labelsize=30)
ax.tick_params(axis='y', labelsize=30)
#labels
ax.set_xlabel(r'$t_1$', fontsize=35)
ax.set_ylabel(r'$t_2$', fontsize=35)
#figure limits
ax.set_xlim([0, 100])
ax.set_ylim([0, 100])
#update colorbar
cbar.ax.tick_params(tick1On=1, labelsize=30)
cbar.set_label(r'$\psi(c_0 + c_1(\vec{t}))$', size=35)
#save figure
fig.tight_layout()
fig.savefig( dir_out + fname_fig + '.png' )
#create stan trace plots
chain_cmap = mpl.cm.get_cmap('tab10')
for c_name in col_names_hyp:
#create trace plot with arviz
ax = az.plot_trace(stan_fit, var_names=c_name, figsize=(20,10)).ravel()
#change colors
for a in ax:
for c_i in range(n_chains):
a.get_lines()[c_i].set_color(chain_cmap(c_i))
a.get_lines()[c_i].set_linestyle('-')
a.get_lines()[c_i].set_alpha(1)
#edit figure
ax[0].yaxis.set_major_locator(plt_autotick())
# ax[0].set_xlabel('sample value')
# ax[0].set_ylabel('frequency')
ax[0].grid(axis='both')
ax[0].tick_params(axis='x', labelsize=30)
ax[0].tick_params(axis='y', labelsize=30)
ax[0].set_xlabel(c_name, fontsize=35)
ax[0].set_ylabel('posterior(%s)'%c_name, fontsize=35)
ax[0].set_title('')
# ax[1].set_xlabel('iteration')
# ax[1].set_ylabel('sample value')
ax[1].grid(axis='both')
ax[1].legend(['chain %i'%(c_i+1) for c_i in range(n_chains)], loc='upper right', fontsize=32)
ax[1].tick_params(axis='x', labelsize=30)
ax[1].tick_params(axis='y', labelsize=30)
ax[1].set_xlabel('iteration', fontsize=35)
ax[1].set_ylabel(c_name, fontsize=35)
ax[1].set_title('')
if c_name == 'omega':
ax[0].set_xlim([0.2,1.2])
ax[0].set_ylim([0,10])
ax[1].set_ylim([0.2,1.2])
fig = ax[0].figure
fig.suptitle(c_name, fontsize=35)
fig.savefig(dir_out + 'stan_traceplot_' + c_name + '_arviz' + '.png')
| 9,279 | 31.561404 | 130 | py |
ngmm_tools | ngmm_tools-master/Examples/example2/comparison_posterior.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 17 07:00:55 2022
@author: glavrent
"""
# Load Packages
# ---------------------------
#arithmetic libraries
import numpy as np
from scipy import stats
#statistics libraries
import pandas as pd
#plotting libraries
import matplotlib as mpl
from matplotlib import pyplot as plt
# Load Data
# ---------------------------
# posterior distributions
# --- --- ---
#stan regression
# fname_post_stan = ['data/stan_regression_100iter/stan_posterior_raw.csv',
# 'data/stan_regression_200iter/stan_posterior_raw.csv',
# 'data/stan_regression_500iter/stan_posterior_raw.csv',
# 'data/stan_regression_100000iter/stan_posterior_raw.csv']
# n_iter_stan = [100, 200, 500, 100000]
fname_post_stan = ['data/stan_regression_100iter/stan_posterior_raw.csv',
'data/stan_regression_1000iter/stan_posterior_raw.csv',
'data/stan_regression_100000iter/stan_posterior_raw.csv']
n_iter_stan = [100, 1000, 100000]
# fname_post_stan = ['data/stan_regression_200iter/stan_posterior_raw.csv',
# 'data/stan_regression_1000iter/stan_posterior_raw.csv',
# 'data/stan_regression_100000iter/stan_posterior_raw.csv']
# n_iter_stan = [200, 1000, 100000]
#inla regression
fname_post_inla_c0 = 'data/inla_regression/inla_c0_posterior.csv'
fname_post_inla_c1 = 'data/inla_regression/inla_c1_posterior.csv'
fname_post_inla_sigma = 'data/inla_regression/inla_sigma_posterior.csv'
#load posterior distributions
df_post_stan_raw = [pd.read_csv(fn) for fn in fname_post_stan]
#inla
df_post_inla_c0 = pd.read_csv(fname_post_inla_c0)
df_post_inla_c1 = pd.read_csv(fname_post_inla_c1)
df_post_inla_sigma = pd.read_csv(fname_post_inla_sigma)
#process stan posteriors
#c0
c0_array = np.linspace(-.4, 0.0, 1000)
post_stan_c0_kde = [stats.gaussian_kde(df['c_0']) for df in df_post_stan_raw]
df_post_stan_c0 = [pd.DataFrame({'x':c0_array, 'y':p_kde(c0_array)})
for p_kde in post_stan_c0_kde]
#c1
c1_array = np.linspace(0.5, 0.8, 1000)
post_stan_c1_kde = [stats.gaussian_kde(df['c_1']) for df in df_post_stan_raw]
df_post_stan_c1 = [pd.DataFrame({'x':c1_array, 'y':p_kde(c1_array)})
for p_kde in post_stan_c1_kde]
#sigma
sigma_array = np.linspace(0.6, 0.8, 1000)
post_stan_sigma_kde = [stats.gaussian_kde(df['sigma']) for df in df_post_stan_raw]
df_post_stan_sigma = [pd.DataFrame({'x':sigma_array, 'y':p_kde(sigma_array)})
for p_kde in post_stan_sigma_kde]
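#the raw STAN samples are smoothed with a Gaussian kernel density estimate evaluated on a fixed grid
#so they can be plotted on the same axes as the INLA marginals, which are already stored as (x, y)
#density curves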
# Create Figures
# ---------------------------
#figure title
fname_fig = 'post_c0'
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#plot examples
for df, n_iter in zip(df_post_stan_c0, n_iter_stan):
ax.plot(df.x, df.y, linewidth=4, label=r'STAN, $n_{iter}=%i$'%n_iter)
ax.plot(df_post_inla_c0.x, df_post_inla_c0.y, linewidth=4, linestyle='--', color='black', label=r'INLA')
#figure properties
ax.grid(which='both')
#tick size
ax.tick_params(axis='x', labelsize=32)
ax.tick_params(axis='y', labelsize=32)
#figure limits
ax.set_xlim([-.25, -0.1])
ax.set_ylim([0.0, 20.0])
#legend
pos = ax.get_position()
ax.set_position([pos.x0, pos.y0, pos.width * 0.6, pos.height])
ax.legend(loc='center right', bbox_to_anchor=(2.0, 0.5), fontsize=32)
#labels
ax.set_xlabel(r'$c_0$', fontsize=35)
ax.set_ylabel('Probability Density', fontsize=35)
#save figure
fig.tight_layout()
fig.savefig( fname_fig + '.png' )
#figure title
fname_fig = 'post_c1'
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#plot examples
for df, n_iter in zip(df_post_stan_c1, n_iter_stan):
ax.plot(df.x, df.y, linewidth=3, label=r'STAN, $n_{iter}=%i$'%n_iter)
ax.plot(df_post_inla_c1.x, df_post_inla_c1.y, linewidth=4, linestyle='--', color='black', label=r'INLA')
#figure properties
ax.grid(which='both')
#tick size
ax.tick_params(axis='x', labelsize=32)
ax.tick_params(axis='y', labelsize=32)
#figure limits
ax.set_xlim([0.55, 0.75])
ax.set_ylim([0.0, 20.0])
#legend
pos = ax.get_position()
ax.set_position([pos.x0, pos.y0, pos.width * 0.6, pos.height])
ax.legend(loc='center right', bbox_to_anchor=(2.0, 0.5), fontsize=32)
#labels
ax.set_xlabel(r'$c_1$', fontsize=35)
ax.set_ylabel('Probability Density', fontsize=35)
#save figure
fig.tight_layout()
fig.savefig( fname_fig + '.png' )
#figure title
fname_fig = 'post_sigma'
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#plot examples
for df, n_iter in zip(df_post_stan_sigma, n_iter_stan):
ax.plot(df.x, df.y, linewidth=3, label=r'STAN, $n_{iter}=%i$'%n_iter)
ax.plot(df_post_inla_sigma.x, df_post_inla_sigma.y, linewidth=4, linestyle='--', color='black', label=r'INLA')
#figure properties
ax.grid(which='both')
#tick size
ax.tick_params(axis='x', labelsize=32)
ax.tick_params(axis='y', labelsize=32)
#figure limits
ax.set_xlim([0.65, 0.75])
ax.set_ylim([0.0, 30.0])
#legend
pos = ax.get_position()
ax.set_position([pos.x0, pos.y0, pos.width * 0.6, pos.height])
ax.legend(loc='center right', bbox_to_anchor=(2.0, 0.5), fontsize=32)
#labels
ax.set_xlabel(r'$\sigma$', fontsize=35)
ax.set_ylabel('Probability Density', fontsize=35)
#save figure
fig.tight_layout()
fig.savefig( fname_fig + '.png' )
| 5,270 | 33.907285 | 111 | py |
ngmm_tools | ngmm_tools-master/Examples/example2/create_reg_dataset.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 26 16:01:54 2022
@author: glavrent
"""
# Working directory and Packages
# ---------------------------
import os
import sys
import pathlib
#load packages
import numpy as np
import pandas as pd
#plotting libraries
import matplotlib as mpl
from matplotlib import pyplot as plt
import matplotlib.ticker as mticker
# Define Problem
# ---------------------------
#number of samples
n_samp = 1000
#coefficients
c0 = -0.2
c1 = 0.6
sig = 0.7
#output directory
dir_out = 'data/'
# Create Dataset
# ---------------------------
#covariates
x1 = np.random.randn(n_samp )
#noise
eps = sig *np.random.randn(n_samp )
#response
mu_y = c0 + c1 * x1
y = mu_y + eps
#model response
model_x1 = np.linspace(-5,5)
model_y = c0 + c1 * model_x1
#regression data frame
df_data = pd.DataFrame({'x1':x1, 'mu_y':mu_y, 'y':y})
# Save Dataset
# ---------------------------
pathlib.Path(dir_out).mkdir(parents=True, exist_ok=True)
df_data.to_csv( dir_out + 'regression_dataset.csv', index=False )
# Summary Figures
# ---------------------------
#figure title
fname_fig = 'fig_dataset'
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#observations
hl1 = ax.plot(df_data.x1, df_data.y, 'o')
#plot response
hl2 = ax.plot(model_x1, model_y, linewidth=3, color='black')
#figure properties
ax.grid(which='both')
#tick size
ax.tick_params(axis='x', labelsize=30)
ax.tick_params(axis='y', labelsize=30)
#labels
ax.set_xlabel(r'$x_1$', fontsize=35)
ax.set_ylabel(r'$y$', fontsize=35)
#figure limits
ax.set_xlim([-4, 4])
ax.set_ylim([-4, 4])
#save figure
fig.tight_layout()
fig.savefig( dir_out + fname_fig + '.png' )
| 1,674 | 19.180723 | 65 | py |
ngmm_tools | ngmm_tools-master/Examples/example2/regression_stan.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 27 12:20:36 2022
@author: glavrent
"""
# Working directory and Packages
# ---------------------------
#load packages
import os
import sys
import pathlib
import glob
import re #regular expression package
import pickle
from joblib import cpu_count
#arithmetic libraries
import numpy as np
#statistics libraries
import pandas as pd
#plot libraries
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoLocator as plt_autotick
import arviz as az
# Define Problem
# ---------------------------
#data filename
fname_data = 'data/regression_dataset.csv'
#stan parameters
pystan_ver = 2
# n_iter = 50
n_iter = 100
# n_iter = 200
# n_iter = 500
# n_iter = 1000
# n_iter = 10000
# n_iter = 100000
n_chains = 4
adapt_delta = 0.8
max_treedepth = 10
fname_stan_model = 'regression_stan_model.stan'
#output directory
dir_out = f'data/stan_regression_%iiter/'%n_iter
# Read Data
# ---------------------------
df_data = pd.read_csv(fname_data)
#read stan model
with open(fname_stan_model, "r") as f:
stan_model_code = f.read()
# Preprocess Data
# ---------------------------
n_data = len(df_data)
#scaling
x1_data = df_data['x1'].to_numpy().copy()
#observations
y_data = df_data['y'].to_numpy().copy()
#stan data
stan_data = {'N': n_data,
'X_1': x1_data,
'Y': y_data
}
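#as in example 1, the keys above are assumed to mirror the data block of 'regression_stan_model.stan'
#for this example (a linear model y = c_0 + c_1*x_1 + eps with residual scale sigma), which is
#consistent with the parameters c_0, c_1 and sigma extracted below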
# Run Stan
# ---------------------------
if pystan_ver == 2:
import pystan
#control paramters
control_stan = {'adapt_delta':adapt_delta, 'max_treedepth':max_treedepth}
#compile
stan_model = pystan.StanModel(model_code=stan_model_code)
#full Bayesian statistics
stan_fit = stan_model.sampling(data=stan_data, iter=n_iter, chains = n_chains, refresh=10, control = control_stan)
elif pystan_ver == 3:
import nest_asyncio
import stan
#compile stan
nest_asyncio.apply()
stan_model = stan.build(stan_model_code, data=stan_data, random_seed=1)
#run stan
stan_fit = stan_model.sample(num_chains=n_chains, num_samples=n_iter, max_depth=max_treedepth, delta=adapt_delta)
# Post-processing
# ---------------------------
#hyper-parameters and model coeffs
col_names = ['c_0','c_1', 'sigma']
#stan posterior
stan_posterior = np.stack([stan_fit[c_n].flatten() for c_n in col_names], axis=1)
#save raw-posterior distribution
df_stan_posterior_raw = pd.DataFrame(stan_posterior, columns = col_names)
#summarize posterior distributions of hyper-parameters
perc_array = np.array([0.05,0.25,0.5,0.75,0.95])
df_stan_hyp = df_stan_posterior_raw[col_names].quantile(perc_array)
df_stan_hyp = df_stan_hyp.append(df_stan_posterior_raw[col_names].mean(axis = 0), ignore_index=True)
df_stan_hyp.index = ['prc_%.2f'%(prc) for prc in perc_array]+['mean']
# model prediction and residuals
#--- --- --- --- --- --- --- ---
c0_mu = df_stan_hyp.loc['mean','c_0']
c1_mu = df_stan_hyp.loc['mean','c_1']
#mean prediction
y_mu = c0_mu + c1_mu * x1_data
# residuals
res = y_data - y_mu
# prediction
#--- --- --- --- --- --- --- ---
x1_array = np.linspace(-4,4)
y_array = c0_mu + c1_mu * x1_array
# summarize regression results
#--- --- --- --- --- --- --- ---
#initialize flat-file for summary of coefficients and residuals
df_info = df_data[['x1']]
#summarize coeff and predictions
reg_summary = np.vstack((y_mu, res)).T
columns_names = ['y_mu', 'res_mean']
df_reg_summary = pd.DataFrame(reg_summary, columns = columns_names, index=df_data.index)
df_reg_summary = pd.merge(df_info, df_reg_summary, how='right', left_index=True, right_index=True)
# Output directory
# ---------------------------
pathlib.Path(dir_out).mkdir(parents=True, exist_ok=True)
#MCMC samples
df_stan_posterior_raw.to_csv(dir_out + 'stan_posterior_raw.csv')
#regression results
df_reg_summary.to_csv( dir_out + 'stan_regression.csv' )
# Summary figures
# ---------------------------
# prediction
fname_fig = 'stan_prediction'
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#observations scatter
hl = ax.plot(df_data.x1, df_data.y, 'o')
hl = ax.plot(x1_array, y_array, color="black", )
#figure properties
ax.grid(which='both')
#tick size
ax.tick_params(axis='x', labelsize=32)
ax.tick_params(axis='y', labelsize=32)
#figure limits
ax.set_xlim([-4, 4])
ax.set_ylim([-3, 3])
#labels
ax.set_xlabel(f'$x_1$', fontsize=35)
ax.set_ylabel(f'$y$', fontsize=35)
#save figure
fig.tight_layout()
fig.savefig( dir_out + fname_fig + '.png' )
#stan residuals
fname_fig = 'stan_residuals'
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#observations scatter
hl = ax.plot(df_reg_summary.x1, df_reg_summary.res_mean, 'o')
# ax.axline((0,0), slope=1, color="black", linestyle="--")
#figure properties
ax.grid(which='both')
#tick size
ax.tick_params(axis='x', labelsize=32)
ax.tick_params(axis='y', labelsize=32)
#figure limits
ax.set_xticks([-2,-1,0,1,2])
ax.set_yticks([-2,-1,0,1,2])
ax.set_xlim([-2.0, 2.0])
ax.set_ylim([-2.0, 2.0])
#labels
ax.set_xlabel('$x_1$', fontsize=35)
ax.set_ylabel(r'$\epsilon$', fontsize=35)
#save figure
fig.tight_layout()
fig.savefig( dir_out + fname_fig + '.png' )
#create stan trace plots
chain_cmap = mpl.cm.get_cmap('tab10')
for c_name in col_names:
#create trace plot with arviz
ax = az.plot_trace(stan_fit, var_names=c_name, figsize=(20,10)).ravel()
#change colors
for a in ax:
for c_i in range(n_chains):
a.get_lines()[c_i].set_color(chain_cmap(c_i))
a.get_lines()[c_i].set_linestyle('-')
a.get_lines()[c_i].set_alpha(1)
#edit figure
ax[0].yaxis.set_major_locator(plt_autotick())
# ax[0].set_xlabel('sample value')
# ax[0].set_ylabel('frequency')
ax[0].grid(axis='both')
ax[0].tick_params(axis='x', labelsize=30)
ax[0].tick_params(axis='y', labelsize=30)
ax[0].set_xlabel(c_name, fontsize=35)
ax[0].set_ylabel('posterior(%s)'%c_name, fontsize=35)
ax[0].set_title('')
# ax[1].set_xlabel('iteration')
# ax[1].set_ylabel('sample value')
ax[1].grid(axis='both')
ax[1].legend(['chain %i'%(c_i+1) for c_i in range(n_chains)], loc='upper right', fontsize=32)
ax[1].tick_params(axis='x', labelsize=30)
ax[1].tick_params(axis='y', labelsize=30)
ax[1].set_xlabel('iteration', fontsize=35)
ax[1].set_ylabel(c_name, fontsize=35)
ax[1].set_title('')
if c_name == 'c_0':
ax[0].set_xlim([-0.25,-0.1])
ax[0].set_ylim([0,30])
ax[1].set_ylim([-0.4,0.0])
elif c_name == 'c_1':
ax[0].set_xlim([0.5,0.8])
ax[0].set_ylim([0,30])
ax[1].set_ylim([0.5,0.8])
elif c_name == 'sigma':
ax[0].set_xlim([0.6,0.8])
ax[0].set_ylim([0,30])
ax[1].set_ylim([0.6,0.8])
fig = ax[0].figure
fig.suptitle(c_name, fontsize=35)
fig.savefig(dir_out + 'stan_traceplot_' + c_name + '_arviz' + '.png')
| 6,923 | 27.85 | 118 | py |
PT-M2 | PT-M2-main/errant_score.py | from copy import deepcopy
import math
from tqdm import tqdm
def get_ref(edits, src):
cnt = 0
src = src.split()
e_s = src
for edit in edits:
s_idx, e_idx, rep_tok = edit
s_idx = cnt + s_idx
e_idx = cnt + e_idx
e_s = e_s[:s_idx] + rep_tok.split() + e_s[e_idx:] if rep_tok else e_s[:s_idx] + e_s[e_idx:]
cnt += len(rep_tok.split()) - (e_idx - s_idx)
return " ".join(e_s)
def compute_weight_edits(editSeq, gold, source, cand, ref, w_t, scorer=None, sent_level=False):
weight_edits = {}
editSeq = sorted(editSeq, key=lambda x: (x[0], x[1]))
assert cand == get_ref(editSeq, source), f"src: {source}\nref: {cand}\nref_s: {get_ref(editSeq, source)}\nedits: {editSeq}"
gold = sorted(gold, key=lambda x: (x[0], x[1]))
assert ref == get_ref(gold, source), f"src: {source}\nref: {ref}\nref_s: {get_ref(gold, source)}\nedits: {gold}"
edits = list(set(editSeq) | set(gold))
edits = sorted(edits, key=lambda x: (x[0], x[1]))
for i, edit in enumerate(edits):
edit_s = [edit]
edit_s = sorted(edit_s, key=lambda x: (x[0], x[1]))
ref_s = get_ref(edit_s, source)
if w_t == "self":
weight_edits[edit] = 1
elif w_t == "bartscore":
s1, s2 = scorer.score([ref, ref], [source, ref_s], batch_size=2)
weight_edits[edit] = abs(s1 - s2)
elif w_t == "bertscore":
s1 = scorer.score([source], [ref])[-1]
s1 = s1[0].item()
s2 = scorer.score([ref_s], [ref])[-1]
s2 = s2[0].item()
weight_edits[edit] = abs(s1 - s2)
if sent_level:
w_sum = sum(v for v in weight_edits.values())
if w_sum == 0:
weight_edits = {k: 1 / len(weight_edits) for k in weight_edits.keys()}
return weight_edits
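# Weighting sketch: each edit e is weighted by |s(source, ref) - s(source+e, ref)|, i.e.
# how much applying e alone changes the pretrained-model score against the reference
# (BERTScore uses the F-measure, BARTScore the average log-likelihood). With w_t == "self"
# every weight is 1, so the weighted scores below reduce to ordinary edit counts; with
# sent_level=True, a sentence whose weights are all zero falls back to uniform weights.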
def errant_batch_pre_rec_f1(editSeq, gold, source, candidate, ref, scorer, args, beta=0.5):
correct = matchSeq(editSeq, gold, ignore_whitespace_casing=False)
#print(f"correct {correct} sys_edit {editSeq} gold_edit {gold}")
    weight_editSeq = compute_weight_edits(editSeq, gold, source, candidate, ref, args.scorer, scorer)
    weight_gold = compute_weight_edits(gold, editSeq, source, ref, candidate, args.scorer, scorer)
if not editSeq:
p = 1.0
else:
p = sum(weight_editSeq[c] for c in correct)
if not gold:
r = 1.0
else:
r = sum(weight_gold[c] for c in correct)
if not beta * beta * p + r:
f1 = 0.0
else:
f1 = (1.0 + beta * beta) * p * r / (beta * beta * p + r)
return (p, r, f1)
def matchSeq(editSeq, gold_edits, ignore_whitespace_casing=False, verbose=False):
m = []
goldSeq = deepcopy(gold_edits)
last_index = 0
CInsCDel = False
CInsWDel = False
CDelWIns = False
for e in editSeq:
# print(e)
# print("====")
for i in range(last_index, len(goldSeq)):
g = goldSeq[i]
# print(g)
if matchEdit(e, g, ignore_whitespace_casing):
# print(f"* {e}")
m.append(e)
last_index = i + 1
return m
def matchEdit(e, g, ignore_whitespace_casing=False):
# start offset
if e[0] != g[0]:
return False
# end offset
if e[1] != g[1]:
return False
# original string
if e[2] != g[2]:
return False
return True
# if __name__ == "__main__":
# print(matchSeq([(3, 3, ','), (19, 19, ','), (21, 22, '')], [(3, 3, ','), (7, 8, 'testing'), (19, 19, ',')]
# ))
def errant_load_annotation(hyp_m2, ref_m2):
hyp_m2 = open(hyp_m2, encoding="utf8").read().strip().split("\n\n")
ref_m2 = open(ref_m2, encoding="utf8").read().strip().split("\n\n")
assert len(hyp_m2) == len(ref_m2)
sources, gold_edits, sys_edits = [], [], []
for sent_id, sent in enumerate(zip(hyp_m2, ref_m2)):
# Simplify the edits into lists of lists
hyp_edits = simplify_edits(sent[0])
ref_edits = simplify_edits(sent[1])
# Process the edits for detection/correction based on args
hyp_dict = process_edits(hyp_edits)
ref_dict = process_edits(ref_edits)
hyp_dict = [k for v in hyp_dict.values() for k in v.keys() if k != (-1, -1, '-NONE-')]
ref_dict = {key: [k for k in value.keys() if k != (-1, -1, '-NONE-')] for key, value in ref_dict.items()}
# original sentence for logging
original_sentence = sent[0][2:].split("\nA")[0]
sources.append(original_sentence)
gold_edits.append(ref_dict)
sys_edits.append(hyp_dict)
return sources, gold_edits, sys_edits
def simplify_edits(sent):
out_edits = []
# Get the edit lines from an m2 block.
edits = sent.split("\n")[1:]
# Loop through the edits
for edit in edits:
# Preprocessing
edit = edit[2:].split("|||") # Ignore "A " then split.
span = edit[0].split()
start = int(span[0])
end = int(span[1])
cat = edit[1]
cor = edit[2]
coder = int(edit[-1])
out_edit = [start, end, cat, cor, coder]
out_edits.append(out_edit)
return out_edits
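# Example (standard ERRANT M2 edit line; values are hypothetical):
#   "A 1 2|||R:VERB|||sat|||REQUIRED|||-NONE-|||0"
# is simplified to [1, 2, "R:VERB", "sat", 0], i.e. [start, end, type, correction, coder].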
def process_edits(edits, dt=False, ds=False, single=False, filt=None, multi=False, cse=False):
if filt is None:
filt = []
coder_dict = {}
# Add an explicit noop edit if there are no edits.
if not edits: edits = [[-1, -1, "noop", "-NONE-", 0]]
# Loop through the edits
for edit in edits:
# Name the edit elements for clarity
start = edit[0]
end = edit[1]
cat = edit[2]
cor = edit[3]
coder = edit[4]
# Add the coder to the coder_dict if necessary
if coder not in coder_dict: coder_dict[coder] = {}
# Optionally apply filters based on args
# 1. UNK type edits are only useful for detection, not correction.
if not dt and not ds and cat == "UNK": continue
# 2. Only evaluate single token edits; i.e. 0:1, 1:0 or 1:1
if single and (end-start >= 2 or len(cor.split()) >= 2): continue
# 3. Only evaluate multi token edits; i.e. 2+:n or n:2+
if multi and end-start < 2 and len(cor.split()) < 2: continue
# 4. If there is a filter, ignore the specified error types
if filt and cat in filt: continue
# Token Based Detection
if dt:
# Preserve noop edits.
if start == -1:
if (start, start) in coder_dict[coder].keys():
coder_dict[coder][(start, start)].append(cat)
else:
coder_dict[coder][(start, start)] = [cat]
# Insertions defined as affecting the token on the right
elif start == end and start >= 0:
if (start, start+1) in coder_dict[coder].keys():
coder_dict[coder][(start, start+1)].append(cat)
else:
coder_dict[coder][(start, start+1)] = [cat]
# Edit spans are split for each token in the range.
else:
for tok_id in range(start, end):
if (tok_id, tok_id+1) in coder_dict[coder].keys():
coder_dict[coder][(tok_id, tok_id+1)].append(cat)
else:
coder_dict[coder][(tok_id, tok_id+1)] = [cat]
# Span Based Detection
elif ds:
if (start, end) in coder_dict[coder].keys():
coder_dict[coder][(start, end)].append(cat)
else:
coder_dict[coder][(start, end)] = [cat]
# Span Based Correction
else:
# With error type classification
if cse:
if (start, end, cat, cor) in coder_dict[coder].keys():
coder_dict[coder][(start, end, cat, cor)].append(cat)
else:
coder_dict[coder][(start, end, cat, cor)] = [cat]
# Without error type classification
else:
if (start, end, cor) in coder_dict[coder].keys():
coder_dict[coder][(start, end, cor)].append(cat)
else:
coder_dict[coder][(start, end, cor)] = [cat]
return coder_dict
def evaluate_edits(hyp_dict, ref_dict, best, sent_id, original_sentence, beta=0.5, verbose=False):
# Verbose output: display the original sentence
if verbose:
print('{:-^40}'.format(""))
print("Original sentence " + str(sent_id) + ": " + original_sentence)
# Store the best sentence level scores and hyp+ref combination IDs
    # best_f is initialised as -1 because 0 is a valid result.
best_tp, best_fp, best_fn, best_f, best_hyp, best_ref = 0, 0, 0, -1, 0, 0
best_cat = {}
# Compare each hyp and ref combination
for hyp_id in hyp_dict.keys():
for ref_id in ref_dict.keys():
# Get the local counts for the current combination.
tp, fp, fn, cat_dict = compareEdits(hyp_dict[hyp_id], ref_dict[ref_id])
# Compute the local sentence scores (for verbose output only)
loc_p, loc_r, loc_f = computeFScore(tp, fp, fn, beta)
# Compute the global sentence scores
p, r, f = computeFScore(
tp+best["tp"], fp+best["fp"], fn+best["fn"], beta)
# Save the scores if they are better in terms of:
# 1. Higher F-score
# 2. Same F-score, higher TP
# 3. Same F-score and TP, lower FP
# 4. Same F-score, TP and FP, lower FN
if (f > best_f) or \
(f == best_f and tp > best_tp) or \
(f == best_f and tp == best_tp and fp < best_fp) or \
(f == best_f and tp == best_tp and fp == best_fp and fn < best_fn):
best_tp, best_fp, best_fn = tp, fp, fn
best_f, best_hyp, best_ref = f, hyp_id, ref_id
best_cat = cat_dict
# Verbose output
if verbose:
# Prepare verbose output edits.
hyp_verb = list(sorted(hyp_dict[hyp_id].keys()))
ref_verb = list(sorted(ref_dict[ref_id].keys()))
# add categories
# hyp_dict[hyp_id] looks like (0, 1, "str")
# hyp_dict[hyp_id][h] is a list, always length one, of the corresponding category
hyp_verb = [h + (hyp_dict[hyp_id][h][0],) for h in hyp_verb]
ref_verb = [r + (ref_dict[ref_id][r][0],) for r in ref_verb]
# Ignore noop edits
if not hyp_verb or hyp_verb[0][0] == -1: hyp_verb = []
if not ref_verb or ref_verb[0][0] == -1: ref_verb = []
# Print verbose info
print('{:-^40}'.format(""))
print("SENTENCE "+str(sent_id)+" - HYP "+str(hyp_id)+" - REF "+str(ref_id))
print("HYPOTHESIS EDITS :", hyp_verb)
print("REFERENCE EDITS :", ref_verb)
print("Local TP/FP/FN :", str(tp), str(fp), str(fn))
print("Local P/R/F"+str(beta)+" :", str(loc_p), str(loc_r), str(loc_f))
print("Global TP/FP/FN :", str(tp+best["tp"]), str(fp+best["fp"]), str(fn+best["fn"]))
print("Global P/R/F"+str(beta)+" :", str(p), str(r), str(f))
# Verbose output: display the best hyp+ref combination
if verbose:
print('{:-^40}'.format(""))
print("^^ HYP "+str(best_hyp)+", REF "+str(best_ref)+" chosen for sentence "+str(sent_id))
print("Local results:")
header = ["Category", "TP", "FP", "FN"]
body = [[k, *v] for k, v in best_cat.items()]
print_table([header] + body)
# Save the best TP, FP and FNs as a dict, and return this and the best_cat dict
best_dict = {"tp":best_tp, "fp":best_fp, "fn":best_fn}
return best_dict, best_cat
def compareEdits(hyp_edits, ref_edits):
tp = 0 # True Positives
fp = 0 # False Positives
fn = 0 # False Negatives
cat_dict = {} # {cat: [tp, fp, fn], ...}
for h_edit, h_cats in hyp_edits.items():
# noop hyp edits cannot be TP or FP
if h_cats[0] == "noop": continue
# TRUE POSITIVES
if h_edit in ref_edits.keys():
# On occasion, multiple tokens at same span.
for h_cat in ref_edits[h_edit]: # Use ref dict for TP
tp += 1
# Each dict value [TP, FP, FN]
if h_cat in cat_dict.keys():
cat_dict[h_cat][0] += 1
else:
cat_dict[h_cat] = [1, 0, 0]
# FALSE POSITIVES
else:
# On occasion, multiple tokens at same span.
for h_cat in h_cats:
fp += 1
# Each dict value [TP, FP, FN]
if h_cat in cat_dict.keys():
cat_dict[h_cat][1] += 1
else:
cat_dict[h_cat] = [0, 1, 0]
for r_edit, r_cats in ref_edits.items():
# noop ref edits cannot be FN
if r_cats[0] == "noop": continue
# FALSE NEGATIVES
if r_edit not in hyp_edits.keys():
# On occasion, multiple tokens at same span.
for r_cat in r_cats:
fn += 1
# Each dict value [TP, FP, FN]
if r_cat in cat_dict.keys():
cat_dict[r_cat][2] += 1
else:
cat_dict[r_cat] = [0, 0, 1]
return tp, fp, fn, cat_dict
def comp_p(a, b):
if b:
p = a / b
else:
p = 1.0
return p
def comp_r(c, g):
if g:
r = c / g
else:
r = 1.0
return r
def comp_f1(p, r, beta):
if beta*beta*p+r:
f = (1.0+beta*beta) * p * r / (beta*beta*p+r)
else:
f = 0.0
return f
def print_table(table):
longest_cols = [
(max([len(str(row[i])) for row in table]) + 3)
for i in range(len(table[0]))
]
row_format = "".join(["{:>" + str(longest_col) + "}" for longest_col in longest_cols])
for row in table:
print(row_format.format(*row))
def computeFScore(tp, fp, fn, beta):
p = float(tp)/(tp+fp) if fp else 1.0
r = float(tp)/(tp+fn) if fn else 1.0
f = float((1+(beta**2))*p*r)/(((beta**2)*p)+r) if p+r else 0.0
return round(p, 4), round(r, 4), round(f, 4)
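# Worked example (hand-computed): computeFScore(tp=2, fp=1, fn=1, beta=0.5) gives
# p = 2/3, r = 2/3 and F_0.5 = 1.25 * p * r / (0.25 * p + r) = 2/3, i.e. it returns
# (0.6667, 0.6667, 0.6667) after rounding.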
def print_results(best, dt=False, ds=False, cse=False, cat=None, best_cats=None, beta=0.5):
# Prepare output title.
if dt: title = " Token-Based Detection "
elif ds: title = " Span-Based Detection "
elif cse: title = " Span-Based Correction + Classification "
else: title = " Span-Based Correction "
# Category Scores
if cat:
best_cats = processCategories(best_cats, cat)
print("")
print('{:=^66}'.format(title))
print("Category".ljust(14), "TP".ljust(8), "FP".ljust(8), "FN".ljust(8),
"P".ljust(8), "R".ljust(8), "F"+str(beta))
for cat, cnts in sorted(best_cats.items()):
cat_p, cat_r, cat_f = computeFScore(cnts[0], cnts[1], cnts[2], beta)
print(cat.ljust(14), str(cnts[0]).ljust(8), str(cnts[1]).ljust(8),
str(cnts[2]).ljust(8), str(cat_p).ljust(8), str(cat_r).ljust(8), cat_f)
return list(computeFScore(best["tp"], best["fp"], best["fn"], beta))
def processCategories(cat_dict, setting):
# Otherwise, do some processing.
proc_cat_dict = {}
for cat, cnt in cat_dict.items():
if cat == "UNK":
proc_cat_dict[cat] = cnt
continue
# M, U, R or UNK combined only.
if setting == 1:
if cat[0] in proc_cat_dict.keys():
proc_cat_dict[cat[0]] = [x+y for x, y in zip(proc_cat_dict[cat[0]], cnt)]
else:
proc_cat_dict[cat[0]] = cnt
# Everything without M, U or R.
elif setting == 2:
if cat[2:] in proc_cat_dict.keys():
proc_cat_dict[cat[2:]] = [x+y for x, y in zip(proc_cat_dict[cat[2:]], cnt)]
else:
proc_cat_dict[cat[2:]] = cnt
# All error category combinations
else:
return cat_dict
return proc_cat_dict
def batch_multi_pre_rec_f1_errant(candidates, sources, system_edits, gold_edits, references, scorer, scorer_type,
max_unchanged_words=2, beta=0.5, ignore_whitespace_casing=False, verbose=False,
very_verbose=False):
assert len(candidates) == len(sources) == len(gold_edits)
stat_correct = 0.0
stat_proposed = 0.0
stat_gold = 0.0
i = 0
for candidate, source, refs, sys_set, golds_set in tqdm(zip(candidates, sources, references,
system_edits, gold_edits)):
i = i + 1
        # Find measures maximizing current cumulative F1; local: current annotator only
sqbeta = beta * beta
chosen_ann = -1
f1_max = -math.inf
argmax_correct = 0.0
argmax_proposed = 0.0
argmax_gold = 0.0
max_stat_correct = -math.inf
min_stat_proposed = math.inf
min_stat_gold = math.inf
for annotator, gold in golds_set.items():
editSeq = sys_set
correct = matchSeq(editSeq, gold, ignore_whitespace_casing, verbose)
#gold = [(g[0], g[1], g[2], g[-1][0]) for g in gold]
weight_edits = compute_weight_edits(editSeq, gold, source, candidate, refs[annotator], scorer_type, scorer)
# local cumulative counts, P, R and F1
stat_correct_local = stat_correct + sum(weight_edits[c] for c in correct)
stat_proposed_local = stat_proposed + sum(weight_edits[e] for e in editSeq)
stat_gold_local = stat_gold + sum(weight_edits[g] for g in gold)
p_local = comp_p(stat_correct_local, stat_proposed_local)
r_local = comp_r(stat_correct_local, stat_gold_local)
f1_local = comp_f1(p_local, r_local, beta)
if f1_max < f1_local or \
(f1_max == f1_local and max_stat_correct < stat_correct_local) or \
(
f1_max == f1_local and max_stat_correct == stat_correct_local and min_stat_proposed + sqbeta * min_stat_gold > stat_proposed_local + sqbeta * stat_gold_local):
chosen_ann = annotator
f1_max = f1_local
max_stat_correct = stat_correct_local
min_stat_proposed = stat_proposed_local
min_stat_gold = stat_gold_local
argmax_correct = sum(weight_edits[c] for c in correct)
argmax_proposed = sum(weight_edits[e] for e in editSeq)
argmax_gold = sum(weight_edits[g] for g in gold)
if verbose:
print(">> Chosen Annotator for line", i, ":", chosen_ann)
print("")
stat_correct += argmax_correct
stat_proposed += argmax_proposed
stat_gold += argmax_gold
if stat_proposed:
p = stat_correct / stat_proposed
else:
p = 1.0
if stat_gold:
r = stat_correct / stat_gold
else:
r = 1.0
if beta * beta * p + r:
f1 = (1.0 + beta * beta) * p * r / (beta * beta * p + r)
else:
f1 = 0.0
if verbose:
print("CORRECT EDITS :", int(stat_correct))
print("PROPOSED EDITS :", int(stat_proposed))
print("GOLD EDITS :", int(stat_gold))
print("P =", p)
print("R =", r)
print("F_%.1f =" % beta, f1)
return (p, r, f1)
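# Note: as in the original MaxMatch (M2) scorer, the annotator whose gold edits maximize
# the running cumulative F_beta is selected per sentence; the only difference here is that
# matched/proposed/gold edits contribute their PLM-derived weights instead of counts of 1.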
def batch_multi_pre_rec_f1_sent_errant(candidates, sources, system_edits, gold_edits, references, scorer, scorer_type,
max_unchanged_words=2, beta=0.5, ignore_whitespace_casing=False, verbose=False,
very_verbose=False):
assert len(candidates) == len(sources) == len(gold_edits) == len(system_edits)
stat_correct = 0.0
stat_proposed = 0.0
stat_gold = 0.0
i = 0
for candidate, source, refs, editSeq, golds_set in zip(candidates, sources, references,
system_edits, gold_edits):
i = i + 1
        # Find measures maximizing current cumulative F1; local: current annotator only
sqbeta = beta * beta
chosen_ann = -1
f1_max = -math.inf
argmax_correct = 0.0
argmax_proposed = 0.0
argmax_gold = 0.0
max_stat_correct = -math.inf
min_stat_proposed = math.inf
min_stat_gold = math.inf
for annotator, gold in golds_set.items():
correct = matchSeq(editSeq, gold, ignore_whitespace_casing, verbose)
#gold = [(g[0], g[1], g[2], g[-1][0]) for g in gold]
weight_edits = compute_weight_edits(editSeq, gold, source, candidate, refs[annotator], scorer_type, scorer, sent_level=True)
# local cumulative counts, P, R and F1
stat_correct_local = stat_correct + sum(weight_edits[c] for c in correct)
stat_proposed_local = stat_proposed + sum(weight_edits[e] for e in editSeq)
stat_gold_local = stat_gold + sum(weight_edits[g] for g in gold)
p_local = comp_p(stat_correct_local, stat_proposed_local)
r_local = comp_r(stat_correct_local, stat_gold_local)
f1_local = comp_f1(p_local, r_local, beta)
if f1_max < f1_local or \
(f1_max == f1_local and max_stat_correct < stat_correct_local) or \
(
f1_max == f1_local and max_stat_correct == stat_correct_local and min_stat_proposed + sqbeta * min_stat_gold > stat_proposed_local + sqbeta * stat_gold_local):
chosen_ann = annotator
f1_max = f1_local
max_stat_correct = stat_correct_local
min_stat_proposed = stat_proposed_local
min_stat_gold = stat_gold_local
argmax_correct = sum(weight_edits[c] for c in correct)
argmax_proposed = sum(weight_edits[e] for e in editSeq)
argmax_gold = sum(weight_edits[g] for g in gold)
if verbose:
print(">> Chosen Annotator for line", i, ":", chosen_ann)
print("")
stat_correct += argmax_correct
stat_proposed += argmax_proposed
stat_gold += argmax_gold
if stat_proposed:
p = stat_correct / stat_proposed
else:
p = 1.0
if stat_gold:
r = stat_correct / stat_gold
else:
r = 1.0
if beta * beta * p + r:
f1 = (1.0 + beta * beta) * p * r / (beta * beta * p + r)
else:
f1 = 0.0
if verbose:
print("CORRECT EDITS :", int(stat_correct))
print("PROPOSED EDITS :", int(stat_proposed))
print("GOLD EDITS :", int(stat_gold))
print("P =", p)
print("R =", r)
print("F_%.1f =" % beta, f1)
return (p, r, f1) | 22,995 | 39.062718 | 187 | py |
PT-M2 | PT-M2-main/evaluate.py | import argparse
import torch
import os
from utils import load_file, load_dir, write_to_csv
from metrics import PTM2
def main():
parser = argparse.ArgumentParser("PT-M2")
parser.add_argument("--source", type=str, default="source file path")
parser.add_argument("--reference", type=str, default="reference file path")
parser.add_argument("--hypothesis", type=str, default="hypothesis file path")
parser.add_argument("--output", type=str, default="output file path")
parser.add_argument("--base", choices=["m2", "sentm2", "errant", "senterrant"], default="m2", type=str)
parser.add_argument("--scorer", choices=["self", "bertscore", "bartscore"],
default="self", type=str, help="choose the plm scorer type")
parser.add_argument("--model_type", type=str, help="choose the plm type", default="bert-base-uncased")
parser.add_argument("--beta", default=0.5, type=float)
args = parser.parse_args()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
args.device = device
print(args)
sources = load_file(args.source)
references = load_dir(args.reference)
m2_file = f"{args.reference}.m2"
metric = PTM2(args, corpus=None)
if args.base == "m2":
score = metric.compute_m2(m2_file=m2_file, hyp_file=args.hypothesis, sources=sources, references=references)
elif args.base == "sentm2":
score = metric.compute_sentm2(m2_file=m2_file, hyp_file=args.hypothesis, sources=sources, references=references)
elif args.base == "errant":
score = metric.compute_errant(m2_file=m2_file, hyp_file=args.hypothesis, sources=sources, references=references)
elif args.base == "senterrant":
score = metric.compute_senterrant(m2_file=m2_file, hyp_file=args.hypothesis, sources=sources, references=references)
print(f"base={args.base}, scorer={args.scorer}, model_type={args.model_type}, score={score:.4f}")
with open(args.output, "w", encoding="utf8") as fw:
fw.write(f"base={args.base}, scorer={args.scorer}, model_type={args.model_type}, score={score}")
if __name__ == "__main__":
main()
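# Illustrative invocation (hypothetical paths): --reference must be a directory of
# per-annotator reference files named ref0, ref1, ... (see utils.load_dir), with the gold
# M2 file at <reference>.m2 and, for the errant/senterrant bases, the hypothesis M2 file
# at <hypothesis>.m2:
#   python evaluate.py --source src.txt --reference refs --hypothesis hyp.txt \
#       --output result.txt --base m2 --scorer bertscore --model_type bert-base-uncased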
| 2,143 | 43.666667 | 124 | py |
PT-M2 | PT-M2-main/utils.py | import os
import sys
import csv
import random
import numpy as np
import torch
sys.path.append("m2scorer")
def load_file(src_file):
sources = []
with open(src_file, "r", encoding="utf8") as fr:
for line in fr:
sources.append(line.strip("\n"))
return sources
def load_dir(ref_dir):
references = {}
for f_n in os.listdir(ref_dir):
n = int(f_n[3:])
ref_file = os.path.join(ref_dir, f_n)
with open(ref_file, "r", encoding="utf8") as fr:
for i, line in enumerate(fr):
if i not in references:
references[i] = {}
references[i][n] = line.strip("\n")
references = [v for v in references.values()]
return references
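# Layout assumed by load_dir (inferred from int(f_n[3:]) above): the reference directory
# holds one plain-text file per annotator, named with a three-character prefix followed by
# the annotator index (e.g. ref0, ref1, ...), one reference sentence per line. The return
# value is a list with, for each sentence, a dict mapping annotator index -> reference.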
def write_to_csv(f_n, datas):
with open(f_n, 'w', encoding='utf-8', newline='') as f:
write = csv.writer(f, delimiter="\t")
for data in datas:
write.writerow(data)
| 945 | 23.25641 | 59 | py |
PT-M2 | PT-M2-main/bart_score.py | # %%
import torch
import torch.nn as nn
import traceback
from transformers import BartTokenizer, BartForConditionalGeneration
from typing import List
import numpy as np
class BARTScorer:
def __init__(self, device='cuda:0', max_length=1024, checkpoint='facebook/bart-large-cnn'):
# Set up model
self.device = device
self.max_length = max_length
self.tokenizer = BartTokenizer.from_pretrained(checkpoint)
self.model = BartForConditionalGeneration.from_pretrained(checkpoint)
self.model.eval()
self.model.to(device)
# Set up loss
self.loss_fct = nn.NLLLoss(reduction='none', ignore_index=self.model.config.pad_token_id)
self.lsm = nn.LogSoftmax(dim=1)
def load(self, path=None):
""" Load model from paraphrase finetuning """
if path is None:
path = 'models/bart.pth'
self.model.load_state_dict(torch.load(path, map_location=self.device))
def score(self, srcs, tgts, batch_size=4):
""" Score a batch of examples """
score_list = []
for i in range(0, len(srcs), batch_size):
src_list = srcs[i: i + batch_size]
tgt_list = tgts[i: i + batch_size]
try:
with torch.no_grad():
encoded_src = self.tokenizer(
src_list,
max_length=self.max_length,
truncation=True,
padding=True,
return_tensors='pt'
)
encoded_tgt = self.tokenizer(
tgt_list,
max_length=self.max_length,
truncation=True,
padding=True,
return_tensors='pt'
)
src_tokens = encoded_src['input_ids'].to(self.device)
src_mask = encoded_src['attention_mask'].to(self.device)
tgt_tokens = encoded_tgt['input_ids'].to(self.device)
tgt_mask = encoded_tgt['attention_mask']
tgt_len = tgt_mask.sum(dim=1).to(self.device)
output = self.model(
input_ids=src_tokens,
attention_mask=src_mask,
labels=tgt_tokens
)
logits = output.logits.view(-1, self.model.config.vocab_size)
loss = self.loss_fct(self.lsm(logits), tgt_tokens.view(-1))
loss = loss.view(tgt_tokens.shape[0], -1)
loss = loss.sum(dim=1) / tgt_len
curr_score_list = [-x.item() for x in loss]
score_list += curr_score_list
except RuntimeError:
traceback.print_exc()
print(f'source: {src_list}')
print(f'target: {tgt_list}')
exit(0)
return score_list
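    # Illustrative usage (hypothetical sentences): score(srcs, tgts) returns one value per
    # pair, the average per-token log-likelihood of tgt given src (negative; higher means
    # tgt is more likely given src), e.g.
    #   scorer = BARTScorer(device='cpu')
    #   scorer.score(['The cat sat on the mat.'], ['The cat is on the mat.'])  # -> [one negative float]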
def multi_ref_score(self, srcs, tgts: List[List[str]], agg="mean", batch_size=4):
# Assert we have the same number of references
ref_nums = [len(x) for x in tgts]
if len(set(ref_nums)) > 1:
raise Exception("You have different number of references per test sample.")
ref_num = len(tgts[0])
score_matrix = []
for i in range(ref_num):
curr_tgts = [x[i] for x in tgts]
scores = self.score(srcs, curr_tgts, batch_size)
score_matrix.append(scores)
if agg == "mean":
score_list = np.mean(score_matrix, axis=0)
elif agg == "max":
score_list = np.max(score_matrix, axis=0)
else:
raise NotImplementedError
return list(score_list)
def test(self, batch_size=3):
""" Test """
src_list = [
'This is a very good idea. Although simple, but very insightful.',
'Can I take a look?',
'Do not trust him, he is a liar.'
]
tgt_list = [
"That's stupid.",
"What's the problem?",
'He is trustworthy.'
]
print(self.score(src_list, tgt_list, batch_size)) | 4,219 | 36.678571 | 97 | py |
PT-M2 | PT-M2-main/metrics.py | from tqdm import tqdm
import numpy as np
import sys
sys.path.append("m2score")
from m2score.m2scorer import load_annotation
from m2score.util import smart_open
from m2score.levenshtein import batch_multi_pre_rec_f1, batch_multi_pre_rec_f1_sent
from errant_score import batch_multi_pre_rec_f1_errant, batch_multi_pre_rec_f1_sent_errant, errant_load_annotation
from bart_score import BARTScorer
from bert_score import BERTScorer
class PTM2:
def __init__(self, args, corpus=None):
self.args = args
self.beta = args.beta
self.device = args.device
self.model_type = args.model_type
self.corpus = corpus
self.scorer = self.get_plm_scorer(corpus)
def compute_sentm2(self, m2_file, hyp_file, sources, references):
_, gold_edits = load_annotation(m2_file)
fin = smart_open(hyp_file, 'r')
system_sentences = [line.strip() for line in fin.readlines()]
fin.close()
score_lst = []
for hyp, src, refs, golds in tqdm(zip(system_sentences, sources, references, gold_edits)):
f1 = batch_multi_pre_rec_f1_sent(candidates=[hyp], sources=[src], gold_edits=[golds],
references=[refs], scorer=self.scorer, scorer_type=self.args.scorer, beta=self.beta)[-1]
score_lst.append(f1)
return sum(np.array(score_lst)) / len(system_sentences)
def compute_m2(self, m2_file, hyp_file, sources, references):
_, gold_edits = load_annotation(m2_file)
fin = smart_open(hyp_file, 'r')
system_sentences = [line.strip() for line in fin.readlines()]
fin.close()
score = batch_multi_pre_rec_f1(candidates=system_sentences, sources=sources, gold_edits=gold_edits,
references=references, scorer=self.scorer, scorer_type=self.args.scorer, beta=self.beta)[-1]
return score
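    # Sketch of the corpus-level usage (illustrative file names; `args` as built in
    # evaluate.py): load_annotation reads the gold M2 file and the weighted multi-annotator
    # P/R/F_0.5 is computed over all hypothesis sentences at once, e.g.
    #   metric = PTM2(args, corpus=None)
    #   score = metric.compute_m2(m2_file="refs.m2", hyp_file="hyp.txt",
    #                             sources=sources, references=references)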
def compute_senterrant(self, m2_file, hyp_file, sources, references):
sys_file = f"{hyp_file}.m2"
_, gold_edits, sys_edits = errant_load_annotation(sys_file, m2_file)
fin = smart_open(hyp_file, 'r')
system_sentences = [line.strip() for line in fin.readlines()]
fin.close()
score_lst = []
for hyp, src, refs, sys, golds in tqdm(
zip(system_sentences, sources, references, sys_edits, gold_edits)):
f1 = batch_multi_pre_rec_f1_sent_errant(candidates=[hyp], sources=[src], system_edits=[sys], gold_edits=[golds],
references=[refs], scorer=self.scorer, scorer_type=self.args.scorer, beta=self.beta)[-1]
score_lst.append(f1)
return sum(np.array(score_lst)) / len(system_sentences)
def compute_errant(self, m2_file, hyp_file, sources, references):
sys_file = f"{hyp_file}.m2"
_, gold_edits, sys_edits = errant_load_annotation(sys_file, m2_file)
fin = smart_open(hyp_file, 'r')
system_sentences = [line.strip() for line in fin.readlines()]
fin.close()
score = \
batch_multi_pre_rec_f1_errant(candidates=system_sentences, sources=sources, system_edits=sys_edits, gold_edits=gold_edits,
references=references, scorer=self.scorer, scorer_type=self.args.scorer, beta=self.beta)[-1]
return score
def get_plm_scorer(self, corpus=None):
scorer = None
if self.args.scorer == "bertscore":
if corpus:
scorer = BERTScorer(device=self.device, model_type=self.model_type,
lang="en", rescale_with_baseline=True,
idf=True, idf_sents=corpus)
else:
scorer = BERTScorer(device=self.device, model_type=self.model_type,
lang="en", rescale_with_baseline=True)
elif self.args.scorer == "bartscore":
scorer = BARTScorer(device=self.device, checkpoint=f"facebook/{self.model_type}")
return scorer | 4,065 | 44.177778 | 140 | py |
PT-M2 | PT-M2-main/bert_score/score.py | import os
import sys
import time
import pathlib
import torch
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import numpy as np
import pandas as pd
from collections import defaultdict
from transformers import AutoTokenizer
from .utils import (
get_model,
get_tokenizer,
get_idf_dict,
bert_cos_score_idf,
get_bert_embedding,
lang2model,
model2layers,
get_hash,
cache_scibert,
sent_encode,
)
__all__ = ["score", "plot_example"]
def score(
cands,
refs,
model_type=None,
num_layers=None,
verbose=False,
idf=False,
device=None,
batch_size=64,
nthreads=4,
all_layers=False,
lang=None,
return_hash=False,
rescale_with_baseline=False,
baseline_path=None,
use_fast_tokenizer=False
):
"""
BERTScore metric.
Args:
- :param: `cands` (list of str): candidate sentences
- :param: `refs` (list of str or list of list of str): reference sentences
- :param: `model_type` (str): bert specification, default using the suggested
                  model for the target language; has to specify at least one of
`model_type` or `lang`
- :param: `num_layers` (int): the layer of representation to use.
default using the number of layer tuned on WMT16 correlation data
- :param: `verbose` (bool): turn on intermediate status update
- :param: `idf` (bool or dict): use idf weighting, can also be a precomputed idf_dict
- :param: `device` (str): on which the contextual embedding model will be allocated on.
If this argument is None, the model lives on cuda:0 if cuda is available.
- :param: `nthreads` (int): number of threads
- :param: `batch_size` (int): bert score processing batch size
- :param: `lang` (str): language of the sentences; has to specify
at least one of `model_type` or `lang`. `lang` needs to be
specified when `rescale_with_baseline` is True.
- :param: `return_hash` (bool): return hash code of the setting
- :param: `rescale_with_baseline` (bool): rescale bertscore with pre-computed baseline
- :param: `baseline_path` (str): customized baseline file
- :param: `use_fast_tokenizer` (bool): `use_fast` parameter passed to HF tokenizer
Return:
- :param: `(P, R, F)`: each is of shape (N); N = number of input
candidate reference pairs. if returning hashcode, the
                  output will be ((P, R, F), hashcode). If a candidate has
multiple references, the returned score of this candidate is
the *best* score among all references.
"""
assert len(cands) == len(refs), "Different number of candidates and references"
assert lang is not None or model_type is not None, "Either lang or model_type should be specified"
ref_group_boundaries = None
if not isinstance(refs[0], str):
ref_group_boundaries = []
ori_cands, ori_refs = cands, refs
cands, refs = [], []
count = 0
for cand, ref_group in zip(ori_cands, ori_refs):
cands += [cand] * len(ref_group)
refs += ref_group
ref_group_boundaries.append((count, count + len(ref_group)))
count += len(ref_group)
if rescale_with_baseline:
assert lang is not None, "Need to specify Language when rescaling with baseline"
if model_type is None:
lang = lang.lower()
model_type = lang2model[lang]
if num_layers is None:
num_layers = model2layers[model_type]
tokenizer = get_tokenizer(model_type, use_fast_tokenizer)
model = get_model(model_type, num_layers, all_layers)
if device is None:
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)
if not idf:
idf_dict = defaultdict(lambda: 1.0)
# set idf for [SEP] and [CLS] to 0
idf_dict[tokenizer.sep_token_id] = 0
idf_dict[tokenizer.cls_token_id] = 0
elif isinstance(idf, dict):
if verbose:
print("using predefined IDF dict...")
idf_dict = idf
else:
if verbose:
print("preparing IDF dict...")
start = time.perf_counter()
idf_dict = get_idf_dict(refs, tokenizer, nthreads=nthreads)
if verbose:
print("done in {:.2f} seconds".format(time.perf_counter() - start))
if verbose:
print("calculating scores...")
start = time.perf_counter()
all_preds = bert_cos_score_idf(
model,
refs,
cands,
tokenizer,
idf_dict,
verbose=verbose,
device=device,
batch_size=batch_size,
all_layers=all_layers,
).cpu()
if ref_group_boundaries is not None:
max_preds = []
for beg, end in ref_group_boundaries:
max_preds.append(all_preds[beg:end].max(dim=0)[0])
all_preds = torch.stack(max_preds, dim=0)
use_custom_baseline = baseline_path is not None
if rescale_with_baseline:
if baseline_path is None:
baseline_path = os.path.join(os.path.dirname(__file__), f"rescale_baseline/{lang}/{model_type}.tsv")
if os.path.isfile(baseline_path):
if not all_layers:
baselines = torch.from_numpy(pd.read_csv(baseline_path).iloc[num_layers].to_numpy())[1:].float()
else:
baselines = torch.from_numpy(pd.read_csv(baseline_path).to_numpy())[:, 1:].unsqueeze(1).float()
all_preds = (all_preds - baselines) / (1 - baselines)
else:
print(
f"Warning: Baseline not Found for {model_type} on {lang} at {baseline_path}", file=sys.stderr,
)
out = all_preds[..., 0], all_preds[..., 1], all_preds[..., 2] # P, R, F
if verbose:
time_diff = time.perf_counter() - start
print(f"done in {time_diff:.2f} seconds, {len(refs) / time_diff:.2f} sentences/sec")
if return_hash:
return tuple(
[
out,
get_hash(model_type, num_layers, idf, rescale_with_baseline,
use_custom_baseline=use_custom_baseline,
use_fast_tokenizer=use_fast_tokenizer),
]
)
return out
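# Illustrative usage of score() (hypothetical sentences; model weights are downloaded on
# first use):
#   P, R, F = score(["the cat sat on the mat"], ["there is a cat on the mat"], lang="en")
#   print(F.mean().item())
# Each returned tensor holds one value per candidate; with list-of-list references, the
# best score within each candidate's reference group is returned.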
def plot_example(
candidate,
reference,
model_type=None,
num_layers=None,
lang=None,
rescale_with_baseline=False,
baseline_path=None,
use_fast_tokenizer=False,
fname="",
):
"""
BERTScore metric.
Args:
- :param: `candidate` (str): a candidate sentence
- :param: `reference` (str): a reference sentence
- :param: `verbose` (bool): turn on intermediate status update
- :param: `model_type` (str): bert specification, default using the suggested
model for the target langauge; has to specify at least one of
`model_type` or `lang`
- :param: `num_layers` (int): the layer of representation to use
- :param: `lang` (str): language of the sentences; has to specify
at least one of `model_type` or `lang`. `lang` needs to be
specified when `rescale_with_baseline` is True.
- :param: `return_hash` (bool): return hash code of the setting
- :param: `rescale_with_baseline` (bool): rescale bertscore with pre-computed baseline
- :param: `use_fast_tokenizer` (bool): `use_fast` parameter passed to HF tokenizer
- :param: `fname` (str): path to save the output plot
"""
assert isinstance(candidate, str)
assert isinstance(reference, str)
assert lang is not None or model_type is not None, "Either lang or model_type should be specified"
if rescale_with_baseline:
assert lang is not None, "Need to specify Language when rescaling with baseline"
if model_type is None:
lang = lang.lower()
model_type = lang2model[lang]
if num_layers is None:
num_layers = model2layers[model_type]
tokenizer = get_tokenizer(model_type, use_fast_tokenizer)
model = get_model(model_type, num_layers)
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)
idf_dict = defaultdict(lambda: 1.0)
# set idf for [SEP] and [CLS] to 0
idf_dict[tokenizer.sep_token_id] = 0
idf_dict[tokenizer.cls_token_id] = 0
hyp_embedding, masks, padded_idf = get_bert_embedding(
[candidate], model, tokenizer, idf_dict, device=device, all_layers=False
)
ref_embedding, masks, padded_idf = get_bert_embedding(
[reference], model, tokenizer, idf_dict, device=device, all_layers=False
)
ref_embedding.div_(torch.norm(ref_embedding, dim=-1).unsqueeze(-1))
hyp_embedding.div_(torch.norm(hyp_embedding, dim=-1).unsqueeze(-1))
sim = torch.bmm(hyp_embedding, ref_embedding.transpose(1, 2))
sim = sim.squeeze(0).cpu()
# remove [CLS] and [SEP] tokens
r_tokens = [tokenizer.decode([i]) for i in sent_encode(tokenizer, reference)][1:-1]
h_tokens = [tokenizer.decode([i]) for i in sent_encode(tokenizer, candidate)][1:-1]
sim = sim[1:-1, 1:-1]
if rescale_with_baseline:
if baseline_path is None:
baseline_path = os.path.join(os.path.dirname(__file__), f"rescale_baseline/{lang}/{model_type}.tsv")
if os.path.isfile(baseline_path):
baselines = torch.from_numpy(pd.read_csv(baseline_path).iloc[num_layers].to_numpy())[1:].float()
sim = (sim - baselines[2].item()) / (1 - baselines[2].item())
else:
print(
f"Warning: Baseline not Found for {model_type} on {lang} at {baseline_path}", file=sys.stderr,
)
fig, ax = plt.subplots(figsize=(len(r_tokens), len(h_tokens)))
im = ax.imshow(sim, cmap="Blues", vmin=0, vmax=1)
# We want to show all ticks...
ax.set_xticks(np.arange(len(r_tokens)))
ax.set_yticks(np.arange(len(h_tokens)))
# ... and label them with the respective list entries
ax.set_xticklabels(r_tokens, fontsize=10)
ax.set_yticklabels(h_tokens, fontsize=10)
ax.grid(False)
plt.xlabel("Reference (tokenized)", fontsize=14)
plt.ylabel("Candidate (tokenized)", fontsize=14)
title = "Similarity Matrix"
if rescale_with_baseline:
title += " (after Rescaling)"
plt.title(title, fontsize=14)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="2%", pad=0.2)
fig.colorbar(im, cax=cax)
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")
# Loop over data dimensions and create text annotations.
for i in range(len(h_tokens)):
for j in range(len(r_tokens)):
text = ax.text(
j,
i,
"{:.3f}".format(sim[i, j].item()),
ha="center",
va="center",
color="k" if sim[i, j].item() < 0.5 else "w",
)
fig.tight_layout()
if fname != "":
plt.savefig(fname, dpi=100)
print("Saved figure to file: ", fname)
plt.show()
| 11,254 | 35.781046 | 112 | py |
PT-M2 | PT-M2-main/bert_score/scorer.py | import os
import sys
import time
import pathlib
import torch
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import numpy as np
import pandas as pd
import warnings
from collections import defaultdict
from transformers import AutoTokenizer
from .utils import (
get_model,
get_tokenizer,
get_idf_dict,
bert_cos_score_idf,
get_bert_embedding,
lang2model,
model2layers,
get_hash,
cache_scibert,
sent_encode,
)
class BERTScorer:
"""
BERTScore Scorer Object.
"""
def __init__(
self,
model_type=None,
num_layers=None,
batch_size=64,
nthreads=4,
all_layers=False,
idf=False,
idf_sents=None,
device=None,
lang=None,
rescale_with_baseline=False,
baseline_path=None,
use_fast_tokenizer=False
):
"""
Args:
            - :param: `model_type` (str): contextual embedding model specification, default using the suggested
                      model for the target language; has to specify at least one of
`model_type` or `lang`
- :param: `num_layers` (int): the layer of representation to use.
default using the number of layer tuned on WMT16 correlation data
- :param: `verbose` (bool): turn on intermediate status update
            - :param: `idf` (bool): a boolean specifying whether to use idf or not (this should be True even if `idf_sents` is given)
- :param: `idf_sents` (List of str): list of sentences used to compute the idf weights
- :param: `device` (str): on which the contextual embedding model will be allocated on.
If this argument is None, the model lives on cuda:0 if cuda is available.
- :param: `batch_size` (int): bert score processing batch size
- :param: `nthreads` (int): number of threads
- :param: `lang` (str): language of the sentences; has to specify
at least one of `model_type` or `lang`. `lang` needs to be
specified when `rescale_with_baseline` is True.
- :param: `return_hash` (bool): return hash code of the setting
- :param: `rescale_with_baseline` (bool): rescale bertscore with pre-computed baseline
- :param: `baseline_path` (str): customized baseline file
- :param: `use_fast_tokenizer` (bool): `use_fast` parameter passed to HF tokenizer
"""
assert lang is not None or model_type is not None, "Either lang or model_type should be specified"
if rescale_with_baseline:
assert lang is not None, "Need to specify Language when rescaling with baseline"
if device is None:
self.device = "cuda" if torch.cuda.is_available() else "cpu"
else:
self.device = device
self._lang = lang
self._rescale_with_baseline = rescale_with_baseline
self._idf = idf
self.batch_size = batch_size
self.nthreads = nthreads
self.all_layers = all_layers
if model_type is None:
lang = lang.lower()
self._model_type = lang2model[lang]
else:
self._model_type = model_type
if num_layers is None:
self._num_layers = model2layers[self.model_type]
else:
self._num_layers = num_layers
# Building model and tokenizer
self._use_fast_tokenizer = use_fast_tokenizer
self._tokenizer = get_tokenizer(self.model_type, self._use_fast_tokenizer)
self._model = get_model(self.model_type, self.num_layers, self.all_layers)
self._model.to(self.device)
self._idf_dict = None
if idf_sents is not None:
self.compute_idf(idf_sents)
self._baseline_vals = None
self.baseline_path = baseline_path
self.use_custom_baseline = self.baseline_path is not None
if self.baseline_path is None:
self.baseline_path = os.path.join(
os.path.dirname(__file__), f"rescale_baseline/{self.lang}/{self.model_type}.tsv"
)
@property
def lang(self):
return self._lang
@property
def idf(self):
return self._idf
@property
def model_type(self):
return self._model_type
@property
def num_layers(self):
return self._num_layers
@property
def rescale_with_baseline(self):
return self._rescale_with_baseline
@property
def baseline_vals(self):
if self._baseline_vals is None:
if os.path.isfile(self.baseline_path):
if not self.all_layers:
self._baseline_vals = torch.from_numpy(
pd.read_csv(self.baseline_path).iloc[self.num_layers].to_numpy()
)[1:].float()
else:
self._baseline_vals = (
torch.from_numpy(pd.read_csv(self.baseline_path).to_numpy())[:, 1:].unsqueeze(1).float()
)
else:
raise ValueError(f"Baseline not Found for {self.model_type} on {self.lang} at {self.baseline_path}")
return self._baseline_vals
@property
def use_fast_tokenizer(self):
return self._use_fast_tokenizer
@property
def hash(self):
return get_hash(
self.model_type, self.num_layers, self.idf, self.rescale_with_baseline, self.use_custom_baseline, self.use_fast_tokenizer
)
def compute_idf(self, sents):
"""
Args:
"""
if self._idf_dict is not None:
warnings.warn("Overwriting the previous importance weights.")
self._idf_dict = get_idf_dict(sents, self._tokenizer, nthreads=self.nthreads)
def score(self, cands, refs, verbose=False, batch_size=64, return_hash=False):
"""
Args:
- :param: `cands` (list of str): candidate sentences
- :param: `refs` (list of str or list of list of str): reference sentences
Return:
- :param: `(P, R, F)`: each is of shape (N); N = number of input
candidate reference pairs. if returning hashcode, the
                output will be ((P, R, F), hashcode). If a candidate has
multiple references, the returned score of this candidate is
the *best* score among all references.
"""
ref_group_boundaries = None
if not isinstance(refs[0], str):
ref_group_boundaries = []
ori_cands, ori_refs = cands, refs
cands, refs = [], []
count = 0
for cand, ref_group in zip(ori_cands, ori_refs):
cands += [cand] * len(ref_group)
refs += ref_group
ref_group_boundaries.append((count, count + len(ref_group)))
count += len(ref_group)
if verbose:
print("calculating scores...")
start = time.perf_counter()
if self.idf:
assert self._idf_dict, "IDF weights are not computed"
idf_dict = self._idf_dict
else:
idf_dict = defaultdict(lambda: 1.0)
idf_dict[self._tokenizer.sep_token_id] = 0
idf_dict[self._tokenizer.cls_token_id] = 0
all_preds = bert_cos_score_idf(
self._model,
refs,
cands,
self._tokenizer,
idf_dict,
verbose=verbose,
device=self.device,
batch_size=batch_size,
all_layers=self.all_layers,
).cpu()
if ref_group_boundaries is not None:
max_preds = []
for start, end in ref_group_boundaries:
max_preds.append(all_preds[start:end].max(dim=0)[0])
all_preds = torch.stack(max_preds, dim=0)
if self.rescale_with_baseline:
all_preds = (all_preds - self.baseline_vals) / (1 - self.baseline_vals)
out = all_preds[..., 0], all_preds[..., 1], all_preds[..., 2] # P, R, F
if verbose:
time_diff = time.perf_counter() - start
print(f"done in {time_diff:.2f} seconds, {len(refs) / time_diff:.2f} sentences/sec")
if return_hash:
out = tuple([out, self.hash])
return out
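    # Illustrative usage (hypothetical sentences):
    #   scorer = BERTScorer(lang="en", rescale_with_baseline=True)
    #   P, R, F = scorer.score(["the cat sat on the mat"], [["there is a cat on the mat"]])
    # F holds one (rescaled) F1 value per candidate; passing a list of lists as refs scores
    # each candidate against the best-matching reference in its group.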
def plot_example(self, candidate, reference, fname=""):
"""
Args:
- :param: `candidate` (str): a candidate sentence
- :param: `reference` (str): a reference sentence
- :param: `fname` (str): path to save the output plot
"""
assert isinstance(candidate, str)
assert isinstance(reference, str)
idf_dict = defaultdict(lambda: 1.0)
idf_dict[self._tokenizer.sep_token_id] = 0
idf_dict[self._tokenizer.cls_token_id] = 0
hyp_embedding, masks, padded_idf = get_bert_embedding(
[candidate], self._model, self._tokenizer, idf_dict, device=self.device, all_layers=False,
)
ref_embedding, masks, padded_idf = get_bert_embedding(
[reference], self._model, self._tokenizer, idf_dict, device=self.device, all_layers=False,
)
ref_embedding.div_(torch.norm(ref_embedding, dim=-1).unsqueeze(-1))
hyp_embedding.div_(torch.norm(hyp_embedding, dim=-1).unsqueeze(-1))
sim = torch.bmm(hyp_embedding, ref_embedding.transpose(1, 2))
sim = sim.squeeze(0).cpu()
r_tokens = [self._tokenizer.decode([i]) for i in sent_encode(self._tokenizer, reference)][1:-1]
h_tokens = [self._tokenizer.decode([i]) for i in sent_encode(self._tokenizer, candidate)][1:-1]
sim = sim[1:-1, 1:-1]
if self.rescale_with_baseline:
sim = (sim - self.baseline_vals[2].item()) / (1 - self.baseline_vals[2].item())
fig, ax = plt.subplots(figsize=(len(r_tokens), len(h_tokens)))
im = ax.imshow(sim, cmap="Blues", vmin=0, vmax=1)
# We want to show all ticks...
ax.set_xticks(np.arange(len(r_tokens)))
ax.set_yticks(np.arange(len(h_tokens)))
# ... and label them with the respective list entries
ax.set_xticklabels(r_tokens, fontsize=10)
ax.set_yticklabels(h_tokens, fontsize=10)
ax.grid(False)
plt.xlabel("Reference (tokenized)", fontsize=14)
plt.ylabel("Candidate (tokenized)", fontsize=14)
title = "Similarity Matrix"
if self.rescale_with_baseline:
title += " (after Rescaling)"
plt.title(title, fontsize=14)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="2%", pad=0.2)
fig.colorbar(im, cax=cax)
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")
# Loop over data dimensions and create text annotations.
for i in range(len(h_tokens)):
for j in range(len(r_tokens)):
text = ax.text(
j,
i,
"{:.3f}".format(sim[i, j].item()),
ha="center",
va="center",
color="k" if sim[i, j].item() < 0.5 else "w",
)
fig.tight_layout()
if fname != "":
plt.savefig(fname, dpi=100)
print("Saved figure to file: ", fname)
plt.show()
def __repr__(self):
return f"{self.__class__.__name__}(hash={self.hash}, batch_size={self.batch_size}, nthreads={self.nthreads})"
def __str__(self):
return self.__repr__()
| 11,730 | 35.095385 | 133 | py |
PT-M2 | PT-M2-main/bert_score/utils.py | import sys
import os
import torch
from math import log
from itertools import chain
from collections import defaultdict, Counter
from multiprocessing import Pool
from functools import partial
from tqdm.auto import tqdm
from torch.nn.utils.rnn import pad_sequence
from distutils.version import LooseVersion
from transformers import BertConfig, XLNetConfig, XLMConfig, RobertaConfig
from transformers import AutoModel, GPT2Tokenizer, AutoTokenizer
from . import __version__
from transformers import __version__ as trans_version
__all__ = []
SCIBERT_URL_DICT = {
"scibert-scivocab-uncased": "https://s3-us-west-2.amazonaws.com/ai2-s2-research/scibert/pytorch_models/scibert_scivocab_uncased.tar", # recommend by the SciBERT authors
"scibert-scivocab-cased": "https://s3-us-west-2.amazonaws.com/ai2-s2-research/scibert/pytorch_models/scibert_scivocab_cased.tar",
"scibert-basevocab-uncased": "https://s3-us-west-2.amazonaws.com/ai2-s2-research/scibert/pytorch_models/scibert_basevocab_uncased.tar",
"scibert-basevocab-cased": "https://s3-us-west-2.amazonaws.com/ai2-s2-research/scibert/pytorch_models/scibert_basevocab_cased.tar",
}
lang2model = defaultdict(lambda: "bert-base-multilingual-cased")
lang2model.update(
{
"en": "roberta-large",
"zh": "bert-base-chinese",
"tr": "dbmdz/bert-base-turkish-cased",
"en-sci": "allenai/scibert_scivocab_uncased",
}
)
model2layers = {
"bert-base-uncased": 9, # 0.6925188074454226
"bert-large-uncased": 18, # 0.7210358126642836
"bert-base-cased-finetuned-mrpc": 9, # 0.6721947475618048
"bert-base-multilingual-cased": 9, # 0.6680687802637132
"bert-base-chinese": 8,
"roberta-base": 10, # 0.706288719158983
"roberta-large": 17, # 0.7385974720781534
"roberta-large-mnli": 19, # 0.7535618640417984
"roberta-base-openai-detector": 7, # 0.7048158349432633
"roberta-large-openai-detector": 15, # 0.7462770207355116
"xlnet-base-cased": 5, # 0.6630103662114238
"xlnet-large-cased": 7, # 0.6598800720297179
"xlm-mlm-en-2048": 6, # 0.651262570131464
"xlm-mlm-100-1280": 10, # 0.6475166424401905
# "scibert-scivocab-uncased": 8, # 0.6590354319927313
# "scibert-scivocab-cased": 9, # 0.6536375053937445
# "scibert-basevocab-uncased": 9, # 0.6748944832703548
# "scibert-basevocab-cased": 9, # 0.6524624150542374
'allenai/scibert_scivocab_uncased': 8, # 0.6590354393124127
'allenai/scibert_scivocab_cased': 9, # 0.6536374902465466
'nfliu/scibert_basevocab_uncased': 9, # 0.6748945076082333
"distilroberta-base": 5, # 0.6797558139322964
"distilbert-base-uncased": 5, # 0.6756659152782033
"distilbert-base-uncased-distilled-squad": 4, # 0.6718318036382493
"distilbert-base-multilingual-cased": 5, # 0.6178131050889238
"albert-base-v1": 10, # 0.654237567249745
"albert-large-v1": 17, # 0.6755890754323239
"albert-xlarge-v1": 16, # 0.7031844211905911
"albert-xxlarge-v1": 8, # 0.7508642218461096
"albert-base-v2": 9, # 0.6682455591837927
"albert-large-v2": 14, # 0.7008537594374035
"albert-xlarge-v2": 13, # 0.7317228357869254
"albert-xxlarge-v2": 8, # 0.7505160257184014
"xlm-roberta-base": 9, # 0.6506799445871697
"xlm-roberta-large": 17, # 0.6941551437476826
"google/electra-small-generator": 9, # 0.6659421842117754
"google/electra-small-discriminator": 11, # 0.6534639151385759
"google/electra-base-generator": 10, # 0.6730033453857188
"google/electra-base-discriminator": 9, # 0.7032089590812965
"google/electra-large-generator": 18, # 0.6813370013104459
"google/electra-large-discriminator": 14, # 0.6896675824733477
"google/bert_uncased_L-2_H-128_A-2": 1, # 0.5887998733228855
"google/bert_uncased_L-2_H-256_A-4": 1, # 0.6114863547661203
"google/bert_uncased_L-2_H-512_A-8": 1, # 0.6177345529192847
"google/bert_uncased_L-2_H-768_A-12": 2, # 0.6191261237956839
"google/bert_uncased_L-4_H-128_A-2": 3, # 0.6076202863798991
"google/bert_uncased_L-4_H-256_A-4": 3, # 0.6205239036810148
"google/bert_uncased_L-4_H-512_A-8": 3, # 0.6375351621856903
"google/bert_uncased_L-4_H-768_A-12": 3, # 0.6561849979644787
"google/bert_uncased_L-6_H-128_A-2": 5, # 0.6200458425360283
"google/bert_uncased_L-6_H-256_A-4": 5, # 0.6277501629539081
"google/bert_uncased_L-6_H-512_A-8": 5, # 0.641952305130849
"google/bert_uncased_L-6_H-768_A-12": 5, # 0.6762186226247106
"google/bert_uncased_L-8_H-128_A-2": 7, # 0.6186876506711779
"google/bert_uncased_L-8_H-256_A-4": 7, # 0.6447993208267708
"google/bert_uncased_L-8_H-512_A-8": 6, # 0.6489729408169956
"google/bert_uncased_L-8_H-768_A-12": 7, # 0.6705203359541737
"google/bert_uncased_L-10_H-128_A-2": 8, # 0.6126762064125278
"google/bert_uncased_L-10_H-256_A-4": 8, # 0.6376350032576573
"google/bert_uncased_L-10_H-512_A-8": 9, # 0.6579006292799915
"google/bert_uncased_L-10_H-768_A-12": 8, # 0.6861146692220176
"google/bert_uncased_L-12_H-128_A-2": 10, # 0.6184105693383591
"google/bert_uncased_L-12_H-256_A-4": 11, # 0.6374004994430261
"google/bert_uncased_L-12_H-512_A-8": 10, # 0.65880012149526
"google/bert_uncased_L-12_H-768_A-12": 9, # 0.675911357700092
"amazon/bort": 0, # 0.41927911053036643
"facebook/bart-base": 6, # 0.7122259132414092
"facebook/bart-large": 10, # 0.7448671872459683
"facebook/bart-large-cnn": 10, # 0.7393148105835096
"facebook/bart-large-mnli": 11, # 0.7531665445691358
"facebook/bart-large-xsum": 9, # 0.7496408866539556
"t5-small": 6, # 0.6813843919496912
"t5-base": 11, # 0.7096044814981418
"t5-large": 23, # 0.7244153820191929
"vinai/bertweet-base": 9, # 0.6529471006118857
"microsoft/deberta-base": 9, # 0.7088459455930344
"microsoft/deberta-base-mnli": 9, # 0.7395257063907247
"microsoft/deberta-large": 16, # 0.7511806792052013
"microsoft/deberta-large-mnli": 18, # 0.7736263649679905
"microsoft/deberta-xlarge": 18, # 0.7568670944373346
"microsoft/deberta-xlarge-mnli": 40, # 0.7780600929333213
"YituTech/conv-bert-base": 10, # 0.7058253551080789
"YituTech/conv-bert-small": 10, # 0.6544473011107349
"YituTech/conv-bert-medium-small": 9, # 0.6590097075123257
"microsoft/mpnet-base": 8, # 0.724976539498804
"squeezebert/squeezebert-uncased": 9, # 0.6543868703018726
"squeezebert/squeezebert-mnli": 9, # 0.6654799051284791
"squeezebert/squeezebert-mnli-headless": 9, # 0.6654799051284791
"tuner007/pegasus_paraphrase": 15, # 0.7188349436772694
"google/pegasus-large": 8, # 0.63960462272448
"google/pegasus-xsum": 11, # 0.6836878575233349
"sshleifer/tiny-mbart": 2, # 0.028246072231946733
"facebook/mbart-large-cc25": 12, # 0.6582922975802958
"facebook/mbart-large-50": 12, # 0.6464972230103133
"facebook/mbart-large-en-ro": 12, # 0.6791285137459857
"facebook/mbart-large-50-many-to-many-mmt": 12, # 0.6904136529270892
"facebook/mbart-large-50-one-to-many-mmt": 12, # 0.6847906439540236
"allenai/led-base-16384": 6, # 0.7122259170564179
"facebook/blenderbot_small-90M": 7, # 0.6489176335400088
"facebook/blenderbot-400M-distill": 2, # 0.5874774070540008
"microsoft/prophetnet-large-uncased": 4, # 0.586496184234925
"microsoft/prophetnet-large-uncased-cnndm": 7, # 0.6478379437729287
"SpanBERT/spanbert-base-cased": 8, # 0.6824006863686848
"SpanBERT/spanbert-large-cased": 17, # 0.705352690855603
"microsoft/xprophetnet-large-wiki100-cased": 7, # 0.5852499775879524
"ProsusAI/finbert": 10, # 0.6923213940752796
"Vamsi/T5_Paraphrase_Paws": 12, # 0.6941611753807352
"ramsrigouthamg/t5_paraphraser": 11, # 0.7200917597031539
"microsoft/deberta-v2-xlarge": 10, # 0.7393675784473045
"microsoft/deberta-v2-xlarge-mnli": 17, # 0.7620620803716714
"microsoft/deberta-v2-xxlarge": 21, # 0.7520547670281869
"microsoft/deberta-v2-xxlarge-mnli": 22, # 0.7742603457742682
"allenai/longformer-base-4096": 7, # 0.7089559593129316
"allenai/longformer-large-4096": 14, # 0.732408493548181
"allenai/longformer-large-4096-finetuned-triviaqa": 14, # 0.7365882744744722
"zhiheng-huang/bert-base-uncased-embedding-relative-key": 4, # 0.5995636595368777
"zhiheng-huang/bert-base-uncased-embedding-relative-key-query": 7, # 0.6303599452145718
"zhiheng-huang/bert-large-uncased-whole-word-masking-embedding-relative-key-query": 19, # 0.6896878492850327
'google/mt5-small': 8, # 0.6401166527273479
'google/mt5-base': 11, # 0.5663956536597241
'google/mt5-large': 19, # 0.6430931371732798
'google/mt5-xl': 24, # 0.6707200963021145
'google/bigbird-roberta-base': 10, # 0.6695606423502717
'google/bigbird-roberta-large': 14, # 0.6755874042374509
'google/bigbird-base-trivia-itc': 8, # 0.6930725491629892
'princeton-nlp/unsup-simcse-bert-base-uncased': 10, # 0.6703066531921142
'princeton-nlp/unsup-simcse-bert-large-uncased': 18, # 0.6958302800755326
'princeton-nlp/unsup-simcse-roberta-base': 8, # 0.6436615893535319
'princeton-nlp/unsup-simcse-roberta-large': 13, # 0.6812864385585965
'princeton-nlp/sup-simcse-bert-base-uncased': 10, # 0.7068074935240984
'princeton-nlp/sup-simcse-bert-large-uncased': 18, # 0.7111049471332378
'princeton-nlp/sup-simcse-roberta-base': 10, # 0.7253123806661946
'princeton-nlp/sup-simcse-roberta-large': 16, # 0.7497820277237173
'dbmdz/bert-base-turkish-cased': 10, # WMT18 seg en-tr 0.5522827687776142
'dbmdz/distilbert-base-turkish-cased': 4, # WMT18 seg en-tr 0.4742268041237113
'google/byt5-small': 1, # 0.5100025975052146
'google/byt5-base': 17, # 0.5810347173565313
'google/byt5-large': 30, # 0.6151895697554877
'microsoft/deberta-v3-xsmall': 10, # 0.6941803815412021
'microsoft/deberta-v3-small': 4, # 0.6651551203179679
'microsoft/deberta-v3-base': 9, # 0.7261586651018335
'microsoft/mdeberta-v3-base': 10, # 0.6778713684091584
'microsoft/deberta-v3-large': 12, # 0.6927693082293821
'khalidalt/DeBERTa-v3-large-mnli': 18, # 0.7428756686018376
}
def sent_encode(tokenizer, sent):
"Encoding as sentence based on the tokenizer"
sent = sent.strip()
if sent == "":
return tokenizer.build_inputs_with_special_tokens([])
elif isinstance(tokenizer, GPT2Tokenizer):
# for RoBERTa and GPT-2
if LooseVersion(trans_version) >= LooseVersion("4.0.0"):
return tokenizer.encode(
sent,
add_special_tokens=True,
add_prefix_space=True,
max_length=tokenizer.model_max_length,
truncation=True,
)
elif LooseVersion(trans_version) >= LooseVersion("3.0.0"):
return tokenizer.encode(
sent, add_special_tokens=True, add_prefix_space=True, max_length=tokenizer.max_len, truncation=True,
)
elif LooseVersion(trans_version) >= LooseVersion("2.0.0"):
return tokenizer.encode(sent, add_special_tokens=True, add_prefix_space=True, max_length=tokenizer.max_len)
else:
raise NotImplementedError(f"transformers version {trans_version} is not supported")
else:
if LooseVersion(trans_version) >= LooseVersion("4.0.0"):
return tokenizer.encode(
sent, add_special_tokens=True, max_length=tokenizer.model_max_length, truncation=True,
)
elif LooseVersion(trans_version) >= LooseVersion("3.0.0"):
return tokenizer.encode(sent, add_special_tokens=True, max_length=tokenizer.max_len, truncation=True)
elif LooseVersion(trans_version) >= LooseVersion("2.0.0"):
return tokenizer.encode(sent, add_special_tokens=True, max_length=tokenizer.max_len)
else:
raise NotImplementedError(f"transformers version {trans_version} is not supported")
def get_model(model_type, num_layers, all_layers=None):
if model_type.startswith("scibert"):
model = AutoModel.from_pretrained(cache_scibert(model_type))
elif "t5" in model_type:
from transformers import T5EncoderModel
model = T5EncoderModel.from_pretrained(model_type)
else:
model = AutoModel.from_pretrained(model_type)
model.eval()
if hasattr(model, "decoder") and hasattr(model, "encoder"):
model = model.encoder
# drop unused layers
if not all_layers:
if hasattr(model, "n_layers"): # xlm
assert (
0 <= num_layers <= model.n_layers
), f"Invalid num_layers: num_layers should be between 0 and {model.n_layers} for {model_type}"
model.n_layers = num_layers
elif hasattr(model, "layer"): # xlnet
assert (
0 <= num_layers <= len(model.layer)
), f"Invalid num_layers: num_layers should be between 0 and {len(model.layer)} for {model_type}"
model.layer = torch.nn.ModuleList([layer for layer in model.layer[:num_layers]])
elif hasattr(model, "encoder"): # albert
if hasattr(model.encoder, "albert_layer_groups"):
assert (
0 <= num_layers <= model.encoder.config.num_hidden_layers
), f"Invalid num_layers: num_layers should be between 0 and {model.encoder.config.num_hidden_layers} for {model_type}"
model.encoder.config.num_hidden_layers = num_layers
elif hasattr(model.encoder, "block"): # t5
assert (
0 <= num_layers <= len(model.encoder.block)
), f"Invalid num_layers: num_layers should be between 0 and {len(model.encoder.block)} for {model_type}"
model.encoder.block = torch.nn.ModuleList([layer for layer in model.encoder.block[:num_layers]])
else: # bert, roberta
assert (
0 <= num_layers <= len(model.encoder.layer)
), f"Invalid num_layers: num_layers should be between 0 and {len(model.encoder.layer)} for {model_type}"
model.encoder.layer = torch.nn.ModuleList([layer for layer in model.encoder.layer[:num_layers]])
elif hasattr(model, "transformer"): # bert, roberta
assert (
0 <= num_layers <= len(model.transformer.layer)
), f"Invalid num_layers: num_layers should be between 0 and {len(model.transformer.layer)} for {model_type}"
model.transformer.layer = torch.nn.ModuleList([layer for layer in model.transformer.layer[:num_layers]])
elif hasattr(model, "layers"): # bart
assert (
0 <= num_layers <= len(model.layers)
), f"Invalid num_layers: num_layers should be between 0 and {len(model.layers)} for {model_type}"
model.layers = torch.nn.ModuleList([layer for layer in model.layers[:num_layers]])
else:
raise ValueError("Not supported")
else:
if hasattr(model, "output_hidden_states"):
model.output_hidden_states = True
elif hasattr(model, "encoder"):
model.encoder.output_hidden_states = True
elif hasattr(model, "transformer"):
model.transformer.output_hidden_states = True
# else:
# raise ValueError(f"Not supported model architecture: {model_type}")
return model
def get_tokenizer(model_type, use_fast=False):
if model_type.startswith("scibert"):
model_type = cache_scibert(model_type)
if LooseVersion(trans_version) >= LooseVersion("4.0.0"):
tokenizer = AutoTokenizer.from_pretrained(model_type, use_fast=use_fast)
else:
assert not use_fast, "Fast tokenizer is not available for version < 4.0.0"
tokenizer = AutoTokenizer.from_pretrained(model_type)
return tokenizer
def padding(arr, pad_token, dtype=torch.long):
lens = torch.LongTensor([len(a) for a in arr])
max_len = lens.max().item()
padded = torch.ones(len(arr), max_len, dtype=dtype) * pad_token
mask = torch.zeros(len(arr), max_len, dtype=torch.long)
for i, a in enumerate(arr):
padded[i, : lens[i]] = torch.tensor(a, dtype=dtype)
mask[i, : lens[i]] = 1
return padded, lens, mask
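# Editor's sketch of what `padding` returns; the token ids below are made-up values
# used purely for illustration, not real vocabulary ids.
#
#   arr = [[101, 7592, 102], [101, 102]]
#   padded, lens, mask = padding(arr, pad_token=0)
#   # padded -> tensor([[101, 7592, 102],
#   #                   [101,  102,   0]])
#   # lens   -> tensor([3, 2])
#   # mask   -> tensor([[1, 1, 1],
#   #                   [1, 1, 0]])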
def bert_encode(model, x, attention_mask, all_layers=False):
model.eval()
with torch.no_grad():
out = model(x, attention_mask=attention_mask, output_hidden_states=all_layers)
if all_layers:
emb = torch.stack(out[-1], dim=2)
else:
emb = out[0]
return emb
def process(a, tokenizer=None):
if tokenizer is not None:
a = sent_encode(tokenizer, a)
return set(a)
def get_idf_dict(arr, tokenizer, nthreads=4):
"""
Returns mapping from word piece index to its inverse document frequency.
Args:
- :param: `arr` (list of str) : sentences to process.
- :param: `tokenizer` : a BERT tokenizer corresponds to `model`.
- :param: `nthreads` (int) : number of CPU threads to use
"""
idf_count = Counter()
num_docs = len(arr)
process_partial = partial(process, tokenizer=tokenizer)
with Pool(nthreads) as p:
idf_count.update(chain.from_iterable(p.map(process_partial, arr)))
idf_dict = defaultdict(lambda: log((num_docs + 1) / (1)))
idf_dict.update({idx: log((num_docs + 1) / (c + 1)) for (idx, c) in idf_count.items()})
return idf_dict
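# A small worked example of the smoothed IDF above (values approximate): with
# num_docs = 2 and a word piece appearing in both documents, its weight is
# log((2 + 1) / (2 + 1)) = 0.0; a piece appearing in one document gets
# log(3 / 2) ~= 0.405, and unseen pieces fall back to log((num_docs + 1) / 1) ~= 1.099.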
def collate_idf(arr, tokenizer, idf_dict, device="cuda:0"):
"""
    Helper function that pads a list of sentences to have the same length and
loads idf score for words in the sentences.
Args:
- :param: `arr` (list of str): sentences to process.
        - :param: `tokenizer` : a BERT tokenizer corresponds to `model`.
        - :param: `idf_dict` (dict): mapping a word piece index to its
                   inverse document frequency
        - :param: `device` (str): device to use, e.g. 'cpu' or 'cuda'
"""
arr = [sent_encode(tokenizer, a) for a in arr]
idf_weights = [[idf_dict[i] for i in a] for a in arr]
pad_token = tokenizer.pad_token_id
padded, lens, mask = padding(arr, pad_token, dtype=torch.long)
padded_idf, _, _ = padding(idf_weights, 0, dtype=torch.float)
padded = padded.to(device=device)
mask = mask.to(device=device)
lens = lens.to(device=device)
return padded, padded_idf, lens, mask
def get_bert_embedding(all_sens, model, tokenizer, idf_dict, batch_size=-1, device="cuda:0", all_layers=False):
"""
Compute BERT embedding in batches.
Args:
- :param: `all_sens` (list of str) : sentences to encode.
- :param: `model` : a BERT model from `pytorch_pretrained_bert`.
- :param: `tokenizer` : a BERT tokenizer corresponds to `model`.
- :param: `idf_dict` (dict) : mapping a word piece index to its
inverse document frequency
- :param: `device` (str): device to use, e.g. 'cpu' or 'cuda'
"""
padded_sens, padded_idf, lens, mask = collate_idf(all_sens, tokenizer, idf_dict, device=device)
if batch_size == -1:
batch_size = len(all_sens)
embeddings = []
with torch.no_grad():
for i in range(0, len(all_sens), batch_size):
batch_embedding = bert_encode(
model, padded_sens[i : i + batch_size], attention_mask=mask[i : i + batch_size], all_layers=all_layers,
)
embeddings.append(batch_embedding)
del batch_embedding
total_embedding = torch.cat(embeddings, dim=0)
return total_embedding, mask, padded_idf
def greedy_cos_idf(ref_embedding, ref_masks, ref_idf, hyp_embedding, hyp_masks, hyp_idf, all_layers=False):
"""
Compute greedy matching based on cosine similarity.
Args:
- :param: `ref_embedding` (torch.Tensor):
embeddings of reference sentences, BxKxd,
                   B: batch size, K: longest length, d: bert dimension
- :param: `ref_lens` (list of int): list of reference sentence length.
- :param: `ref_masks` (torch.LongTensor): BxKxK, BERT attention mask for
reference sentences.
- :param: `ref_idf` (torch.Tensor): BxK, idf score of each word
                   piece in the reference sentence
- :param: `hyp_embedding` (torch.Tensor):
embeddings of candidate sentences, BxKxd,
                   B: batch size, K: longest length, d: bert dimension
- :param: `hyp_lens` (list of int): list of candidate sentence length.
- :param: `hyp_masks` (torch.LongTensor): BxKxK, BERT attention mask for
candidate sentences.
- :param: `hyp_idf` (torch.Tensor): BxK, idf score of each word
                   piece in the candidate sentence
"""
ref_embedding.div_(torch.norm(ref_embedding, dim=-1).unsqueeze(-1))
hyp_embedding.div_(torch.norm(hyp_embedding, dim=-1).unsqueeze(-1))
if all_layers:
B, _, L, D = hyp_embedding.size()
hyp_embedding = hyp_embedding.transpose(1, 2).transpose(0, 1).contiguous().view(L * B, hyp_embedding.size(1), D)
ref_embedding = ref_embedding.transpose(1, 2).transpose(0, 1).contiguous().view(L * B, ref_embedding.size(1), D)
batch_size = ref_embedding.size(0)
sim = torch.bmm(hyp_embedding, ref_embedding.transpose(1, 2))
masks = torch.bmm(hyp_masks.unsqueeze(2).float(), ref_masks.unsqueeze(1).float())
if all_layers:
masks = masks.unsqueeze(0).expand(L, -1, -1, -1).contiguous().view_as(sim)
else:
masks = masks.expand(batch_size, -1, -1).contiguous().view_as(sim)
masks = masks.float().to(sim.device)
sim = sim * masks
word_precision = sim.max(dim=2)[0]
word_recall = sim.max(dim=1)[0]
hyp_idf.div_(hyp_idf.sum(dim=1, keepdim=True))
ref_idf.div_(ref_idf.sum(dim=1, keepdim=True))
precision_scale = hyp_idf.to(word_precision.device)
recall_scale = ref_idf.to(word_recall.device)
if all_layers:
precision_scale = precision_scale.unsqueeze(0).expand(L, B, -1).contiguous().view_as(word_precision)
recall_scale = recall_scale.unsqueeze(0).expand(L, B, -1).contiguous().view_as(word_recall)
P = (word_precision * precision_scale).sum(dim=1)
R = (word_recall * recall_scale).sum(dim=1)
F = 2 * P * R / (P + R)
hyp_zero_mask = hyp_masks.sum(dim=1).eq(2)
ref_zero_mask = ref_masks.sum(dim=1).eq(2)
if all_layers:
P = P.view(L, B)
R = R.view(L, B)
F = F.view(L, B)
if torch.any(hyp_zero_mask):
print(
"Warning: Empty candidate sentence detected; setting raw BERTscores to 0.", file=sys.stderr,
)
P = P.masked_fill(hyp_zero_mask, 0.0)
R = R.masked_fill(hyp_zero_mask, 0.0)
if torch.any(ref_zero_mask):
print("Warning: Empty reference sentence detected; setting raw BERTScores to 0.", file=sys.stderr)
P = P.masked_fill(ref_zero_mask, 0.0)
R = R.masked_fill(ref_zero_mask, 0.0)
F = F.masked_fill(torch.isnan(F), 0.0)
return P, R, F
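# Editor's sketch of the greedy matching above, on hypothetical numbers (not from any
# model): for each candidate token, word_precision takes the best cosine similarity
# against any reference token (row-wise max of `sim`), and word_recall does the same
# per reference token (column-wise max). With two candidate tokens whose best matches
# score 0.9 and 0.7 under uniform normalized idf weights (0.5 each),
# P = 0.5*0.9 + 0.5*0.7 = 0.8, and F is the harmonic mean 2*P*R / (P + R).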
def bert_cos_score_idf(
model, refs, hyps, tokenizer, idf_dict, verbose=False, batch_size=64, device="cuda:0", all_layers=False,
):
"""
Compute BERTScore.
Args:
- :param: `model` : a BERT model in `pytorch_pretrained_bert`
- :param: `refs` (list of str): reference sentences
- :param: `hyps` (list of str): candidate sentences
        - :param: `tokenizer` : a BERT tokenizer corresponds to `model`
- :param: `idf_dict` : a dictionary mapping a word piece index to its
inverse document frequency
- :param: `verbose` (bool): turn on intermediate status update
- :param: `batch_size` (int): bert score processing batch size
- :param: `device` (str): device to use, e.g. 'cpu' or 'cuda'
"""
preds = []
def dedup_and_sort(l):
return sorted(list(set(l)), key=lambda x: len(x.split(" ")), reverse=True)
sentences = dedup_and_sort(refs + hyps)
embs = []
iter_range = range(0, len(sentences), batch_size)
if verbose:
print("computing bert embedding.")
iter_range = tqdm(iter_range)
stats_dict = dict()
for batch_start in iter_range:
sen_batch = sentences[batch_start : batch_start + batch_size]
embs, masks, padded_idf = get_bert_embedding(
sen_batch, model, tokenizer, idf_dict, device=device, all_layers=all_layers
)
embs = embs.cpu()
masks = masks.cpu()
padded_idf = padded_idf.cpu()
for i, sen in enumerate(sen_batch):
sequence_len = masks[i].sum().item()
emb = embs[i, :sequence_len]
idf = padded_idf[i, :sequence_len]
stats_dict[sen] = (emb, idf)
def pad_batch_stats(sen_batch, stats_dict, device):
stats = [stats_dict[s] for s in sen_batch]
emb, idf = zip(*stats)
emb = [e.to(device) for e in emb]
idf = [i.to(device) for i in idf]
lens = [e.size(0) for e in emb]
emb_pad = pad_sequence(emb, batch_first=True, padding_value=2.0)
idf_pad = pad_sequence(idf, batch_first=True)
def length_to_mask(lens):
lens = torch.tensor(lens, dtype=torch.long)
max_len = max(lens)
base = torch.arange(max_len, dtype=torch.long).expand(len(lens), max_len)
return base < lens.unsqueeze(1)
pad_mask = length_to_mask(lens).to(device)
return emb_pad, pad_mask, idf_pad
device = next(model.parameters()).device
iter_range = range(0, len(refs), batch_size)
if verbose:
print("computing greedy matching.")
iter_range = tqdm(iter_range)
with torch.no_grad():
for batch_start in iter_range:
batch_refs = refs[batch_start : batch_start + batch_size]
batch_hyps = hyps[batch_start : batch_start + batch_size]
ref_stats = pad_batch_stats(batch_refs, stats_dict, device)
hyp_stats = pad_batch_stats(batch_hyps, stats_dict, device)
P, R, F1 = greedy_cos_idf(*ref_stats, *hyp_stats, all_layers)
preds.append(torch.stack((P, R, F1), dim=-1).cpu())
preds = torch.cat(preds, dim=1 if all_layers else 0)
return preds
def get_hash(model, num_layers, idf, rescale_with_baseline, use_custom_baseline, use_fast_tokenizer):
msg = "{}_L{}{}_version={}(hug_trans={})".format(
model, num_layers, "_idf" if idf else "_no-idf", __version__, trans_version
)
if rescale_with_baseline:
if use_custom_baseline:
msg += "-custom-rescaled"
else:
msg += "-rescaled"
if use_fast_tokenizer:
msg += "_fast-tokenizer"
return msg
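# Illustrative example of the hash string produced above (the version numbers are
# placeholders, not guaranteed values):
#   get_hash("roberta-large", 17, False, False, False, False)
#   -> "roberta-large_L17_no-idf_version=<__version__>(hug_trans=<trans_version>)"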
def cache_scibert(model_type, cache_folder="~/.cache/torch/transformers"):
if not model_type.startswith("scibert"):
return model_type
underscore_model_type = model_type.replace("-", "_")
cache_folder = os.path.abspath(os.path.expanduser(cache_folder))
filename = os.path.join(cache_folder, underscore_model_type)
# download SciBERT models
if not os.path.exists(filename):
cmd = f"mkdir -p {cache_folder}; cd {cache_folder};"
cmd += f"wget {SCIBERT_URL_DICT[model_type]}; tar -xvf {underscore_model_type}.tar;"
cmd += (
f"rm -f {underscore_model_type}.tar ; cd {underscore_model_type}; tar -zxvf weights.tar.gz; mv weights/* .;"
)
cmd += f"rm -f weights.tar.gz; rmdir weights; mv bert_config.json config.json;"
print(cmd)
print(f"downloading {model_type} model")
os.system(cmd)
# fix the missing files in scibert
json_file = os.path.join(filename, "special_tokens_map.json")
if not os.path.exists(json_file):
with open(json_file, "w") as f:
print(
'{"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}',
file=f,
)
json_file = os.path.join(filename, "added_tokens.json")
if not os.path.exists(json_file):
with open(json_file, "w") as f:
print("{}", file=f)
if "uncased" in model_type:
json_file = os.path.join(filename, "tokenizer_config.json")
if not os.path.exists(json_file):
with open(json_file, "w") as f:
print('{"do_lower_case": true, "max_len": 512, "init_inputs": []}', file=f)
return filename
| 28,789 | 44.553797 | 173 | py |
PT-M2 | PT-M2-main/bert_score/__init__.py | __version__ = "0.3.11"
from .score import *
from .scorer import *
| 66 | 15.75 | 22 | py |
PT-M2 | PT-M2-main/m2score/token_offsets.py |
import sys
import re
import os
from util import *
from Tokenizer import PTBTokenizer
assert len(sys.argv) == 1
# main
# loop over sentences cum annotation
tokenizer = PTBTokenizer()
sentence = ''
for line in sys.stdin:
line = line.decode("utf8").strip()
if line.startswith("S "):
sentence = line[2:]
sentence_tok = "S " + ' '.join(tokenizer.tokenize(sentence))
print sentence_tok.encode("utf8")
elif line.startswith("A "):
fields = line[2:].split('|||')
start_end = fields[0]
char_start, char_end = [int(a) for a in start_end.split()]
# calculate token offsets
prefix = sentence[:char_start]
tok_start = len(tokenizer.tokenize(prefix))
postfix = sentence[:char_end]
tok_end = len(tokenizer.tokenize(postfix))
start_end = str(tok_start) + " " + str(tok_end)
fields[0] = start_end
# tokenize corrections, remove trailing whitespace
corrections = [(' '.join(tokenizer.tokenize(c))).strip() for c in fields[2].split('||')]
fields[2] = '||'.join(corrections)
annotation = "A " + '|||'.join(fields)
print annotation.encode("utf8")
else:
print line.encode("utf8")
| 1,233 | 28.380952 | 96 | py |
PT-M2 | PT-M2-main/m2score/levenshtein.py | from optparse import OptionParser
from util import uniq
import re
import sys
import math
from copy import deepcopy
from tqdm import tqdm
from util import compute_weight_edits
# batch evaluation of a list of sentences
def batch_precision(candidates, sources, gold_edits, max_unchanged_words=2, beta=0.5, ignore_whitespace_casing=False, verbose=False):
return batch_pre_rec_f1(candidates, sources, gold_edits, max_unchanged_words, beta, ignore_whitespace_casing, verbose)[0]
def batch_recall(candidates, sources, gold_edits, max_unchanged_words=2, beta=0.5, ignore_whitespace_casing=False, verbose=False):
return batch_pre_rec_f1(candidates, sources, gold_edits, max_unchanged_words, beta, ignore_whitespace_casing, verbose)[1]
def batch_f1(candidates, sources, gold_edits, max_unchanged_words=2, beta=0.5, ignore_whitespace_casing=False, verbose=False):
return batch_pre_rec_f1(candidates, sources, gold_edits, max_unchanged_words, beta, ignore_whitespace_casing, verbose)[2]
def comp_p(a, b):
if b:
p = a / b
else:
p = 1.0
return p
def comp_r(c, g):
if g:
r = c / g
else:
r = 1.0
return r
def comp_f1(p, r, beta):
if beta*beta*p+r:
f = (1.0+beta*beta) * p * r / (beta*beta*p+r)
else:
f = 0.0
return f
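# Worked example for the F_beta above: with p = 0.5, r = 0.25 and beta = 0.5,
# F_0.5 = (1 + 0.25) * 0.5 * 0.25 / (0.25 * 0.5 + 0.25) = 0.15625 / 0.375 ~= 0.417,
# i.e. precision is weighted more heavily than recall when beta < 1.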
def f1_suffstats(candidate, source, gold_edits, max_unchanged_words=2, ignore_whitespace_casing= False, verbose=False, very_verbose=False):
stat_correct = 0.0
stat_proposed = 0.0
stat_gold = 0.0
candidate_tok = candidate.split()
source_tok = source.split()
lmatrix, backpointers = levenshtein_matrix(source_tok, candidate_tok)
V, E, dist, edits = edit_graph(lmatrix, backpointers)
if very_verbose:
print("edit matrix:", lmatrix)
print("backpointers:", backpointers)
print("edits (w/o transitive arcs):", edits)
V, E, dist, edits = transitive_arcs(V, E, dist, edits, max_unchanged_words, very_verbose)
dist = set_weights(E, dist, edits, gold_edits, very_verbose)
editSeq = best_edit_seq_bf(V, E, dist, edits, very_verbose)
if very_verbose:
print("Graph(V,E) = ")
print("V =", V)
print("E =", E)
print("edits (with transitive arcs):", edits)
print("dist() =", dist)
print("viterbi path =", editSeq)
if ignore_whitespace_casing:
editSeq = [x for x in editSeq if not equals_ignore_whitespace_casing(x[2], x[3])]
correct = matchSeq(editSeq, gold_edits, ignore_whitespace_casing)
stat_correct = len(correct)
stat_proposed = len(editSeq)
stat_gold = len(gold_edits)
if verbose:
print("SOURCE :", source.encode("utf8"))
print("HYPOTHESIS :", candidate.encode("utf8"))
print("EDIT SEQ :", list(reversed(editSeq)))
print("GOLD EDITS :", gold_edits)
print("CORRECT EDITS :", correct)
print("# correct :", int(stat_correct))
print("# proposed :", int(stat_proposed))
print("# gold :", int(stat_gold))
print("-------------------------------------------")
return (stat_correct, stat_proposed, stat_gold)
def batch_multi_pre_rec_f1(candidates, sources, gold_edits, references, scorer, scorer_type,
max_unchanged_words=2, beta=0.5, ignore_whitespace_casing= False, verbose=False, very_verbose=False):
assert len(candidates) == len(sources) == len(gold_edits)
stat_correct = 0.0
stat_proposed = 0.0
stat_gold = 0.0
i = 0
for candidate, source, refs, golds_set in tqdm(zip(candidates, sources, references, gold_edits)):
i = i + 1
# Candidate system edit extraction
candidate_tok = candidate.split()
source_tok = source.split()
#lmatrix, backpointers = levenshtein_matrix(source_tok, candidate_tok)
lmatrix1, backpointers1 = levenshtein_matrix(source_tok, candidate_tok, 1, 1, 1)
lmatrix2, backpointers2 = levenshtein_matrix(source_tok, candidate_tok, 1, 1, 2)
#V, E, dist, edits = edit_graph(lmatrix, backpointers)
V1, E1, dist1, edits1 = edit_graph(lmatrix1, backpointers1)
V2, E2, dist2, edits2 = edit_graph(lmatrix2, backpointers2)
V, E, dist, edits = merge_graph(V1, V2, E1, E2, dist1, dist2, edits1, edits2)
if very_verbose:
print("edit matrix 1:", lmatrix1)
print("edit matrix 2:", lmatrix2)
print("backpointers 1:", backpointers1)
print("backpointers 2:", backpointers2)
print("edits (w/o transitive arcs):", edits)
V, E, dist, edits = transitive_arcs(V, E, dist, edits, max_unchanged_words, very_verbose)
        # Find measures maximizing current cumulative F1; local: current annotator only
sqbeta = beta * beta
chosen_ann = -1
f1_max = -math.inf
argmax_correct = 0.0
argmax_proposed = 0.0
argmax_gold = 0.0
max_stat_correct = -math.inf
min_stat_proposed = math.inf
min_stat_gold = math.inf
for annotator, gold in golds_set.items():
localdist = set_weights(E, dist, edits, gold, verbose, very_verbose)
editSeq = best_edit_seq_bf(V, E, localdist, edits, very_verbose)
if verbose:
print(">> Annotator:", annotator)
if very_verbose:
print("Graph(V,E) = ")
print("V =", V)
print("E =", E)
print("edits (with transitive arcs):", edits)
print("dist() =", localdist)
print("viterbi path =", editSeq)
if ignore_whitespace_casing:
editSeq = [x for x in editSeq if not equals_ignore_whitespace_casing(x[2], x[3])]
correct = matchSeq(editSeq, gold, ignore_whitespace_casing, verbose)
gold = [(g[0], g[1], g[2], g[-1][0]) for g in gold]
weight_edits = compute_weight_edits(editSeq, gold, source, candidate, refs[annotator], scorer_type, scorer)
# local cumulative counts, P, R and F1
stat_correct_local = stat_correct + sum(weight_edits[c] for c in correct)
stat_proposed_local = stat_proposed + sum(weight_edits[e] for e in editSeq)
stat_gold_local = stat_gold + sum(weight_edits[g] for g in gold)
p_local = comp_p(stat_correct_local, stat_proposed_local)
r_local = comp_r(stat_correct_local, stat_gold_local)
f1_local = comp_f1(p_local, r_local, beta)
if f1_max < f1_local or \
(f1_max == f1_local and max_stat_correct < stat_correct_local) or \
(f1_max == f1_local and max_stat_correct == stat_correct_local and min_stat_proposed + sqbeta * min_stat_gold > stat_proposed_local + sqbeta * stat_gold_local):
chosen_ann = annotator
f1_max = f1_local
max_stat_correct = stat_correct_local
min_stat_proposed = stat_proposed_local
min_stat_gold = stat_gold_local
argmax_correct = sum(weight_edits[c] for c in correct)
argmax_proposed = sum(abs(weight_edits[e]) for e in editSeq)
argmax_gold = sum(weight_edits[g] for g in gold)
if verbose:
print("SOURCE :", source)
print("HYPOTHESIS :", candidate)
print("EDIT SEQ :", [shrinkEdit(ed) for ed in list(reversed(editSeq))])
print("GOLD EDITS :", gold)
print("CORRECT EDITS :", correct)
print("# correct :", int(stat_correct_local))
print("# proposed :", int(stat_proposed_local))
print("# gold :", int(stat_gold_local))
print("precision :", p_local)
print("recall :", r_local)
print("f_%.1f :" % beta, f1_local)
print("-------------------------------------------")
if verbose:
print(">> Chosen Annotator for line", i, ":", chosen_ann)
print("")
stat_correct += argmax_correct
stat_proposed += argmax_proposed
stat_gold += argmax_gold
if stat_proposed:
p = stat_correct / stat_proposed
else:
p = 1.0
if stat_gold:
r = stat_correct / stat_gold
else:
r = 1.0
if beta * beta * p + r:
f1 = (1.0+beta*beta) * p * r / (beta*beta*p+r)
else:
f1 = 0.0
if verbose:
print("CORRECT EDITS :", int(stat_correct))
print("PROPOSED EDITS :", int(stat_proposed))
print("GOLD EDITS :", int(stat_gold))
print("P =", p)
print("R =", r)
print("F_%.1f =" % beta, f1)
return (p, r, f1)
def batch_multi_pre_rec_f1_sent(candidates, sources, gold_edits, references, scorer, scorer_type, max_unchanged_words=2, beta=0.5,
ignore_whitespace_casing=False, verbose=False, very_verbose=False):
assert len(candidates) == len(sources) == len(gold_edits)
stat_correct = 0.0
stat_proposed = 0.0
stat_gold = 0.0
i = 0
for candidate, source, refs, golds_set in zip(candidates, sources, references, gold_edits):
i = i + 1
# Candidate system edit extraction
candidate_tok = candidate.split()
source_tok = source.split()
# lmatrix, backpointers = levenshtein_matrix(source_tok, candidate_tok)
lmatrix1, backpointers1 = levenshtein_matrix(source_tok, candidate_tok, 1, 1, 1)
lmatrix2, backpointers2 = levenshtein_matrix(source_tok, candidate_tok, 1, 1, 2)
# V, E, dist, edits = edit_graph(lmatrix, backpointers)
V1, E1, dist1, edits1 = edit_graph(lmatrix1, backpointers1)
V2, E2, dist2, edits2 = edit_graph(lmatrix2, backpointers2)
V, E, dist, edits = merge_graph(V1, V2, E1, E2, dist1, dist2, edits1, edits2)
if very_verbose:
print("edit matrix 1:", lmatrix1)
print("edit matrix 2:", lmatrix2)
print("backpointers 1:", backpointers1)
print("backpointers 2:", backpointers2)
print("edits (w/o transitive arcs):", edits)
V, E, dist, edits = transitive_arcs(V, E, dist, edits, max_unchanged_words, very_verbose)
        # Find measures maximizing current cumulative F1; local: current annotator only
sqbeta = beta * beta
chosen_ann = -1
f1_max = -math.inf
argmax_correct = 0.0
argmax_proposed = 0.0
argmax_gold = 0.0
max_stat_correct = -math.inf
min_stat_proposed = math.inf
min_stat_gold = math.inf
for annotator, gold in golds_set.items():
localdist = set_weights(E, dist, edits, gold, verbose, very_verbose)
editSeq = best_edit_seq_bf(V, E, localdist, edits, very_verbose)
if verbose:
print(">> Annotator:", annotator)
if very_verbose:
print("Graph(V,E) = ")
print("V =", V)
print("E =", E)
print("edits (with transitive arcs):", edits)
print("dist() =", localdist)
print("viterbi path =", editSeq)
if ignore_whitespace_casing:
editSeq = [x for x in editSeq if not equals_ignore_whitespace_casing(x[2], x[3])]
correct = matchSeq(editSeq, gold, ignore_whitespace_casing, verbose)
gold = [(g[0], g[1], g[2], g[-1][0]) for g in gold]
weight_edits = compute_weight_edits(editSeq, gold, source, candidate, refs[annotator], scorer_type, scorer, sent_level=True)
            # local cumulative counts, P, R and F1
stat_correct_local = stat_correct + sum(weight_edits[c] for c in correct)
stat_proposed_local = stat_proposed + sum(weight_edits[e] for e in editSeq)
stat_gold_local = stat_gold + sum(weight_edits[g] for g in gold)
p_local = comp_p(stat_correct_local, stat_proposed_local)
r_local = comp_r(stat_correct_local, stat_gold_local)
f1_local = comp_f1(p_local, r_local, beta)
if f1_max < f1_local or \
(f1_max == f1_local and max_stat_correct < stat_correct_local) or \
(
f1_max == f1_local and max_stat_correct == stat_correct_local and min_stat_proposed + sqbeta * min_stat_gold > stat_proposed_local + sqbeta * stat_gold_local):
chosen_ann = annotator
f1_max = f1_local
max_stat_correct = stat_correct_local
min_stat_proposed = stat_proposed_local
min_stat_gold = stat_gold_local
argmax_correct = sum(weight_edits[c] for c in correct)
argmax_proposed = sum(abs(weight_edits[e]) for e in editSeq)
argmax_gold = sum(weight_edits[g] for g in gold)
if verbose:
print("SOURCE :", source)
print("HYPOTHESIS :", candidate)
print("EDIT SEQ :", [shrinkEdit(ed) for ed in list(reversed(editSeq))])
print("GOLD EDITS :", gold)
print("CORRECT EDITS :", correct)
print("# correct :", int(stat_correct_local))
print("# proposed :", int(stat_proposed_local))
print("# gold :", int(stat_gold_local))
print("precision :", p_local)
print("recall :", r_local)
print("f_%.1f :" % beta, f1_local)
print("-------------------------------------------")
if verbose:
print(">> Chosen Annotator for line", i, ":", chosen_ann)
print("")
stat_correct += argmax_correct
stat_proposed += argmax_proposed
stat_gold += argmax_gold
if stat_proposed:
p = stat_correct / stat_proposed
else:
p = 1.0
if stat_gold:
r = stat_correct / stat_gold
else:
r = 1.0
if beta * beta * p + r:
f1 = (1.0 + beta * beta) * p * r / (beta * beta * p + r)
else:
f1 = 0.0
if verbose:
print("CORRECT EDITS :", int(stat_correct))
print("PROPOSED EDITS :", int(stat_proposed))
print("GOLD EDITS :", int(stat_gold))
print("P =", p)
print("R =", r)
print("F_%.1f =" % beta, f1)
return (p, r, f1)
def batch_pre_rec_f1(candidates, sources, gold_edits, max_unchanged_words=2, beta=0.5, ignore_whitespace_casing= False, verbose=False, very_verbose=False):
assert len(candidates) == len(sources) == len(gold_edits)
stat_correct = 0.0
stat_proposed = 0.0
stat_gold = 0.0
for candidate, source, gold in zip(candidates, sources, gold_edits):
candidate_tok = candidate.split()
source_tok = source.split()
lmatrix, backpointers = levenshtein_matrix(source_tok, candidate_tok)
V, E, dist, edits = edit_graph(lmatrix, backpointers)
if very_verbose:
print("edit matrix:", lmatrix)
print("backpointers:", backpointers)
print("edits (w/o transitive arcs):", edits)
V, E, dist, edits = transitive_arcs(V, E, dist, edits, max_unchanged_words, very_verbose)
dist = set_weights(E, dist, edits, gold, verbose, very_verbose)
editSeq = best_edit_seq_bf(V, E, dist, edits, very_verbose)
if very_verbose:
print("Graph(V,E) = ")
print("V =", V)
print("E =", E)
print("edits (with transitive arcs):", edits)
print("dist() =", dist)
print("viterbi path =", editSeq)
if ignore_whitespace_casing:
editSeq = [x for x in editSeq if not equals_ignore_whitespace_casing(x[2], x[3])]
correct = matchSeq(editSeq, gold, ignore_whitespace_casing)
stat_correct += len(correct)
stat_proposed += len(editSeq)
stat_gold += len(gold)
if verbose:
print("SOURCE :", source.encode("utf8"))
print("HYPOTHESIS :", candidate.encode("utf8"))
print("EDIT SEQ :", list(reversed(editSeq)))
print("GOLD EDITS :", gold)
print("CORRECT EDITS :", correct)
print("# correct :", stat_correct)
print("# proposed :", stat_proposed)
print("# gold :", stat_gold)
print("precision :", comp_p(stat_correct, stat_proposed))
print("recall :", comp_r(stat_correct, stat_gold))
print("f_%.1f :" % beta, comp_f1(stat_correct, stat_proposed, stat_gold, beta))
print("-------------------------------------------")
try:
p = stat_correct / stat_proposed
except ZeroDivisionError:
p = 1.0
try:
r = stat_correct / stat_gold
except ZeroDivisionError:
r = 1.0
try:
f1 = (1.0+beta*beta) * p * r / (beta*beta*p+r)
#f1 = 2.0 * p * r / (p+r)
except ZeroDivisionError:
f1 = 0.0
if verbose:
print("CORRECT EDITS :", stat_correct)
print("PROPOSED EDITS :", stat_proposed)
print("GOLD EDITS :", stat_gold)
print("P =", p)
print("R =", r)
print("F_%.1f =" % beta, f1)
return (p, r, f1)
# precision, recall, F1
def precision(candidate, source, gold_edits, max_unchanged_words=2, beta=0.5, verbose=False):
return pre_rec_f1(candidate, source, gold_edits, max_unchanged_words, beta, verbose)[0]
def recall(candidate, source, gold_edits, max_unchanged_words=2, beta=0.5, verbose=False):
return pre_rec_f1(candidate, source, gold_edits, max_unchanged_words, beta, verbose)[1]
def f1(candidate, source, gold_edits, max_unchanged_words=2, beta=0.5, verbose=False):
return pre_rec_f1(candidate, source, gold_edits, max_unchanged_words, beta, verbose)[2]
def shrinkEdit(edit):
shrunkEdit = deepcopy(edit)
origtok = edit[2].split()
corrtok = edit[3].split()
i = 0
cstart = 0
cend = len(corrtok)
found = False
while i < min(len(origtok), len(corrtok)) and not found:
if origtok[i] != corrtok[i]:
found = True
else:
cstart += 1
i += 1
j = 1
found = False
while j <= min(len(origtok), len(corrtok)) - cstart and not found:
if origtok[len(origtok) - j] != corrtok[len(corrtok) - j]:
found = True
else:
cend -= 1
j += 1
shrunkEdit = (edit[0] + i, edit[1] - (j-1), ' '.join(origtok[i : len(origtok)-(j-1)]), ' '.join(corrtok[i : len(corrtok)-(j-1)]))
return shrunkEdit
def matchSeq(editSeq, gold_edits, ignore_whitespace_casing= False, verbose=False):
m = []
goldSeq = deepcopy(gold_edits)
last_index = 0
CInsCDel = False
CInsWDel = False
CDelWIns = False
for e in reversed(editSeq):
for i in range(last_index, len(goldSeq)):
g = goldSeq[i]
if matchEdit(e,g, ignore_whitespace_casing):
m.append(e)
last_index = i+1
if verbose:
nextEditList = [shrinkEdit(edit) for edit in editSeq if e[1] == edit[0]]
prevEditList = [shrinkEdit(edit) for edit in editSeq if e[0] == edit[1]]
if e[0] != e[1]:
nextEditList = [edit for edit in nextEditList if edit[0] == edit[1]]
prevEditList = [edit for edit in prevEditList if edit[0] == edit[1]]
else:
nextEditList = [edit for edit in nextEditList if edit[0] < edit[1] and edit[3] == '']
prevEditList = [edit for edit in prevEditList if edit[0] < edit[1] and edit[3] == '']
matchAdj = any(any(matchEdit(edit, gold, ignore_whitespace_casing) for gold in goldSeq) for edit in nextEditList) or \
any(any(matchEdit(edit, gold, ignore_whitespace_casing) for gold in goldSeq) for edit in prevEditList)
if e[0] < e[1] and len(e[3].strip()) == 0 and \
(len(nextEditList) > 0 or len(prevEditList) > 0):
if matchAdj:
print("!", e)
else:
print("&", e)
elif e[0] == e[1] and \
(len(nextEditList) > 0 or len(prevEditList) > 0):
if matchAdj:
print("!", e)
else:
print("*", e)
return m
def matchEdit(e, g, ignore_whitespace_casing= False):
# start offset
if e[0] != g[0]:
return False
# end offset
if e[1] != g[1]:
return False
# original string
if e[2] != g[2]:
return False
# correction string
if not e[3] in g[3]:
return False
# all matches
return True
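# Note (editor): matchEdit counts a system edit as correct only when its token span and
# original string equal the gold edit's, and its correction string is one of the gold
# correction alternatives (in this codebase g[3] is a list of acceptable rewrites, so
# `e[3] in g[3]` is a membership test).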
def equals_ignore_whitespace_casing(a,b):
return a.replace(" ", "").lower() == b.replace(" ", "").lower()
def get_edits(candidate, source, gold_edits, max_unchanged_words=2, ignore_whitespace_casing= False, verbose=False, very_verbose=False):
candidate_tok = candidate.split()
source_tok = source.split()
lmatrix, backpointers = levenshtein_matrix(source_tok, candidate_tok)
V, E, dist, edits = edit_graph(lmatrix, backpointers)
V, E, dist, edits = transitive_arcs(V, E, dist, edits, max_unchanged_words, very_verbose)
dist = set_weights(E, dist, edits, gold_edits, verbose, very_verbose)
editSeq = best_edit_seq_bf(V, E, dist, edits)
if ignore_whitespace_casing:
editSeq = [x for x in editSeq if not equals_ignore_whitespace_casing(x[2], x[3])]
correct = matchSeq(editSeq, gold_edits)
return (correct, editSeq, gold_edits)
def pre_rec_f1(candidate, source, gold_edits, max_unchanged_words=2, beta=0.5, ignore_whitespace_casing= False, verbose=False, very_verbose=False):
candidate_tok = candidate.split()
source_tok = source.split()
lmatrix, backpointers = levenshtein_matrix(source_tok, candidate_tok)
V, E, dist, edits = edit_graph(lmatrix, backpointers)
V, E, dist, edits = transitive_arcs(V, E, dist, edits, max_unchanged_words, very_verbose)
dist = set_weights(E, dist, edits, gold_edits, verbose, very_verbose)
editSeq = best_edit_seq_bf(V, E, dist, edits)
if ignore_whitespace_casing:
editSeq = [x for x in editSeq if not equals_ignore_whitespace_casing(x[2], x[3])]
correct = matchSeq(editSeq, gold_edits)
try:
p = float(len(correct)) / len(editSeq)
except ZeroDivisionError:
p = 1.0
try:
r = float(len(correct)) / len(gold_edits)
except ZeroDivisionError:
r = 1.0
try:
f1 = (1.0+beta*beta) * p * r / (beta*beta*p+r)
#f1 = 2.0 * p * r / (p+r)
except ZeroDivisionError:
f1 = 0.0
if verbose:
print("Source:", source.encode("utf8"))
print("Hypothesis:", candidate.encode("utf8"))
print("edit seq", editSeq)
print("gold edits", gold_edits)
print("correct edits", correct)
print("p =", p)
print("r =", r)
print("f_%.1f =" % beta, f1)
return (p, r, f1)
# distance function
def get_distance(dist, v1, v2):
try:
return dist[(v1, v2)]
except KeyError:
return float('inf')
# find maximally matching edit sequence through the graph using bellman-ford
def best_edit_seq_bf(V, E, dist, edits, very_verbose=False):
thisdist = {}
path = {}
for v in V:
thisdist[v] = float('inf')
thisdist[(0,0)] = 0
for i in range(len(V)-1):
for edge in E:
v = edge[0]
w = edge[1]
if thisdist[v] + dist[edge] < thisdist[w]:
thisdist[w] = thisdist[v] + dist[edge]
path[w] = v
# backtrack
v = sorted(V)[-1]
editSeq = []
while True:
try:
w = path[v]
except KeyError:
break
edit = edits[(w,v)]
if edit[0] != 'noop':
editSeq.append((edit[1], edit[2], edit[3], edit[4]))
v = w
return editSeq
# # find maximally matching edit squence through the graph
# def best_edit_seq(V, E, dist, edits, verby_verbose=False):
# thisdist = {}
# path = {}
# for v in V:
# thisdist[v] = float('inf')
# thisdist[(0,0)] = 0
# queue = [(0,0)]
# while len(queue) > 0:
# v = queue[0]
# queue = queue[1:]
# for edge in E:
# if edge[0] != v:
# continue
# w = edge[1]
# if thisdist[v] + dist[edge] < thisdist[w]:
# thisdist[w] = thisdist[v] + dist[edge]
# path[w] = v
# if not w in queue:
# queue.append(w)
# # backtrack
# v = sorted(V)[-1]
# editSeq = []
# while True:
# try:
# w = path[v]
# except KeyError:
# break
# edit = edits[(w,v)]
# if edit[0] != 'noop':
# editSeq.append((edit[1], edit[2], edit[3], edit[4]))
# v = w
# return editSeq
def prev_identical_edge(cur, E, edits):
for e in E:
if e[1] == cur[0] and edits[e] == edits[cur]:
return e
return None
def next_identical_edge(cur, E, edits):
for e in E:
if e[0] == cur[1] and edits[e] == edits[cur]:
return e
return None
def get_prev_edges(cur, E):
prev = []
for e in E:
        if e[1] == cur[0]:
prev.append(e)
return prev
def get_next_edges(cur, E):
next = []
for e in E:
if e[0] == cur[1]:
next.append(e)
return next
# set weights on the graph, gold edits edges get negative weight
# other edges get an epsilon weight added
# gold_edits = (start, end, original, correction)
def set_weights(E, dist, edits, gold_edits, verbose=False, very_verbose=False):
EPSILON = 0.001
if very_verbose:
print("set weights of edges()", end=' ')
print("gold edits :", gold_edits)
gold_set = deepcopy(gold_edits)
retdist = deepcopy(dist)
M = {}
G = {}
for edge in E:
tE = edits[edge]
s, e = tE[1], tE[2]
if (s, e) not in M:
M[(s,e)] = []
M[(s,e)].append(edge)
if (s, e) not in G:
G[(s,e)] = []
for gold in gold_set:
s, e = gold[0], gold[1]
if (s, e) not in G:
G[(s,e)] = []
G[(s,e)].append(gold)
for k in sorted(M.keys()):
M[k] = sorted(M[k])
if k[0] == k[1]: # insertion case
lptr = 0
rptr = len(M[k])-1
cur = lptr
g_lptr = 0
g_rptr = len(G[k])-1
while lptr <= rptr:
hasGoldMatch = False
edge = M[k][cur]
thisEdit = edits[edge]
# only check start offset, end offset, original string, corrections
if very_verbose:
print("set weights of edge", edge)
print("edit =", thisEdit)
cur_gold = []
if cur == lptr:
cur_gold = list(range(g_lptr, g_rptr+1))
else:
cur_gold = reversed(list(range(g_lptr, g_rptr+1)))
for i in cur_gold:
gold = G[k][i]
if thisEdit[1] == gold[0] and \
thisEdit[2] == gold[1] and \
thisEdit[3] == gold[2] and \
thisEdit[4] in gold[3]:
hasGoldMatch = True
retdist[edge] = - len(E)
if very_verbose:
print("matched gold edit :", gold)
print("set weight to :", retdist[edge])
if cur == lptr:
#g_lptr += 1 # why?
g_lptr = i + 1
else:
#g_rptr -= 1 # why?
g_rptr = i - 1
break
if not hasGoldMatch and thisEdit[0] != 'noop':
retdist[edge] += EPSILON
if hasGoldMatch:
if cur == lptr:
lptr += 1
while lptr < len(M[k]) and M[k][lptr][0] != M[k][cur][1]:
if edits[M[k][lptr]] != 'noop':
retdist[M[k][lptr]] += EPSILON
lptr += 1
cur = lptr
else:
rptr -= 1
while rptr >= 0 and M[k][rptr][1] != M[k][cur][0]:
if edits[M[k][rptr]] != 'noop':
retdist[M[k][rptr]] += EPSILON
rptr -= 1
cur = rptr
else:
if cur == lptr:
lptr += 1
cur = rptr
else:
rptr -= 1
cur = lptr
else: #deletion or substitution, don't care about order, no harm if setting parallel edges weight < 0
for edge in M[k]:
hasGoldMatch = False
thisEdit = edits[edge]
if very_verbose:
print("set weights of edge", edge)
print("edit =", thisEdit)
for gold in G[k]:
if thisEdit[1] == gold[0] and \
thisEdit[2] == gold[1] and \
thisEdit[3] == gold[2] and \
thisEdit[4] in gold[3]:
hasGoldMatch = True
retdist[edge] = - len(E)
if very_verbose:
print("matched gold edit :", gold)
print("set weight to :", retdist[edge])
break
if not hasGoldMatch and thisEdit[0] != 'noop':
retdist[edge] += EPSILON
return retdist
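# Note (editor): the weighting scheme above makes every edge that matches a gold edit
# strongly negative (-len(E)) and nudges all other non-noop edges up by EPSILON, so the
# shortest path found by best_edit_seq_bf is pulled through as many gold-matching
# edits as possible.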
# add transitive arcs
def transitive_arcs(V, E, dist, edits, max_unchanged_words=2, very_verbose=False):
if very_verbose:
print("-- Add transitive arcs --")
for k in range(len(V)):
vk = V[k]
if very_verbose:
print("v _k :", vk)
for i in range(len(V)):
vi = V[i]
if very_verbose:
print("v _i :", vi)
try:
eik = edits[(vi, vk)]
except KeyError:
continue
for j in range(len(V)):
vj = V[j]
if very_verbose:
print("v _j :", vj)
try:
ekj = edits[(vk, vj)]
except KeyError:
continue
dik = get_distance(dist, vi, vk)
dkj = get_distance(dist, vk, vj)
if dik + dkj < get_distance(dist, vi, vj):
eij = merge_edits(eik, ekj)
if eij[-1] <= max_unchanged_words:
if very_verbose:
print(" add new arcs v_i -> v_j:", eij)
E.append((vi, vj))
dist[(vi, vj)] = dik + dkj
edits[(vi, vj)] = eij
# remove noop transitive arcs
if very_verbose:
print("-- Remove transitive noop arcs --")
for edge in E:
e = edits[edge]
if e[0] == 'noop' and dist[edge] > 1:
if very_verbose:
print(" remove noop arc v_i -> vj:", edge)
E.remove(edge)
dist[edge] = float('inf')
del edits[edge]
return(V, E, dist, edits)
# combine two edits into one
# edit = (type, start, end, orig, correction, #unchanged_words)
def merge_edits(e1, e2, joiner = ' '):
if e1[0] == 'ins':
if e2[0] == 'ins':
e = ('ins', e1[1], e2[2], '', e1[4] + joiner + e2[4], e1[5] + e2[5])
elif e2[0] == 'del':
e = ('sub', e1[1], e2[2], e2[3], e1[4], e1[5] + e2[5])
elif e2[0] == 'sub':
e = ('sub', e1[1], e2[2], e2[3], e1[4] + joiner + e2[4], e1[5] + e2[5])
elif e2[0] == 'noop':
e = ('sub', e1[1], e2[2], e2[3], e1[4] + joiner + e2[4], e1[5] + e2[5])
elif e1[0] == 'del':
if e2[0] == 'ins':
e = ('sub', e1[1], e2[2], e1[3], e2[4], e1[5] + e2[5])
elif e2[0] == 'del':
e = ('del', e1[1], e2[2], e1[3] + joiner + e2[3], '', e1[5] + e2[5])
elif e2[0] == 'sub':
e = ('sub', e1[1], e2[2], e1[3] + joiner + e2[3], e2[4], e1[5] + e2[5])
elif e2[0] == 'noop':
e = ('sub', e1[1], e2[2], e1[3] + joiner + e2[3], e2[4], e1[5] + e2[5])
elif e1[0] == 'sub':
if e2[0] == 'ins':
e = ('sub', e1[1], e2[2], e1[3], e1[4] + joiner + e2[4], e1[5] + e2[5])
elif e2[0] == 'del':
e = ('sub', e1[1], e2[2], e1[3] + joiner + e2[3], e1[4], e1[5] + e2[5])
elif e2[0] == 'sub':
e = ('sub', e1[1], e2[2], e1[3] + joiner + e2[3], e1[4] + joiner + e2[4], e1[5] + e2[5])
elif e2[0] == 'noop':
e = ('sub', e1[1], e2[2], e1[3] + joiner + e2[3], e1[4] + joiner + e2[4], e1[5] + e2[5])
elif e1[0] == 'noop':
if e2[0] == 'ins':
e = ('sub', e1[1], e2[2], e1[3], e1[4] + joiner + e2[4], e1[5] + e2[5])
elif e2[0] == 'del':
e = ('sub', e1[1], e2[2], e1[3] + joiner + e2[3], e1[4], e1[5] + e2[5])
elif e2[0] == 'sub':
e = ('sub', e1[1], e2[2], e1[3] + joiner + e2[3], e1[4] + joiner + e2[4], e1[5] + e2[5])
elif e2[0] == 'noop':
e = ('noop', e1[1], e2[2], e1[3] + joiner + e2[3], e1[4] + joiner + e2[4], e1[5] + e2[5])
else:
assert False
return e
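# Illustrative example of merge_edits (token strings are arbitrary):
#   merge_edits(('del', 1, 2, 'cat', '', 0), ('ins', 2, 2, '', 'dog', 0))
#   -> ('sub', 1, 2, 'cat', 'dog', 0)
# i.e. an adjacent deletion and insertion collapse into a single substitution.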
# build edit graph
def edit_graph(levi_matrix, backpointers):
V = []
E = []
dist = {}
edits = {}
    # breadth-first search through the matrix
v_start = (len(levi_matrix)-1, len(levi_matrix[0])-1)
queue = [v_start]
while len(queue) > 0:
v = queue[0]
queue = queue[1:]
if v in V:
continue
V.append(v)
try:
for vnext_edits in backpointers[v]:
vnext = vnext_edits[0]
edit_next = vnext_edits[1]
E.append((vnext, v))
dist[(vnext, v)] = 1
edits[(vnext, v)] = edit_next
if not vnext in queue:
queue.append(vnext)
except KeyError:
pass
return (V, E, dist, edits)
# merge two lattices, vertices, edges, and distance and edit table
def merge_graph(V1, V2, E1, E2, dist1, dist2, edits1, edits2):
# vertices
V = deepcopy(V1)
for v in V2:
if v not in V:
V.append(v)
V = sorted(V)
# edges
E = E1
for e in E2:
        if e not in E:
E.append(e)
E = sorted(E)
# distances
dist = deepcopy(dist1)
for k in list(dist2.keys()):
if k not in list(dist.keys()):
dist[k] = dist2[k]
else:
if dist[k] != dist2[k]:
print("WARNING: merge_graph: distance does not match!", file=sys.stderr)
dist[k] = min(dist[k], dist2[k])
# edit contents
edits = deepcopy(edits1)
for e in list(edits2.keys()):
if e not in list(edits.keys()):
edits[e] = edits2[e]
else:
if edits[e] != edits2[e]:
print("WARNING: merge_graph: edit does not match!", file=sys.stderr)
return (V, E, dist, edits)
# convenience method for levenshtein distance
def levenshtein_distance(first, second):
lmatrix, backpointers = levenshtein_matrix(first, second)
return lmatrix[-1][-1]
# levenshtein matrix
def levenshtein_matrix(first, second, cost_ins=1, cost_del=1, cost_sub=2):
#if len(second) == 0 or len(second) == 0:
# return len(first) + len(second)
first_length = len(first) + 1
second_length = len(second) + 1
# init
distance_matrix = [[None] * second_length for x in range(first_length)]
backpointers = {}
distance_matrix[0][0] = 0
for i in range(1, first_length):
distance_matrix[i][0] = i
edit = ("del", i-1, i, first[i-1], '', 0)
backpointers[(i, 0)] = [((i-1,0), edit)]
for j in range(1, second_length):
distance_matrix[0][j]=j
edit = ("ins", j-1, j-1, '', second[j-1], 0)
backpointers[(0, j)] = [((0,j-1), edit)]
# fill the matrix
for i in range(1, first_length):
for j in range(1, second_length):
deletion = distance_matrix[i-1][j] + cost_del
insertion = distance_matrix[i][j-1] + cost_ins
if first[i-1] == second[j-1]:
substitution = distance_matrix[i-1][j-1]
else:
substitution = distance_matrix[i-1][j-1] + cost_sub
if substitution == min(substitution, deletion, insertion):
distance_matrix[i][j] = substitution
if first[i-1] != second[j-1]:
edit = ("sub", i-1, i, first[i-1], second[j-1], 0)
else:
edit = ("noop", i-1, i, first[i-1], second[j-1], 1)
try:
backpointers[(i, j)].append(((i-1,j-1), edit))
except KeyError:
backpointers[(i, j)] = [((i-1,j-1), edit)]
if deletion == min(substitution, deletion, insertion):
distance_matrix[i][j] = deletion
edit = ("del", i-1, i, first[i-1], '', 0)
try:
backpointers[(i, j)].append(((i-1,j), edit))
except KeyError:
backpointers[(i, j)] = [((i-1,j), edit)]
if insertion == min(substitution, deletion, insertion):
distance_matrix[i][j] = insertion
edit = ("ins", i, i, '', second[j-1], 0)
try:
backpointers[(i, j)].append(((i,j-1), edit))
except KeyError:
backpointers[(i, j)] = [((i,j-1), edit)]
return (distance_matrix, backpointers)
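# Quick sanity check for the two functions above (illustrative only): with the default
# costs (ins=1, del=1, sub=2),
#   levenshtein_distance("a b c".split(), "a x c".split())  ->  2
# because the single substitution b -> x costs cost_sub = 2.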
| 38,568 | 38.761856 | 187 | py |
PT-M2 | PT-M2-main/m2score/combiner.py |
import sys
import levenshtein
from getopt import getopt
from util import paragraphs
from util import smart_open
def load_annotation(gold_file):
source_sentences = []
gold_edits = []
fgold = smart_open(gold_file, 'r')
puffer = fgold.read()
fgold.close()
    # puffer = puffer.decode('utf8')
for item in paragraphs(puffer.splitlines(True)):
item = item.splitlines(False)
sentence = [line[2:].strip() for line in item if line.startswith('S ')]
assert sentence != []
annotations = {}
for line in item[1:]:
if line.startswith('I ') or line.startswith('S '):
continue
assert line.startswith('A ')
line = line[2:]
fields = line.split('|||')
start_offset = int(fields[0].split()[0])
end_offset = int(fields[0].split()[1])
etype = fields[1]
if etype == 'noop':
start_offset = -1
end_offset = -1
corrections = [c.strip() if c != '-NONE-' else '' for c in fields[2].split('||')]
# NOTE: start and end are *token* offsets
original = ' '.join(' '.join(sentence).split()[start_offset:end_offset])
annotator = int(fields[5])
if annotator not in annotations.keys():
annotations[annotator] = []
annotations[annotator].append((start_offset, end_offset, original, corrections))
tok_offset = 0
for this_sentence in sentence:
tok_offset += len(this_sentence.split())
source_sentences.append(this_sentence)
this_edits = {}
            for annotator, annotation in annotations.items():
this_edits[annotator] = [edit for edit in annotation if edit[0] <= tok_offset and edit[1] <= tok_offset and edit[0] >= 0 and edit[1] >= 0]
if len(this_edits) == 0:
this_edits[0] = []
gold_edits.append(this_edits)
return (source_sentences, gold_edits)
def print_usage():
print >> sys.stderr, "Usage: m2scorer.py [OPTIONS] proposed_sentences gold_source"
print >> sys.stderr, "where"
print >> sys.stderr, " proposed_sentences - system output, sentence per line"
print >> sys.stderr, " source_gold - source sentences with gold token edits"
print >> sys.stderr, "OPTIONS"
print >> sys.stderr, " -v --verbose - print verbose output"
print >> sys.stderr, " --very_verbose - print lots of verbose output"
print >> sys.stderr, " --max_unchanged_words N - Maximum unchanged words when extraction edit. Default 2."
print >> sys.stderr, " --ignore_whitespace_casing - Ignore edits that only affect whitespace and caseing. Default no."
max_unchanged_words=2
ignore_whitespace_casing= False
verbose = False
very_verbose = False
opts, args = getopt(sys.argv[1:], "v", ["max_unchanged_words=", "verbose", "ignore_whitespace_casing", "very_verbose"])
for o, v in opts:
if o in ('-v', '--verbose'):
verbose = True
elif o == '--very_verbose':
very_verbose = True
elif o == '--max_unchanged_words':
max_unchanged_words = int(v)
elif o == '--ignore_whitespace_casing':
ignore_whitespace_casing = True
else:
print >> sys.stderr, "Unknown option :", o
print_usage()
sys.exit(-1)
| 3,436 | 38.505747 | 154 | py |
PT-M2 | PT-M2-main/m2score/m2scorer.py |
import sys
import levenshtein
from getopt import getopt
from util import paragraphs
from util import smart_open
def load_annotation(gold_file):
source_sentences = []
gold_edits = []
fgold = smart_open(gold_file, 'r')
puffer = fgold.read()
fgold.close()
# puffer = puffer.decode('utf8')
for item in paragraphs(puffer.splitlines(True)):
item = item.splitlines(False)
sentence = [line[2:].strip() for line in item if line.startswith('S ')]
assert sentence != []
annotations = {}
for line in item[1:]:
if line.startswith('I ') or line.startswith('S '):
continue
assert line.startswith('A ')
line = line[2:]
fields = line.split('|||')
start_offset = int(fields[0].split()[0])
end_offset = int(fields[0].split()[1])
etype = fields[1]
if etype == 'noop':
start_offset = -1
end_offset = -1
corrections = [c.strip() if c != '-NONE-' else '' for c in fields[2].split('||')]
# NOTE: start and end are *token* offsets
original = ' '.join(' '.join(sentence).split()[start_offset:end_offset])
annotator = int(fields[5])
if annotator not in list(annotations.keys()):
annotations[annotator] = []
annotations[annotator].append((start_offset, end_offset, original, corrections))
tok_offset = 0
for this_sentence in sentence:
tok_offset += len(this_sentence.split())
source_sentences.append(this_sentence)
this_edits = {}
for annotator, annotation in annotations.items():
this_edits[annotator] = [edit for edit in annotation if edit[0] <= tok_offset and edit[1] <= tok_offset and edit[0] >= 0 and edit[1] >= 0]
if len(this_edits) == 0:
this_edits[0] = []
gold_edits.append(this_edits)
return (source_sentences, gold_edits)
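# Example of the M2 annotation format this parser expects (made-up sentence):
#   S This are a sentence .
#   A 1 2|||Vform|||is|||REQUIRED|||-NONE-|||0
# After splitting an "A" line on '|||', the fields are: token span, error type,
# "||"-separated corrections, two fields unused here, and the annotator id (fields[5]).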
def print_usage():
print("Usage: m2scorer.py [OPTIONS] proposed_sentences gold_source", file=sys.stderr)
print("where", file=sys.stderr)
print(" proposed_sentences - system output, sentence per line", file=sys.stderr)
print(" source_gold - source sentences with gold token edits", file=sys.stderr)
print("OPTIONS", file=sys.stderr)
print(" -v --verbose - print verbose output", file=sys.stderr)
print(" --very_verbose - print lots of verbose output", file=sys.stderr)
print(" --max_unchanged_words N - Maximum unchanged words when extraction edit. Default 2.", file=sys.stderr)
print(" --beta B - Beta value for F-measure. Default 0.5.", file=sys.stderr)
print(" --ignore_whitespace_casing - Ignore edits that only affect whitespace and caseing. Default no.", file=sys.stderr)
#
# max_unchanged_words=2
# beta = 0.5
# ignore_whitespace_casing= False
# verbose = False
# very_verbose = False
# opts, args = getopt(sys.argv[1:], "v", ["max_unchanged_words=", "beta=", "verbose", "ignore_whitespace_casing", "very_verbose"])
# for o, v in opts:
# if o in ('-v', '--verbose'):
# verbose = True
# elif o == '--very_verbose':
# very_verbose = True
# elif o == '--max_unchanged_words':
# max_unchanged_words = int(v)
# elif o == '--beta':
# beta = float(v)
# elif o == '--ignore_whitespace_casing':
# ignore_whitespace_casing = True
# else:
# print("Unknown option :", o, file=sys.stderr)
# print_usage()
# sys.exit(-1)
#
# # starting point
# if len(args) != 2:
# print_usage()
# sys.exit(-1)
#
# system_file = args[0]
# gold_file = args[1]
#
# # load source sentences and gold edits
# source_sentences, gold_edits = load_annotation(gold_file)
#
# # load system hypotheses
# fin = smart_open(system_file, 'r')
# # system_sentences = [line.decode("utf8").strip() for line in fin.readlines()]
# system_sentences = [line.strip() for line in fin.readlines()]
# fin.close()
#
# p, r, f1 = levenshtein.batch_multi_pre_rec_f1(system_sentences, source_sentences, gold_edits, max_unchanged_words, beta, ignore_whitespace_casing, verbose, very_verbose)
#
# print(("Precision : %.4f" % p))
# print(("Recall : %.4f" % r))
# print(("F_%.1f : %.4f" % (beta, f1)))
| 4,440 | 37.95614 | 171 | py |
PT-M2 | PT-M2-main/m2score/util.py |
import operator
import random
import math
import re
def smart_open(fname, mode = 'r'):
if fname.endswith('.gz'):
import gzip
# Using max compression (9) by default seems to be slow.
# Let's try using the fastest.
return gzip.open(fname, mode, 1)
else:
return open(fname, mode, encoding="utf8")
def randint(b, a=0):
return random.randint(a,b)
def uniq(seq, idfun=None):
# order preserving
if idfun is None:
def idfun(x): return x
seen = {}
result = []
for item in seq:
marker = idfun(item)
# in old Python versions:
# if seen.has_key(marker)
# but in new ones:
if marker in seen: continue
seen[marker] = 1
result.append(item)
return result
def get_ref(edits, src):
cnt = 0
src = src.split()
e_s = src
for i in range(len(edits)):
s_idx, e_idx, oral_tok, rep_tok = edits[i]
if oral_tok == "":
e_idx = s_idx
s_idx = cnt + s_idx
e_idx = cnt + e_idx
e_s = e_s[:s_idx] + rep_tok.split() + e_s[e_idx:] if rep_tok else e_s[:s_idx] + e_s[e_idx:]
cnt += len(rep_tok.split()) - len(oral_tok.split())
return " ".join(e_s)
def compute_weight_edits(editSeq, gold, source, cand, ref, w_t, scorer=None, sent_level=False):
weight_edits, filters = {}, {}
editSeq = sorted(editSeq, key=lambda x: (x[0], x[1]))
assert cand == get_ref(editSeq, source), f"src: {source}\nref: {cand}\nref_s: {get_ref(editSeq, source)}\nedits: {editSeq}"
gold = sorted(gold, key=lambda x: (x[0], x[1]))
assert ref == get_ref(gold, source), f"src: {source}\nref: {ref}\nref_s: {get_ref(gold, source)}\nedits: {gold}"
edits = list(set(editSeq) | set(gold))
edits = sorted(edits, key=lambda x: (x[0], x[1]))
for i, edit in enumerate(edits):
edit_s = [edit]
ref_s = get_ref(edit_s, source)
if w_t == "self":
weight_edits[edit] = 1
elif w_t == "bartscore":
s1, s2 = scorer.score([ref, ref], [ref_s, source], batch_size=2)
weight_edits[edit] = abs(s1 - s2)
elif w_t == "bertscore":
s1 = scorer.score([ref_s], [ref])[-1]
s1 = s1[0].item()
s2 = scorer.score([source], [ref])[-1]
s2 = s2[0].item()
weight_edits[edit] = abs(s1 - s2)
if sent_level:
w_sum = sum(v for v in weight_edits.values())
if w_sum == 0:
weight_edits = {k: 1 / len(weight_edits) for k in weight_edits.keys()}
return weight_edits
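# Note (editor): under "bertscore"/"bartscore" each edit is weighted by how much
# applying just that edit to the source moves the metric score towards the reference,
# i.e. |score(ref, source + edit) - score(ref, source)|; "self" reduces to the ordinary
# unweighted M2 counts, and at sentence level an all-zero weight vector falls back to a
# uniform 1 / len(weight_edits).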
def sort_dict(myDict, byValue=False, reverse=False):
if byValue:
        items = sorted(myDict.items(), key=operator.itemgetter(1), reverse=reverse)
else:
items = sorted(myDict.items())
return items
def max_dict(myDict, byValue=False):
if byValue:
skey=lambda x:x[1]
else:
skey=lambda x:x[0]
return max(myDict.items(), key=skey)
def min_dict(myDict, byValue=False):
if byValue:
skey=lambda x:x[1]
else:
skey=lambda x:x[0]
return min(myDict.items(), key=skey)
def paragraphs(lines, is_separator=lambda x : x == '\n', joiner=''.join):
paragraph = []
for line in lines:
if is_separator(line):
if paragraph:
yield joiner(paragraph)
paragraph = []
else:
paragraph.append(line)
if paragraph:
yield joiner(paragraph)
def isASCII(word):
try:
word = word.decode("ascii")
return True
except UnicodeEncodeError :
return False
except UnicodeDecodeError:
return False
def intersect(x, y):
return [z for z in x if z in y]
# Mapping Windows CP1252 Gremlins to Unicode
# from http://effbot.org/zone/unicode-gremlins.htm
cp1252 = {
# from http://www.microsoft.com/typography/unicode/1252.htm
u"\x80": u"\u20AC", # EURO SIGN
u"\x82": u"\u201A", # SINGLE LOW-9 QUOTATION MARK
u"\x83": u"\u0192", # LATIN SMALL LETTER F WITH HOOK
u"\x84": u"\u201E", # DOUBLE LOW-9 QUOTATION MARK
u"\x85": u"\u2026", # HORIZONTAL ELLIPSIS
u"\x86": u"\u2020", # DAGGER
u"\x87": u"\u2021", # DOUBLE DAGGER
u"\x88": u"\u02C6", # MODIFIER LETTER CIRCUMFLEX ACCENT
u"\x89": u"\u2030", # PER MILLE SIGN
u"\x8A": u"\u0160", # LATIN CAPITAL LETTER S WITH CARON
u"\x8B": u"\u2039", # SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u"\x8C": u"\u0152", # LATIN CAPITAL LIGATURE OE
u"\x8E": u"\u017D", # LATIN CAPITAL LETTER Z WITH CARON
u"\x91": u"\u2018", # LEFT SINGLE QUOTATION MARK
u"\x92": u"\u2019", # RIGHT SINGLE QUOTATION MARK
u"\x93": u"\u201C", # LEFT DOUBLE QUOTATION MARK
u"\x94": u"\u201D", # RIGHT DOUBLE QUOTATION MARK
u"\x95": u"\u2022", # BULLET
u"\x96": u"\u2013", # EN DASH
u"\x97": u"\u2014", # EM DASH
u"\x98": u"\u02DC", # SMALL TILDE
u"\x99": u"\u2122", # TRADE MARK SIGN
u"\x9A": u"\u0161", # LATIN SMALL LETTER S WITH CARON
u"\x9B": u"\u203A", # SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u"\x9C": u"\u0153", # LATIN SMALL LIGATURE OE
u"\x9E": u"\u017E", # LATIN SMALL LETTER Z WITH CARON
u"\x9F": u"\u0178", # LATIN CAPITAL LETTER Y WITH DIAERESIS
}
def fix_cp1252codes(text):
    # map cp1252 gremlins to real unicode characters
    if isinstance(text, bytes):
        # make sure we have a unicode string (Python 3: decode bytes first)
        text = text.decode("iso-8859-1")
    if re.search(u"[\x80-\x9f]", text):
        def fixup(m):
            s = m.group(0)
            return cp1252.get(s, s)
        text = re.sub(u"[\x80-\x9f]", fixup, text)
    return text
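# Example (illustrative): the cp1252 "right single quotation mark" gremlin \x92
# is mapped to its proper code point U+2019:
#   fix_cp1252codes("it\x92s")  ->  "it\u2019s"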
def clean_utf8(text):
    # filter() returns an iterator in Python 3, so join the result back into a string
    return "".join(filter(lambda x: x > '\x1f' and x < '\x7f', text))
def pairs(iterable, overlapping=False):
    iterator = iter(iterable)
    token = next(iterator)  # iterator.next() was removed in Python 3
    i = 0
    for lookahead in iterator:
        if overlapping or i % 2 == 0:
            yield (token, lookahead)
        token = lookahead
        i += 1
    if i % 2 == 0:
        yield (token, None)
def frange(start, end=None, inc=None):
    "A range function, that does accept float increments..."
    if end is None:
        end = start + 0.0
        start = 0.0
    if inc is None:
        inc = 1.0
    L = []
    while 1:
        nxt = start + len(L) * inc  # avoid shadowing the built-in next()
        if inc > 0 and nxt >= end:
            break
        elif inc < 0 and nxt <= end:
            break
        L.append(nxt)
    return L
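# Example (illustrative): frange(0, 1, 0.25)  ->  [0.0, 0.25, 0.5, 0.75]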
def softmax(values):
a = max(values)
Z = 0.0
for v in values:
Z += math.exp(v - a)
sm = [math.exp(v-a) / Z for v in values]
return sm
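# Minimal self-check (an added sketch, not part of the original file): exercises
# a few of the helpers above with toy inputs when the module is run directly.
if __name__ == "__main__":
    print(uniq([3, 1, 3, 2, 1]))                         # [3, 1, 2]
    print(get_ref([(1, 2, "go", "went")], "I go home"))  # I went home
    print(list(pairs([1, 2, 3, 4])))                     # [(1, 2), (3, 4)]
    print(softmax([1.0, 2.0, 3.0]))                      # approx. [0.09, 0.24, 0.67]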
| 7,012 | 30.308036 | 127 | py |
PT-M2 | PT-M2-main/m2score/__init__.py | 0 | 0 | 0 | py |
|
PT-M2 | PT-M2-main/m2score/Tokenizer.py |
import re
import sys
class DummyTokenizer(object):
def tokenize(self, text):
return text.split()
class PTBTokenizer(object):
def __init__(self, language="en"):
self.language = language
self.nonbreaking_prefixes = {}
self.nonbreaking_prefixes_numeric = {}
self.nonbreaking_prefixes["en"] = ''' A B C D E F G H I J K L M N O P Q R S T U V W X Y Z
Adj Adm Adv Asst Bart Bldg Brig Bros Capt Cmdr Col Comdr Con Corp Cpl DR Dr Drs Ens
Gen Gov Hon Hr Hosp Insp Lt MM MR MRS MS Maj Messrs Mlle Mme Mr Mrs Ms Msgr Op Ord
Pfc Ph Prof Pvt Rep Reps Res Rev Rt Sen Sens Sfc Sgt Sr St Supt Surg
v vs i.e rev e.g Nos Nr'''.split()
self.nonbreaking_prefixes_numeric["en"] = '''No Art pp'''.split()
self.special_chars = re.compile(r"([^\w\s\.\'\`\,\-\"\|\/])", flags=re.UNICODE)
def tokenize(self, text, ptb=False):
text = text.strip()
text = " " + text + " "
# Separate all "other" punctuation
text = re.sub(self.special_chars, r' \1 ', text)
text = re.sub(r";", r' ; ', text)
text = re.sub(r":", r' : ', text)
# replace the pipe character
text = re.sub(r"\|", r' -PIPE- ', text)
# split internal slash, keep others
text = re.sub(r"(\S)/(\S)", r'\1 / \2', text)
# PTB tokenization
if ptb:
text = re.sub(r"\(", r' -LRB- ', text)
text = re.sub(r"\)", r' -RRB- ', text)
text = re.sub(r"\[", r' -LSB- ', text)
text = re.sub(r"\]", r' -RSB- ', text)
text = re.sub(r"\{", r' -LCB- ', text)
text = re.sub(r"\}", r' -RCB- ', text)
text = re.sub(r"\"\s*$", r" '' ", text)
text = re.sub(r"^\s*\"", r' `` ', text)
text = re.sub(r"(\S)\"\s", r"\1 '' ", text)
text = re.sub(r"\s\"(\S)", r" `` \1", text)
text = re.sub(r"(\S)\"", r"\1 '' ", text)
text = re.sub(r"\"(\S)", r" `` \1", text)
text = re.sub(r"'\s*$", r" ' ", text)
text = re.sub(r"^\s*'", r" ` ", text)
text = re.sub(r"(\S)'\s", r"\1 ' ", text)
text = re.sub(r"\s'(\S)", r" ` \1", text)
text = re.sub(r"'ll", r" -CONTRACT-ll", text)
text = re.sub(r"'re", r" -CONTRACT-re", text)
text = re.sub(r"'ve", r" -CONTRACT-ve", text)
text = re.sub(r"n't", r" n-CONTRACT-t", text)
text = re.sub(r"'LL", r" -CONTRACT-LL", text)
text = re.sub(r"'RE", r" -CONTRACT-RE", text)
text = re.sub(r"'VE", r" -CONTRACT-VE", text)
text = re.sub(r"N'T", r" N-CONTRACT-T", text)
text = re.sub(r"cannot", r"can not", text)
text = re.sub(r"Cannot", r"Can not", text)
# multidots stay together
text = re.sub(r"\.([\.]+)", r" DOTMULTI\1", text)
        while re.search(r"DOTMULTI\.", text):
text = re.sub(r"DOTMULTI\.([^\.])", r"DOTDOTMULTI \1", text)
text = re.sub(r"DOTMULTI\.", r"DOTDOTMULTI", text)
# multidashes stay together
text = re.sub(r"\-([\-]+)", r" DASHMULTI\1", text)
        while re.search(r"DASHMULTI\-", text):
text = re.sub(r"DASHMULTI\-([^\-])", r"DASHDASHMULTI \1", text)
text = re.sub(r"DASHMULTI\-", r"DASHDASHMULTI", text)
# Separate ',' except if within number.
text = re.sub(r"(\D),(\D)", r'\1 , \2', text)
# Separate ',' pre and post number.
text = re.sub(r"(\d),(\D)", r'\1 , \2', text)
text = re.sub(r"(\D),(\d)", r'\1 , \2', text)
if self.language == "en":
text = re.sub(r"([^a-zA-Z])'([^a-zA-Z])", r"\1 ' \2", text)
text = re.sub(r"(\W)'([a-zA-Z])", r"\1 ' \2", text)
text = re.sub(r"([a-zA-Z])'([^a-zA-Z])", r"\1 ' \2", text)
text = re.sub(r"([a-zA-Z])'([a-zA-Z])", r"\1 '\2", text)
text = re.sub(r"(\d)'(s)", r"\1 '\2", text)
text = re.sub(r" '\s+s ", r" 's ", text)
text = re.sub(r" '\s+s ", r" 's ", text)
elif self.language == "fr":
text = re.sub(r"([^a-zA-Z])'([^a-zA-Z])", r"\1 ' \2", text)
text = re.sub(r"([^a-zA-Z])'([a-zA-Z])", r"\1 ' \2", text)
text = re.sub(r"([a-zA-Z])'([^a-zA-Z])", r"\1 ' \2", text)
text = re.sub(r"([a-zA-Z])'([a-zA-Z])", r"\1' \2", text)
else:
            text = re.sub(r"'", r" ' ", text)  # the string argument was missing
# re-combine single quotes
text = re.sub(r"' '", r"''", text)
words = text.split()
text = ''
for i, word in enumerate(words):
            m = re.match(r"^(\S+)\.$", word)
if m:
pre = m.group(1)
                if ((re.search(r"\.", pre) and re.search("[a-zA-Z]", pre)) or \
                    (pre in self.nonbreaking_prefixes[self.language]) or \
                    ((i < len(words)-1) and re.match(r"^\d+", words[i+1]))):
                    pass # do nothing
                elif ((pre in self.nonbreaking_prefixes_numeric[self.language]) and \
                    (i < len(words)-1) and re.match(r"\d+", words[i+1])):
pass # do nothing
else:
word = pre + " ."
text += word + " "
text = re.sub(r"'\s+'", r"''", text)
# restore multidots
while re.search("DOTDOTMULTI", text):
text = re.sub(r"DOTDOTMULTI", r"DOTMULTI.", text)
text = re.sub(r"DOTMULTI", r".", text)
# restore multidashes
while re.search("DASHDASHMULTI", text):
text = re.sub(r"DASHDASHMULTI", r"DASHMULTI-", text)
text = re.sub(r"DASHMULTI", r"-", text)
text = re.sub(r"-CONTRACT-", r"'", text)
return text.split()
def tokenize_all(self,sentences, ptb=False):
return [self.tokenize(t, ptb) for t in sentences]
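# Illustrative example (made-up sentence, not from the original code base):
#   PTBTokenizer().tokenize("A cost of $5,000; done.")
#   -> ['A', 'cost', 'of', '$', '5,000', ';', 'done', '.']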
# starting point
if __name__ == "__main__":
    tokenizer = PTBTokenizer()
    for line in sys.stdin:
        # sys.stdin yields str in Python 3, so no decode/encode round-trip is needed
        tokens = tokenizer.tokenize(line.strip())
        out = ' '.join(tokens)
        print(out)
| 6,383 | 39.923077 | 98 | py |