From c0a504d8ac2e6fcf5875c234b06df7b84e2a0c1b Mon Sep 17 00:00:00 2001 From: gbellandi Date: Tue, 11 Feb 2020 15:09:38 +0100 Subject: [PATCH 1/3] python3 version translated to python3 all py scripts, installation works but import still gives error Co-Authored-By: stijnvanhoey --- pystran/SCE_cceua.py | 2 +- pystran/__init__.py | 4 +- pystran/distributions.py | 18 +++---- pystran/evaluationfunctions.py | 14 +++--- pystran/latextablegenerator.py | 56 +++++++++++----------- pystran/optimization_sce.py | 72 ++++++++++++++-------------- pystran/parameter.py | 10 ++-- pystran/plot_functions_rev.py | 48 +++++++++---------- pystran/sensitivity_base.py | 46 +++++++++--------- pystran/sensitivity_dynamic.py | 8 ++-- pystran/sensitivity_globaloat.py | 34 +++++++------- pystran/sensitivity_morris.py | 54 ++++++++++----------- pystran/sensitivity_regression.py | 48 +++++++++---------- pystran/sensitivity_rsa.py | 12 ++--- pystran/sensitivity_sobol.py | 78 +++++++++++++++---------------- pystran/sobol_lib.py | 44 ++++++++--------- 16 files changed, 273 insertions(+), 275 deletions(-) diff --git a/pystran/SCE_cceua.py b/pystran/SCE_cceua.py index 36a8484..beb1a89 100644 --- a/pystran/SCE_cceua.py +++ b/pystran/SCE_cceua.py @@ -126,7 +126,7 @@ def EvalObjF(npar,x,testcase=True,testnr=1,extra=[]): if testnr==5: return testfunctn5(npar,x) else: - return Modrun(npar,x,extra) #Welk model/welke objfunctie/welke periode/.... users keuze! + return Modrun(npar,x,extra) #Welk model/welke objfunctie/welke periode/.... users keuze! 
diff --git a/pystran/__init__.py b/pystran/__init__.py index 829121d..b8b9f02 100644 --- a/pystran/__init__.py +++ b/pystran/__init__.py @@ -8,7 +8,7 @@ import numpy as np import matplotlib.pyplot as plt -from evaluationfunctions import Evaluation, Likelihood +#from evaluationfunctions import Evaluation, Likelihood from sensitivity_base import SensitivityAnalysis from sensitivity_dynamic import DynamicSensitivity @@ -22,4 +22,4 @@ if __name__ == '__main__': - print 'pySTAN: python STRucture ANalyst (Van Hoey S. 2012)' \ No newline at end of file + print('pySTAN: python STRucture ANalyst (Van Hoey S. 2012)') \ No newline at end of file diff --git a/pystran/distributions.py b/pystran/distributions.py index 70baaac..03a555e 100644 --- a/pystran/distributions.py +++ b/pystran/distributions.py @@ -48,7 +48,7 @@ def TriangularDistribution(x,left,mode,right): see numpy manual (or Beven_book: left=0, right=1) ''' if mode>right: - print 'right en mode zijn omgewisseld!!' + print('right en mode zijn omgewisseld!!') if left<=x<=mode: px=2*(x-left)/((right-left)*(mode-left)) elif mode<=x<=right: @@ -63,11 +63,11 @@ def TrapezoidalDistribution(x,left,mode1,mode2,right): based on a certain inputvalue ''' if mode1>right: - print 'right en mode1 zijn omgewisseld!!' + print('right en mode1 zijn omgewisseld!!') if mode2>right: - print 'right en mode2 zijn omgewisseld!!' + print('right en mode1 zijn omgewisseld!!') if mode1>mode2: - print 'mode1 en mode2 zijn omgewisseld!!' 
+ print('right en mode1 zijn omgewisseld!!') u=2/(right+mode2-mode1-left) @@ -150,7 +150,7 @@ def randomTriangular(left=0.0, mode=None, right=1.0, rnsize=None): ''' if mode==None: - print 'Triangular needs mode-value' + print('Triangular needs mode-value') rn=np.random.triangular(left, mode, right, rnsize) return rn @@ -175,9 +175,9 @@ def randomTrapezoidal(left=0.0, mode1=None, mode2=None, ''' if mode1==None: - print 'Triangular needs 2 mode-values' + print('Triangular needs 2 mode-values') if mode1==None: - print 'Triangular needs 2 mode-values' + print('Triangular needs 2 mode-values') rn=np.zeros(rnsize) for i in range(np.size(rn)): @@ -196,7 +196,7 @@ def randomTrapezoidal(left=0.0, mode1=None, mode2=None, elif (1-h*(b-d)/2)<=y<=1.0: rn[i]=b-np.sqrt(2*(b-d)/h)*np.sqrt(1-y) else: - print 'not in correct range' + print('not in correct range') return rn #Normal @@ -335,5 +335,5 @@ def ltqnorm(p): def ltqnormarr(parr, mu=0.0, sigma=1.): stnorm=np.array([ltqnorm(p) for p in parr]) - print type(stnorm) + print((type(stnorm))) return stnorm2norm(stnorm, mu, sigma) diff --git a/pystran/evaluationfunctions.py b/pystran/evaluationfunctions.py index a5aba0f..5e50ac1 100644 --- a/pystran/evaluationfunctions.py +++ b/pystran/evaluationfunctions.py @@ -41,7 +41,7 @@ class Evaluation(FlowAnalysis): def __init__(self, observed, modelled): FlowAnalysis.__init__(self, observed) - if modelled.shape <> observed.shape: + if modelled.shape != observed.shape: raise Exception('Modelled and observed timeseries need \ to be of the same size') @@ -49,7 +49,7 @@ def __init__(self, observed, modelled): self.residuals = self.observed - self.modelled self.infodict() - print 'Criteria suited for model minimization, use optim=True to be sure' + print('Criteria suited for model minimization, use optim=True to be sure') def infodict(self): ''' @@ -1422,7 +1422,7 @@ class Likelihood(FlowAnalysis): def __init__(self, observed, modelled): FlowAnalysis.__init__(self, observed) - if modelled.shape 
<> observed.shape: + if modelled.shape != observed.shape: raise Exception('Modelled and observed timeseries need \ to be of the same size') @@ -1430,10 +1430,10 @@ def __init__(self, observed, modelled): self.residuals = self.observed - self.modelled self.infodict() - print 'Criteria suited for Likihood maximization, use optim=True\ + print('Criteria suited for Likihood maximization, use optim=True\ to use in minimzation exercise; Not adviced to use in automated\ minimalization algorithms, because of jumps between 0 and none\ - zero values, use the evaluation class instead; use log version instead' + zero values, use the evaluation class instead; use log version instead') def infodict(self): ''' @@ -1551,7 +1551,7 @@ class evalmodselection(FlowAnalysis): def __init__(self, observed, modelled, npar): FlowAnalysis.__init__(self, observed) - if modelled.shape <> observed.shape: + if modelled.shape != observed.shape: raise Exception('Modelled and observed timeseries need \ to be of the same size') @@ -1560,7 +1560,7 @@ def __init__(self, observed, modelled, npar): self.infodict() self.npar = npar - print 'Criteria suited for statistical model structure selection' + print('Criteria suited for statistical model structure selection') def SSE(self): return sum(self.residuals**2) diff --git a/pystran/latextablegenerator.py b/pystran/latextablegenerator.py index bd16abc..2792873 100644 --- a/pystran/latextablegenerator.py +++ b/pystran/latextablegenerator.py @@ -15,7 +15,7 @@ import os,string,re,sys import types -float_types = [types.FloatType, numpy.float16, numpy.float32, numpy.float64] +float_types = [float, numpy.float16, numpy.float32, numpy.float64] ''' This module provides function for working with significant @@ -26,12 +26,12 @@ def round_sig(x, n): '''round floating point x to n significant figures''' - if type(n) is not types.IntType: - raise TypeError, "n must be an integer" + if type(n) is not int: + raise TypeError("n must be an integer") try: x = float(x) 
except: - raise TypeError, "x must be a floating point object" + raise TypeError("x must be a floating point object") form = "%0." + str(n-1) + "e" st = form % x num,expo = epat.findall(st)[0] @@ -84,7 +84,7 @@ def format_table(cols, errors, n, labels=None, headers=None, latex=False): an optional list of column headers. If [latex] is true, format the table so that it can be included in a LaTeX table ''' if len(cols) != len(errors): - raise ValueError, "Error: cols and errors must have same length" + raise ValueError("Error: cols and errors must have same length") ncols = len(cols) nrows = len(cols[0]) @@ -96,14 +96,14 @@ def format_table(cols, errors, n, labels=None, headers=None, latex=False): elif len(headers) == ncols+1: pass else: - raise ValueError, "length of headers should be %d" % (ncols+1) + raise ValueError("length of headers should be %d" % (ncols+1)) else: if len(headers) != ncols: - raise ValueError, "length of headers should be %d" % (ncols) + raise ValueError("length of headers should be %d" % (ncols)) if labels is not None: if len(labels) != nrows: - raise ValueError, "length of labels should be %d" % (nrows) + raise ValueError("length of labels should be %d" % (nrows)) strcols = [] for col,error in zip(cols,errors): @@ -117,7 +117,7 @@ def format_table(cols, errors, n, labels=None, headers=None, latex=False): lengths = [max([len(item) for item in strcol]) for strcol in strcols] format = "" if labels is not None: - format += "%%%ds " % (max(map(len, labels))) + format += "%%%ds " % (max(list(map(len, labels)))) if latex: format += "& " for length in lengths: @@ -192,10 +192,10 @@ def __init__(self, numcols, justs=None, fontsize=None, rotate=False, else: self.justs = list(justs) if len(self.justs) != numcols: - raise ValueError, "Error, justs must have %d elements" % (numcols) + raise ValueError("Error, justs must have %d elements" % (numcols)) for just in self.justs: if just not in ['c','r','l']: - raise ValueError, "Error, invalid character for 
just: %s" % just + raise ValueError("Error, invalid character for just: %s" % just) self.fontsize = fontsize self.rotate = rotate self.tablewidth = tablewidth @@ -224,21 +224,21 @@ def add_header_row(self, headers, cols=None): if cols is None: if len(headers) != self.numcols: - raise ValueError, "Error, headers must be a list of length %d" %\ - self.numcols + raise ValueError("Error, headers must be a list of length %d" %\ + self.numcols) self.headers.append(headers) - self.header_ids.append(range(self.numcols)) + self.header_ids.append(list(range(self.numcols))) else: ids = [] for item in cols: - if type(item) is types.IntType: + if type(item) is int: ids.append(item) - elif type(item) is types.TupleType: - ids += range(item[0],item[1]+1) + elif type(item) is tuple: + ids += list(range(item[0],item[1]+1)) ids.sort - if ids != range(self.numcols): - raise ValueError, "Error, missing columns in cols" + if ids != list(range(self.numcols)): + raise ValueError("Error, missing columns in cols") self.headers.append(headers) self.header_ids.append(cols) return @@ -255,29 +255,27 @@ def add_data(self, data, label="", sigfigs=2, labeltype='cutin'): given, it will be printed in the table with \cutinhead if labeltype is 'cutin' or \sidehead if labeltype is 'side'.''' - if type(data) is not types.ListType: - raise ValueError, "data should be a list" + if type(data) is not list: + raise ValueError("data should be a list") if len(data) != self.numcols: - raise ValueError, \ - "Error, length of data mush match number of table columns" + raise ValueError("Error, length of data mush match number of table columns") for datum in data: - if type(datum) not in [types.ListType, numpy.ndarray]: - raise ValueError, "data must be list of lists and numpy arrays" + if type(datum) not in [list, numpy.ndarray]: + raise ValueError("data must be list of lists and numpy arrays") if len(numpy.shape(datum)) not in [1,2]: - raise ValueError, "data items must be 1D or 2D" + raise ValueError("data 
items must be 1D or 2D") nrows = numpy.shape(data[0])[0] for datum in data[1:]: if numpy.shape(datum)[0] != nrows: - raise ValueError, "each data item must have same first dimension" + raise ValueError("each data item must have same first dimension") self.nrows.append(nrows) if len(numpy.shape(sigfigs)) == 0: self.sigfigs.append([sigfigs for i in range(self.numcols)]) else: if len(numpy.shape(sigfigs)) != 1: - raise ValueError, \ - "sigfigs must be scalar or have same length as number of columns" + raise ValueError("sigfigs must be scalar or have same length as number of columns") self.sigfigs.append(sigfigs) self.data_labels.append(label) self.data_label_types.append(labeltype) diff --git a/pystran/optimization_sce.py b/pystran/optimization_sce.py index 83a9ea2..e367176 100644 --- a/pystran/optimization_sce.py +++ b/pystran/optimization_sce.py @@ -64,12 +64,12 @@ def cceua(s, sf, bl, bu, icall, maxn, ibound=0 s1=snew-bl idx=(s1<0).nonzero() - if idx[0].size <> 0: + if idx[0].size != 0: ibound=1 s1=bu-snew idx=(s1<0).nonzero() - if idx[0].size <> 0: + if idx[0].size != 0: ibound=2 if ibound >= 1: @@ -202,26 +202,26 @@ def sceua(x0, bl, bu, maxn, kstop, pcento, peps, ngs, iseed, # Computes the normalized geometric range of the parameters gnrng=np.exp(np.mean(np.log((np.max(x,axis=0)-np.min(x,axis=0))/bound))) - print 'The Initial Loop: 0' - print ' BESTF: %f ' %bestf - print ' BESTX: ' - print bestx - print ' WORSTF: %f ' %worstf - print ' WORSTX: ' - print worstx - print ' ' + print('The Initial Loop: 0') + print(' BESTF: %f ' %bestf) + print(' BESTX: ') + print(bestx) + print(' WORSTF: %f ' %worstf) + print(' WORSTX: ') + print(worstx) + print(' ') # Check for convergency; if icall >= maxn: - print '*** OPTIMIZATION SEARCH TERMINATED BECAUSE THE LIMIT' - print 'ON THE MAXIMUM NUMBER OF TRIALS ' - print maxn - print 'HAS BEEN EXCEEDED. SEARCH WAS STOPPED AT TRIAL NUMBER:' - print icall - print 'OF THE INITIAL LOOP!' 
+ print('*** OPTIMIZATION SEARCH TERMINATED BECAUSE THE LIMIT') + print('ON THE MAXIMUM NUMBER OF TRIALS ') + print(maxn) + print('HAS BEEN EXCEEDED. SEARCH WAS STOPPED AT TRIAL NUMBER:') + print(icall) + print('OF THE INITIAL LOOP!') if gnrng < peps: - print 'THE POPULATION HAS CONVERGED TO A PRESPECIFIED SMALL PARAMETER SPACE' + print('THE POPULATION HAS CONVERGED TO A PRESPECIFIED SMALL PARAMETER SPACE') # Begin evolution loops: nloop = 0 @@ -237,7 +237,7 @@ def sceua(x0, bl, bu, maxn, kstop, pcento, peps, ngs, iseed, cx=np.zeros((npg,nopt)) cf=np.zeros((npg)) - k1=np.array(range(npg)) + k1=np.array(list(range(npg))) k2=k1*ngs+igs cx[k1,:] = x[k2,:] cf[k1] = xf[k2] @@ -314,24 +314,24 @@ def sceua(x0, bl, bu, maxn, kstop, pcento, peps, ngs, iseed, # Computes the normalized geometric range of the parameters gnrng=np.exp(np.mean(np.log((np.max(x,axis=0)-np.min(x,axis=0))/bound))) - print 'Evolution Loop: %d - Trial - %d' %(nloop,icall) - print ' BESTF: %f ' %bestf - print ' BESTX: ' - print bestx - print ' WORSTF: %f ' %worstf - print ' WORSTX: ' - print worstx - print ' ' + print('Evolution Loop: %d - Trial - %d' %(nloop,icall)) + print(' BESTF: %f ' %bestf) + print(' BESTX: ') + print(bestx) + print(' WORSTF: %f ' %worstf) + print(' WORSTX: ') + print(worstx) + print(' ') # Check for convergency; if icall >= maxn: - print '*** OPTIMIZATION SEARCH TERMINATED BECAUSE THE LIMIT' - print 'ON THE MAXIMUM NUMBER OF TRIALS ' - print maxn - print 'HAS BEEN EXCEEDED.' 
+ print('*** OPTIMIZATION SEARCH TERMINATED BECAUSE THE LIMIT') + print('ON THE MAXIMUM NUMBER OF TRIALS ') + print(maxn) + print('HAS BEEN EXCEEDED.') if gnrng < peps: - print 'THE POPULATION HAS CONVERGED TO A PRESPECIFIED SMALL PARAMETER SPACE' + print('THE POPULATION HAS CONVERGED TO A PRESPECIFIED SMALL PARAMETER SPACE') criter=np.append(criter,bestf) @@ -339,13 +339,13 @@ def sceua(x0, bl, bu, maxn, kstop, pcento, peps, ngs, iseed, criter_change= np.abs(criter[nloop-1]-criter[nloop-kstop])*100 criter_change= criter_change/np.mean(np.abs(criter[nloop-kstop:nloop])) if criter_change < pcento: - print 'THE BEST POINT HAS IMPROVED IN LAST %d LOOPS BY LESS THAN THE THRESHOLD %f' %(kstop,pcento) - print 'CONVERGENCY HAS ACHIEVED BASED ON OBJECTIVE FUNCTION CRITERIA!!!' + print('THE BEST POINT HAS IMPROVED IN LAST %d LOOPS BY LESS THAN THE THRESHOLD %f' %(kstop,pcento)) + print('CONVERGENCY HAS ACHIEVED BASED ON OBJECTIVE FUNCTION CRITERIA!!!') # End of the Outer Loops - print 'SEARCH WAS STOPPED AT TRIAL NUMBER: %d' %icall - print 'NORMALIZED GEOMETRIC RANGE = %f' %gnrng - print 'THE BEST POINT HAS IMPROVED IN LAST %d LOOPS BY %f' %(kstop,criter_change) + print('SEARCH WAS STOPPED AT TRIAL NUMBER: %d' %icall) + print('NORMALIZED GEOMETRIC RANGE = %f' %gnrng) + print('THE BEST POINT HAS IMPROVED IN LAST %d LOOPS BY %f' %(kstop,criter_change)) #reshape BESTX BESTX=BESTX.reshape(BESTX.size/nopt,nopt) diff --git a/pystran/parameter.py b/pystran/parameter.py index 4321a76..fd9b750 100644 --- a/pystran/parameter.py +++ b/pystran/parameter.py @@ -301,7 +301,7 @@ def latinhypercube(self, nruns): for j in range(0, nruns): pranges.append(random.uniform(low + j*delta, low + (j + 1)*delta)) - s = range(0, nruns) + s = list(range(0, nruns)) result = [] for i in range(0, nruns): added = random.sample(s, 1)[0] @@ -359,13 +359,13 @@ def sobol(parsin, nruns, seed = 1): ndim = len(parsin) pars = np.zeros((nruns, ndim)) - for i in xrange(1, nruns+1): + for i in range(1, nruns+1): [r, 
seed_out] = i4_sobol(ndim, seed) pars[i-1, :] = r seed = seed_out for i in range(ndim): pars[:, i] = rescale(pars[:,i], parsin[i].min, parsin[i].max) - print 'The seed to continue this sampling procedure is', seed,'.' - print 'If you do not update the seed for extra samples, the samples will \ - be the same!' + print('The seed to continue this sampling procedure is', seed,'.') + print('If you do not update the seed for extra samples, the samples will \ + be the same!') return pars diff --git a/pystran/plot_functions_rev.py b/pystran/plot_functions_rev.py index bdc76be..4e18b30 100644 --- a/pystran/plot_functions_rev.py +++ b/pystran/plot_functions_rev.py @@ -251,25 +251,25 @@ def scatterplot_matrix(data1, plottext=None, data2 = False, limin = False, limax1=np.around(np.max(data1[i]),decimals = dec1) limin2=np.around(np.min(data2[i]),decimals = dec2) limax2=np.around(np.max(data2[i]),decimals = dec2) - print dec2 + print(dec2) limin.append(min(limin1,limin2)) limax.append(max(limax1,limax2)) if np.abs(limin1 - limin2) > min(limin1,limin2): - print np.abs(limin1 - limin2), min(limin1,limin2),'min' - print 'potentially the datalimits of two datasets are \ - too different for presenting results' + print(np.abs(limin1 - limin2), min(limin1,limin2),'min') + print('potentially the datalimits of two datasets are \ + too different for presenting results') if np.abs(limax1 - limax2) > min(limax1,limax2): - print np.abs(limax1 - limax2), min(limax1,limax2),'max' - print 'potentially the datalimits of two datasets are\ - too different for acceptabel results' + print(np.abs(limax1 - limax2), min(limax1,limax2),'max') + print('potentially the datalimits of two datasets are\ + too different for acceptabel results') else: dec1 = definedec(np.min(data1[i]),np.max(data1[i])) limin.append(np.around(np.min(data1[i]),decimals = dec1)) limax.append(np.around(np.max(data1[i]),decimals = dec1)) - print 'used limits are', limin,'and', limax + print('used limits are', limin,'and', limax) 
else: - print 'used limits are', limin,'and', limax + print('used limits are', limin,'and', limax) # Plot the data. for i, j in zip(*np.triu_indices_from(axes, k=1)): @@ -369,17 +369,17 @@ def scatterplot_matrix(data1, plottext=None, data2 = False, limin = False, facecolor = 'none', bins=20, edgecolor=str(cls[ig]), linewidth = 1.5) axes[i,i].set_xlim(limin[i],limax[i]) - print limin[i],limax[i] + print(limin[i],limax[i]) else: axes[i,i].hist(data1[i],bins=20,color='k') axes[i,i].set_xlim(limin[i],limax[i]) - print limin[i],limax[i] + print(limin[i],limax[i]) if plothist: - print 'plottext is not added' + print('plottext is not added') # Turn on the proper x or y axes ticks. - for i, j in zip(range(numvars), itertools.cycle((-1, 0))): + for i, j in zip(list(range(numvars)), itertools.cycle((-1, 0))): axes[j,i].xaxis.set_visible(True) axes[i,j].yaxis.set_visible(True) @@ -763,8 +763,8 @@ def Scatter_hist(data1, data2, data1b=False, data2b=False, binwidth = 0.5, # the scatter plot: if isinstance(data1b, np.ndarray): - print '*args, **kwargs do not have any influcence when using two\ - options' + print('*args, **kwargs do not have any influcence when using two\ + options') axScatter.scatter(data1, data2, facecolor = 'none', edgecolor='k',s=25) axScatter.scatter(data1b, data2b, facecolor='none', @@ -1009,8 +1009,8 @@ def Scatter_hist_withOF(data1, data2, data1b=False, data2b=False, xbinwidth = 0. binsyb = np.arange(yminb,ymaxb+ybinwidth_abs,ybinwidth_abs) if SSE == None: - print '*args, **kwargs do not have any influence when using two\ - options' + print('*args, **kwargs do not have any influence when using two\ + options') sc1 = axScatter.scatter(data1, data2, facecolor = 'none', edgecolor='k',s=25) axScatter.scatter(data1b, data2b, facecolor='none', @@ -1028,8 +1028,8 @@ def Scatter_hist_withOF(data1, data2, data1b=False, data2b=False, xbinwidth = 0. 
edgecolor='None', color='grey', normed=True, alpha = roodlichter) else: - print '*args, **kwargs do not have any influence when using two\ - options' + print('*args, **kwargs do not have any influence when using two\ + options') sc1 = axScatter.scatter(data1, data2, c=SSE, vmin=vmin, vmax=vmax, edgecolors= 'none', cmap = colormaps, *args, **kwargs) @@ -1194,7 +1194,7 @@ def TornadoSensPlot(parnames, parvals, gridbins=4, midwidth=0.5, # --- Negative effects --- left if not negval.any(): - print 'no negative sensitivities; axis are made equal' + print('no negative sensitivities; axis are made equal') setequal = True axleft = fig.add_subplot(121) @@ -1244,7 +1244,7 @@ def TornadoSensPlot(parnames, parvals, gridbins=4, midwidth=0.5, # --- Positive effects --- if not posval.any(): - print 'no positive sensitivities; axis are made equal' + print('no positive sensitivities; axis are made equal') setequal = True axright = fig.add_subplot(122, sharey=axleft) @@ -1352,7 +1352,7 @@ def plotbar(ax1, values, names, width = 0.5, addval = True, sortit = False, ax1.spines['bottom'].set_position('zero') bwidth = width - xlocations = np.array(range(len(values))) + 0.25 + xlocations = np.array(list(range(len(values)))) + 0.25 ax1.bar(xlocations, values, width = bwidth, *args, **kwargs) if addval == True: @@ -1457,7 +1457,7 @@ def plothbar(ax1, values, names, width = 0.5, addval = True, sortit = False, ax1.spines['bottom'].set_position('zero') bwidth = width - ylocations = np.array(range(len(values))) + 0.25 + ylocations = np.array(list(range(len(values)))) + 0.25 ax1.barh(ylocations, values, height = width, *args, **kwargs) if addval == True: @@ -1709,7 +1709,7 @@ def interactionplot(values, names, lwidth = 2.): plt.pcolor(values, cmap=cm.gray_r, edgecolors='k', norm = Normalize(), linewidths=lwidth) plt.colorbar(pad = 0.10) - xlocations = np.array(range(nsize)) + 0.5 + xlocations = np.array(list(range(nsize))) + 0.5 ax1.yaxis.tick_right() plt.xticks(xlocations, names, rotation = 30) 
plt.yticks(xlocations, names, rotation = 30) #, size='small' diff --git a/pystran/sensitivity_base.py b/pystran/sensitivity_base.py index e867314..c23cdde 100644 --- a/pystran/sensitivity_base.py +++ b/pystran/sensitivity_base.py @@ -27,19 +27,19 @@ def print_methods(): """ Overview of the used methods """ - print '''1. Sobol Variance Based: - first and total order''' - print '''2. Regional Sensitivity Analysis: - also called Monte Carlo Filtering''' - print '''3. Morris Screening Method: - with pre-optimized defined trajects and group option''' - print '''4. Sampled-OAT: - Latin HYpercube or Sobol sampling with OAT sensitivity''' - print '''5. Standardized Regression Coefficients: - Latin HYpercube or Sobol sampling with linear regression''' - print '''6. DYNamic Identifiability Analysis: + print('''1. Sobol Variance Based: + first and total order''') + print('''2. Regional Sensitivity Analysis: + also called Monte Carlo Filtering''') + print('''3. Morris Screening Method: + with pre-optimized defined trajects and group option''') + print('''4. Sampled-OAT: + Latin HYpercube or Sobol sampling with OAT sensitivity''') + print('''5. Standardized Regression Coefficients: + Latin HYpercube or Sobol sampling with linear regression''') + print('''6. DYNamic Identifiability Analysis: Latin HYpercube or Sobol sampling with time-sliced based - evaluation''' + evaluation''') def __init__(self, parsin): ''' @@ -50,10 +50,10 @@ def __init__(self, parsin): if isinstance(parsin, dict): #bridge with pyFUSE! 
dictlist = [] - for value in parsin.itervalues(): + for value in parsin.values(): dictlist.append(value) parsin = dictlist - print parsin + print(parsin) #control for other self._parsin = parsin[:] @@ -63,7 +63,7 @@ def __init__(self, parsin): for i in range(len(parsin)): if isinstance(parsin[i], ModPar): cname = parsin[i].name - if cname in self._parmap.values(): + if cname in list(self._parmap.values()): raise ValueError("Duplicate parameter name %s"%cname) self.pars = parsin[:] self._parsin[i] = (parsin[i].min, parsin[i].max, cname) @@ -80,7 +80,7 @@ def __init__(self, parsin): if not isinstance(parsin[i][2], str): raise Exception('Name of par needs to be string') - if parsin[i][2] in self._parmap.values(): + if parsin[i][2] in list(self._parmap.values()): raise ValueError("Duplicate parameter name %s"%parsin[i][2]) self._parmap[i] = parsin[i][2] @@ -121,14 +121,14 @@ def write_parameter_sets(self, filename = 'inputparameterfile', *args, """ try: np.savetxt(filename, self.parset2run, *args, **kwargs) - print 'file saved in directory %s' % os.getcwd() + print('file saved in directory %s' % os.getcwd()) except PystanSequenceError: print('Parameter sets to run model with not yet setup.') def getcurrentmethod(self): """Check if method is defined and return name""" if self._methodname == None: - print "No method defined." + print("No method defined.") else: return self._methodname @@ -183,14 +183,14 @@ def run_pyfuse(self, pyfuse): for i in range(self._ndim): par2pyfuse[self._parmap[i]] = self.parset2run[run, i] #run the pyfuse model with new pars - print self._methodname, 'Run'+str(run) - print 'Simulation %d of %d is \ - running...' % (run + 1, self.parset2run.shape[0]) + print(self._methodname, 'Run'+str(run)) + print('Simulation %d of %d is \ + running...' % (run + 1, self.parset2run.shape[0])) pyfuse.run(new_pars = par2pyfuse, run_id = self._methodname + 'Run' + str(run)) - print 'All simulations are performed and saved in hdf5. 
You can now \ - transform the output data to an evaluation criterion.' + print('All simulations are performed and saved in hdf5. You can now \ + transform the output data to an evaluation criterion.') def scattercheck(self, parsamples, output, ncols=3, *args, **kwargs): ''' diff --git a/pystran/sensitivity_dynamic.py b/pystran/sensitivity_dynamic.py index 014dad5..ebe990d 100644 --- a/pystran/sensitivity_dynamic.py +++ b/pystran/sensitivity_dynamic.py @@ -55,16 +55,16 @@ def __init__(self, ParsIn, ModelType = 'external'): if ModelType == 'pyFUSE': self.modeltype = 'pyFUSE' - print 'The analysed model is built up by the pyFUSE environment' + print('The analysed model is built up by the pyFUSE environment') elif ModelType == 'external': self.modeltype = 'pyFUSE' - print 'The analysed model is externally run' + print('The analysed model is externally run') elif ModelType == 'PCRaster': self.modeltype = 'PCRasterPython' - print 'The analysed model is a PCRasterPython Framework instance' + print('The analysed model is a PCRasterPython Framework instance') elif ModelType == 'testmodel': self.modeltype = 'testmodel' - print 'The analysed model is a testmodel' + print('The analysed model is a testmodel') else: raise Exception('Not supported model type') diff --git a/pystran/sensitivity_globaloat.py b/pystran/sensitivity_globaloat.py index 80f5c6a..2daaf95 100644 --- a/pystran/sensitivity_globaloat.py +++ b/pystran/sensitivity_globaloat.py @@ -85,16 +85,16 @@ def __init__(self, parsin, ModelType = 'external'): if ModelType == 'pyFUSE': self.modeltype = 'pyFUSE' - print 'The analysed model is built up by the pyFUSE environment' + print('The analysed model is built up by the pyFUSE environment') elif ModelType == 'external': self.modeltype = 'pyFUSE' - print 'The analysed model is externally run' + print('The analysed model is externally run') elif ModelType == 'PCRaster': self.modeltype = 'PCRasterPython' - print 'The analysed model is a PCRasterPython Framework instance' + 
print('The analysed model is a PCRasterPython Framework instance') elif ModelType == 'testmodel': self.modeltype = 'testmodel' - print 'The analysed model is a testmodel' + print('The analysed model is a testmodel') else: raise Exception('Not supported model type') @@ -160,7 +160,7 @@ def PrepareSample(self, nbaseruns, perturbation_factor, # Par2run = np.zeros((nbaseruns,self._ndim)) - for i in xrange(1, nbaseruns+1): + for i in range(1, nbaseruns+1): [r, seed_out] = i4_sobol(self._ndim, seedin) Par2run[i-1,:] = r seedin = seed_out @@ -169,7 +169,7 @@ def PrepareSample(self, nbaseruns, perturbation_factor, Par2run[:,i] = rescale(Par2run[:,i], FacIn[i][0], FacIn[i][1]) self.seed_out = seed_out - print 'Last seed pointer is ',seed_out + print('Last seed pointer is ',seed_out) elif samplemethod=='lh': self.nbaseruns = nbaseruns @@ -281,7 +281,7 @@ def Calc_sensitivity(self,output): self.CAS_SENS[i,:] = self.CAS[i::self._ndim].mean(axis=0) self.CTRS_SENS[i,:] = self.CTRS[i::self._ndim].mean(axis=0) - print 'Use PE_SENS for ranking purposes; since it uses the absolute value of the change; giving no compensation between positive and negative partial effects' + print('Use PE_SENS for ranking purposes; since it uses the absolute value of the change; giving no compensation between positive and negative partial effects') def Get_ranking(self, choose_output=False): ''' @@ -324,30 +324,30 @@ def Get_ranking(self, choose_output=False): #get it clean in dictionary if RANK.size == RANK.shape[0]: #only one output - print 'Ranking for the singls output' + print('Ranking for the singls output') i=1 self.rankdict={} for rank in RANK: self.rankdict[str(i)] = self._namelist[rank] - print str(i),' : ',self._namelist[rank] + print(str(i),' : ',self._namelist[rank]) i+=1 return self.rankmatrix, self.rankdict else: #multiple outputs self.overall_importance = self.rankmatrix.min(axis=1) if choose_output == False: - print 'Combined ranking, by taking minimum ranking of the parameters over 
the different outputs' + print('Combined ranking, by taking minimum ranking of the parameters over the different outputs') i=0 for rank in self.overall_importance: - print self._namelist[i],' : ', str(rank) + print(self._namelist[i],' : ', str(rank)) i+=1 else: - print 'Ranking for selected output' + print('Ranking for selected output') i=1 self.rankdict={} for rank in RANK[:,choose_output-1]: self.rankdict[str(i)] = self._namelist[rank] - print str(i),' : ',self._namelist[rank] + print(str(i),' : ',self._namelist[rank]) i+=1 return self.rankmatrix @@ -379,12 +379,12 @@ def latexresults(self, outputnames, name = 'GlobalOATtable.tex'): else: #MULTIPLE outputs for i in range(self.rankmatrix.shape[1]): col.append(self.rankmatrix[:,i].tolist()) - print col + print(col) t.add_data(col, sigfigs=2) #,col3 t.print_table(fout) fout.close() - print 'Latex Results latex table file saved in directory %s'%os.getcwd() + print('Latex Results latex table file saved in directory %s'%os.getcwd()) def txtresults(self, outputnames, name='GlobalOATresults.txt'): @@ -416,7 +416,7 @@ def txtresults(self, outputnames, name='GlobalOATresults.txt'): nstring+=nname fout.write('%s %s \n' %(self._parmap[i],nstring)) fout.close() - print 'txt Results file saved in directory %s'%os.getcwd() + print('txt Results file saved in directory %s'%os.getcwd()) def plotsens(self, indice='PE', width = 0.5, addval = True, sortit = True, outputid = 0, *args, **kwargs): @@ -481,7 +481,7 @@ def plot_rankmatrix(self,outputnames,fontsize=14): ''' if len(outputnames) > 20.: - print 'Consider to split up the outputs to get nicer overview' + print('Consider to split up the outputs to get nicer overview') try: self.rankmatrix except: diff --git a/pystran/sensitivity_morris.py b/pystran/sensitivity_morris.py index c3950c2..1dd97a0 100644 --- a/pystran/sensitivity_morris.py +++ b/pystran/sensitivity_morris.py @@ -129,16 +129,16 @@ def __init__(self, parsin, ModelType = 'external'): if ModelType == 'pyFUSE': 
self.modeltype = 'pyFUSE' - print 'The analysed model is built up by the pyFUSE environment' + print('The analysed model is built up by the pyFUSE environment') elif ModelType == 'external': self.modeltype = 'pyFUSE' - print 'The analysed model is externally run' + print('The analysed model is externally run') elif ModelType == 'PCRaster': self.modeltype = 'PCRasterPython' - print 'The analysed model is a PCRasterPython Framework instance' + print('The analysed model is a PCRasterPython Framework instance') elif ModelType == 'testmodel': self.modeltype = 'testmodel' - print 'The analysed model is a testmodel' + print('The analysed model is a testmodel') else: raise Exception('Not supported model type') @@ -329,8 +329,8 @@ def Optimized_Groups(self, nbaseruns=500, intervals = 4, noptimized=10, #check the p and Delta value workaround if not intervals%2==0: - print 'It is adviced to use an even number for the p-value, number \ - of intervals, since currently not all levels are explored' + print('It is adviced to use an even number for the p-value, number \ + of intervals, since currently not all levels are explored') if Delta == 'default': self.Delta = intervals/(2.*(intervals-1.)) @@ -520,7 +520,7 @@ def Optimized_diagnostic(self, width = 0.1): fig.suptitle('Optimized sampling') # DimPlots = np.round(NumFact/2) DimPlots = int(np.ceil(NumFact/2.)) -# print hplot.shape +# print(hplot.shape) for i in range(NumFact): ax=fig.add_subplot(DimPlots,2,i+1) # n, bins, patches = ax.hist(hplot[:,i], p, color='k',ec='white') @@ -611,7 +611,7 @@ def Optimized_diagnostic(self, width = 0.1): self.QualMeasure = QualMeasure self.QualOriMeasure = QualOriMeasure - print 'The quality of the sampling strategy changed from %.3f with the old strategy to %.3f for the optimized strategy' %(QualOriMeasure,QualMeasure) + print('The quality of the sampling strategy changed from %.3f with the old strategy to %.3f for the optimized strategy') %(QualOriMeasure,QualMeasure) def 
Morris_Measure_Groups(self, Output): ''' @@ -659,11 +659,11 @@ def Morris_Measure_Groups(self, Output): try: NumGroups = Group.shape[1] - print '%d Groups are used' %NumGroups + print('%d Groups are used' %NumGroups) except: NumGroups = 0 - print 'No Groups are used' - print NumGroups, type(NumGroups) + print('No Groups are used') + print(NumGroups, type(NumGroups)) # Delt = p/(2.*(p-1.)) Delt = self.Delta @@ -672,7 +672,7 @@ def Morris_Measure_Groups(self, Output): sizeb=sizea+1 GroupMat=Group GroupMat = GroupMat.transpose() - print NumGroups + print(NumGroups) else: sizea = NumFact sizeb=sizea+1 @@ -764,7 +764,7 @@ def Morris_Measure_Groups(self, Output): self.mu = OutMatrix[:,1] self.sigma = OutMatrix[:,2] #for every output: every factor is a line, columns are mu*,mu and std if self.sigma.shape[0] > self._ndim: - print 'Different outputs are used, so split them in comparing the output, by using outputid' + print('Different outputs are used, so split them in comparing the output, by using outputid') else: self.mustar = OutMatrix[:] @@ -809,7 +809,7 @@ def runTestModel(self, ai): SAmeas_out, OutMatrix = self.Morris_Measure_Groups(output) - print 'Higher values of ai correspond to lower importance of Xi \n' + print('Higher values of ai correspond to lower importance of Xi \n') #Analytical to compare -> G Vi = np.zeros(len(ai)) @@ -824,17 +824,17 @@ def runTestModel(self, ai): Vtot = Vtot * (1+Vi[i]) Vtot = Vtot -1.
- print 'Morris gives only qualitive measures of importance, \n' - print 'a correspondance between STi and mustar is expected \n' - print 'and compared here \n' - print ' \n' + print('Morris gives only qualitive measures of importance, \n') + print('a correspondance between STi and mustar is expected \n') + print('and compared here \n') + print(' \n') - print 'Analytical Solution for STi: \n' - print VTi/Vtot - print 'The Morris mu* results: \n' - print self.mustar + print('Analytical Solution for STi: \n') + print(VTi/Vtot) + print('The Morris mu* results: \n') + print(self.mustar) - print 'A barplot is generated...' + print('A barplot is generated...') fig, ax1 = self.plotmustar(width=0.15, ec='grey',fc='grey') ax1.set_title('Morris screening result') fig2 = plt.figure() @@ -855,7 +855,7 @@ def latexresults(self, outputid=0, name = 'Morristable.tex'): name : str.tex output file name; use .tex extension in the name ''' - print 'tex: The %d th output evaluation criterion is used'%(outputid+1) + print('tex: The %d th output evaluation criterion is used'%(outputid+1)) mu2use = self.mu[outputid*self._ndim:(outputid+1)*self._ndim] mustar2use = self.mustar[outputid*self._ndim:(outputid+1)*self._ndim] @@ -873,7 +873,7 @@ def latexresults(self, outputid=0, name = 'Morristable.tex'): t.add_data([col1,col2,col3,col4], sigfigs=2) #,col3 t.print_table(fout) fout.close() - print 'Latex Results latex table file saved in directory %s'%os.getcwd() + print('Latex Results latex table file saved in directory %s'%os.getcwd()) def txtresults(self, outputid=0, name = 'Morrisresults.txt'): @@ -888,7 +888,7 @@ def txtresults(self, outputid=0, name = 'Morrisresults.txt'): output file name; use .txt extension in the name ''' - print 'txt: The %d th output evaluation criterion is used'%(outputid+1) + print('txt: The %d th output evaluation criterion is used'%(outputid+1)) mu2use = self.mu[outputid*self._ndim:(outputid+1)*self._ndim] mustar2use =
self.mustar[outputid*self._ndim:(outputid+1)*self._ndim] @@ -902,7 +902,7 @@ def txtresults(self, outputid=0, name = 'Morrisresults.txt'): mustar2use[i], sigma2use[i])) fout.close() - print 'txt Results file saved in directory %s'%os.getcwd() + print('txt Results file saved in directory %s'%os.getcwd()) def plotmu(self, width = 0.5, addval = True, sortit = True, outputid = 0, *args, **kwargs): diff --git a/pystran/sensitivity_regression.py b/pystran/sensitivity_regression.py index 1ca4417..1fd43a1 100644 --- a/pystran/sensitivity_regression.py +++ b/pystran/sensitivity_regression.py @@ -81,16 +81,16 @@ def __init__(self, parsin, ModelType = 'external'): if ModelType == 'pyFUSE': self.modeltype = 'pyFUSE' - print 'The analysed model is built up by the pyFUSE environment' + print('The analysed model is built up by the pyFUSE environment') elif ModelType == 'external': self.modeltype = 'pyFUSE' - print 'The analysed model is externally run' + print('The analysed model is externally run') elif ModelType == 'PCRaster': self.modeltype = 'PCRasterPython' - print 'The analysed model is a PCRasterPython Framework instance' + print('The analysed model is a PCRasterPython Framework instance') elif ModelType == 'testmodel': self.modeltype = 'testmodel' - print 'The analysed model is a testmodel' + print('The analysed model is a testmodel') else: raise Exception('Not supported model type') @@ -141,7 +141,7 @@ def PrepareSample(self, nbaseruns, samplemethod='Sobol', seedin=1): self.nbaseruns = nbaseruns # generate a (N,2k) matrix with FacIn = self._parsin - for i in xrange(1, nbaseruns+1): + for i in range(1, nbaseruns+1): [r, seed_out] = i4_sobol(self._ndim, seedin) self.parset2run[i-1,:] = r seedin = seed_out @@ -150,7 +150,7 @@ def PrepareSample(self, nbaseruns, samplemethod='Sobol', seedin=1): self.parset2run[:,i] = rescale(self.parset2run[:,i], FacIn[i][0], FacIn[i][1]) self.seed_out = seed_out - print 'Last seed pointer is ',seed_out + print('Last seed pointer is
',seed_out) elif samplemethod=='lh': self.nbaseruns = nbaseruns @@ -193,19 +193,19 @@ def _standardize(self,output): To get the Standardized Regression Coefficients (SRC), we need to standardize the variables (outputs and parameters) ''' - print 'calculating standardized values...' + print('calculating standardized values...') parmean, parstd = self.parset2run.mean(axis=0), self.parset2run.std(axis=0) outmean, outstd = output.mean(axis=0), output.std(axis=0) self.parscaled = (self.parset2run - parmean)/parstd self.outputscaled = (output - outmean)/outstd - print '...done' + print('...done') def _transform2rank(self, pars, output): ''' hidden definition for rank transformation ''' - print 'calclulating standardized values...' + print('calclulating standardized values...') parranked = np.empty_like(pars) for i in range(pars.shape[1]): @@ -266,8 +266,8 @@ def Calc_SRC(self,output, rankbased = False): #calcluate SRC values for each output for i in range(output.shape[1]): - print '--------------------------' - print 'Working on column ',i,'...' + print('--------------------------') + print('Working on column ',i,'...') #the res is the sum(res**2) value; functional for covariance calculation self.SRC[:,i], res, rank, s = np.linalg.lstsq(self.parscaled,self.outputscaled[:,i]) if rankbased == True: @@ -277,30 +277,30 @@ def Calc_SRC(self,output, rankbased = False): Yi = np.dot(self.parscaled,self.SRC[:,i]) #matrix multiplication R = np.corrcoef(self.outputscaled[:,i],Yi) Rsq = R**2 - print 'Rsq (for SRC calculation) = ', Rsq[0,1] + print('Rsq (for SRC calculation) = ', Rsq[0,1]) #another possibility to get Rsq based on residuals (OLS theory) #Rsq_2 = 1. - res / sum((self.outputscaled - self.outputscaled.mean())**2) #The 0.7 threshold is a rule of thumb used in literature if Rsq[0,1] < 0.7: - print '''ATTENTION: the coefficient of determination, Rsq, i.e. the fraction of the output variance that is explained by the regression model, is lower than 0.7. for SRC calcluation. 
Consider using a method which is less dependent on the assumption of linearity and evaluate SRRC result.''' + print('''ATTENTION: the coefficient of determination, Rsq, i.e. the fraction of the output variance that is explained by the regression model, is lower than 0.7. for SRC calcluation. Consider using a method which is less dependent on the assumption of linearity and evaluate SRRC result.''') else: - print '''Assumption of linearity is assumed valid with the Rsq value higer than 0.7''' + print('''Assumption of linearity is assumed valid with the Rsq value higer than 0.7''') if rankbased == True: Yi = np.dot(self.parrankscaled,self.SRRC[:,i]) #matrix multiplication R = np.corrcoef(self.outputrankscaled[:,i],Yi) Rsq = R**2 - print 'Rsq (for SRRC calculation) = ',Rsq[0,1] + print('Rsq (for SRRC calculation) = ',Rsq[0,1]) #another check: sum of the SRC^2 should be 1!! self.sumcheck[i] = np.dot(self.SRC[:,i].transpose(),self.SRC[:,i]) - print 'Sum of squared sensitivities should approach 1, for SRC: ',self.sumcheck[i] + print('Sum of squared sensitivities should approach 1, for SRC: ',self.sumcheck[i]) if rankbased == True: sumcheck = np.dot(self.SRRC[:,i].transpose(),self.SRRC[:,i]) - print 'Sum of squared sensitivities should approach 1, for SRRC: ',sumcheck + print('Sum of squared sensitivities should approach 1, for SRRC: ',sumcheck) #Calculates the Parameter variance-covariance matrix #variances on the diagonal, covariances of factors on the non-diagonal @@ -309,14 +309,14 @@ def Calc_SRC(self,output, rankbased = False): s2 = res/(n-p) #estimator for variance; better to do n-p-1? 
self.cova[:,:,i] = s2 * np.linalg.inv(np.dot(self.parscaled.transpose(),self.parscaled)) #(X'X)-1 s2 - print 'Confidence intervals can be calculated based on covariance matrix, only done for SRC' + print('Confidence intervals can be calculated based on covariance matrix, only done for SRC') #calculate the correlation matrix for k in range(p): for l in range(p): self.corre[k,l,i] = self.cova[k,l,i]/(np.sqrt(self.cova[k,k,i])*np.sqrt(self.cova[l,l,i])) - print 'output of column ',i,' done.' - print '--------------------------' + print('output of column ',i,' done.') + print('--------------------------') #combine results in a ranking RANK = np.argsort(-self.SRC,axis=0) @@ -451,7 +451,7 @@ def plot_SRC(self, width = 0.2, addval = True, sortit = True, outputid = 'all', ax.set_axis_off() i+=1 else: - print 'SRC values of output ',outputid,' is shown in graph' + print('SRC values of output ',outputid,' is shown in graph') fig = plt.figure() ax1 = fig.add_subplot(111) ax1 = plotbar(ax1, self.SRC[:,outputid], self._namelist, width = width, @@ -500,12 +500,12 @@ def latexresults(self, outputnames, rank = False, name = 'SRCtable.tex'): else: #MULTIPLE outputs for i in range(self.rankmatrix.shape[1]): col.append(towrite[:,i].tolist()) - print col + print(col) t.add_data(col, sigfigs=2) #,col3 t.print_table(fout) fout.close() - print 'Latex Results latex table file saved in directory %s'%os.getcwd() + print('Latex Results latex table file saved in directory %s'%os.getcwd()) def txtresults(self, outputnames, rank = False, name = 'SRCresults.txt'): ''' @@ -546,7 +546,7 @@ def txtresults(self, outputnames, rank = False, name = 'SRCresults.txt'): nstring+=nname fout.write('%s %s \n' %(self._parmap[i],nstring)) fout.close() - print 'txt Results file saved in directory %s'%os.getcwd() + print('txt Results file saved in directory %s'%os.getcwd()) diff --git a/pystran/sensitivity_rsa.py b/pystran/sensitivity_rsa.py index e5b9cf4..a99fb9d 100644 --- a/pystran/sensitivity_rsa.py +++ 
b/pystran/sensitivity_rsa.py @@ -80,16 +80,16 @@ def __init__(self, parsin, ModelType = 'pyFUSE'): if ModelType == 'pyFUSE': self.modeltype = 'pyFUSE' - print 'The analysed model is built up by the pyFUSE environment' + print('The analysed model is built up by the pyFUSE environment') elif ModelType == 'external': self.modeltype = 'pyFUSE' - print 'The analysed model is externally run' + print('The analysed model is externally run') elif ModelType == 'PCRaster': self.modeltype = 'PCRasterPython' - print 'The analysed model is a PCRasterPython Framework instance' + print('The analysed model is a PCRasterPython Framework instance') elif ModelType == 'testmodel': self.modeltype = 'testmodel' - print 'The analysed model is a testmodel' + print('The analysed model is a testmodel') else: raise Exception('Not supported model type') @@ -136,7 +136,7 @@ def PrepareSample(self, nbaseruns, seedin=1): Par2run = np.zeros((nbaseruns,self._ndim)) - for i in xrange(1, nbaseruns+1): + for i in range(1, nbaseruns+1): [r, seed_out] = i4_sobol(self._ndim, seedin) Par2run[i-1,:] = r seedin = seed_out @@ -222,7 +222,7 @@ def select_behavioural(self, output, method='treshold', threshold=0.0, InputPar_Behav=np.delete(InputPar, indBad, 0) else: - print ' Choose appropriate method: treshold or percentage' + print(' Choose appropriate method: treshold or percentage') #Normaliseren van de Objectieffunctie 1! 
if norm==True: diff --git a/pystran/sensitivity_sobol.py b/pystran/sensitivity_sobol.py index 43414d7..6b83081 100644 --- a/pystran/sensitivity_sobol.py +++ b/pystran/sensitivity_sobol.py @@ -80,16 +80,16 @@ def __init__(self, parsin, ModelType = 'pyFUSE'): if ModelType == 'pyFUSE': self.modeltype = 'pyFUSE' - print 'The analysed model is built up by the pyFUSE environment' + print('The analysed model is built up by the pyFUSE environment') elif ModelType == 'external': self.modeltype = 'pyFUSE' - print 'The analysed model is externally run' + print('The analysed model is externally run') elif ModelType == 'PCRaster': self.modeltype = 'PCRasterPython' - print 'The analysed model is a PCRasterPython Framework instance' + print('The analysed model is a PCRasterPython Framework instance') elif ModelType == 'testmodel': self.modeltype = 'testmodel' - print 'The analysed model is a testmodel' + print('The analysed model is a testmodel') else: raise Exception('Not supported model type') @@ -142,7 +142,7 @@ def Sobolsampling(self, nbaseruns, seedin=1): Par2run = np.zeros((nbaseruns,ndim2)) self.Par2run = np.zeros((nbaseruns,ndim2)) - for i in xrange(1, nbaseruns+1): + for i in range(1, nbaseruns+1): [r, seed_out] = i4_sobol(ndim2, seedin) Par2run[i-1,:] = r seedin = seed_out @@ -174,7 +174,7 @@ def conditional_sampling(self, nbaseruns, cond_dict): #HERE THE CONDITIONAL PART NEEDS TO BE ADDED #conservative strategy: sample random untill enough with the conditions are found cnt = 0 - print 'get samples...' 
+ print('get samples...') while cnt < nbaseruns: newset_a = np.zeros(self._ndim) newset_b = np.zeros(self._ndim) @@ -241,9 +241,9 @@ def SobolVariancePre(self, nbaseruns, seed = 1, repl = 1, self.nbaseruns = nbaseruns self.totalruns = nbaseruns*(2 + self._ndim)*repl self.startedseed = seed - print self.startedseed + print(self.startedseed) - print 'The total cost of the analysis well be %d Monte Carlo Runs' %(nbaseruns*(2+self._ndim)*repl) + print('The total cost of the analysis well be %d Monte Carlo Runs' %(nbaseruns*(2+self._ndim)*repl)) #Set up the matrices #--------------------- @@ -262,7 +262,7 @@ def SobolVariancePre(self, nbaseruns, seed = 1, repl = 1, # But implementation cited elsewhere sAB = self.Par2run - print sAB.shape + print(sAB.shape) Aall = sAB[:,:self._ndim] Ball = sAB[:,self._ndim:] @@ -281,7 +281,7 @@ def SobolVariancePre(self, nbaseruns, seed = 1, repl = 1, # print self.Ctorun.shape if self.repl > 1: - print self.repl,' replications of analysis are used' + print(self.repl,' replications of analysis are used') for i in range(1,repl): A = Aall[i*self.nbaseruns : (i+1)*self.nbaseruns] B = Ball[i*self.nbaseruns : (i+1)*self.nbaseruns] @@ -299,9 +299,9 @@ def SobolVariancePre(self, nbaseruns, seed = 1, repl = 1, self.parset2run = Ctorun self.totalnumberruns = self.parset2run.shape[0] - print self.Ctorun.shape + print(self.Ctorun.shape) #Model needs to run for everyline of the returned matrix - print 'The parameter sets to calculate the model are stored in self.parset2run and can be extracted' + print('The parameter sets to calculate the model are stored in self.parset2run and can be extracted') def SobolVariancePost(self, output, repl = 1, adaptedbaserun = None, forevol=False): @@ -341,7 +341,7 @@ def SobolVariancePost(self, output, repl = 1, adaptedbaserun = None, forevol=Fal # if forevol == False: # raise Exception('Sobol variance evaluation considers only 1 ouput value (1D arrays)') - if repl <> self.repl: + if repl != self.repl: raise 
Exception('Control if your number of replicates is correct,\ since it does not agree with the saved number') #Needed for the vonvergense test @@ -452,22 +452,22 @@ def runTestModel(self, model, inputsmod, repl = 1): for j in range(repl): MAESTi[j] = np.sum(np.abs(VTi/Vtot-self.STi[j,:])) - print 'Analytical Solution for Si: \n' - print Vi/Vtot - print 'Sobol solution for Si: \n' - print self.Si - print 'Mean Absolute Error of Si is:', MAESi.mean() - print ' \n Anaytical Solution for STi:' - print VTi/Vtot - print 'Sobol solution for STi: \n' - print self.STi - print 'Mean Absolute Error of STi is:', MAESTi.mean() - print 'Sobol solution for STij: \n' - print self.STij + print('Analytical Solution for Si: \n') + print(Vi/Vtot) + print('Sobol solution for Si: \n') + print(self.Si) + print('Mean Absolute Error of Si is:', MAESi.mean()) + print(' \n Anaytical Solution for STi:') + print(VTi/Vtot) + print('Sobol solution for STi: \n') + print(self.STi) + print('Mean Absolute Error of STi is:', MAESTi.mean()) + print('Sobol solution for STij: \n') + print(self.STij) elif model == 'analgstarfunc': if repl > 1: - print 'Caution: replicates not supported on MAE calculation, results not represent' + print('Caution: replicates not supported on MAE calculation, results not represent') ai = inputsmod[0] alphai = inputsmod[1] di = inputsmod[2] @@ -487,11 +487,11 @@ def runTestModel(self, model, inputsmod, repl = 1): Vtot = Vtot * (1+Vi[i]) Vtot = Vtot -1. 
- print 'Anaytical Solution for Si:' - print Vi/Vtot - print 'Sobol solution for Si:' - print self.Si - print 'Mean Absolute Error for Si is:', np.sum(np.abs(Vi/Vtot-self.Si)) + print('Anaytical Solution for Si:') + print(Vi/Vtot) + print('Sobol solution for Si:') + print(self.Si) + print('Mean Absolute Error for Si is:', np.sum(np.abs(Vi/Vtot-self.Si))) else: raise Exception('Use analgfunc or analgstarfunc') @@ -506,7 +506,7 @@ def latexresults(self, name = 'Soboltable.tex'): output file name; use .tex extension in the name ''' if self.repl > 1: - print 'Table generates only output of first line!' + print('Table generates only output of first line!') fout = open(name,'w') t = Table(3, justs='lcc', caption='First order and Total sensitivity index', label="tab:sobol1tot") @@ -522,7 +522,7 @@ def latexresults(self, name = 'Soboltable.tex'): t.add_data([col1,col2,col3], sigfigs=2) #,col3 t.print_table(fout) fout.close() - print 'Results latex table file saved in directory %s'%os.getcwd() + print('Results latex table file saved in directory %s'%os.getcwd()) def txtresults(self, name = 'Sobolresults.txt'): @@ -536,7 +536,7 @@ def txtresults(self, name = 'Sobolresults.txt'): ''' if self.repl > 1: - print 'Table generates only output of first line!' + print('Table generates only output of first line!') fout = open(name,'w') fout.write('Par \t Si \t STi \n') @@ -547,7 +547,7 @@ def txtresults(self, name = 'Sobolresults.txt'): self.STi.sum())) fout.close() - print 'Results file saved in directory %s'%os.getcwd() + print('Results file saved in directory %s'%os.getcwd()) def plotSi(self, width = 0.5, addval = True, sortit = True, @@ -569,7 +569,7 @@ def plotSi(self, width = 0.5, addval = True, sortit = True, ''' if self.repl > 1: - print 'Table generates only output of first line!' 
+ print('Table generates only output of first line!') fig = plt.figure() ax1 = fig.add_subplot(111) @@ -602,7 +602,7 @@ def plotSTi(self, width = 0.5, addval = True, sortit = True, ''' if self.repl > 1: - print 'Table generates only output of first line!' + print('Table generates only output of first line!') fig = plt.figure() ax1 = fig.add_subplot(111) @@ -656,8 +656,8 @@ def sens_evolution(self, output = None, repl=1, labell = -0.07, *args, **kwargs) if repl > 1: raise Exception('Not supported for replicates!') - if output <> None: - print 'alternative output...' + if output != None: + print('alternative output...') self.output2evaluate = output else: self.output2evaluate = self._outputbk diff --git a/pystran/sobol_lib.py b/pystran/sobol_lib.py index 75bc017..d715d18 100644 --- a/pystran/sobol_lib.py +++ b/pystran/sobol_lib.py @@ -161,7 +161,7 @@ def i4_sobol_generate ( m, n, skip ): # Output, real R(M,N), the points. # r=zeros((m,n)) - for j in xrange (1, n+1): + for j in range (1, n+1): seed = skip + j - 2 [ r[0:m,j-1], seed ] = i4_sobol ( m, seed ) return r @@ -245,7 +245,7 @@ def i4_sobol( dim_num, seed ): global seed_save global v - if ( not 'initialized' in globals().keys() ): + if ( not 'initialized' in list(globals().keys()) ): initialized = 0 dim_num_save = -1 @@ -328,17 +328,17 @@ def i4_sobol( dim_num, seed ): # Check parameters. # if ( dim_num < 1 or dim_max < dim_num ): - print 'I4_SOBOL - Fatal error!' - print ' The spatial dimension DIM_NUM should satisfy:' - print ' 1 <= DIM_NUM <= %d'%dim_max - print ' But this input value is DIM_NUM = %d'%dim_num + print('I4_SOBOL - Fatal error!') + print(' The spatial dimension DIM_NUM should satisfy:') + print(' 1 <= DIM_NUM <= %d'%dim_max) + print(' But this input value is DIM_NUM = %d'%dim_num) return dim_num_save = dim_num # # Initialize the remaining rows of V. # - for i in xrange(2 , dim_num+1): + for i in range(2 , dim_num+1): # # The bits of the integer POLY(I) gives the form of polynomial I. 
# @@ -356,7 +356,7 @@ def i4_sobol( dim_num, seed ): # j = poly[i-1] includ=zeros(m) - for k in xrange(m, 0, -1): + for k in range(m, 0, -1): j2 = math.floor ( j / 2. ) includ[k-1] = (j != 2 * j2 ) j = j2 @@ -364,10 +364,10 @@ def i4_sobol( dim_num, seed ): # Calculate the remaining elements of row I as explained # in Bratley and Fox, section 2. # - for j in xrange( m+1, maxcol+1 ): + for j in range( m+1, maxcol+1 ): newv = v[i-1,j-m-1] l = 1 - for k in xrange(1, m+1): + for k in range(1, m+1): l = 2 * l if ( includ[k-1] ): newv = bitwise_xor ( int(newv), int(l * v[i-1,j-k-1]) ) @@ -376,7 +376,7 @@ def i4_sobol( dim_num, seed ): # Multiply columns of V by appropriate power of 2. # l = 1 - for j in xrange( maxcol-1, 0, -1): + for j in range( maxcol-1, 0, -1): l = 2 * l v[0:dim_num,j-1] = v[0:dim_num,j-1] * l # @@ -406,18 +406,18 @@ def i4_sobol( dim_num, seed ): l = 1 lastq=zeros(dim_num) - for seed_temp in xrange( int(seed_save), int(seed)): + for seed_temp in range( int(seed_save), int(seed)): l = i4_bit_lo0 ( seed_temp ) - for i in xrange(1 , dim_num+1): + for i in range(1 , dim_num+1): lastq[i-1] = bitwise_xor ( int(lastq[i-1]), int(v[i-1,l-1]) ) l = i4_bit_lo0 ( seed ) elif ( seed_save + 1 < seed ): - for seed_temp in xrange( int(seed_save + 1), int(seed) ): + for seed_temp in range( int(seed_save + 1), int(seed) ): l = i4_bit_lo0 ( seed_temp ) - for i in xrange(1, dim_num+1): + for i in range(1, dim_num+1): lastq[i-1] = bitwise_xor ( int(lastq[i-1]), int(v[i-1,l-1]) ) l = i4_bit_lo0 ( seed ) @@ -425,16 +425,16 @@ def i4_sobol( dim_num, seed ): # Check that the user is not calling too many times! # if ( maxcol < l ): - print 'I4_SOBOL - Fatal error!' - print ' Too many calls!' - print ' MAXCOL = %d\n'%maxcol - print ' L = %d\n'%l + print('I4_SOBOL - Fatal error!') + print(' Too many calls!') + print(' MAXCOL = %d\n'%maxcol) + print(' L = %d\n'%l) return # # Calculate the new components of QUASI. 
# quasi=zeros(dim_num) - for i in xrange( 1, dim_num+1): + for i in range( 1, dim_num+1): quasi[i-1] = lastq[i-1] * recipd lastq[i-1] = bitwise_xor ( int(lastq[i-1]), int(v[i-1,l-1]) ) @@ -501,8 +501,8 @@ def i4_uniform ( a, b, seed ): # Output, integer SEED, the updated seed. # if ( seed == 0 ): - print 'I4_UNIFORM - Fatal error!' - print ' Input SEED = 0!' + print('I4_UNIFORM - Fatal error!') + print(' Input SEED = 0!') seed = math.floor ( seed ) a = round ( a ) From 2c86beaa1af74dd4cfebf3e1fd072dd568283150 Mon Sep 17 00:00:00 2001 From: gbellandi Date: Tue, 11 Feb 2020 15:11:49 +0100 Subject: [PATCH 2/3] Update README.md update readme for py3 --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 6a0122a..8377903 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # pystran -*no longer maintained* +*currently converging to py3* Toolset of dynamical model STRucture ANalysis algorithms From 7db0d23fa0f7b33b99039030cf84661cf0f1a397 Mon Sep 17 00:00:00 2001 From: gbellandi Date: Tue, 11 Feb 2020 16:17:19 +0100 Subject: [PATCH 3/3] Update __init__.py dummy trial --- pystran/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pystran/__init__.py b/pystran/__init__.py index b8b9f02..ffe0115 100644 --- a/pystran/__init__.py +++ b/pystran/__init__.py @@ -8,7 +8,7 @@ import numpy as np import matplotlib.pyplot as plt -#from evaluationfunctions import Evaluation, Likelihood +from .evaluationfunctions import Evaluation, Likelihood from sensitivity_base import SensitivityAnalysis from sensitivity_dynamic import DynamicSensitivity