import geopandas as gpd
import pycen  # used below for pycen.con, pycen.update_to_sql and pycen.tools
from pycen import zh
from os import path

zh = zh()  # instantiate the wetlands client (shadows the imported class)
def ident_newsite(df, view=None, rcvmt=10):
    '''
    Identify new geometries, or updates to existing geometries, by overlap.

    Update (1:1) / Replacement (1:n) / New (1:0)
    '''
    maj = []
    # Fetch the wetlands layer from the database (the original comment
    # mentioned "pelouses_seches", but v_zoneshumides() is the wetlands view).
    if view is None:
        view = zh.v_zoneshumides()
    # Identify new_site/old_site overlaps
    df_inters = gpd.sjoin(df, view, how='left')
    del df_inters['index_right']
    news1 = df_inters[df_inters.site_code.isna()].CODE_SITE  # (unused; kept from exploratory work)
    lst_old_site = df_inters.site_code.unique()
    view = view[view.site_code.isin(lst_old_site)].copy()
    view.loc[:, 'surf'] = view.area
    # Explode MULTIPOLYGONS
    view2 = view.explode(index_parts=True)
    view2['surf'] = view2.area

    tmp = gpd.overlay(view, df[['CODE_SITE', 'geom']], how='intersection',
                      make_valid=True, keep_geom_type=False)
    if isinstance(tmp, gpd.GeoDataFrame) and tmp.geometry.name != 'geom':
        tmp.rename_geometry('geom', inplace=True)
    tmp['perc_rcvmt'] = 100 * tmp.area / tmp['surf']

    # Overlap after exploding multi-part geometries
    tmp2 = gpd.overlay(view2, df[['CODE_SITE', 'geom']].explode(index_parts=True),
                       how='intersection', make_valid=True, keep_geom_type=False)
    if isinstance(tmp2, gpd.GeoDataFrame) and tmp2.geometry.name != 'geom':
        tmp2.rename_geometry('geom', inplace=True)
    tmp2['perc_rcvmt'] = 100 * tmp2.area / tmp2['surf']

    # Site classification: Update (1:1) / Replacement (1:n) / New (1:0)
    # Overlap threshold = 10%
    # rcvmt_inf = tmp.perc_rcvmt < rcvmt
    # rcvmt_sup = tmp.perc_rcvmt > rcvmt
    # tmp.loc[rcvmt_inf&(~code_dupl)&(~site_dupl)]

    tmpp = tmp2[tmp2.perc_rcvmt > rcvmt].copy()
    code_dupl = tmpp.CODE_SITE.duplicated(keep=False)
    site_dupl = tmpp.site_code.duplicated(keep=False)
    site_maj = tmpp[(~code_dupl) & (~site_dupl)].CODE_SITE.unique()
    # site_cor = tmpp.loc[(code_dupl) | (site_dupl),['site_code','CODE_SITE']]

    maj2 = df[df.CODE_SITE.isin(site_maj)].merge(
        tmpp[['site_code', 'CODE_SITE', 'perc_rcvmt']], on=['CODE_SITE'])
    maj2['id_site'] = maj2['site_code']

    del maj2['site_code']
    # `maj` is always empty here; the branch is kept for symmetry with
    # earlier versions of this routine.
    if maj:
        df_maj = gpd.pd.concat([maj, maj2])
    else:
        df_maj = maj2
    # Isolate new_site / old_site_toClose correspondences
    orig_maj_all = df_maj.CODE_SITE
    id_maj_all = df_maj.id_site
    df_cor = tmpp.loc[
        (~tmpp.site_code.isin(id_maj_all)) & (~tmpp.CODE_SITE.isin(orig_maj_all)),
        ['site_code', 'CODE_SITE']
    ]
    df_cor.rename(columns={'site_code': 'id_site_old'}, inplace=True)
    df_cor.sort_values('id_site_old', inplace=True)
    df_cor.drop_duplicates(inplace=True)
    COR = df_cor.copy()
    df_cor = {}
    df_cor['cor'] = COR
    df_cor['df'] = df[df.CODE_SITE.isin(COR.CODE_SITE)].sort_values('CODE_SITE').copy()
    # Isolate new sites
    df_new = df[(~df.CODE_SITE.isin(orig_maj_all))
                & (~df.CODE_SITE.isin(df_cor['cor'].CODE_SITE))].sort_values('CODE_SITE').copy()

    return df_new, df_cor, df_maj.sort_values('CODE_SITE')
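
# A minimal usage sketch for ident_newsite() (defined but never called).
# The file name and CRS handling below are illustrative assumptions; the
# real inputs are built in the __main__ block further down.
def _example_ident_newsite():
    candidates = gpd.read_file('new_inventory.shp')  # hypothetical input layer
    candidates = candidates.rename_geometry('geom').to_crs(2154)
    df_new, df_cor, df_maj = ident_newsite(candidates, rcvmt=10)
    # df_new : features with no sufficient overlap (1:0) -> sites to create
    # df_cor : {'cor': old/new code pairs (1:n), 'df': the matching features}
    # df_maj : unambiguous 1:1 overlaps, carrying the matched 'id_site'
    return df_new, df_cor, df_maj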

def format_delim(df):
    dict_delim = {
        "présence d'une végétation hygrophile": "présence ou absence d'une végétation hygrophile",
        "présence de sols hydromorphes": "présence ou absence de sols hydromorphes",
    }
    dlm = (df[['CRITERES DELIMITATION']]
           .droplevel(1, axis=1)['CRITERES DELIMITATION']
           .str.split('\n')
           .explode()
           .replace('. - ', '', regex=True)
           .str.strip()
           .str.lower()
           .replace(dict_delim)
           .to_frame('crit_delim')
           .merge((df[['CODE_SITE']]
                   .droplevel(1, axis=1)), left_index=True, right_index=True))
    return dlm

def format_usg(df):
    USG = (df[['USAGES/PROCESSUS NATURELS']]
           .droplevel(1, axis=1)['USAGES/PROCESSUS NATURELS']
           .str.split('\n')
           .explode()
           .str.strip())
    usg = (USG
           .replace('-.*', '', regex=True)
           .str.strip()
           .astype(int)
           .to_frame('crit_usg')
           .merge((df[['CODE_SITE']]
                   .droplevel(1, axis=1)), left_index=True, right_index=True)
           .merge(
               (USG
                .to_frame('activ_hum_autre')
                .loc[USG.str.contains('Autre')]
                .replace('.*-.*.: ', '', regex=True)
                ), left_index=True, right_index=True, how='left')
           .merge(
               (USG
                .replace({
                    '.*-': '',
                    '\u00a0': ' '},  # assumed non-breaking space in the original
                    regex=True)
                .str.strip()
                .to_frame('remarques')
                .loc[USG.str.contains('Remb|Pât')]
                ), left_index=True, right_index=True, how='left')
           )
    return usg
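
# Hedged sketch of the input format_usg() expects (defined, never called):
# a two-level column header where the 'USAGES/PROCESSUS NATURELS' cell holds
# newline-separated entries such as "12 - Pâturage : <free remark>". The
# values below are invented for illustration.
def _example_format_usg():
    cols = gpd.pd.MultiIndex.from_tuples(
        [('USAGES/PROCESSUS NATURELS', ''), ('CODE_SITE', '')])
    demo = gpd.pd.DataFrame(
        [['12 - Pâturage : surpâturage local', '38GAM9999']], columns=cols)
    return format_usg(demo)  # -> crit_usg=12, remarques parsed from the label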

def format_reghydro(df):

    hydro = df['REGIME HYDRIQUE']
    hydro_in = hydro[["Entrée d'eau "]].rename(columns={"Entrée d'eau ": 'reg_hydro'})
    hydro_out = hydro[["Sortie d'eau"]].rename(columns={"Sortie d'eau": 'reg_hydro'})
    hydro_in['in_out'] = True
    hydro_out['in_out'] = False
    return (gpd.pd.concat([hydro_in, hydro_out])
            .sort_index()
            .set_index('in_out', append=True)['reg_hydro']
            .str.split('\n')
            .explode()
            .str.strip()
            .replace({
                '.*- ': '',
                ' / ': '/',
                'Ev': 'Év'
            }, regex=True)
            .replace({
                'Nappe': 'Nappes',
                'Source': 'Sources',
                'Ruissellement': 'Ruissellement diffus',
            })
            .to_frame('reg_hydro')
            .reset_index(-1)
            .merge((df[['CODE_SITE']]
                    .droplevel(1, axis=1)), left_index=True, right_index=True)
            )

def format_subm(df):
    hydro = df['REGIME HYDRIQUE']
    return (hydro[['Fréquence submersion', 'Etendue submersion']]
            .replace({
                'Fréquence submersion': {
                    'Inconnue': 'Inconnu',
                    'Régulière': 'Régulièrement submergé',
                    'Toujours': 'Toujours submergé',
                    'Saisonnière': 'Régulièrement submergé',
                    '.*- ': '',
                },
                'Etendue submersion': {
                    'Inconnue': 'Inconnu',
                    'Partielle': 'Partiellement submergé',
                    'Totale': 'Totalement submergé',
                    'Saisonnière': 'Inconnu',
                    '.*- ': '',
                }
            }, regex=True)
            .rename(columns={
                'Fréquence submersion': 'sub_freq',
                'Etendue submersion': 'sub_etend',
            })
            .merge((df[['CODE_SITE']]
                    .droplevel(1, axis=1)), left_index=True, right_index=True))

def format_cnx(df):
    return (df[['CONNEXION ZH ENVIRONNEMENT']]
            .droplevel(1, axis=1)
            .rename(columns={'CONNEXION ZH ENVIRONNEMENT': 'id_param_connect'})
            .astype(int)
            .merge((df[['CODE_SITE']]
                    .droplevel(1, axis=1)), left_index=True, right_index=True))

def format_fctBio(df):
    dict_fct = {
        'Etapes migratoires': 'étapes migratoires, zones de stationnement, dortoirs',
        'Zone alimentation faune': "zone particulière d'alimentation pour la faune",
        'Zone reproduction faune': 'zone particulière liée à la reproduction',
        'Connexions biologiques': 'connexions biologiques',
    }
    autre_fct = (df[['AUTRE INTERET']]
                 .droplevel(1, axis=1)
                 .dropna()
                 .rename(columns={'AUTRE INTERET': 'description'}))
    autre_fct['fonction'] = "autre interet fonctionnel d'ordre ecologique"
    fct_bio = (df['HABITATS POPULATIONS ANIMALES OU VEGETALES']
               .stack()
               .rename_axis(['index', 'fonction'])
               .str.strip()
               # dict form so that 'x' really becomes None (a scalar None
               # second argument triggers pandas' pad-fill behaviour)
               .replace({'x': None})
               .to_frame('description')
               .reset_index(-1)
               .replace(dict_fct)
               )
    autre_fct['typ_fonc'] = 'fct_bio'
    fct_bio['typ_fonc'] = 'fct_bio'

    return (gpd.pd.concat([autre_fct, fct_bio])
            .merge((df[['CODE_SITE']]
                    .droplevel(1, axis=1)), left_index=True, right_index=True)
            .sort_index())

def format_fctHydro(df):
    dict_fct = {
        'Expansion des crues': 'expansion naturelle des crues',
        "Soutien d'étiage": "soutien naturel d'étiage",
        'Ralentissement du ruissellement': 'ralentissement du ruissellement',
        'Epuration des eaux': "fonctions d'épuration",
        "Protection contre l'érosion": "rôle naturel de protection contre l'érosion",
    }
    fct_hyd = (df[['REGULATION HYDRAULIQUE', 'PROTECTION MILIEU PHYSIQUE']]
               .droplevel(0, axis=1)
               .stack()
               .rename_axis(['index', 'fonction'])
               .str.strip()
               .replace({'x': None})  # dict form: 'x' -> None
               .to_frame('description')
               .reset_index(-1)
               .replace(dict_fct)
               .merge((df[['CODE_SITE']]
                       .droplevel(1, axis=1)), left_index=True, right_index=True)
               )
    fct_hyd['typ_fonc'] = 'fct_hydro'

    return fct_hyd

def format_valSocioEco(df):
    dict_fct = {
        'RESERVOIR AEP': "réservoir pour l'alimentation en eau potable",
        'PRODUCTION BIOLOGIQUE': 'production biologique',
        'PROD. MATIERE PREMIERE': 'production de matière première',
        'VALORISATION PEDAGOGIQUE': 'intérêt pour la valorisation pédagogique / éducation',
        'INTERET PAYSAGER': 'intérêt paysager',
        'LOISIRS / VALEURS RECREATIVES': 'intérêt pour les loisirs/valeurs récréatives',
        'VALEURS SCIENTIFIQUES': 'valeur scientifique',
        'VALEURS CULTURELLES': 'valeur culturelle',
        'NUISANCES': 'nuisances sur les conditions de vie des populations humaines résidentes',
    }

    valSocioEco = (df[['RESERVOIR AEP', 'PRODUCTION BIOLOGIQUE',
                       'PROD. MATIERE PREMIERE', 'VALORISATION PEDAGOGIQUE',
                       'INTERET PAYSAGER', 'LOISIRS / VALEURS RECREATIVES',
                       'VALEURS SCIENTIFIQUES', 'VALEURS CULTURELLES', 'NUISANCES']]
                   .droplevel(1, axis=1)
                   .stack()
                   .rename_axis(['index', 'fonction'])
                   .replace({'x': None})  # dict form: 'x' -> None
                   .to_frame('description')
                   .reset_index(-1)
                   .replace(dict_fct)
                   .merge((df[['CODE_SITE']]
                           .droplevel(1, axis=1)), left_index=True, right_index=True)
                   )
    valSocioEco['typ_fonc'] = 'val_socioEco'
    return valSocioEco

def format_patrim(df):
    dict_fct = {
        'Inver-tébrés': 'invertébrés',
        'Insectes': 'insectes',
        'Poissons': 'poissons',
        'Amphi-biens': 'amphibiens',
        'Reptiles': 'reptiles',
        'Oiseaux': 'oiseaux',
        'Mammi-fères': 'mammifères',
        'Flore vasculaire': 'floristiques',
        'Algues': 'algues',
        'Champi-gnons': 'champignons',
        'Lichens': 'lichens',
        'Bryo-phytes': 'bryophytes',
        'Ptérido-phytes': 'ptéridophytes',
        'Phané-rophytes': 'phanérogames',
    }
    # Bug fix: the original referenced the global `data`, which is undefined
    # inside this function; use the `df` argument like the other helpers.
    patrim = (df[['FAUNISTIQUES', 'FLORISTIQUES']]
              .droplevel(0, axis=1)
              .stack()
              .rename_axis(['index', 'fonction'])
              .replace({'x': None})  # dict form: 'x' -> None
              .to_frame('description')
              .reset_index(-1)
              .replace(dict_fct)
              .merge((df[['CODE_SITE']]
                      .droplevel(1, axis=1)), left_index=True, right_index=True)
              )
    patrim['typ_fonc'] = 'int_patri'
    return patrim

def format_fct(df):
    fct_bio = format_fctBio(df)
    fct_hydro = format_fctHydro(df)
    valSocioEco = format_valSocioEco(df)
    patrim = format_patrim(df)
    return (gpd.pd.concat([fct_bio, fct_hydro, valSocioEco, patrim])
            .sort_values('CODE_SITE'))

def format_info(df):
    dict_col = {
        'NOM_SITE': 'nom',
        'Commentaires / remarques fiche de terrain': 'rmq_usage_process',
    }
    return (df[['CODE_SITE', 'NOM_SITE',
                'Commentaires / remarques fiche de terrain']]
            .droplevel(1, axis=1)
            .sort_values('CODE_SITE')
            .rename(columns=dict_col))

def format_data(df):
    info = format_info(df)
    delim = format_delim(df)
    usg = format_usg(df)
    reghydro = format_reghydro(df)
    subm = format_subm(df)
    cnx = format_cnx(df)
    fct = format_fct(df)
    return info, delim, usg, reghydro, subm, cnx, fct
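
# Hedged sketch of the expected workbook layout (defined, never called):
# format_data() assumes the 'Fiches_terrain' sheet is read with a two-level
# column header, exactly as done in the __main__ block below. The path here
# is a placeholder.
def _example_format_data():
    data = gpd.pd.read_excel('BDD_ZH_GAM_database.xlsx',  # hypothetical path
                             sheet_name='Fiches_terrain', header=[1, 2])
    info, delim, usg, reghydro, subm, cnx, fct = format_data(data)
    # Each table is flat and keyed by CODE_SITE, ready for the insert_* steps.
    return info, delim, usg, reghydro, subm, cnx, fct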

def insert_regHydro(df):
    sch = pycen.zh().schema
    tab_regHyd = 'r_site_reghydro'
    reg_hydro = pycen.zh()._get_param(param_table='param_reg_hydro')

    df.replace({'reg_hydro': dict(zip(reg_hydro.nom, reg_hydro.id))}, inplace=True)
    df.rename(columns={
        'reg_hydro': 'id_reg_hydro',
        'site_cod': 'id_site',
        'site_code': 'id_site',
    }, inplace=True)
    # Use the schema/table variables defined above (the original call passed
    # the schema name 'zones_humides' as a literal).
    dfinout, ins = insertAttrs(df, sch, tab_regHyd)
    if ins:
        ids = select_ID(dfinout[dfinout.columns.drop('auteur')], sch, tab_regHyd)
        same_col = dfinout.columns[dfinout.columns.isin(ids.columns)]
        if 'date' in same_col:
            dfinout['date'] = dfinout['date'].astype(str)
            ids['date'] = ids['date'].astype(str)
        for c in same_col:
            if dfinout[c].dtype != ids[c].dtype:
                dfinout[c] = dfinout[c].astype(ids[c].dtype)
        dfinout = dfinout.merge(ids, on=[*same_col], how='left')
        insertAutorAttrs(dfinout, colname_rsite='id_sitehydro', sch=sch, tab='r_rsitehydro_auteur')
    else:
        return

def insert_subm(df):
    sch = pycen.zh().schema
    tab_sub = 'r_site_sub'
    p_con_sub = pycen.zh()._get_param(param_table='param_sub', type_table='type_param_sub', type_court=False)

    # assignment form instead of chained fillna(..., inplace=True)
    df['sub_freq'] = df['sub_freq'].fillna('Inconnu')
    df['sub_etend'] = df['sub_etend'].fillna('Inconnu')
    df['id_freqsub'] = df['sub_freq'].str.lower() \
        .replace([*p_con_sub.nom.str.lower()], [*p_con_sub.id.astype(str)])
    df['id_etendsub'] = df['sub_etend'].str.lower() \
        .replace([*p_con_sub.nom.str.lower()], [*p_con_sub.id.astype(str)])
    df.rename(columns={'site_cod': 'id_site', 'site_code': 'id_site'}, inplace=True)
    df.drop(columns=['sub_freq', 'sub_etend'], inplace=True)

    df, ins = insertAttrs(df, sch, tab_sub)
    if ins:
        ids = select_ID(df[df.columns.drop('auteur')], sch, tab_sub)
        ids.loc[~ids.id_etendsub.isna(), 'id_etendsub'] = ids.loc[~ids.id_etendsub.isna(), 'id_etendsub'] \
            .astype(int).astype(str)
        if ids.id_freqsub.dtype == int:
            ids.id_freqsub = ids.id_freqsub.astype(str)
        same_col = df.columns[df.columns.isin(ids.columns)]
        if 'date' in same_col:
            df['date'] = df['date'].astype(str)
            ids['date'] = ids['date'].astype(str)
        for col in same_col:
            if df[col].dtype != ids[col].dtype:
                print(col)  # debug trace: column whose dtype is realigned
                if df[col].dtype == float:
                    ids[col] = ids[col].astype(float)
                elif df[col].dtype == int:
                    ids[col] = ids[col].astype(int)
                elif df[col].dtype == object:
                    ids[col] = ids[col].astype(object)
        df = df.merge(ids, on=[*same_col], how='left')
        insertAutorAttrs(df, colname_rsite='id_sitesub', sch=sch, tab='r_rsitesub_auteur')
    else:
        return

def insert_cnx(df):
    tab_con = 'r_site_type_connect'
    sch = pycen.zh().schema
    p_con_cnx = pycen.zh()._get_param(param_table='param_type_connect')

    df.rename(columns={'site_cod': 'id_site', 'site_code': 'id_site'}, inplace=True)
    df.drop(columns=['connexion'], inplace=True, errors='ignore')

    df, ins = insertAttrs(df, sch, tab_con)
    if ins:
        ids = select_ID(df[df.columns.drop('auteur')], sch, tab_con)
        if ids.id_param_connect.dtype == int:
            ids.id_param_connect = ids.id_param_connect.astype(str)
        if df.id_param_connect.dtype == int:
            df.id_param_connect = df.id_param_connect.astype(str)
        same_col = df.columns[df.columns.isin(ids.columns)]
        if 'date' in same_col:
            df['date'] = df['date'].astype(str)
            ids['date'] = ids['date'].astype(str)
        df = df.merge(ids, on=[*same_col], how='left')
        insertAutorAttrs(df, colname_rsite='id_siteconnect', sch=sch, tab='r_rsiteconnect_auteur')
    else:
        return
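
# Hypothetical refactoring sketch (not wired in): insert_regHydro(),
# insert_subm() and insert_cnx() all repeat the same dtype-alignment dance
# before merging against select_ID() output; a shared helper could look
# like this.
def _align_for_merge(df, ids):
    same_col = df.columns[df.columns.isin(ids.columns)]
    if 'date' in same_col:
        df['date'] = df['date'].astype(str)
        ids['date'] = ids['date'].astype(str)
    for c in same_col:
        if df[c].dtype != ids[c].dtype:
            ids[c] = ids[c].astype(df[c].dtype)  # align on the incoming dtype
    return df.merge(ids, on=[*same_col], how='left')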

def define_author(df, is_new=False):
    is_mosaique = df['Source'].str.contains('Mosa.que')
    is_acer = df['Source'].str.contains('Acer')
    is_setis = df['Source'].str.contains('SETIS')
    is_evin = df['Source'].str.contains('Evinerude')
    is_cen = df['Source'] == 'Inventaire départemental'
    if not is_new:
        same_date = df.date == df.date_geom
        df.loc[same_date & is_mosaique, 'auteur'] = 'Mosaïque Environnement'
        df.loc[same_date & is_acer, 'auteur'] = 'Acer campestre'
        df.loc[same_date & is_setis, 'auteur'] = 'SETIS GROUPE Degaud'
        df.loc[same_date & is_evin, 'auteur'] = 'EVINERUDE'
        df.loc[same_date & is_cen, 'auteur'] = 'CEN Isère'
        df.loc[df.auteur.isna(), 'auteur'] = 'Mosaïque Environnement'
    else:
        df.loc[is_mosaique, 'auteur'] = 'Mosaïque Environnement'
        df.loc[is_acer, 'auteur'] = 'Acer campestre'
        df.loc[is_setis, 'auteur'] = 'SETIS GROUPE Degaud'
        df.loc[is_evin, 'auteur'] = 'EVINERUDE'
        df.loc[is_cen, 'auteur'] = 'CEN Isère'
        df.loc[df.auteur.isna(), 'auteur'] = 'Mosaïque Environnement'
    return df
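
# Minimal sketch of define_author() on invented 'Source' values (defined,
# never called): the function maps free-text source labels to a canonical
# author name, defaulting to 'Mosaïque Environnement'.
def _example_define_author():
    demo = gpd.pd.DataFrame({
        'Source': ['Mosaïque Environnement 2018', 'Inventaire départemental'],
        'auteur': [None, None],
    })
    return define_author(demo, is_new=True)
    # -> auteur: 'Mosaïque Environnement', 'CEN Isère'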

if __name__ == "__main__":

    PATH = '/media/colas/SRV/FICHIERS/OUTILS/CARTOGRAPHIE/ESPACE DE TRAVAIL/ETUDES/PLUI METRO/INV_ZH_PLUI_METRO/Mosaique/MAJ-INV-ZH MOSAIQUE 2018-SIG_BDD'
    shp = 'ZH_GAM_CC45.shp'
    data_file = 'BDD_ZH_GAM_database.xlsx'
    dict_cols = {
        'CODE_SITE': 'id_origine',
        'DATEMODIF': 'date',
        'TYPO_SDAGE': 'id_typo_sdage',
    }

    # Read input data
    data = gpd.pd.read_excel(path.join(PATH, data_file), sheet_name='Fiches_terrain', header=[1, 2])
    info, delim, usg, reghydro, subm, cnx, fct = format_data(data)

    df = gpd.read_file(path.join(PATH, shp))
    df.DATEMODIF = gpd.pd.to_datetime(df.DATEMODIF)
    LST_IGNORE = [
        '38GAM0012', '38GAM0034', '38GAM0045', '38GAM0142',
        '38GAM0003', '38GAM0004', '38GAM0006', '38GAM0007',
        '38GAM0008', '38GAM0009', '38GAM0011', '38GAM0015',
        '38GAM0016', '38GAM0017', '38GAM0019', '38GAM0021',
        '38GAM0026', '38GAM0138', '38GAM0032', '38GAM0035',
        '38GAM0040', '38GAM0041', '38GAM0042', '38GAM0044',
        '38GAM0046', '38GAM0047', '38GAM0049', '38GAM0051',
        '38GAM0052', '38GAM0053', '38GAM0054', '38GAM0055',
        '38GAM0056', '38GAM0069', '38GAM0070', '38GAM0073',
        '38GAM0076', '38GAM0141', '38GAM0001', '38GAM0005',
        '38GAM0018', '38GAM0023', '38GAM0029', '38GAM0033',
        '38GAM0039', '38GAM0050', '38GAM0060', '38GAM0134'
    ]
    LST_HISTO = [
        '38GAM0024', '38GAM0031', '38GAM0058', '38GAM0139',
        '38GAM0010', '38GAM0014', '38GAM0072', '38GAM0075',
        '38GAM0133']
    LST_NEW_MAJ = [
        '38GAM0022', '38GAM0061',
        '38GAM0118', '38GAM0127', '38GAM0129']
    GAM0115 = ['38GAM0115']
    GAM0057 = ['38GAM0057']
    GAM0091 = ['38GAM0091']
    GAM0079 = ['38GAM0079']
    GAM0108 = ['38GAM0108']
    LST_COR_MAJ = [
        '38GAM0036', '38GAM0068',
        '38GAM0063', '38GAM0123',
        '38GAM0071']
    NOT_MAJ = [
        '38GAM0131', '38GAM0130', '38GAM0126', '38GAM0111', '38GAM0110', '38GAM0089',
        '38GAM0080', '38GAM0059', '38GAM0048', '38GAM0043', '38GAM0038', '38GAM0037',
        '38GAM0028', '38GAM0025', '38GAM0020', '38GAM0013', '38GAM0002',
        '38GAM0087', '38GAM0132', '38GAM0135', '38GAM0136', '38GAM0098', '38GAM0088',
        '38GAM0090', '38GAM0092', '38GAM0093', '38GAM0094', '38GAM0095', '38GAM0096',
        '38GAM0097', '38GAM0099', '38GAM0100', '38GAM0101', '38GAM0102', '38GAM0067',
        '38GAM0103', '38GAM0104', '38GAM0062', '38GAM0064', '38GAM0065', '38GAM0066',
        '38GAM0105', '38GAM0074', '38GAM0077', '38GAM0084', '38GAM0109', '38GAM0078',
        '38GAM0081', '38GAM0082', '38GAM0083', '38GAM0085', '38GAM0086', '38GAM0112',
        '38GAM0113', '38GAM0114', '38GAM0116', '38GAM0117', '38GAM0119', '38GAM0120',
        '38GAM0121', '38GAM0122', '38GAM0124', '38GAM0125', '38GAM0137', '38GAM0140']
    df = df[~df.CODE_SITE.isin([*LST_IGNORE, *NOT_MAJ, *GAM0115, *GAM0057, *GAM0091,
                                *GAM0079, *GAM0108, *LST_HISTO, *LST_COR_MAJ, *LST_NEW_MAJ])]

    if isinstance(df, gpd.GeoDataFrame) and df.geometry.name != 'geom':
        df.rename_geometry('geom', inplace=True)
    if isinstance(df, gpd.GeoDataFrame) and df.crs.srs.lower() != 'epsg:2154':
        df.to_crs(2154, inplace=True)

    # Classify site types: new / correspondence / update
    v_zh = zh.v_zoneshumides()
    df_new, df_cor, df_maj = ident_newsite(df, v_zh, rcvmt=10)
    df_new.rename(columns=dict_cols, inplace=True)
    df_maj.rename(columns=dict_cols, inplace=True)
    df_cor['df'].rename(columns=dict_cols, inplace=True)
    df_cor['cor'].rename(columns=dict_cols, inplace=True)
    if not df_new.empty:
        df_new = define_author(df_new, True)

    locdata_new = data.droplevel(1, axis=1).CODE_SITE.isin([*df_new.id_origine])
    locdata_maj = data.droplevel(1, axis=1).CODE_SITE.isin([*df_maj.id_origine])
    locdata_cor = data.droplevel(1, axis=1).CODE_SITE.isin([*df_cor['df'].id_origine])
    data_new = data[locdata_new]
    data_maj = data[locdata_maj]
    data_cor = data[locdata_cor]

    # df_maj: split recent vs old dates
    # (t1/t3 and c1/c3 are kept from exploratory work; they are not used below)
    t1 = df_maj.columns.str.contains('date', case=False)
    t3 = v_zh.columns.str.contains('date', case=False)
    c1 = df_maj.columns[t1]
    c3 = v_zh.columns[t3]
    maj_tmpv = (gpd.pd.merge(df_maj, v_zh[['site_code', 'date_geom']], how='left',
                             left_on='id_site', right_on='site_code')
                .drop(columns='id_site'))
    maj_tmpv = define_author(maj_tmpv, False)
    test_dt_new = maj_tmpv.date >= maj_tmpv.date_geom
    maj_dt_new = maj_tmpv[test_dt_new].drop(columns=['CENTRE_X', 'CENTRE_Y'])
    maj_dt_old = maj_tmpv[~test_dt_new].drop(columns=['CENTRE_X', 'CENTRE_Y'])

    # df_cor: split recent vs old dates
    t2 = df_cor['df'].columns.str.contains('date', case=False)
    c2 = df_cor['df'].columns[t2]  # kept from exploratory work; unused below
    cor_tmpv = (
        gpd.pd.merge(
            gpd.pd.merge(
                df_cor['cor'],
                df_cor['df'],  # [['id_origine','date','Source']],
                on='id_origine', how='left'),
            v_zh[['site_code', 'date_geom']], how='left',
            left_on='id_site_old', right_on='site_code')
        .drop(columns='id_site_old'))
    cor_tmpv = define_author(cor_tmpv, False)
    # cor_tmpv = cor_tmpv[~cor_tmpv.site_code.isin(NOT_MAJ)]
    test_dt_new2 = cor_tmpv.date >= cor_tmpv.date_geom
    cor_dt_new = cor_tmpv[test_dt_new2].sort_values(by='id_origine').drop(columns=['CENTRE_X', 'CENTRE_Y'])
    cor_dt_old = cor_tmpv[~test_dt_new2].sort_values(by='id_origine').drop(columns=['CENTRE_X', 'CENTRE_Y'])

    ### Next step: uses the helpers from the `insert_zh.py` file
    import pycen  # already imported at module level; kept from the original
    # NOTE: this relative import only works when the script runs as a module
    # inside its package (e.g. python -m pkg.script); kept as in the original.
    from ..insert_zh import (
        insertNewSite,
        insertNewGeom,
        insertAttrsDelim,
        insertAttrsUsgPrss,
        insertAttrs,
        insertAttrsFct,
        select_ID, insertAutorAttrs)

    ############################
    ######### df_new ###########
    ############################

    dfNew = (df_new[['id_origine', 'NOM_SITE', 'id_typo_sdage', 'date', 'auteur', 'geom']]
             .copy()
             .rename(columns={
                 'NOM_SITE': 'site_name', 'id_origine': 'site_cod'})
             )
    dfNew['dept'] = dfNew.site_cod.str[:2]
    dfNew['org'] = dfNew.site_cod.str[2:5]
    dfNew['num'] = dfNew.site_cod.str[5:]
    dfNew['type_milieux'] = 'Milieux humides'
    insertNewSite((dfNew
                   .drop(columns='geom')
                   .rename(columns={
                       'date': 'date_deb'
                   })))

    dfNewGeom = (dfNew[['site_cod', 'date', 'auteur', 'geom']]
                 .copy()
                 .merge(info, left_on='site_cod', right_on='CODE_SITE', how='left')
                 .drop(columns=['CODE_SITE', 'nom']))

    insertNewGeom(pycen.tools.Polygons_to_MultiPolygon(dfNewGeom))

    dfNewDelim = (dfNew[['site_cod', 'date', 'auteur']]
                  .merge(delim, left_on='site_cod', right_on='CODE_SITE', how='inner')
                  .drop(columns=['CODE_SITE']))
    insertAttrsDelim(dfNewDelim)

    dfNewUPS = (dfNew[['site_cod', 'date', 'auteur']]
                .merge(usg, left_on='site_cod', right_on='CODE_SITE', how='inner')
                .drop(columns=['CODE_SITE']))
    dfNewUPS['impact'] = 'Inconnu'
    dfNewUPS['localisation'] = 'Inconnu'
    hactivhum = gpd.pd.read_sql('SELECT * FROM zones_humides.param_activ_hum', con=pycen.con)
    insertAttrsUsgPrss((dfNewUPS
                        .rename(columns={
                            'crit_usg': 'activite_hum',
                            'remarques': 'remarks'})
                        .astype({'activite_hum': int})
                        .replace({'activite_hum': dict(zip(hactivhum.id, hactivhum.nom))})
                        ))

    dfNewRHyd = (dfNew[['site_cod', 'date', 'auteur']]
                 .merge(reghydro, left_on='site_cod', right_on='CODE_SITE', how='inner')
                 .drop(columns=['CODE_SITE']))
    insert_regHydro(dfNewRHyd)

    dfNewSubm = (dfNew[['site_cod', 'date', 'auteur']]
                 .merge(subm, left_on='site_cod', right_on='CODE_SITE', how='inner')
                 .drop(columns=['CODE_SITE']))
    insert_subm(dfNewSubm)

    dfNewCnx = (dfNew[['site_cod', 'date', 'auteur']]
                .merge(cnx, left_on='site_cod', right_on='CODE_SITE', how='inner')
                .drop(columns=['CODE_SITE']))
    insert_cnx(dfNewCnx)

    dfNewFct = (dfNew[['site_cod', 'date', 'auteur']]
                .merge(fct, left_on='site_cod', right_on='CODE_SITE', how='inner')
                .drop(columns=['CODE_SITE'])
                .rename(columns={'description': 'memo'}))
    insertAttrsFct(dfNewFct, True)

    ############################
    ############################
    ######### df_maj ###########
    ############################
    ############################

    ############################
    ######### NEW maj ##########
    ### 38GAM0030
    GAM030 = maj_dt_old[maj_dt_old.id_origine == '38GAM0030'].copy()
    GAM030.date = '2017-01-01'
    GAM030geom = (GAM030[['site_code', 'id_origine', 'date', 'auteur', 'geom']]
                  .copy()
                  .merge(info, left_on='id_origine', right_on='CODE_SITE', how='left')
                  .set_geometry('geom', crs=2154)
                  .drop(columns=['CODE_SITE', 'nom']))
    insertNewGeom(GAM030geom)
    ### 38GAM0106
    GAM106 = maj_dt_old[maj_dt_old.id_origine == '38GAM0106'].copy()
    GAM106.date = '2017-01-01'
    GAM106geom = (GAM106[['site_code', 'id_origine', 'date', 'auteur', 'geom']]
                  .copy()
                  .merge(info, left_on='id_origine', right_on='CODE_SITE', how='left')
                  .set_geometry('geom', crs=2154)
                  .drop(columns=['CODE_SITE', 'nom']))
    insertNewGeom(GAM106geom)
    ### 38GAM0108
    GAM108 = cor_dt_new[cor_dt_new.id_origine == '38GAM0108'].copy()
    GAM108geom = (GAM108[['site_code', 'id_origine', 'date', 'auteur', 'geom']]
                  .copy()
                  .merge(info, left_on='id_origine', right_on='CODE_SITE', how='left')
                  .set_geometry('geom', crs=2154)
                  .drop(columns=['CODE_SITE', 'nom']))
    insertNewGeom(GAM108geom)

    # Test (inspection only)
    # maj_dt_new.merge(info,left_on='id_origine',right_on='CODE_SITE',how='inner').drop(columns=['CODE_SITE'])

    dfNewMajGeom = (maj_dt_new[['site_code', 'id_origine', 'date', 'auteur', 'geom']]
                    .copy()
                    .merge(info, left_on='id_origine', right_on='CODE_SITE', how='left')
                    .drop(columns=['CODE_SITE', 'nom']))

    dfNewMajDelim = (maj_dt_new[['site_code', 'date', 'auteur', 'id_origine']]
                     .merge(delim, left_on='id_origine', right_on='CODE_SITE', how='inner')
                     .drop(columns=['CODE_SITE', 'id_origine']))

    hactivhum = gpd.pd.read_sql('SELECT * FROM zones_humides.param_activ_hum', con=pycen.con)
    dfNewMajUPS = (maj_dt_new[['site_code', 'date', 'auteur', 'id_origine']]
                   .merge(usg, left_on='id_origine', right_on='CODE_SITE', how='inner')
                   .drop(columns=['CODE_SITE', 'id_origine'])
                   .rename(columns={
                       'crit_usg': 'activite_hum',
                       'remarques': 'remarks'})
                   .astype({'activite_hum': int})
                   .replace({'activite_hum': dict(zip(hactivhum.id, hactivhum.nom))})
                   )
    dfNewMajUPS['impact'] = 'Inconnu'
    dfNewMajUPS['localisation'] = 'Inconnu'

    dfNewMajRHyd = (maj_dt_new[['site_code', 'date', 'auteur', 'id_origine']]
                    .merge(reghydro, left_on='id_origine', right_on='CODE_SITE', how='inner')
                    .drop(columns=['CODE_SITE', 'id_origine']))

    dfNewMajSubm = (maj_dt_new[['site_code', 'date', 'auteur', 'id_origine']]
                    .merge(subm, left_on='id_origine', right_on='CODE_SITE', how='inner')
                    .drop(columns=['CODE_SITE', 'id_origine']))

    dfNewMajCnx = (maj_dt_new[['site_code', 'date', 'auteur', 'id_origine']]
                   .merge(cnx, left_on='id_origine', right_on='CODE_SITE', how='inner')
                   .drop(columns=['CODE_SITE', 'id_origine']))

    dfNewMajFct = (maj_dt_new[['site_code', 'date', 'auteur', 'id_origine']]
                   .merge(fct, left_on='id_origine', right_on='CODE_SITE', how='inner')
                   .drop(columns=['CODE_SITE', 'id_origine'])
                   .rename(columns={'description': 'memo'}))

    insertNewGeom(pycen.tools.Polygons_to_MultiPolygon(dfNewMajGeom))
    insertAttrsDelim(dfNewMajDelim)
    insertAttrsUsgPrss(dfNewMajUPS)
    insert_regHydro(dfNewMajRHyd)
    insert_subm(dfNewMajSubm)
    insert_cnx(dfNewMajCnx)
    insertAttrsFct(dfNewMajFct, True)

    #############################
    ######## Histo maj ##########
    LST_HISTO = [  # same list as defined above
        '38GAM0024', '38GAM0031', '38GAM0058', '38GAM0139',
        '38GAM0010', '38GAM0014', '38GAM0072', '38GAM0075',
        '38GAM0133',
    ]
    histo_maj = gpd.pd.concat([
        maj_dt_old[maj_dt_old.id_origine.isin(LST_HISTO)],
        cor_dt_old[cor_dt_old.id_origine.isin(LST_HISTO)]
    ])
    histo_maj = define_author(histo_maj, True)
    insertNewGeom((histo_maj[['site_code', 'date', 'auteur', 'id_origine', 'geom']]
                   .rename(columns={'site_code': 'id_site'})
                   ))

    histo_maj2 = gpd.read_file(path.join(PATH, '../../zh_gam_cc45_modifByMJ&GC.gpkg'))
    histo_maj2 = define_author(histo_maj2, True)

    ### 38GAM0057
    histo_maj2 = (histo_maj2.loc[
        histo_maj2.CODE_SITE == '38GAM0057',
        ['CODE_SITE', 'DATEMODIF', 'auteur', 'geometry']]
        .rename(columns={
            'CODE_SITE': 'id_origine',
            'DATEMODIF': 'date'})
        .rename_geometry('geom'))
    histo_maj2['id_site'] = '38RD0010'
    histo_maj2['date'] = gpd.pd.to_datetime(histo_maj2.date)
    insertNewGeom(histo_maj2)

    #############################
    ######## New maj ############
    ### 38GAM0107
    replace_geom = maj_dt_old[maj_dt_old.id_origine == '38GAM0107'].copy()
    GAM0107_geom = replace_geom[['site_code', 'id_origine', 'date', 'geom', 'auteur']]
    GAM0107_geom.date = '2017-01-01'
    insertNewGeom((GAM0107_geom
                   .merge(info, left_on='id_origine', right_on='CODE_SITE', how='left')
                   .drop(columns=['CODE_SITE', 'nom']))
                  )

    rpl_delim = (GAM0107_geom
                 .merge(delim, left_on='id_origine', right_on='CODE_SITE', how='inner')
                 .drop(columns=['CODE_SITE', 'id_origine', 'geom']))
    hactivhum = gpd.pd.read_sql('SELECT * FROM zones_humides.param_activ_hum', con=pycen.con)
    rpl_activHum = (GAM0107_geom
                    .merge(usg, left_on='id_origine', right_on='CODE_SITE', how='inner')
                    .rename(columns={
                        'crit_usg': 'activite_hum',
                        'remarques': 'remarks'})
                    .astype({'activite_hum': int})
                    .replace({'activite_hum': dict(zip(hactivhum.id, hactivhum.nom))})
                    .drop(columns=['CODE_SITE', 'id_origine', 'geom']))
    rpl_activHum['impact'] = 'Inconnu'
    rpl_activHum['localisation'] = 'Inconnu'
    rpl_RHyd = (GAM0107_geom
                .merge(reghydro, left_on='id_origine', right_on='CODE_SITE', how='inner')
                .drop(columns=['CODE_SITE', 'id_origine', 'geom']))
    rpl_subm = (GAM0107_geom
                .merge(subm, left_on='id_origine', right_on='CODE_SITE', how='inner')
                .drop(columns=['CODE_SITE', 'id_origine', 'geom']))
    rpl_cnx = (GAM0107_geom
               .merge(cnx, left_on='id_origine', right_on='CODE_SITE', how='inner')
               .drop(columns=['CODE_SITE', 'id_origine', 'geom']))
    rpl_fct = (GAM0107_geom
               .merge(fct, left_on='id_origine', right_on='CODE_SITE', how='inner')
               .drop(columns=['CODE_SITE', 'id_origine', 'geom'])
               .rename(columns={'description': 'memo'}))
    insertAttrsDelim(rpl_delim)
    insertAttrsUsgPrss(rpl_activHum)
    insert_regHydro(rpl_RHyd)
    insert_subm(rpl_subm)
    insert_cnx(rpl_cnx)
    insertAttrsFct(rpl_fct, True)
    ## ADD GEOM 2023
    PATH = '/media/colas/SRV/FICHIERS/OUTILS/CARTOGRAPHIE/ESPACE DE TRAVAIL/ETUDES/PLUI METRO/INV_ZH_PLUI_METRO/'
    FILE_2023 = 'zh_gam_cc45_modifByMJ&GC.gpkg'
    df2023 = gpd.read_file(path.join(PATH, FILE_2023))
    df2023.rename_geometry('geom', inplace=True)
    geom2023 = (df2023
                .loc[df2023.CODE_SITE == '38GAM0107', ['CODE_SITE', 'DATEMODIF', 'geom']]
                .rename(columns={
                    'CODE_SITE': 'id_site', 'DATEMODIF': 'date',
                }))
    geom2023.date = '2023-11-28'
    geom2023.id_site = '38VE0213'
    geom2023['auteur'] = 'JUTON Mathieu'
    insertNewGeom(geom2023)

    ### 38GAM0115
    replace_geom = maj_dt_old[maj_dt_old.id_origine == '38GAM0115'].copy()
    replace_geom = replace_geom[['site_code', 'id_origine', 'date', 'geom', 'auteur']]
    pycen.update_to_sql(
        (replace_geom
         .merge(info, left_on='id_origine', right_on='CODE_SITE', how='inner')
         .drop(columns=['CODE_SITE', 'nom'])
         .rename(columns={'site_code': 'id_site'})),
        pycen.con, 'r_sites_geom', 'sites', 'id_site',
    )
    rpl_delim = (replace_geom
                 .merge(delim, left_on='id_origine', right_on='CODE_SITE', how='inner')
                 .drop(columns=['CODE_SITE', 'id_origine', 'geom']))
    hactivhum = gpd.pd.read_sql('SELECT * FROM zones_humides.param_activ_hum', con=pycen.con)
    rpl_activHum = (replace_geom
                    .merge(usg, left_on='id_origine', right_on='CODE_SITE', how='inner')
                    .rename(columns={
                        'crit_usg': 'activite_hum',
                        'remarques': 'remarks'})
                    .astype({'activite_hum': int})
                    .replace({'activite_hum': dict(zip(hactivhum.id, hactivhum.nom))})
                    .drop(columns=['CODE_SITE', 'id_origine', 'geom']))
    rpl_activHum['impact'] = 'Inconnu'
    rpl_activHum['localisation'] = 'Inconnu'
    rpl_RHyd = (replace_geom
                .merge(reghydro, left_on='id_origine', right_on='CODE_SITE', how='inner')
                .drop(columns=['CODE_SITE', 'id_origine', 'geom']))
    rpl_subm = (replace_geom
                .merge(subm, left_on='id_origine', right_on='CODE_SITE', how='inner')
                .drop(columns=['CODE_SITE', 'id_origine', 'geom']))
    rpl_cnx = (replace_geom
               .merge(cnx, left_on='id_origine', right_on='CODE_SITE', how='inner')
               .drop(columns=['CODE_SITE', 'id_origine', 'geom']))
    rpl_fct = (replace_geom
               .merge(fct, left_on='id_origine', right_on='CODE_SITE', how='inner')
               .drop(columns=['CODE_SITE', 'id_origine', 'geom'])
               .rename(columns={'description': 'memo'}))
    insertAttrsDelim(rpl_delim)
    insertAttrsUsgPrss(rpl_activHum)
    insert_regHydro(rpl_RHyd)
    insert_subm(rpl_subm)
    insert_cnx(rpl_cnx)
    insertAttrsFct(rpl_fct, True)

    ### 38GAM0128
    replace_geom = cor_dt_new[cor_dt_new.id_origine == '38GAM0128'].copy()
    GAM128_geom = (replace_geom[['site_code', 'id_origine', 'date', 'geom', 'auteur']]
                   .set_geometry('geom', crs=2154))

    insertNewGeom((GAM128_geom
                   .merge(info, left_on='id_origine', right_on='CODE_SITE', how='left')
                   .drop(columns=['CODE_SITE', 'nom']))
                  )

    rpl_delim = (GAM128_geom
                 .merge(delim, left_on='id_origine', right_on='CODE_SITE', how='inner')
                 .drop(columns=['CODE_SITE', 'id_origine', 'geom']))
    hactivhum = gpd.pd.read_sql('SELECT * FROM zones_humides.param_activ_hum', con=pycen.con)
    rpl_activHum = (GAM128_geom
                    .merge(usg, left_on='id_origine', right_on='CODE_SITE', how='inner')
                    .rename(columns={
                        'crit_usg': 'activite_hum',
                        'remarques': 'remarks'})
                    .astype({'activite_hum': int})
                    .replace({'activite_hum': dict(zip(hactivhum.id, hactivhum.nom))})
                    .drop(columns=['CODE_SITE', 'id_origine', 'geom']))
    rpl_activHum['impact'] = 'Inconnu'
    rpl_activHum['localisation'] = 'Inconnu'
    rpl_RHyd = (GAM128_geom
                .merge(reghydro, left_on='id_origine', right_on='CODE_SITE', how='inner')
                .drop(columns=['CODE_SITE', 'id_origine', 'geom']))
    rpl_subm = (GAM128_geom
                .merge(subm, left_on='id_origine', right_on='CODE_SITE', how='inner')
                .drop(columns=['CODE_SITE', 'id_origine', 'geom']))
    rpl_cnx = (GAM128_geom
               .merge(cnx, left_on='id_origine', right_on='CODE_SITE', how='inner')
               .drop(columns=['CODE_SITE', 'id_origine', 'geom']))
    rpl_fct = (GAM128_geom
               .merge(fct, left_on='id_origine', right_on='CODE_SITE', how='inner')
               .drop(columns=['CODE_SITE', 'id_origine', 'geom'])
               .rename(columns={'description': 'memo'}))
    insertAttrsDelim(rpl_delim)
    insertAttrsUsgPrss(rpl_activHum)
    insert_regHydro(rpl_RHyd)
    insert_subm(rpl_subm)
    insert_cnx(rpl_cnx)
    insertAttrsFct(rpl_fct, True)

    ## ADD GEOM 2023
    PATH = '/media/colas/SRV/FICHIERS/OUTILS/CARTOGRAPHIE/ESPACE DE TRAVAIL/ETUDES/PLUI METRO/INV_ZH_PLUI_METRO/'
    FILE_2023 = 'zh_gam_cc45_modifByMJ&GC.gpkg'
    df2023 = gpd.read_file(path.join(PATH, FILE_2023))
    df2023.rename_geometry('geom', inplace=True)
    geom2023 = (df2023
                .loc[df2023.CODE_SITE == '38GAM0128', ['CODE_SITE', 'DATEMODIF', 'geom']]
                .rename(columns={
                    'CODE_SITE': 'id_site', 'DATEMODIF': 'date',
                }))
    geom2023.date = '2023-11-28'
    geom2023.id_site = '38GL0011'
    geom2023['auteur'] = 'JUTON Mathieu'
    insertNewGeom(geom2023)

    ### 38GAM0091
    replace_geom = cor_dt_old[cor_dt_old.id_origine == '38GAM0091'].copy()
    replace_geom = replace_geom[['site_code', 'id_origine', 'date', 'geom', 'auteur']]
    pycen.update_to_sql(
        (replace_geom.drop(columns=['auteur'])
         .merge(info, left_on='id_origine', right_on='CODE_SITE', how='inner')
         .set_geometry('geom', crs=2154)
         .drop(columns=['CODE_SITE', 'nom'])
         .rename(columns={'site_code': 'id_site'})),
        pycen.con, 'r_sites_geom', 'sites', 'id_site',
    )

    LST_NEW_MAJ = [  # same list as defined above
        '38GAM0022', '38GAM0061',
        '38GAM0118', '38GAM0127', '38GAM0129']

    NEW_maj = (gpd.pd.concat([
        maj_dt_old[maj_dt_old.id_origine.isin(LST_NEW_MAJ)],
        cor_dt_new[cor_dt_new.id_origine.isin(LST_NEW_MAJ)],
        cor_dt_old[cor_dt_old.id_origine.isin(LST_NEW_MAJ)]
    ]).drop(columns=['CENTRE_X', 'CENTRE_Y']))
    NEW_maj.loc[NEW_maj.date < '2017-01-01', 'date'] = '2017-01-01'

    rpl_geom = (NEW_maj
                .merge(info, left_on='id_origine', right_on='CODE_SITE', how='inner')
                .drop(columns=['CODE_SITE', 'nom'])
                .rename(columns={'site_code': 'id_site'}))
    rpl_delim = (NEW_maj
                 .merge(delim, left_on='id_origine', right_on='CODE_SITE', how='inner')
                 .drop(columns=['CODE_SITE', 'id_origine', 'geom']))
    hactivhum = gpd.pd.read_sql('SELECT * FROM zones_humides.param_activ_hum', con=pycen.con)
    rpl_activHum = (NEW_maj
                    .merge(usg, left_on='id_origine', right_on='CODE_SITE', how='inner')
                    .rename(columns={
                        'crit_usg': 'activite_hum',
                        'remarques': 'remarks'})
                    .astype({'activite_hum': int})
                    .replace({'activite_hum': dict(zip(hactivhum.id, hactivhum.nom))})
                    .drop(columns=['CODE_SITE', 'id_origine', 'geom']))
    rpl_activHum['impact'] = 'Inconnu'
    rpl_activHum['localisation'] = 'Inconnu'
    rpl_RHyd = (NEW_maj
                .merge(reghydro, left_on='id_origine', right_on='CODE_SITE', how='inner')
                .drop(columns=['CODE_SITE', 'id_origine', 'geom']))
    rpl_subm = (NEW_maj
                .merge(subm, left_on='id_origine', right_on='CODE_SITE', how='inner')
                .drop(columns=['CODE_SITE', 'id_origine', 'geom']))
    rpl_cnx = (NEW_maj
               .merge(cnx, left_on='id_origine', right_on='CODE_SITE', how='inner')
               .drop(columns=['CODE_SITE', 'id_origine', 'geom']))
    rpl_fct = (NEW_maj
               .merge(fct, left_on='id_origine', right_on='CODE_SITE', how='inner')
               .drop(columns=['CODE_SITE', 'id_origine', 'geom'])
               .rename(columns={'description': 'memo'}))
    insertNewGeom(rpl_geom)
    insertAttrsDelim(rpl_delim)
    insertAttrsUsgPrss(rpl_activHum)
    insert_regHydro(rpl_RHyd)
    insert_subm(rpl_subm)
    insert_cnx(rpl_cnx)
    insertAttrsFct(rpl_fct, True)

    ######### OLD df_maj ########
    # Inspection only: the two expressions below have no side effect; they
    # were used interactively to review the remaining old-dated updates.
    maj_dt_old.merge(info, left_on='id_origine', right_on='CODE_SITE', how='inner').drop(columns=['CODE_SITE'])
    maj_dt_old[maj_dt_old.date != maj_dt_old.date_geom]
    # maj_dt_old = maj_dt_old[~maj_dt_old.id_origine.isin(NOT_MAJ)]

    ############################
    ####### df_cor_OLD #########
    ############################
    ### 38GAM0079
    LST_COR_OLD = ['38GAM0079']
    COR_majOLD = (gpd.pd.concat([
        maj_dt_new[maj_dt_new.id_origine.isin(LST_COR_OLD)],
        maj_dt_old[maj_dt_old.id_origine.isin(LST_COR_OLD)],
        cor_dt_new[cor_dt_new.id_origine.isin(LST_COR_OLD)],
        cor_dt_old[cor_dt_old.id_origine.isin(LST_COR_OLD)]
    ]).drop(columns=['CENTRE_X', 'CENTRE_Y']))
    COR_majOLD.auteur = 'CEN Isère'
    COR_majOLD_new = (COR_majOLD[['id_origine', 'NOM_SITE', 'id_typo_sdage', 'date', 'auteur', 'geom']].copy()
                      .rename(columns={
                          'NOM_SITE': 'site_name', 'id_origine': 'site_cod'})
                      .drop_duplicates())

    COR_majOLD_new.auteur = 'CEN Isère'
    COR_majOLD_new['date_fin'] = '2019-02-28'
    COR_majOLD_new['dept'] = COR_majOLD_new.site_cod.str[:2]
    COR_majOLD_new['org'] = COR_majOLD_new.site_cod.str[2:5]
    COR_majOLD_new['num'] = COR_majOLD_new.site_cod.str[5:]
    COR_majOLD_new['type_milieux'] = 'Milieux humides'
    COR_majOLD_cor = (COR_majOLD[['id_origine', 'site_code']]
                      .rename(columns={
                          'site_code': 'id_site_new', 'id_origine': 'id_site_old'})
                      .drop_duplicates())
    COR_majOLD_newgeom = (COR_majOLD_new[['site_cod', 'date', 'auteur', 'geom']]
                          .merge(info, left_on='site_cod', right_on='CODE_SITE', how='left')
                          .drop(columns=['CODE_SITE', 'nom']))

    # COR_majOLD_new.auteur = 'Mosaïque Environnement'
    # COR_majOLD_new.date = '2017-01-01'
    COR_majOLD_newDelim = (COR_majOLD_new[['site_cod', 'date', 'auteur']]
                           .merge(delim, left_on='site_cod', right_on='CODE_SITE', how='inner')
                           .drop(columns=['CODE_SITE']))
    hactivhum = gpd.pd.read_sql('SELECT * FROM zones_humides.param_activ_hum', con=pycen.con)
    COR_majOLD_newUPS = (COR_majOLD_new[['site_cod', 'date', 'auteur']]
                         .merge(usg, left_on='site_cod', right_on='CODE_SITE', how='inner')
                         .drop(columns=['CODE_SITE'])
                         .rename(columns={
                             'crit_usg': 'activite_hum',
                             'remarques': 'remarks'})
                         .astype({'activite_hum': int})
                         .replace({'activite_hum': dict(zip(hactivhum.id, hactivhum.nom))})
                         )
    COR_majOLD_newUPS['impact'] = 'Inconnu'
    COR_majOLD_newUPS['localisation'] = 'Inconnu'
    COR_majOLD_newRHyd = (COR_majOLD_new[['site_cod', 'date', 'auteur']]
                          .merge(reghydro, left_on='site_cod', right_on='CODE_SITE', how='inner')
                          .drop(columns=['CODE_SITE']))
    COR_majOLD_newSubm = (COR_majOLD_new[['site_cod', 'date', 'auteur']]
                          .merge(subm, left_on='site_cod', right_on='CODE_SITE', how='inner')
                          .drop(columns=['CODE_SITE']))
    COR_majOLD_newCnx = (COR_majOLD_new[['site_cod', 'date', 'auteur']]
                         .merge(cnx, left_on='site_cod', right_on='CODE_SITE', how='inner')
                         .drop(columns=['CODE_SITE']))
    COR_majOLD_newFct = (COR_majOLD_new[['site_cod', 'date', 'auteur']]
                         .merge(fct, left_on='site_cod', right_on='CODE_SITE', how='inner')
                         .drop(columns=['CODE_SITE'])
                         .rename(columns={'description': 'memo'}))

    insertNewSite((COR_majOLD_new
                   .drop(columns='geom')
                   .rename(columns={
                       'date': 'date_deb'
                   })))
    COR_majOLD_cor.to_sql(
        'r_site_maj', pycen.con, 'sites', if_exists='append', index=False
    )
    insertNewGeom(pycen.tools.Polygons_to_MultiPolygon(COR_majOLD_newgeom))
    insertAttrsDelim(COR_majOLD_newDelim)
    insertAttrsUsgPrss(COR_majOLD_newUPS)
    insert_regHydro(COR_majOLD_newRHyd)
    insert_subm(COR_majOLD_newSubm)
    insert_cnx(COR_majOLD_newCnx)
    insertAttrsFct(COR_majOLD_newFct, True)

    ############################
    ####### df_cor_NEW #########
    ############################
    RD21 = gpd.read_file(path.join(PATH, '../../../../../../VECTEURS/ETUDES/ZONES HUMIDES/INVENTAIRE_ZH/ZH_2010 (re-save).shp'))
    RD21.to_crs(2154, inplace=True)
    RD21.rename_geometry('geom', inplace=True)
    RD21.DATEMODIF = gpd.pd.to_datetime(RD21.DATEMODIF)
    rebase_geom = (RD21
                   .loc[RD21.SITE_CODE.isin(['38RD0021', '38RD0126', '38RD0025']),
                        ['SITE_CODE', 'geom', 'DATEMODIF']]
                   .rename(columns={
                       'SITE_CODE': 'id_site',
                       'DATEMODIF': 'date',
                   }))
    rebase_geom.loc[rebase_geom.id_site == '38RD0025', 'id_site'] = '38RD0127'
    pycen.update_to_sql(rebase_geom, pycen.con, 'r_sites_geom', 'sites', 'id_site')

    LST_COR_MAJ = [  # same list as defined above
        '38GAM0036', '38GAM0068',
        '38GAM0063', '38GAM0123',
        '38GAM0071']
    COR_maj = (gpd.pd.concat([
        maj_dt_new[maj_dt_new.id_origine.isin(LST_COR_MAJ)],
        maj_dt_old[maj_dt_old.id_origine.isin(LST_COR_MAJ)],
        cor_dt_new[cor_dt_new.id_origine.isin(LST_COR_MAJ)],
        cor_dt_old[cor_dt_old.id_origine.isin(LST_COR_MAJ)]
    ]).drop(columns=['CENTRE_X', 'CENTRE_Y']))
    COR_maj.loc[COR_maj.date < '2017-01-01', 'date'] = '2017-01-01'

    COR_maj_site = (COR_maj[['id_origine', 'NOM_SITE', 'id_typo_sdage', 'date', 'auteur', 'geom']]
                    .copy()
                    .drop_duplicates()
                    .rename(columns={
                        'NOM_SITE': 'site_name', 'id_origine': 'site_cod'})
                    )

    COR_maj_site['dept'] = COR_maj_site.site_cod.str[:2]
    COR_maj_site['org'] = COR_maj_site.site_cod.str[2:5]
    COR_maj_site['num'] = COR_maj_site.site_cod.str[5:]
    COR_maj_site['type_milieux'] = 'Milieux humides'
    COR_maj_cor = (COR_maj[['id_origine', 'site_code']]
                   .rename(columns={
                       'site_code': 'id_site_old', 'id_origine': 'id_site_new'})
                   .drop_duplicates())

    COR_maj_siteOLD = (COR_maj[['site_code', 'date']]
                       .copy()
                       .rename(columns={'date': 'date_fin', 'site_code': 'id'})
                       .sort_values(['id', 'date_fin'])
                       .drop_duplicates('id', keep='last'))

    COR_maj_newgeom = (COR_maj_site[['site_cod', 'date', 'auteur', 'geom']]
                       .merge(info, left_on='site_cod', right_on='CODE_SITE', how='left')
                       .drop(columns=['CODE_SITE', 'nom']))

    COR_maj_newDelim = (COR_maj_site[['site_cod', 'date', 'auteur']]
                        .merge(delim, left_on='site_cod', right_on='CODE_SITE', how='inner')
                        .drop(columns=['CODE_SITE']))
    hactivhum = gpd.pd.read_sql('SELECT * FROM zones_humides.param_activ_hum', con=pycen.con)
    COR_maj_newUPS = (COR_maj_site[['site_cod', 'date', 'auteur']]
                      .merge(usg, left_on='site_cod', right_on='CODE_SITE', how='inner')
                      .drop(columns=['CODE_SITE'])
                      .rename(columns={
                          'crit_usg': 'activite_hum',
                          'remarques': 'remarks'})
                      .astype({'activite_hum': int})
                      .replace({'activite_hum': dict(zip(hactivhum.id, hactivhum.nom))})
                      )
    COR_maj_newUPS['impact'] = 'Inconnu'
    COR_maj_newUPS['localisation'] = 'Inconnu'
    COR_maj_newRHyd = (COR_maj_site[['site_cod', 'date', 'auteur']]
                       .merge(reghydro, left_on='site_cod', right_on='CODE_SITE', how='inner')
                       .drop(columns=['CODE_SITE']))
    COR_maj_newSubm = (COR_maj_site[['site_cod', 'date', 'auteur']]
                       .merge(subm, left_on='site_cod', right_on='CODE_SITE', how='inner')
                       .drop(columns=['CODE_SITE']))
    COR_maj_newCnx = (COR_maj_site[['site_cod', 'date', 'auteur']]
                      .merge(cnx, left_on='site_cod', right_on='CODE_SITE', how='inner')
                      .drop(columns=['CODE_SITE']))
    COR_maj_newFct = (COR_maj_site[['site_cod', 'date', 'auteur']]
                      .merge(fct, left_on='site_cod', right_on='CODE_SITE', how='inner')
                      .drop(columns=['CODE_SITE'])
                      .rename(columns={'description': 'memo'}))

    # Insert New Site
    insertNewSite((COR_maj_site
                   .drop(columns='geom')
                   .rename(columns={
                       'date': 'date_deb'
                   })))
    COR_maj_cor.to_sql('r_site_maj', con=pycen.con, schema='sites', if_exists='append', index=False)
    # Insert `date_fin` for OLD_Site
    pycen.update_to_sql(COR_maj_siteOLD, pycen.con, 'sites', 'sites', 'id')
    # Insert NewGeom
    insertNewGeom(pycen.tools.Polygons_to_MultiPolygon(COR_maj_newgeom))
    insertAttrsDelim(COR_maj_newDelim)
    insertAttrsUsgPrss(COR_maj_newUPS)
    insert_regHydro(COR_maj_newRHyd)
    insert_subm(COR_maj_newSubm)
    insert_cnx(COR_maj_newCnx)
    insertAttrsFct(COR_maj_newFct, True)

    ##########################
    ##### ADD 2010 GEOM ######
    ##########################
    PATH = '/media/colas/SRV/FICHIERS/OUTILS/CARTOGRAPHIE/VECTEURS/ETUDES/ZONES HUMIDES/INVENTAIRE_ZH/'
    FILE_2010 = 'ZH_2010 (re-save).shp'
    df2010 = gpd.read_file(path.join(PATH, FILE_2010))
    df2010.to_crs(2154, inplace=True)
    df2010.rename_geometry('geom', inplace=True)
    # Same extraction repeated for the three 2010 geometries to re-attach
    # (the original spelled out the 38GL0011, 38GL0012 and 38VE0213 blocks
    # one by one; the loop is behaviour-preserving).
    for site_code in ['38GL0011', '38GL0012', '38VE0213']:
        g2010 = (df2010.loc[df2010.SITE_CODE == site_code, ['SITE_CODE', 'geom', 'DATEMODIF']]
                 .copy()
                 .rename(columns={
                     'DATEMODIF': 'date', 'SITE_CODE': 'id_site'
                 }))
        g2010['auteur'] = 'CEN Isère'
        insertNewGeom(pycen.tools.Polygons_to_MultiPolygon(g2010))