commit 002efc9d91
parent c16a92afc8

    update
@@ -21,8 +21,8 @@ from pycen import con_fon
 epsg = '2154'
 crs = 'EPSG:%s'%epsg
 chunk = None
-pci_annee = '2020'
-matrice_annee = '2020'
+pci_annee = '2021'
+matrice_annee = '2021'
 start_time = dt.datetime.today()
 def time_exec (init_time):
     time = dt.datetime.today() - init_time
@@ -46,91 +46,92 @@ def get_data(table,schema,engine=con_fon,chunk=chunk):
 
     return df
 
+if __name__ == "__main__":
 
     ###############
     # Get sites
     tab_site = 'sites'
     sch_site = 'sites'
     tab_fon = 'cadastre_site'
     sch_fon = 'foncier'
     print('''IMPORT data from schema: '%s' , table : '%s' '''%(sch_site,tab_site))
     # sql = "SELECT * FROM {0}.{1}".format(sch_site, tab_site)
     sql = '''
     SELECT * FROM {0}.{1}
-    WHERE site_id = 'GDMA'
+    WHERE site_id = 'CRAS'
     --WHERE site_id NOT IN (SELECT DISTINCT site_id FROM {2}.{3})
     '''.format(sch_site, tab_site, sch_fon, tab_fon)
     site = gpd.read_postgis(
         # table_name = table_in,
         sql = sql,
         con = con_fon,
         geom_col = 'geom',
         # schema = schema_in,
         crs = crs,
         chunksize = chunk, )
     # union_site = gpd.GeoSeries(site.geom.cascaded_union)
     if site.empty:
         print('Pas de nouveaux sites à lier au cadastre ====> EXIT')
         sys.exit()
 
     #################
     # Get parcelles
     tab_parc = 'cadastre'
     sch_parc = 'parcelles'
     print('''IMPORT data from schema: '%s' , table : '%s' '''%(sch_parc,tab_parc))
     sql = """SELECT * FROM {sch}.{tab} WHERE ST_Intersects (geom, 'SRID={epsg};{poly}')
     AND par_id NOT IN (SELECT par_id FROM {sch}.parcelles_cen)""".format(
         sch=tab_parc, tab=sch_parc, epsg=epsg, poly=site.unary_union
     )
     parc = gpd.read_postgis(
         sql = sql,
         con = con_fon,
         geom_col = 'geom',
         crs = crs,
         chunksize = chunk, )
 
     parc_cent = parc.copy()
     # parc_cent.geom = parc_cent.representative_point()
     # parc_cent.geom = parc_cent.centroid
     res = gpd.sjoin(site, parc_cent, predicate='intersects')
 
 
     print('RUN fonction "import_parcelles_cen" pour {} lignes '.format(res.shape[0]))
     # lst_site = res.site_id.unique()
     start_time = dt.datetime.today()
     res['sql'] = "SELECT cadastre.import_parcelles_cen('"+res.par_id+"','"+res.site_id+"',"+str(start_time.year)+");"
 
     with con_fon.begin() as cnx:
         res['sql'].map(lambda x: cnx.execute(x))
 
 
     ###############
     # Get cptprop
     lst_parid = "','".join(res.par_id.unique())
     sql = '''
     SELECT * FROM cadastre.cadastre
     JOIN cadastre.lots USING (lot_id)
     WHERE lots.par_id IN ('{lst_parid}')'''.format(lst_parid=lst_parid)
     cptprop = pd.read_sql(
         sql = sql,
         con = con_fon,)
     cptprop.drop_duplicates('dnupro', inplace=True)
 
     start_time = dt.datetime.today()
     print('RUN fonction "import_cptprop_cen" pour {} lignes '.format(cptprop.shape[0]))
 
 
     cptprop['sql'] = "SELECT cadastre.import_cptprop_cen('"+cptprop.dnupro+"',"+str(start_time.year)+");"
 
     with con_fon.begin() as cnx:
         cptprop['sql'].map(lambda x: cnx.execute(x))
     time_exec(start_time)
     print('END fonction : import_cptprop_cen .......... %s'%time_exec(start_time))
 
     site_id = 'GDMA'
     with con_fon.begin() as cnx:
         cnx.execute("SELECT admin_sig.refresh_mview_foncier('{site_id}')"\
             .format(
                 site_id=site_id
             )
         )
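For reference, the site-to-parcel matching above hinges on gpd.sjoin with the intersects predicate. A minimal sketch on synthetic data — the IDs, geometries and CRS are illustrative, not the production tables:

import geopandas as gpd
from shapely.geometry import Point, box

# Polygons stand in for 'sites', points for parcel centroids.
site = gpd.GeoDataFrame({'site_id': ['CRAS']},
                        geometry=[box(0, 0, 10, 10)], crs='EPSG:2154')
parc_cent = gpd.GeoDataFrame({'par_id': ['380010000A0001', '380010000A0002']},
                             geometry=[Point(5, 5), Point(20, 20)],
                             crs='EPSG:2154')

# Inner join: keeps only parcel centroids that fall inside a site polygon;
# the second point lies outside and is dropped.
res = gpd.sjoin(site, parc_cent, predicate='intersects')
print(res[['site_id', 'par_id']])  # -> one row: CRAS / 380010000A0001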
@@ -43,7 +43,7 @@ base = 'bd_cen'
 # pwd = pwd,
 # adr = adr,
 # base = base
-# # schema = schema
+# schema = schema
 # )
 crs = 'EPSG:2154'
 
@@ -55,9 +55,9 @@ def _where_parcelle(sql0,schema,list_parid):
         list_parid = [list_parid]
 
     LIST_ID = str(tuple(list_parid)).replace(',)',')')
-    sql1 = '''
-    WHERE p.parcelle IN {list_id}
+    sql1 = 'AND' if 'WHERE' in sql0 else 'WHERE'
+    sql1 += '''
+    p.parcelle IN {list_id}
     ;'''.format(sch=schema,list_id=LIST_ID)
 else :
     chunk = 200000
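The change above makes the parcel filter composable: the connector keyword is picked from the incoming query instead of always emitting WHERE. A standalone sketch of that logic (hypothetical function name and inputs; the real _where_parcelle carries more context):

def append_parcelle_filter(sql0, list_parid):
    # Normalise a scalar id to a list, as the original does.
    if not isinstance(list_parid, list):
        list_parid = [list_parid]
    # str(tuple(...)) yields "('A',)" for one element; strip the comma.
    list_id = str(tuple(list_parid)).replace(',)', ')')
    # Append with AND when the query already has a WHERE clause.
    sql1 = 'AND' if 'WHERE' in sql0 else 'WHERE'
    sql1 += '''
    p.parcelle IN {list_id}
    ;'''.format(list_id=list_id)
    return sql0 + ' ' + sql1

print(append_parcelle_filter('SELECT * FROM p', ['A', 'B']))     # -> WHERE ...
print(append_parcelle_filter('SELECT * FROM p WHERE x = 1', 'A'))  # -> AND ...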
@@ -325,7 +325,7 @@ def _get_lots1(schema='38_202207'):
 
 
 def _get_lots2(schema='38_202207',list_parid=None):
-    sql0 = '''
+    sql0 = '''set work_mem='265MB';
     SELECT DISTINCT
     CASE WHEN TRIM(t.dnulot) = '' OR TRIM(t.dnulot) IS NULL
     THEN substring(t.parcelle from 1 for 2)||substring(t.parcelle from 4 for 12)||TRIM(t.ccosub)
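The new set work_mem='265MB'; prefix raises PostgreSQL's per-sort-operation memory budget for this session only, which lets the heavy SELECT DISTINCT below spill less to disk. A minimal sketch of the same idea with SQLAlchemy (connection URL and table are placeholders):

from sqlalchemy import create_engine, text

engine = create_engine('postgresql://user:pwd@localhost:5432/bd_cen')  # placeholder URL
with engine.begin() as cnx:
    cnx.execute(text("SET work_mem = '265MB'"))  # scoped to this session/transaction
    rows = cnx.execute(text('SELECT DISTINCT parcelle FROM p')).fetchall()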
@@ -511,7 +511,7 @@ comment = """
 COMMENT ON COLUMN zones_humides.v_zoneshumides.site_code IS 'Identifiant de la zone humide.';
 COMMENT ON COLUMN zones_humides.v_zoneshumides.old_code IS 'Ancien identifiant de la zone humide pouvant se retrouver dans des données historiques.';
 COMMENT ON COLUMN zones_humides.v_zoneshumides.nom IS 'Nom de la zone humide.';
-COMMENT ON COLUMN zones_humides.v_zoneshumides.autre_nom IS 'Autre nom de la zone humide.';
+COMMENT ON COLUMN zones_humides.v_zoneshumides.autre_nom IS 'Autre nom possible de la zone humide.';
 COMMENT ON COLUMN zones_humides.v_zoneshumides.auteur_site IS 'Auteur ayant caractérisé la zone humide pour la première fois.';
 COMMENT ON COLUMN zones_humides.v_zoneshumides.auteur_geom IS 'Auteur ayant définis la géometrie actuelle de la zone humide.';
 COMMENT ON COLUMN zones_humides.v_zoneshumides.auteur_last_maj IS 'Auteur le plus récent ayant défini les attributs de la zone humide.';
@@ -521,6 +521,7 @@ COMMENT ON COLUMN zones_humides.v_zoneshumides.date_last_maj IS 'Date des attrib
 COMMENT ON COLUMN zones_humides.v_zoneshumides.type_milieu IS 'Caractérisation du milieu.';
 COMMENT ON COLUMN zones_humides.v_zoneshumides.type_site IS '';
 COMMENT ON COLUMN zones_humides.v_zoneshumides.typo_sdage IS 'Typologie sdage de la zone humide.';
+COMMENT ON COLUMN zones_humides.v_zoneshumides.mnemo_sdage IS 'Libellé mnémonique sdage de la zone humide.';
 COMMENT ON COLUMN zones_humides.v_zoneshumides.rmq_site IS 'Remarques générale concernant la zone humide.';
 COMMENT ON COLUMN zones_humides.v_zoneshumides.rmq_fct_majeur IS 'Remarques sur les fonctions majeurs de la zone humide.';
 COMMENT ON COLUMN zones_humides.v_zoneshumides.rmq_interet_patri IS 'Remarques sur les intérêts patrimoniaux de la zone humide.';
@@ -549,6 +550,8 @@ COMMENT ON COLUMN zones_humides.v_zoneshumides.entree_eau_topo IS 'Liste de la t
 COMMENT ON COLUMN zones_humides.v_zoneshumides.sortie_eau_reg IS 'Liste des sorties d''eau du régime hydrique de la zone humide. Chaque élément de la liste est séparé par un '';''.';
 COMMENT ON COLUMN zones_humides.v_zoneshumides.sortie_eau_perm IS 'Liste des permanances respectivement de chaque sortie d''eau de la zone humide. Chaque élément de la liste est séparé par un '';''.';
 COMMENT ON COLUMN zones_humides.v_zoneshumides.sortie_eau_topo IS 'Liste de la toponymie respectivement de chaque sortie d''eau de la zone humide. Chaque élément de la liste est séparé par un '';''.';
+COMMENT ON COLUMN zones_humides.v_zoneshumides.id_origine IS 'Identifiant d''origine de la zone fournie par un partenaire exterieur.';
+COMMENT ON COLUMN zones_humides.v_zoneshumides.geom IS 'Géometrie de la zone humide.';
 """
 with con.begin() as cnx:
     cnx.execute(text(v_zoneshumides))
@@ -28,5 +28,5 @@ if __name__ == "__main__":
     format_data(vzh_pne)
     format_data(zhp_pne)
 
-    vzh_pne.drop('old_code',axis=1).to_file(os.path.join(PATH,'PNE_ZH.gpkg'),driver='GPKG',layer='inventaire_zh')
+    vzh_pne.drop(['old_code','type_site'],axis=1).to_file(os.path.join(PATH,'PNE_ZH.gpkg'),driver='GPKG',layer='inventaire_zh')
     zhp_pne.to_file(os.path.join(PATH,'PNE_ZH.gpkg'),driver='GPKG',layer='zh_ponctuelle')
@@ -2,9 +2,9 @@ from pycen import con_gn
 from pycen.wfs import list_layer,get_wfs
 import geopandas as gpd
 
-def extract_synthese_gn(table='v_synthese_for_export',con=con_gn,date_min=None,date_max=None,polygon=None):
+def extract_synthese_gn(table='v_synthese_for_export',con=con_gn,date_min=None,date_max=None,polygon=None,filter_obscen=True):
     sql = 'SELECT *,ST_GeomFromText(geometrie_wkt_4326,4326) geom FROM gn_synthese.{tab}'.format(tab=table)
-    if any([i is not None for i in [date_min,date_max,polygon]]):
+    if any([i is not None for i in [date_min,date_max,polygon]]) or filter_obscen:
         sql += ' WHERE '
         if date_min is not None:
             sql += "date_debut > '%s' AND " % date_min
@@ -12,14 +12,14 @@ def extract_synthese_gn(table='v_synthese_for_export',con=con_gn,date_min=None,d
             sql += "date_debut < '%s' AND " % date_max
         if polygon is not None:
             sql += "ST_Intersects(ST_Transform(ST_GeomFromText(geometrie_wkt_4326,4326),2154),'SRID={epsg};{poly}') AND ".format(epsg=2154,poly=polygon)
+        if filter_obscen:
             sql += """(
             unaccent(observateurs) LIKE ANY (array['%%Conservatoire d%%Espaces Naturels Isere%%', '%%Conservatoire d%%Espaces Naturels d%%Isere%%', '%%CEN Isere%%', '%%Association des Amis de l%%Ile de la Platiere%%'])
             OR observateurs NOT LIKE '%%(%%)%%'
             ) AND observateurs NOT IN ('', 'Benoit Dodelin',', ','%%DELOCHE Denis%%')
             AND jdd_id NOT IN (185,377,236)
             AND ca_id NOT IN (53,65,66,1,2,6)
             """
 
     return gpd.read_postgis(sql,con_gn)
 
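Hypothetical calls showing the effect of the new filter_obscen flag (assuming the function above is importable; the dates are arbitrary):

# Default: restrict to CEN Isère observers and the whitelisted datasets.
obs_cen = extract_synthese_gn(date_min='2023-01-01')
# Disable the observer/dataset filter to pull the full synthesis extract.
obs_all = extract_synthese_gn(date_min='2023-01-01', filter_obscen=False)

Note that, as written, the date and polygon branches emit a trailing "AND " that only the filter block closes, so disabling the flag while passing dates or a polygon would leave a dangling AND in the SQL.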
@@ -1,78 +1,93 @@
 from pycen import con_gn
 import numpy as np
 import pandas as pd
+import os
 
-# Liste des CD_NOM en entrée
-taxlist = pd.read_excel('/media/colas/SRV/FICHIERS/TRANSFERTS-EQUIPE/CG/BOCA_CD_nom.xlsx')
-
-
-sql = """
-SELECT
-    t.cd_nom,
-    t.cd_ref,
-    t.regne,
-    t.phylum,
-    t.classe,
-    t.ordre,
-    t.famille,
-    t.group1_inpn,
-    t.group2_inpn,
-    t.nom_vern,
-    t.nom_complet,
-    t.nom_valide,
-    t.lb_nom,
-    --s.*
-    s.code_statut,
-    s.cd_type_statut,
-    s.label_statut
-FROM taxonomie.v_taxref_all_listes t
-JOIN taxonomie.v_bdc_status s USING (cd_nom)
-WHERE t.cd_nom IN {cd_nom}
-;""".format(cd_nom = tuple(taxlist.CD_NOM.astype(str)) )
-df = pd.read_sql_query(sql,con_gn)
-df.to_csv('/media/colas/SRV/FICHIERS/TRANSFERTS-EQUIPE/LC/BOCA_CD_NOM_STATUS.csv')
-
-pivot = pd.pivot_table(
-    df,
-    values='code_statut',
-    index=['cd_nom', 'cd_ref','lb_nom'#,'niveau_admin','lb_adm_tr'
-    ],
-    columns=['cd_type_statut'],
-    aggfunc=list,fill_value=None)
-
-for c in pivot.columns:
-    pivot[c] = [x[0] if x is not np.NaN and len(x)==1 else x for x in pivot[c]]
-pivot['DH'] = [','.join(x) if (x is not np.NaN) and (len(x)==2) else x for x in pivot['DH']]
-pivot.DH.replace({'CDH':''},regex=True,inplace=True)
-
-
-pivlib = pd.pivot_table(
-    df,
-    values='label_statut',
-    index=['cd_nom', 'cd_ref','lb_nom'#,'niveau_admin','lb_adm_tr'
-    ],
-    columns=['cd_type_statut'],
-    aggfunc=list,fill_value=None)
-for c in pivlib.columns:
-    pivlib[c] = [x[0] if x is not np.NaN and len(x)==1 else x for x in pivlib[c]]
-pivlib['DH'] = [','.join(x) if (x is not np.NaN) and (len(x)==2) else x for x in pivlib['DH']]
-pivlib.DH.replace({'CDH':''},regex=True,inplace=True)
-
-
-print('INIT writer')
-NAME_OUT = '/media/colas/SRV/FICHIERS/TRANSFERTS-EQUIPE/LC/BOCA_CD_NOM_STATUS.xlsx'
-with pd.ExcelWriter(NAME_OUT) as writer:
-    df.to_excel(
-        writer,sheet_name='v_bdc_status'
-    )
-    # writer.save()
-    print('v_bdc_status OK !')
-    pivot.to_excel(
-        writer,sheet_name='pivot_table'
-    )
-    # writer.save()
-    print('pivot_table OK !')
-    pivlib.to_excel(
-        writer,sheet_name='pivot_libel'
-    )
-    writer.save()
-    print('pivot_libel OK !')
+def get_status(lst):
+    sql = """
+    SELECT
+        t.cd_nom,
+        t.cd_ref,
+        t.regne,
+        t.phylum,
+        t.classe,
+        t.ordre,
+        t.famille,
+        t.group1_inpn,
+        t.group2_inpn,
+        t.nom_vern,
+        t.nom_complet,
+        t.nom_valide,
+        t.lb_nom,
+        --s.*
+        s.code_statut,
+        s.cd_type_statut,
+        s.label_statut
+    FROM taxonomie.v_taxref_all_listes t
+    JOIN taxonomie.v_bdc_status s USING (cd_nom)
+    WHERE t.cd_nom IN {cd_nom}
+    ;""".format(cd_nom = tuple(lst))
+    return pd.read_sql_query(sql,con_gn)
+
+
+if __name__ == "__main__":
+
+    PATH = '/media/colas/Disk2/tmp/NICO'
+    file = 'MAIR_FAUNE&FLORE_PG2024_V0.xlsx'
+    sheet = 'MAIR_FLORE'
+
+    # Liste des CD_NOM en entrée
+    taxlist = pd.read_excel(os.path.join(PATH,file),sheet,usecols=['cd_nom'],header=1)
+    df = get_status(taxlist.cd_nom.astype(str))
+
+    # df.to_csv('/media/colas/SRV/FICHIERS/TRANSFERTS-EQUIPE/LC/BOCA_CD_NOM_STATUS.csv')
+
+    pivot = pd.pivot_table(
+        df,
+        values='code_statut',
+        index=['cd_nom', 'cd_ref','lb_nom'#,'niveau_admin','lb_adm_tr'
+        ],
+        columns=['cd_type_statut'],
+        aggfunc=list,fill_value=None)
+
+    for c in pivot.columns:
+        pivot[c] = [x[0] if x is not np.NaN and len(x)==1 else x for x in pivot[c]]
+    if 'DH' in pivot.columns:
+        pivot['DH'] = [','.join(x) if (x is not np.NaN) and (len(x)==2) else x for x in pivot['DH']]
+        pivot.DH.replace({'CDH':''},regex=True,inplace=True)
+
+
+    pivlib = pd.pivot_table(
+        df,
+        values='label_statut',
+        index=['cd_nom', 'cd_ref','lb_nom'#,'niveau_admin','lb_adm_tr'
+        ],
+        columns=['cd_type_statut'],
+        aggfunc=list,fill_value=None)
+    for c in pivlib.columns:
+        pivlib[c] = [x[0] if x is not np.NaN and len(x)==1 else x for x in pivlib[c]]
+    if 'DH' in pivot.columns:
+        pivlib['DH'] = [','.join(x) if (x is not np.NaN) and (len(x)==2) else x for x in pivlib['DH']]
+        pivlib.DH.replace({'CDH':''},regex=True,inplace=True)
+
+
+    print('INIT writer')
+    NAME_OUT = os.path.join(PATH,sheet+'_status.xlsx')
+    with pd.ExcelWriter(NAME_OUT) as writer:
+        df.to_excel(
+            writer,sheet_name='v_bdc_status'
+        )
+        # writer.save()
+        print('v_bdc_status OK !')
+        pivot.to_excel(
+            writer,sheet_name='pivot_table'
+        )
+        # writer.save()
+        print('pivot_table OK !')
+        pivlib.to_excel(
+            writer,sheet_name='pivot_libel'
+        )
+        # writer.save()
+        print('pivot_libel OK !')
 
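The rewrite keeps the pivot/unwrap idiom: aggfunc=list collects every status code per (taxon, status-type) cell, and the follow-up loop unwraps one-element lists back to scalars. A sketch on synthetic rows, with an isinstance guard standing in for the np.NaN comparison:

import pandas as pd

df = pd.DataFrame({
    'cd_nom':         ['1', '1', '2'],
    'cd_type_statut': ['LRN', 'DH', 'LRN'],
    'code_statut':    ['LC', 'CDH2', 'NT'],
})
pivot = pd.pivot_table(df, values='code_statut', index='cd_nom',
                       columns='cd_type_statut', aggfunc=list, fill_value=None)
for c in pivot.columns:
    pivot[c] = [x[0] if isinstance(x, list) and len(x) == 1 else x
                for x in pivot[c]]
print(pivot)  # cell ('2','DH') stays NaN; every other cell is an unwrapped scalar

The new if 'DH' in pivot.columns: guard matters because the DH (Directive Habitats) column only exists when at least one input taxon carries such a status.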
@@ -4,7 +4,6 @@
 from sqlalchemy.engine import URL
 from sqlalchemy import create_engine,text
 import pandas as pd
-from pycen import con_bdcen,con_fon
 
 # Parametres bdd
 # user = 'cen_admin'
@@ -68,33 +67,50 @@ def create_grp(con,grp):
     with con.begin() as cnx:
         cnx.execute(sql)
 
-# sql = "select * from pg_catalog.pg_user" # where tableowner = 'gpasquier'"
-
-for grp in usr.keys():
-    # create grp_role
-    create_grp(con_fon,grp)
-
-    # create usr
-    for user in usr[grp]:
-        sql = """
-        DO
-        $do$
-        BEGIN
-        IF NOT EXISTS (
-            SELECT FROM pg_catalog.pg_roles
-            WHERE rolname = '{usr}') THEN
-
-            CREATE ROLE "{usr}" LOGIN PASSWORD '{usr}';
-        END IF;
-        GRANT {grp} TO "{usr}" ;
-        END $do$""".format(usr=user,grp=grp)
-        with con.begin() as cnx:
-            cnx.execute(text(sql))
-
-
-# grant grp_role
-for grp in usr.keys():
-    sql = """GRANT grp_consult TO {usr} ;""".format(usr=grp)
+def create_usr(con,usr,pwd):
+    sql = """CREATE USER "{usr}" WITH
+    NOSUPERUSER
+    NOCREATEDB
+    NOCREATEROLE
+    INHERIT
+    LOGIN
+    NOREPLICATION
+    NOBYPASSRLS
+    PASSWORD '{pwd}'
+    CONNECTION LIMIT -1;""".format(usr=usr,pwd=pwd)
     with con.begin() as cnx:
         cnx.execute(sql)
+# sql = "select * from pg_catalog.pg_user" # where tableowner = 'gpasquier'"
+
+
+if __name__ == "__main__":
+    from pycen import con_bdcen,con_fon,con
+
+    for grp in usr.keys():
+        # create grp_role
+        create_grp(con_fon,grp)
+
+        # create usr
+        for user in usr[grp]:
+            sql = """
+            DO
+            $do$
+            BEGIN
+            IF NOT EXISTS (
+                SELECT FROM pg_catalog.pg_roles
+                WHERE rolname = '{usr}') THEN
+
+                CREATE ROLE "{usr}" LOGIN PASSWORD '{usr}';
+            END IF;
+            GRANT {grp} TO "{usr}" ;
+            END $do$""".format(usr=user,grp=grp)
+            with con.begin() as cnx:
+                cnx.execute(text(sql))
+
+
+    # grant grp_role
+    for grp in usr.keys():
+        sql = """GRANT grp_consult TO {usr} ;""".format(usr=grp)
+        with con.begin() as cnx:
+            cnx.execute(sql)
 
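A hypothetical driver for the two helpers above — group role first, then login user — on a throwaway engine (URL and credentials are placeholders):

from sqlalchemy import create_engine

engine = create_engine('postgresql://admin:secret@localhost:5432/bd_cen')  # placeholder
create_grp(engine, 'grp_consult')     # helper defined earlier in this file
create_usr(engine, 'jdoe', 's3cret')  # new helper: CREATE USER ... PASSWORD ...

Unlike the DO $do$ guard used in the __main__ block, a plain CREATE USER fails if the role already exists, so create_usr is only safe for first-time provisioning.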
@@ -38,13 +38,15 @@ def gen_mbtiles2(xml,output,format='mbtiles'):
     # projwin Isère : 836250 6534274 965160 6403454
     init = dt.now()
     cmd = '''
-    gdal_translate -outsize 75000 75000 -projwin 836250 6534274 965160 6403454 -projwin_srs "EPSG:2154" -of %s -co "COMPRESS=YES" -co "TILE_FORMAT=JPEG" -co "QUALITY=100" -co "MINZOOM=12" -co "MAXZOOM=22" --config GDAL_CACHEMAX 64 "%s" "%s"
+    gdal_translate -outsize 75000 75000 -projwin 836250 6534274 965160 6403454 -projwin_srs "EPSG:2154" -of %s -r cubic -co "COMPRESS=JPEG" -co "TILE_FORMAT=JPEG" -co "QUALITY=80" -co "MINZOOM=12" -co "MAXZOOM=22" -co "PHOTOMETRIC=YCBCR" --config GDAL_CACHEMAX 512 "%s" "%s"
     ''' % (format,xml,output)
     print(cmd)
     os.system(cmd)
     print(dt.now()-init)
 
 def gdalwarp(vrt,mbtiles):
+    # exemple : Découpage d'un raster à partir d'un polygon
+    # gdalwarp -overwrite -s_srs "EPSG:2154" -t_srs "EPSG:2154" -of GTiff -cutline "PG:dbname='azalee' host=91.134.194.221 port=5432 sslmode=disable user='cgeier' password='adm1n*bdCen'" -cl "ref_territoire.isere_platiere" -crop_to_cutline -multi -co "COMPRESS=JPEG" -co "JPEG_QUALITY=75" "/media/colas/Disk2/5_BDD/ZH_prob/ZH_probS_Auvergne-Rhone-Alpes/ZH_probS_region84.tif" "/media/colas/Disk2/5_BDD/ZH_prob/ZH_probS_Auvergne-Rhone-Alpes/ZH_probS_dept38.tif"
     cmd = '''
     gdalwarp -of MBTiles -s_srs epsg:2154 -t_srs epsg:3857 %s %s
     ''' % (vrt,mbtiles)
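The reworked command adds cubic resampling, drops JPEG quality from 100 to 80 and raises the GDAL block cache. Roughly the same call through the GDAL Python bindings, for comparison — paths are placeholders, and the creation options are limited to those the MBTILES driver documents:

from osgeo import gdal

gdal.Translate(
    '/tmp/ortho_38.mbtiles',   # placeholder output
    '/tmp/ortho_wms.xml',      # placeholder WMS description file
    format='MBTILES',
    width=75000, height=75000,
    projWin=[836250, 6534274, 965160, 6403454], projWinSRS='EPSG:2154',
    resampleAlg='cubic',
    creationOptions=['TILE_FORMAT=JPEG', 'QUALITY=80',
                     'MINZOOM=12', 'MAXZOOM=22'],
)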
@@ -85,11 +87,11 @@ if __name__ == "__main__":
         output=os.path.join(PATH,xml),
         type_flux='wms')
 
-    gen_mbtiles2(os.path.join(PATH,xml),os.path.join(PATH,mbtile),format='mbtiles')
+    # gen_mbtiles2(os.path.join(PATH,xml),os.path.join(PATH,mbtile),format='mbtiles')
     # gen_mbtiles2(os.path.join(PATH,xml),os.path.join(PATH,vrt),format='vrt')
     # gdalwarp(os.path.join(PATH,vrt),os.path.join(PATH,mbtile))
-    # gdaladdo(os.path.join(PATH,mbtile))
+    gdaladdo(os.path.join(PATH,mbtile))
 
     # ds = gdal.Open(os.path.join(PATH,vrt))
 
     # gdal_translate -outsize 50% 50% -projwin 631397 5672590 639669 5659275 -of MBTILES -co "COMPRESS=YES" -co "TILE_FORMAT=JPEG" -co "QUALITY=80" "/media/colas/Disk2/output.xml" "/media/colas/Disk2/ign_ortho2024_38.mbtiles"
@@ -5,19 +5,27 @@ from subprocess import run
 from os import listdir,getcwd,chdir,system
 
 
-def thread_function(name):
+def thread_func(name,table,schema):
     logging.info("Thread %s: starting", name)
     time.sleep(2)
     if not isinstance(name,list) : name = [name]
-    cmd = 'export PGPASSWORD=#CEN38@venir;raster2pgsql -s 2154 -a -t 5x5 {} ref_territoire.mnt_5m | psql -h 91.134.194.221 -U cen_admin -d azalee'.format(" ".join(name))
+    cmd = 'export PGPASSWORD=#CEN38@venir;raster2pgsql -s 2154 -c -C -I -M -t 5x5 -N 0 {file} {sch}.{tab} | psql -h 91.134.194.221 -U cen_admin -d azalee'.format(file=" ".join(name),sch=schema,tab=table)
     system(cmd)
     logging.info("Thread %s: finishing", name)
 
-def last_thread_function(name):
+def thread_function(name,table,schema):
     logging.info("Thread %s: starting", name)
     time.sleep(2)
     if not isinstance(name,list) : name = [name]
-    cmd = 'export PGPASSWORD=#CEN38@venir;raster2pgsql -s 2154 -a -C -I -M -t 5x5 {} ref_territoire.mnt_5m | psql -h 91.134.194.221 -U cen_admin -d azalee'.format(" ".join(name))
+    cmd = 'export PGPASSWORD=#CEN38@venir;raster2pgsql -s 2154 -a -t 5x5 {file} {sch}.{tab} | psql -h 91.134.194.221 -U cen_admin -d azalee'.format(file=" ".join(name),sch=schema,tab=table)
+    system(cmd)
+    logging.info("Thread %s: finishing", name)
+
+def last_thread_function(name,table,schema):
+    logging.info("Thread %s: starting", name)
+    time.sleep(2)
+    if not isinstance(name,list) : name = [name]
+    cmd = 'export PGPASSWORD=#CEN38@venir;raster2pgsql -s 2154 -a -C -I -M -t 5x5 {file} {sch}.{tab} | psql -h 91.134.194.221 -U cen_admin -d azalee'.format(file=" ".join(name),sch=schema,tab=table)
     system(cmd)
     logging.info("Thread %s: finishing", name)
 
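For orientation, the three loaders now differ only in their raster2pgsql flags (-c create table, -a append, -C add raster constraints, -I build the spatial index, -M vacuum analyze, -N nodata value). A dispatch sketch of that split — thread_func is presumably run once up front to create the table, since the loop below only uses the other two:

# Command-string shapes only; nothing is executed here.
FLAGS = {
    'thread_func':          '-s 2154 -c -C -I -M -t 5x5 -N 0',  # create + index
    'thread_function':      '-s 2154 -a -t 5x5',                # plain append
    'last_thread_function': '-s 2154 -a -C -I -M -t 5x5',       # final append + constraints
}
for fn, flags in FLAGS.items():
    print('{:22} raster2pgsql {} tile.asc ref_territoire.mnt_5m | psql ...'.format(fn, flags))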
@@ -45,9 +53,9 @@ if __name__ == "__main__":
     for file in list_asc[j:k]:
         logging.info("Main : create and start thread %s.", file)
         if file == list_asc[-1]:
-            x = threading.Thread(target=last_thread_function, args=(file,))
+            x = threading.Thread(target=last_thread_function, args=(file,'mnt_5m','ref_territoire'))
         else:
-            x = threading.Thread(target=thread_function, args=(file,))
+            x = threading.Thread(target=thread_function, args=(file,'mnt_5m','ref_territoire'))
         threads.append(x)
         x.start()
         # [t.start() for t in threads]