Skip to content

Commit

Permalink
Merge pull request #73 from IGNF/develop
Browse files Browse the repository at this point in the history
2.2.3
  • Loading branch information
azarz authored Feb 14, 2024
2 parents 95e2890 + 20d0272 commit 8bc09e5
Show file tree
Hide file tree
Showing 3 changed files with 77 additions and 41 deletions.
13 changes: 10 additions & 3 deletions changelog.md
Original file line number Diff line number Diff line change
@@ -1,15 +1,22 @@
# CHANGELOG

## x.y.z

## 2.2.3

CHANGED:
- Pivot to osm: Using batches for fetching edges in pivot DB

## 2.2.2

ADD:
- VACUUM ANALYSE is done only on created tables
- Templates for issues and PR
- Templates for issues and PR
- A code of conduct was adapted from the contributor covenant
- A contributing guide was added
- The DCO was added
- The DCO was added
- Restrict access to pedestrian ways according to BDTOPO
- Better handling of urbain column inside the BDTOPO
- Better handling of urbain column inside the BDTOPO

FIX:
- Durée de parcours incohérente sur OSRM entre car-fastest et car-shortest
Expand Down
2 changes: 1 addition & 1 deletion r2gg/__about__.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@
__uri_tracker__ = f"{__uri_repository__}issues/"
__uri__ = __uri_repository__

__version__ = "2.2.2"
__version__ = "2.2.3"
__version_info__ = tuple(
[
int(num) if num.isdigit() else num
Expand Down
103 changes: 66 additions & 37 deletions r2gg/_pivot_to_osm.py
Original file line number Diff line number Diff line change
Expand Up @@ -69,54 +69,83 @@ def pivot_to_osm(config, source, db_configs, connection, logger, output_is_pbf =
attribs = {"version": "0.6", "generator": "r2gg"}
with xf.element("osm", attribs):

# Ecriture des nodes
sql_query = getQueryByTableAndBoundingBox(f'{input_schema}.nodes', source['bbox'])
# Récupération du nombre de nodes
sql_query = f"SELECT COUNT(*) as cnt FROM {input_schema}.nodes"
logger.info("SQL: {}".format(sql_query))
st_execute = time.time()
cursor.execute(sql_query)
et_execute = time.time()
logger.info("Execution ended. Elapsed time : %s seconds." %(et_execute - st_execute))
row = cursor.fetchone()
logger.info("Writing nodes")
st_execute = time.time()
i = 1
while row:
nodeEl = writeNode(row, extraction_date)
xf.write(nodeEl, pretty_print=True)
row = cursor.fetchone()
if (i % ceil(cursor.rowcount/10) == 0):
logger.info("%s / %s nodes ajoutés" %(i, cursor.rowcount))
i += 1
et_execute = time.time()
logger.info("Writing nodes ended. Elapsed time : %s seconds." %(et_execute - st_execute))
nodesize = row["cnt"]

# Ecriture des ways
sql_query2 = getQueryByTableAndBoundingBox(f'{input_schema}.edges', source['bbox'], ['*', f'{input_schema}.inter_nodes(geom) as internodes'])
logger.info("SQL: {}".format(sql_query2))
# Ecriture des nodes
batchsize = 500000
offset = 0
logger.info(f"Writing nodes: {nodesize} ways to write")
st_nodes = time.time()
while offset < nodesize:
sql_query = getQueryByTableAndBoundingBox(f'{input_schema}.nodes', source['bbox'])
sql_query += " LIMIT {} OFFSET {}".format(batchsize, offset)
logger.info("SQL: {}".format(sql_query))
st_execute = time.time()
cursor.execute(sql_query)
et_execute = time.time()
offset += batchsize
logger.info("Execution ended. Elapsed time : %s seconds." %(et_execute - st_execute))
row = cursor.fetchone()
logger.info("Writing nodes")
st_execute = time.time()
i = 1
while row:
nodeEl = writeNode(row, extraction_date)
xf.write(nodeEl, pretty_print=True)
row = cursor.fetchone()
logger.info("%s / %s nodes ajoutés" %(offset, nodesize))
et_nodes = time.time()
logger.info("Writing nodes ended. Elapsed time : %s seconds." %(et_nodes - st_nodes))

# Récupération du nombre de ways
sql_query = f"SELECT COUNT(*) as cnt FROM {input_schema}.edges"
logger.info("SQL: {}".format(sql_query))
st_execute = time.time()
cursor.execute(sql_query2)
cursor.execute(sql_query)
et_execute = time.time()
logger.info("Execution ended. Elapsed time : %s seconds." %(et_execute - st_execute))
row = cursor.fetchone()
logger.info("Writing ways")
st_execute = time.time()
i = 1
while row:
wayEl = writeWay(row, extraction_date)
for node in row['internodes']:
vertexSequence = vertexSequence + 1
node['id'] = vertexSequence
nodeEl = writeNode(node, extraction_date)
xf.write(nodeEl, pretty_print=True)
wayEl = writeWayNds(wayEl, row, row['internodes'])
wayEl = writeWayTags(wayEl, row)
xf.write(wayEl, pretty_print=True)
row = cursor.fetchone()
if (i % ceil(cursor.rowcount/10) == 0):
logger.info("%s / %s ways ajoutés" %(i, cursor.rowcount))
i += 1
et_execute = time.time()
logger.info("Writing ways ended. Elapsed time : %s seconds." %(et_execute - st_execute))
edgesize = row["cnt"]

# Ecriture des ways
batchsize = 300000
offset = 0
logger.info(f"Writing ways: {edgesize} ways to write")
st_edges = time.time()
while offset < edgesize:
sql_query2 = getQueryByTableAndBoundingBox(f'{input_schema}.edges', source['bbox'], ['*', f'{input_schema}.inter_nodes(geom) as internodes'])
sql_query2 += " LIMIT {} OFFSET {}".format(batchsize, offset)
logger.info("SQL: {}".format(sql_query2))
st_execute = time.time()
cursor.execute(sql_query2)
et_execute = time.time()
offset += batchsize
logger.info("Execution ended. Elapsed time : %s seconds." %(et_execute - st_execute))
row = cursor.fetchone()
st_execute = time.time()
i = 1
while row:
wayEl = writeWay(row, extraction_date)
for node in row['internodes']:
vertexSequence = vertexSequence + 1
node['id'] = vertexSequence
nodeEl = writeNode(node, extraction_date)
xf.write(nodeEl, pretty_print=True)
wayEl = writeWayNds(wayEl, row, row['internodes'])
wayEl = writeWayTags(wayEl, row)
xf.write(wayEl, pretty_print=True)
row = cursor.fetchone()
logger.info("%s / %s ways ajoutés" %(offset, edgesize))
et_edges = time.time()
logger.info("Writing ways ended. Elapsed time : %s seconds." %(et_edges - st_edges))

# Ecriture des restrictions
sql_query3 = f"select * from {input_schema}.non_comm"
Expand Down

0 comments on commit 8bc09e5

Please sign in to comment.