Skip to content

Commit

Permalink
Refactoring BaseWorkflow. Creating a GUI interface
Browse files Browse the repository at this point in the history
  • Loading branch information
YuXHe15 committed Aug 2, 2023
1 parent 5c5895e commit 1fdd6db
Show file tree
Hide file tree
Showing 27 changed files with 444 additions and 99 deletions.
131 changes: 131 additions & 0 deletions amworkflow/src/core/_workflow.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,131 @@
#self dependencies
from amworkflow.src.interface.cli.cli_workflow import cli
from amworkflow.src.constants.data_model import DeepMapParamModel
from amworkflow.src.constants.enums import Label as L
from amworkflow.src.utils.parser import geom_param_parser
from amworkflow.src.utils.sanity_check import path_valid_check, dimension_check
from amworkflow.src.constants.exceptions import NoDataInDatabaseException, InsufficientDataException
from amworkflow.src.interface.api import amWorkflow as aw
#pip lib dependencies
import numpy as np
#built-in dependencies
import copy

class BaseWorkflow(object):
def __init__(self, args):
self.args = args
self.geometry_spawn: callable
self.geom_param_handler()
self.indicator = task_handler(self.args)
print(self.indicator)
self.data_init()
def sequence(self):
pass

def create(self):
pass

def process_geometry(self):
pass

def data_init(self):
match self.indicator[0]:
case 1: #read the imported file stored in the database and convert it to an OCC representation.
md5 = aw.tool.get_md5(self.args.import_dir)
impt_filename = aw.db.query_data("ImportedFile", by_name=md5, column_name="md5_id", only_for_column="filename")
mdl_name = aw.db.query_data("ModelProfile", by_name=md5, column_name="imported_file_id", only_for_column="model_name")
print(mdl_name)
print(impt_filename)
impt_format = path_valid_check(self.args.import_dir, format=["stp", "step","stl","STL"])
if impt_format in ["stl","STL"]:
self.import_fl = aw.tool.read_stl(self.args.import_file_dir + "/" + impt_filename)
else:
self.import_fl = aw.tool.read_step(self.args.import_file_dir + "/" + impt_filename)
match self.indicator[1]:
case 1: # Import file, convert the file to an OCC representation and create a new profile for imported file.
if impt_format in ["stl","STL"]:
self.import_fl = aw.tool.read_stl(self.args.import_dir)
else:
self.import_fl = aw.tool.read_step(self.args.import_dir)

case 2: # remove selected profile and stp file, then quit
#TODO remove the step file and and info in db.
aw.db.delete_data("ImportedFile", md5)
case 2: # yaml file provided
pass

case 0: # model_name provided
match self.indicator[1]:
case 2:
#remove certain model profile from database
query_mdl_pfl = aw.db.query_data("ModelProfile", by_name=self.args.name, column_name="model_name")
q_md5 = query_mdl_pfl.imported_file_id[0]
if q_md5 != None:
aw.db.delete_data("ImportedFile", prim_ky=q_md5)
else:
aw.db.delete_data("ModelProfile", prim_ky=self.args.name)
case 0: # do nothing, fill data into the loaded model.
pass
case 3: # Create a new model profile with given parameters.
aw.db.insert_data("ModelProfile", {"model_name": self.args.name})
if self.args.geom_param != None:
input = {"model_name": self.args.name}
collect = []
for param in self.args.geom_param:
print(param)
input.update({"param_name": param})
collect.append(copy.copy(input))
aw.db.insert_data("ModelParameter", collect, True)
case 3: # Draft mode.
pass

def geom_param_handler(self):
if self.args.geom_param != None:
data = geom_param_parser(self.args)
data.update({L.MESH_PARAM.value: {
L.MESH_SIZE_FACTOR:self.args.mesh_size_factor,
L.LYR_NUM.value: self.args.mesh_by_layer,
L.LYR_TKN.value: self.args.mesh_by_layer}})
data.update({L.STL_PARAM.value:{
L.LNR_DFT.value: self.args.stl_linear_deflect,
L.ANG_DFT.value: self.args.stl_angular_deflect
}})



def task_handler(args):
indicator = (0,0)
match args.mode:
case "draft":
indicator = (3,0)
case "production":
if args.name != None:
result = aw.db.query_data("ModelProfile", by_name=args.name, column_name=L.MDL_NAME.value, only_for_column=L.MDL_NAME.value)
if args.name in result:
indicator = (0,0)
if args.edit:
indicator = (0,1)
elif args.remove:
indicator = (0,2)
elif args.import_dir != None:
impt_format = path_valid_check(args.import_dir, format=["stp", "step","stl","STL"])
impt_filename = aw.tool.get_filename(args.import_dir)
md5 = aw.tool.get_md5(args.import_dir)
result = aw.db.query_data("ImportedFile", by_name=md5, column_name="md5_id")
if not result.empty:
indicator = (1,0)
if args.remove:
indicator = (1,2)
else:
aw.db.insert_data("ImportedFile",{"filename": impt_filename, "md5_id": md5})
aw.db.insert_data("ModelProfile", {"model_name": args.name,"imported_file_id": md5})
aw.tool.upload(args.import_dir, args.import_file_dir)
indicator = (1,1)
elif args.yaml_dir != None:
path_valid_check(args.yaml_dir, format=["yml", "yaml"])
indicator = (2,0)
else:
indicator = (0,3)
else:
raise InsufficientDataException()
return indicator
32 changes: 13 additions & 19 deletions amworkflow/src/core/workflow.py
Original file line number Diff line number Diff line change
Expand Up @@ -66,30 +66,24 @@ def __init__(self, args):
def data_init(self):
match self.indicator[0]:
case 1: #read the step file stored in the database and convert it to an OCC representation.

self.linear_deflect = self.raw_args.stl_linear_deflect
self.angular_deflect= self.raw_args.stl_angular_deflect
self.mesh_t = self.raw_args.mesh_by_thickness
self.mesh_n = self.raw_args.mesh_by_layer
self.mesh_s = self.raw_args.mesh_size_factor
self.isbatch = self.parsed_args[L.BATCH_PARAM.value][L.IS_BATCH.value]
match self.impt_format.lower():
impt_format = path_valid_check(self.raw_args.import_dir, format=["stp", "step","stl","STL"])
impt_filename = get_filename(self.raw_args.import_dir)
match impt_format.lower():
case "stl":
self.import_model = stl_reader(path=self.import_dir)
case "stp":
self.import_model = step_reader(path=self.import_dir)
case "step":
self.import_model = step_reader(path=D.DATABASE_OUTPUT_FILE_PATH.value+self.impt_filename)
self.import_model = step_reader(path=D.DATABASE_OUTPUT_FILE_PATH.value+impt_filename)
match self.indicator[1]:
case 1: # Import stp file, convert the file to an OCC representation and create a new profile for imported stp file.
match self.impt_format.lower():
case "stl":
self.import_model = stl_reader(path=D.DATABASE_OUTPUT_FILE_PATH.value+self.impt_filename)
case "stp":
self.import_model = step_reader(path=D.DATABASE_OUTPUT_FILE_PATH.value+self.impt_filename)
case "tep":
self.import_model = step_reader(path=D.DATABASE_OUTPUT_FILE_PATH.value+self.impt_filename)
self.db_data_collection[L.MDL_PROF.value] = {L.MDL_NAME.value: self.impt_filename}
case 1: # Import file, convert the file to an OCC representation and create a new profile for imported file.
self.db_data_collection[L.MDL_PROF.value] = {L.MDL_NAME.value: impt_filename}
# self.batch_data_convert(self.parsed_args[L.GEOM_PARAM.value])
case 2: # remove selected profile and stp file, then quit
#TODO remove the step file and info in db.
Expand Down Expand Up @@ -326,9 +320,9 @@ def create_database_engine(self):
pass

def task_handler(args):
if args.model_name != None:
result = query_multi_data(ModelProfile, by_name=args.model_name, column_name=L.MDL_NAME.value, target_column_name=L.MDL_NAME.value)
if args.model_name in result:
if args.name != None:
result = query_multi_data(ModelProfile, by_name=args.name, column_name=L.MDL_NAME.value, target_column_name=L.MDL_NAME.value)
if args.name in result:
indicator = (0,0)
if args.edit:
indicator = (0,1)
Expand All @@ -341,10 +335,10 @@ def task_handler(args):
indicator = (0,4)
else:
if args.import_dir != None:
args.impt_format = path_valid_check(args.import_dir, format=["stp", "step","stl","STL"])
args.impt_filename = get_filename(args.import_dir)
result = query_multi_data(ModelProfile, by_name=args.impt_filename, column_name=L.MDL_NAME.value, target_column_name=L.MDL_NAME.value)
if args.impt_filename in result:
impt_format = path_valid_check(args.import_dir, format=["stp", "step","stl","STL"])
impt_filename = get_filename(args.import_dir)
result = query_multi_data(ModelProfile, by_name=impt_filename, column_name=L.MDL_NAME.value, target_column_name=L.MDL_NAME.value)
if impt_filename in result:
indicator = (1,0)
if args.remove:
indicator = (1,2)
Expand Down
62 changes: 42 additions & 20 deletions amworkflow/src/infrastructure/database/cruds/crud.py
Original file line number Diff line number Diff line change
@@ -1,14 +1,14 @@
from uuid import uuid4
from amworkflow.src.infrastructure.database.models.model import Base
from amworkflow.src.infrastructure.database.engine.engine import session
from amworkflow.src.infrastructure.database.models.model import db_list
from sqlalchemy import insert
from sqlalchemy.sql.expression import select
import pandas as pd


def insert_data(table: callable,
def insert_data(table: str,
data: dict,
isbatch: bool) -> None:
from amworkflow.src.infrastructure.database.engine.engine import session
session.new
table = db_list[table]
try:
if not isbatch:
transaction = table(**data)
Expand All @@ -21,47 +21,68 @@ def insert_data(table: callable,
transcation_rollback()
session.commit()

def _query_data(table: callable,
by_hash: str):
def query_data_object(table: str,
by_name: str,
column_name: str):
from amworkflow.src.infrastructure.database.engine.engine import session
session.new
exec_result = session.execute(select(table).filter_by(stl_hashname = by_hash)).scalar_one()
if type(table) is str:
table = db_list[table]
column = getattr(table, column_name)
exec_result = session.execute(select(table).filter(column == by_name)).scalar_one()
return exec_result

def query_multi_data(table: callable,
def query_multi_data(table: str,
by_name: str = None,
column_name: str = None,
target_column_name: str = None):
target_column_name: str = None,):
from amworkflow.src.infrastructure.database.engine.engine import session
session.new
table = db_list[table]
if by_name != None:
column = getattr(table, column_name)
result = [i.__dict__ for i in session.query(table).filter(column == by_name).all()]
if target_column_name != None:
result = [i.__dict__[target_column_name] for i in session.query(table).filter(column == by_name).all()]
return result
else:
result = [i.__dict__ for i in session.query(table).filter(column == by_name).all()]
for dd in result:
dd.pop("_sa_instance_state", None)

else:
exec_result = session.execute(select(table))
return exec_result.all()
exec_result = session.execute(select(table)).all()
result = [i[0].__dict__ for i in exec_result]
for dd in result:
dd.pop("_sa_instance_state", None)
if target_column_name == None:
result = pd.DataFrame(result)
elif len(result) != 0:
result = result[0]
return result

def update_data(table: callable,
by_hash: str | list,
def update_data(table: str,
by_name: str | list,
target_column: str,
new_value: int | str | float | bool,
isbatch: bool) -> None:
from amworkflow.src.infrastructure.database.engine.engine import session
session.new
table = db_list[table]
if not isbatch:
transaction = _query_data(table, by_hash)
transaction = query_data_object(table, by_name, column_name= target_column )
setattr(transaction, target_column, new_value)
else:
for hash in by_hash:
transaction = _query_data(table, hash)
for name in by_name:
transaction = query_data_object(table, name, target_column)
setattr(transaction, target_column, new_value)
session.commit()

def delete_data(table: callable,
def delete_data(table: str,
by_primary_key: str | list,
isbatch: bool,
) -> None:
from amworkflow.src.infrastructure.database.engine.engine import session
session.new
table = db_list[table]
if not isbatch:
transaction = session.get(table, by_primary_key)
session.delete(transaction)
Expand All @@ -72,4 +93,5 @@ def delete_data(table: callable,
session.commit()

def transcation_rollback():
from amworkflow.src.infrastructure.database.engine.engine import session
session.rollback()
2 changes: 1 addition & 1 deletion amworkflow/src/infrastructure/database/engine/engine.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,6 @@
from sqlalchemy import create_engine
from sqlalchemy.orm import Session
from amworkflow.src.infrastructure.database.engine.config import DB_DIR
engine = create_engine("sqlite+pysqlite:////" + DB_DIR + r'/amworkflow.db', echo=True)
engine = create_engine("sqlite+pysqlite:////" + DB_DIR + r'/amworkflow.db') #echo = True for getting logging
Base.metadata.create_all(engine)
session = Session(engine)
Empty file.
Empty file.
Empty file.
26 changes: 20 additions & 6 deletions amworkflow/src/infrastructure/database/models/model.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,16 @@
import sys
import inspect
from typing import List
from typing import Optional
from sqlalchemy import ForeignKey
from sqlalchemy import String
from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column, relationship
from amworkflow.src.constants.enums import Timestamp as T
from datetime import datetime

current_module = sys.modules[__name__]
db_list = {}

class Base(DeclarativeBase):
pass

Expand Down Expand Up @@ -88,13 +94,14 @@ class ModelProfile(Base):
created_date: Mapped[datetime] = mapped_column(nullable=False, default=datetime.now)
ModelParameter = relationship("ModelParameter", cascade="all, delete", back_populates="ModelProfile")
GeometryFile = relationship("GeometryFile", cascade="all, delete", back_populates="ModelProfile")
imported_file_id: Mapped[str] = mapped_column(ForeignKey('ImportedFile.md5_id', ondelete="CASCADE"), nullable=True)
ImportedFile = relationship("ImportedFile", back_populates="ModelProfile")

class ModelParameter(Base):
__tablename__ = "ModelParameter"
param_name: Mapped[str] = mapped_column(nullable=False, primary_key=True)
model_name: Mapped[str] = mapped_column(ForeignKey('ModelProfile.model_name', ondelete="CASCADE"))
ModelProfile = relationship("ModelProfile", back_populates="ModelParameter")
param_type: Mapped[str] = mapped_column(ForeignKey('ParameterType.type_name', ondelete="CASCADE"))

class ParameterValue(Base):
__tablename__ = "ParameterValue"
Expand All @@ -103,9 +110,9 @@ class ParameterValue(Base):
geom_hashname: Mapped[str] = mapped_column(ForeignKey('GeometryFile.geom_hashname', ondelete="CASCADE"))
param_value: Mapped[float] = mapped_column(nullable=True)

class ParameterType(Base):
__tablename__ = "ParameterType"
type_name: Mapped[str] = mapped_column(nullable=False, primary_key=True)
# class ParameterType(Base):
# __tablename__ = "ParameterType"
# type_name: Mapped[str] = mapped_column(nullable=False, primary_key=True)

class IterationParameter(Base):
__tablename__ = "IterationParameter"
Expand All @@ -115,6 +122,13 @@ class IterationParameter(Base):
parameter_name: Mapped[str] = mapped_column(ForeignKey('ModelParameter.param_name', ondelete="CASCADE"))
iter_hashname: Mapped[str] = mapped_column(String(32), nullable=False, primary_key=True)



class ImportedFile(Base):
__tablename__ = "ImportedFile"
filename: Mapped[str] = mapped_column(nullable=False)
md5_id: Mapped[str] = mapped_column(nullable=False, primary_key=True)
ModelProfile = relationship("ModelProfile", cascade="all, delete", back_populates="ImportedFile")

for name, obj in inspect.getmembers(current_module):
if inspect.isclass(obj):
db_list.update({name: obj})

Empty file.
Loading

0 comments on commit 1fdd6db

Please sign in to comment.