List of Publications
Georgiou, Yiangos; Averkiou, Melinos; Kelly, Tom; Kalogerakis, Evangelos: Projective Urban Texturing. Inproceedings. In: International Conference on 3D Vision (3DV), 2021.
Deligiorgi, Marissia; Maslioukova, Maria I; Averkiou, Melinos; Andreou, Andreas C; Selvaraju, Pratheba; Kalogerakis, Evangelos; Patow, Gustavo; Chrysanthou, Yiorgos; Artopoulos, George: A 3D digitisation workflow for architecture-specific annotation of built heritage. Journal Article. In: Journal of Archaeological Science: Reports, vol. 37, pp. 102787, 2021.
Selvaraju, Pratheba; Nabail, Mohamed; Loizou, Marios; Maslioukova, Maria; Averkiou, Melinos; Andreou, Andreas; Chaudhuri, Siddhartha; Kalogerakis, Evangelos: BuildingNet: Learning to Label 3D Buildings. Inproceedings. In: IEEE/CVF International Conference on Computer Vision (ICCV), 2021.
Loizou, Marios; Averkiou, Melinos; Kalogerakis, Evangelos: Learning Part Boundaries from 3D Point Clouds. Journal Article. In: Computer Graphics Forum (Proc. SGP), vol. 39, iss. 5, 2020.

2021
@inproceedings{PUT2021,
title = {Projective Urban Texturing},
author = {Yiangos Georgiou and Melinos Averkiou and Tom Kelly and Evangelos Kalogerakis},
year = {2021},
date = {2021-12-01},
urldate = {2021-12-01},
booktitle = {International Conference on 3D Vision (3DV)},
abstract = {This paper proposes a method for automatic generation of textures for 3D city meshes in immersive urban environments. Many recent pipelines capture or synthesize large quantities of city geometry using scanners or procedural modeling pipelines. Such geometry is intricate and realistic; however, the generation of photo-realistic textures for such large scenes remains a problem. We propose to generate textures for input target 3D meshes driven by the textural style present in readily available datasets of panoramic photos capturing urban environments. Re-targeting such 2D datasets to 3D geometry is challenging because the underlying shape, size, and layout of the urban structures in the photos do not correspond to the ones in the target meshes. Photos also often contain objects (e.g., trees, vehicles) that may not even be present in the target geometry. To address these issues we present a method, called Projective Urban Texturing (PUT), which re-targets textural style from real-world panoramic images to unseen urban meshes. PUT relies on contrastive and adversarial training of a neural architecture designed for unpaired image-to-texture translation. The generated textures are stored in a texture atlas applied to the target 3D mesh geometry. To promote texture consistency, PUT employs an iterative procedure in which texture synthesis is conditioned on previously generated, adjacent textures. We demonstrate both quantitative and qualitative evaluation of the generated textures.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
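The iterative, neighbour-conditioned synthesis loop described in the PUT abstract can be sketched roughly as follows. This is not the authors' code: the generator's (style, left, top) interface, the fixed tile size, and the raster-order traversal of the atlas are all assumptions made for illustration.

# Hypothetical sketch of PUT-style iterative texture-atlas synthesis.
# `generator` stands in for a trained unpaired image-to-texture
# translation network; its call signature is an assumption.
import torch

def synthesize_atlas(generator, panorama_crops, grid_h, grid_w, tile=256):
    """Fill a texture atlas tile by tile, conditioning each tile on
    already-generated neighbours (left and top) for consistency."""
    atlas = torch.zeros(3, grid_h * tile, grid_w * tile)
    for i in range(grid_h):
        for j in range(grid_w):
            # Gather previously synthesized adjacent tiles as context.
            left = atlas[:, i*tile:(i+1)*tile, (j-1)*tile:j*tile] if j > 0 else None
            top = atlas[:, (i-1)*tile:i*tile, j*tile:(j+1)*tile] if i > 0 else None
            style = panorama_crops[(i * grid_w + j) % len(panorama_crops)]
            with torch.no_grad():
                out = generator(style, left=left, top=top)  # assumed signature
            atlas[:, i*tile:(i+1)*tile, j*tile:(j+1)*tile] = out
    return atlas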
@article{Deligiorgi2021,
title = {A 3D digitisation workflow for architecture-specific annotation of built heritage},
author = {Marissia Deligiorgi and Maria I Maslioukova and Melinos Averkiou and Andreas C Andreou and Pratheba Selvaraju and Evangelos Kalogerakis and Gustavo Patow and Yiorgos Chrysanthou and George Artopoulos},
year = {2021},
date = {2021-06-01},
urldate = {2022-06-01},
journal = {Journal of Archaeological Science: Reports},
volume = {37},
pages = {102787},
abstract = {Contemporary discourse points to the central role that heritage plays in the process of enabling groups of various cultural or ethnic background to strengthen their feeling of belonging and sharing in society. Safeguarding heritage is also valued highly in the priorities of the European Commission. As a result, there have been several long-term initiatives involving the digitisation, annotation and cataloguing of tangible cultural heritage in museums and collections. Specifically, for built heritage, a pressing challenge is that historical monuments such as buildings, temples, churches or city fortification infrastructures are hard to document due to their historic palimpsest: spatial transformations, actions of destruction, reuse of material, or continuous urban development that covers traces and changes the formal integrity and identity of a cultural heritage site. The ability to reason about a monument’s form is crucial for efficient documentation and cataloguing. This paper presents a 3D digitisation workflow through the involvement of reality capture technologies for the annotation and structure analysis of built heritage with the use of 3D Convolutional Neural Networks (3D CNNs) for classification purposes. The presented workflow contributes a new approach to the identification of a building’s architectural components (e.g., arch, dome) and to the study of the stylistic influences (e.g., Gothic, Byzantine) of building parts. In doing so this workflow can assist in tracking a building’s history, identifying its construction period and comparing it to other buildings of the same period. This process can contribute to educational and research activities, as well as facilitate the automated classification of datasets in digital repositories for scholarly research in digital humanities.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
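For a sense of what the 3D CNN classification step in this workflow might look like, here is a minimal sketch. The layer sizes, the 32^3 occupancy-grid resolution, and the class count are assumptions, not the paper's architecture.

# Hypothetical 3D CNN for classifying voxelized building components
# (e.g., arch vs. dome) or stylistic influences (e.g., Gothic vs.
# Byzantine). All hyperparameters below are illustrative guesses.
import torch
import torch.nn as nn

class PartClassifier3D(nn.Module):
    def __init__(self, num_classes):
        super().__init__()
        self.features = nn.Sequential(
            nn.Conv3d(1, 16, 3, padding=1), nn.ReLU(), nn.MaxPool3d(2),   # 32 -> 16
            nn.Conv3d(16, 32, 3, padding=1), nn.ReLU(), nn.MaxPool3d(2),  # 16 -> 8
            nn.Conv3d(32, 64, 3, padding=1), nn.ReLU(), nn.MaxPool3d(2),  # 8 -> 4
        )
        self.head = nn.Linear(64 * 4 * 4 * 4, num_classes)

    def forward(self, voxels):  # voxels: (B, 1, 32, 32, 32) occupancy grid
        x = self.features(voxels)
        return self.head(x.flatten(1))

# Example: classify two voxelized parts into 10 hypothetical classes.
logits = PartClassifier3D(num_classes=10)(torch.rand(2, 1, 32, 32, 32))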
@inproceedings{Selvaraju:2021:BuildingNet,
title = {BuildingNet: Learning to Label 3D Buildings},
author = {Pratheba Selvaraju and Mohamed Nabail and Marios Loizou and Maria Maslioukova and Melinos Averkiou and Andreas Andreou and Siddhartha Chaudhuri and Evangelos Kalogerakis},
year = {2021},
date = {2021-01-01},
urldate = {2021-01-01},
booktitle = {IEEE/CVF International Conference on Computer Vision (ICCV)},
abstract = {We introduce BuildingNet: (a) a large-scale dataset of 3D building models whose exteriors are consistently labeled, and (b) a graph neural network that labels building meshes by analyzing spatial and structural relations of their geometric primitives. To create our dataset, we used crowdsourcing combined with expert guidance, resulting in 513K annotated mesh primitives, grouped into 292K semantic part components across 2K building models. The dataset covers several building categories, such as houses, churches, skyscrapers, town halls, libraries, and castles. We include a benchmark for evaluating mesh and point cloud labeling. Buildings have more challenging structural complexity compared to objects in existing benchmarks (e.g., ShapeNet, PartNet), thus, we hope that our dataset can nurture the development of algorithms that are able to cope with such large-scale geometric data for both vision and graphics tasks e.g., 3D semantic segmentation, part-based generative models, correspondences, texturing, and analysis of point cloud data acquired from real-world buildings. Finally, we show that our mesh-based graph neural network significantly improves performance over several baselines for labeling 3D meshes.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
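In the spirit of BuildingNet's mesh-labeling network, which passes information between geometric primitives along spatial and structural relations, a single message-passing layer can be sketched as below. The feature dimension, the mean-aggregation rule, and the two-MLP update are assumptions; the paper's actual GNN may differ.

# Hypothetical message-passing layer over a graph whose nodes are mesh
# primitives and whose edges encode spatial/structural relations.
import torch
import torch.nn as nn

class PrimitiveGNNLayer(nn.Module):
    def __init__(self, dim):
        super().__init__()
        self.msg = nn.Linear(2 * dim, dim)  # message from a (src, dst) node pair
        self.upd = nn.Linear(2 * dim, dim)  # update from (node, aggregated msgs)

    def forward(self, x, edges):
        # x: (N, dim) per-primitive features; edges: (E, 2) long tensor of (src, dst)
        src, dst = edges[:, 0], edges[:, 1]
        m = torch.relu(self.msg(torch.cat([x[src], x[dst]], dim=-1)))
        agg = torch.zeros_like(x).index_add_(0, dst, m)
        deg = torch.zeros(x.size(0), 1).index_add_(0, dst, torch.ones(len(dst), 1))
        agg = agg / deg.clamp(min=1)  # mean over incoming messages
        return torch.relu(self.upd(torch.cat([x, agg], dim=-1)))

A per-node linear classifier stacked on a few such layers would then predict a part label (e.g., window, roof, door) for each primitive.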
2020
@article{PartBoundaries2020,
title = {Learning Part Boundaries from 3D Point Clouds},
author = {Marios Loizou and Melinos Averkiou and Evangelos Kalogerakis},
year = {2020},
date = {2020-01-01},
urldate = {2020-01-01},
journal = {Computer Graphics Forum (Proc. SGP)},
volume = {39},
issue = {5},
abstract = {We present a method that detects boundaries of parts in 3D shapes represented as point clouds. Our method is based on a
graph convolutional network architecture that outputs a probability for a point to lie in an area that separates two or more
parts in a 3D shape. Our boundary detector is quite generic: it can be trained to localize boundaries of semantic parts or
geometric primitives commonly used in 3D modeling. Our experiments demonstrate that our method can extract more accurate
boundaries that are closer to ground-truth ones compared to alternatives. We also demonstrate an application of our network
to fine-grained semantic shape segmentation, where we also show improvements in terms of part labeling performance.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
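The output described in this abstract, a per-point probability of lying on a part boundary, suggests a small classification head on top of per-point features. The sketch below assumes such features come from some graph convolutional backbone (abstracted away here); the hidden width and the binary cross-entropy training objective are assumptions, not the paper's exact setup.

# Hypothetical per-point boundary-probability head. Any point-based
# graph convolutional encoder producing (N, feat_dim) features would
# slot in as the backbone.
import torch
import torch.nn as nn

class BoundaryHead(nn.Module):
    def __init__(self, feat_dim):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(feat_dim, 64), nn.ReLU(),
            nn.Linear(64, 1),
        )

    def forward(self, point_feats):  # (N, feat_dim) per-point features
        return torch.sigmoid(self.mlp(point_feats)).squeeze(-1)  # (N,) probabilities

# Training would minimize binary cross-entropy against ground-truth
# boundary annotations, e.g.:
#   loss = nn.functional.binary_cross_entropy(probs, boundary_labels.float())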