I am trying to:
- Draw a THREE.PointCloud object with approx. 150k points, where the points are sent from a web application.
- Scale the points in the THREE.PointCloud object to achieve a result similar to this (rendered using MayaVi):
The problem is that:
- Data passed to the THREE.PointCloud object seems to be inaccurate.
- When rendered in three.js, the points are arranged into eight cubes, for unknown reasons (I'm not applying any scaling or transformations to the points).
Example server response (I have included sample data at the bottom of this post):
{'geometry': [[-156, 65, 89],
[268, 84, 337],
[-205, 68, 170],
[-87, 69, 52],
...
[289, 81, 143],
[141, 78, 280],
[403, 75, 351]],
'metadata': {'max': {'x': 421, 'y': 105, 'z': 458},
'min': {'x': -335, 'y': 63, 'z': 39}}}
The three.js code used to create the point cloud:
var container;
var scene, camera, renderer, controls;
var geometry, material, mesh;
init();
animate();
function init() {
scene = new THREE.Scene();
camera = new THREE.PerspectiveCamera(27, window.innerWidth / window.innerHeight, 5, 5000);
camera.position.z = 2750;
//Add a buffer geometry for particle system
var geometry = new THREE.BufferGeometry();
var particles = {{ len(topology['geometry']) }};
var positions = new Float32Array(particles * 3);
var colors = new Float32Array(particles * 3);
var color = new THREE.Color();
var i = 0;
{% for point in topology['geometry'] %}
var x = {{ point[0] }};
var y = {{ point[1] }};
var z = {{ point[2] }};
//Store the position of the point
positions[i] = x;
positions[i + 1] = y;
positions[i + 2] = z;
//Assign a colour to the point
color.setRGB(0.42, 0.42, 0.42);
colors[i] = color.r;
colors[i + 1] = color.g;
colors[i + 2] = color.b;
i += 3; //Advance by three array slots per point (x, y, z)
{% end %}
geometry.addAttribute('position', new THREE.BufferAttribute(positions, 3));
geometry.addAttribute('color', new THREE.BufferAttribute(colors, 3));
geometry.computeBoundingSphere();
var material = new THREE.PointCloudMaterial({ size: 15, vertexColors: THREE.VertexColors });
particleSystem = new THREE.PointCloud(geometry, material);
scene.add(particleSystem);
//Lights
light = new THREE.DirectionalLight(0xffffff);
light.position.set(1, 1, 1);
scene.add(light);
//Set up renderer
renderer = new THREE.WebGLRenderer({ antialias:false });
renderer.setSize(window.innerWidth, window.innerHeight);
renderer.setPixelRatio(window.devicePixelRatio);
//Attach renderer to #container DOM element
container = document.getElementById('container');
container.appendChild(renderer.domElement);
//Add window listener for resize events
window.addEventListener('resize', onWindowResize, false);
}
function onWindowResize(){
camera.aspect = window.innerWidth / window.innerHeight;
camera.updateProjectionMatrix();
renderer.setSize(window.innerWidth, window.innerHeight);
render();
}
function animate() {
requestAnimationFrame(animate);
renderer.render(scene, camera);
}
function render(){
renderer.render(scene, camera);
}
The scene ends up looking like this:
Any suggestions? I've used the following example code, but I'm having difficulty properly implementing scaling for the points in my dataset: http://threejs.org/examples/#webgl_buffergeometry_particles
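For reference, this is roughly the kind of scaling I have in mind, using the min/max values from the response metadata (a sketch; targetSize and the variable names are just illustrative):
var min = { x: -335, y: 63, z: 39 };   //from the response metadata
var max = { x: 421, y: 105, z: 458 };
var center = new THREE.Vector3((min.x + max.x) / 2,
                               (min.y + max.y) / 2,
                               (min.z + max.z) / 2);
var extent = Math.max(max.x - min.x, max.y - min.y, max.z - min.z);
var targetSize = 1000;                 //illustrative target extent
var scale = targetSize / extent;
//Scale the cloud uniformly and move its centre to the origin
particleSystem.scale.set(scale, scale, scale);
particleSystem.position.set(-center.x * scale, -center.y * scale, -center.z * scale);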
Link to a sample of data that I am working with (2MB, 180k lines): https://gist.githubusercontent.com/TylerJFisher/659e3e233f8aa458feee/raw/889c0dd0093fd0476094af48488aab62c8666271/topology.asc
I used your sample data and put it in an array, along these lines:
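//The name `data` is illustrative; the values are the first rows of the sample data
var data = [
    [-156, 65, 89],
    [268, 84, 337],
    [-205, 68, 170],
    [-87, 69, 52],
    //...remaining points from topology.asc
];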
and used THREE.Geometry() for the PointCloud, roughly like this:
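//A sketch of the THREE.Geometry() approach (uses the data array from above;
//material and pointCloud are illustrative names)
var geometry = new THREE.Geometry();
for (var i = 0; i < data.length; i++) {
    var point = data[i];
    //Swap x and z when building each vertex (see the note below)
    geometry.vertices.push(new THREE.Vector3(point[2], point[1], point[0]));
}
var material = new THREE.PointCloudMaterial({ size: 15 });
var pointCloud = new THREE.PointCloud(geometry, material);
scene.add(pointCloud);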
Also, in geodata the x and y coordinates are always swapped (in this case it's x and z). If you don't swap them, you get a mirrored object. That's why I push each vertex as new THREE.Vector3(point[2], point[1], point[0]) instead of new THREE.Vector3(point[0], point[1], point[2]).
The result is here: geodata
And yes, some lines in your sample data seem incorrect: they have 1 or 2 values instead of 3.
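One way to skip those malformed rows when parsing the file (a sketch; it assumes each line of topology.asc holds whitespace-separated numbers and that the file contents are already in a string named text):
var points = text.split('\n')
    .map(function (line) { return line.trim().split(/\s+/).map(Number); })
    .filter(function (row) { return row.length === 3 && row.every(isFinite); });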