Mirror of https://github.com/publiclab/mapknitter.git (synced 2025-12-11 18:59:59 +01:00)

Commit: basic rebase plus bower rework

.gitignore (vendored): 1 line changed
@@ -38,3 +38,4 @@ app/assets/node_modules
 public/lib
 node_modules
 todo.txt
+.sass-cache

(deleted file)
@@ -1,11 +0,0 @@
-<VirtualHost cartagen.org:80>
-DocumentRoot "/home/warren/sites/cartagen.org"
-ServerName cartagen.org
-ProxyPass / http://127.0.0.1:3003/
-ProxyPassReverse / http://127.0.0.1:3003/
-ProxyPass /api/0.6/geohash/ http://127.0.0.1:3003/api/0.6/geohash/
-ProxyPassReverse /api/0.6/geohash/ http://127.0.0.1:3003/api/0.6/geohash/
-ProxyPass /api/ http://127.0.0.1:3005/api/0.6/
-ProxyPassReverse /api/ http://127.0.0.1:3005/api/0.6/
-</VirtualHost>
-

Gemfile: 5 lines changed
@@ -3,6 +3,8 @@ source "https://rubygems.org"
 ruby "2.1.2"
 gem "rails", "~>3.2"
 
+gem "will_paginate", "3.0.7"
+
 # dependencies
 group :dependencies do
 gem "mysql", "2.9.1"
@@ -10,12 +12,11 @@ group :dependencies do
 gem "geokit-rails", "1.1.4"
 gem "image_science", "1.2.6"
 gem "recaptcha", "0.3.6", :require => "recaptcha/rails"
-gem "will_paginate", "3.0.7"
 gem "oa-openid", "0.3.2"
 gem "ruby-openid", "~>2.5"
 gem 'open_id_authentication'
 gem "RubyInline"
-gem "paperclip", "4.2.0"
+gem "paperclip", "~>4.2.0"
 
 # if you use amazon s3 for warpable image storage
 gem 'aws-sdk', '~> 1.5.7'
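
The paperclip requirement above moves from an exact pin to a pessimistic ("~>") constraint. A minimal illustration of the difference, using hypothetical Gemfile lines rather than anything else from this commit:

    # Exact pin: only version 4.2.0 ever satisfies the requirement.
    gem "paperclip", "4.2.0"

    # Pessimistic constraint: any 4.2.x patch release (4.2.0, 4.2.1, ...) satisfies it,
    # but 4.3.0 does not, which is why Gemfile.lock below can resolve to paperclip 4.2.1.
    gem "paperclip", "~> 4.2.0"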

Gemfile.lock: 210 lines changed
@@ -32,7 +32,7 @@ GEM
 i18n (~> 0.6, >= 0.6.4)
 multi_json (~> 1.0)
 arel (3.0.3)
-autoprefixer-rails (4.0.2.1)
+autoprefixer-rails (4.0.2.2)
 execjs
 aws-sdk (1.5.8)
 httparty (~> 0.7)
@@ -55,7 +55,7 @@ GEM
 httparty (0.13.3)
 json (~> 1.8)
 multi_xml (>= 0.5.2)
-i18n (0.6.11)
+i18n (0.7.0)
 image_science (1.2.6)
 RubyInline (~> 3.9)
 journey (1.0.4)
@@ -69,7 +69,7 @@ GEM
 mime-types (~> 1.16)
 treetop (~> 1.4.8)
 mime-types (1.25.1)
-mini_portile (0.6.1)
+mini_portile (0.6.2)
 multi_json (1.10.1)
 multi_xml (0.5.5)
 mysql (2.9.1)
@@ -83,16 +83,12 @@ GEM
 ruby-openid-apps-discovery (~> 1.2.0)
 open_id_authentication (1.2.0)
 rack-openid (~> 1.3)
-<<<<<<< HEAD
-paperclip (4.2.0)
-=======
 paperclip (4.2.1)
->>>>>>> checking in Gemfile.lock
 activemodel (>= 3.0.0)
 activesupport (>= 3.0.0)
 cocaine (~> 0.5.3)
 mime-types
-passenger (4.0.56)
+passenger (4.0.57)
 daemon_controller (>= 1.2.0)
 rack
 rake (>= 0.8.1)
@@ -123,208 +119,31 @@ GEM
 rdoc (~> 3.4)
 thor (>= 0.14.6, < 2.0)
 rake (10.4.2)
-<<<<<<< HEAD
 rdiscount (2.1.7.1)
 rdoc (3.12.2)
 json (~> 1.4)
 recaptcha (0.3.6)
 ref (1.0.5)
-=======
-rdoc (3.12.2)
-json (~> 1.4)
-recaptcha (0.3.6)
->>>>>>> checking in Gemfile.lock
 right_aws (3.1.0)
 right_http_connection (>= 1.2.5)
 right_http_connection (1.5.0)
 ruby-openid (2.6.0)
 ruby-openid-apps-discovery (1.2.0)
 ruby-openid (>= 2.1.7)
-<<<<<<< HEAD
 sass (3.4.9)
-=======
->>>>>>> checking in Gemfile.lock
 sprockets (2.2.3)
 hike (~> 1.2)
 multi_json (~> 1.0)
 rack (~> 1.0)
 tilt (~> 1.1, != 1.3.0)
 sqlite3 (1.3.10)
-<<<<<<< HEAD
 therubyracer (0.12.1)
 libv8 (~> 3.16.14.0)
 ref
-=======
->>>>>>> checking in Gemfile.lock
 thor (0.19.1)
 tilt (1.4.1)
 treetop (1.4.15)
 polyglot
-<<<<<<< HEAD
-polyglot
-polyglot (>= 0.3.1)
[... 15 further duplicated "polyglot" lines and 145 further duplicated "polyglot (>= 0.3.1)" lines, all removed ...]
-=======
->>>>>>> checking in Gemfile.lock
 polyglot (>= 0.3.1)
 tzinfo (0.3.42)
 uuidtools (2.1.5)
@@ -335,7 +154,6 @@ PLATFORMS
 
 DEPENDENCIES
 RubyInline
-<<<<<<< HEAD
 autoprefixer-rails
 aws-sdk (~> 1.5.7)
 geokit-rails (= 1.1.4)
@@ -345,7 +163,7 @@ DEPENDENCIES
 mysql2
 oa-openid (= 0.3.2)
 open_id_authentication
-paperclip (= 4.2.0)
+paperclip (~> 4.2.0)
 passenger
 rails (~> 3.2)
 rdiscount (= 2.1.7.1)
@@ -357,21 +175,3 @@ DEPENDENCIES
 sqlite3
 therubyracer
 will_paginate (= 3.0.7)
-=======
-aws-sdk (~> 1.5.7)
-geokit-rails
-httparty (= 0.11.0)
-image_science (= 1.2.6)
-mysql
-mysql2
-oa-openid (= 0.3.2)
-open_id_authentication
-paperclip
-passenger
-rails (~> 3.2)
-recaptcha
-right_aws
-ruby-openid
-sqlite3
-will_paginate
->>>>>>> checking in Gemfile.lock

(stylesheet manifest)
@@ -11,7 +11,7 @@
 *= require_self
 *= require_tree .
 
-*= require bootstrap/dist/css/bootstrap.css
+*= require bootstrap/dist/css/bootstrap.min.css
 *= require leaflet/dist/leaflet.css
 *= require leaflet-draw/dist/leaflet.draw.css
 *= require leaflet-illustrate/dist/Leaflet.Illustrate.css

(MapsController)
@@ -1,5 +1,4 @@
 require 'open3'
-require 'will_paginate'
 
 class MapsController < ApplicationController
 protect_from_forgery :except => [:export]
@@ -9,10 +8,7 @@ class MapsController < ApplicationController
 layout 'knitter2'
 
 def index
-@maps = Map.find :all, :order => 'updated_at DESC', :joins => :warpables, :limit => 24, :group => "maps.id"
-@unpaginated = true
-@maps = @maps.paginate :page => params[:page], :per_page => 24
-
+@maps = Map.page(params[:page]).per_page(24).where(:archived => false,:password => '').order('updated_at DESC')
 render :layout => 'application2'
 end
 
@@ -31,13 +27,11 @@ class MapsController < ApplicationController
 
 def show
 @map = Map.find params[:id]
-
 @map.zoom = 12
 end
 
 def edit
 @map = Map.find params[:id]
-
 @map.zoom = 12
 end
 
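
For context, the index action above now leans on will_paginate's relation API instead of building the list with Map.find and then paginating it by hand. A minimal sketch of how the paginated relation is typically consumed; the view-side helper call is an assumption, not part of this diff:

    # Controller side (as changed above): an ordered, filtered, paginated relation.
    @maps = Map.page(params[:page]).per_page(24)
               .where(:archived => false, :password => '')
               .order('updated_at DESC')

    # View side (hypothetical usage): will_paginate renders the page links.
    # <%= will_paginate @maps %>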

(model code)
@@ -342,6 +342,7 @@ puts self.image.url
 
 private
 
+# adjust filename behavior of Paperclip after migrating from attachment_fu
 Paperclip.interpolates :custom_filename do |attachment, style|
 if style == :original
 custom_filename = basename(attachment,style) # generate hash path here
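
The added comment documents why a custom Paperclip interpolation is registered after the move away from attachment_fu. A minimal sketch of how such an interpolation plugs into an attachment's path template; the has_attached_file options here are illustrative assumptions, not taken from this commit:

    # Registers a :custom_filename token usable in Paperclip :path / :url templates.
    Paperclip.interpolates :custom_filename do |attachment, style|
      style == :original ? attachment.original_filename : "#{style}_#{attachment.original_filename}"
    end

    class Warpable < ActiveRecord::Base
      # Hypothetical styles and path; the real model defines its own.
      has_attached_file :image,
        :styles => { :small => '240x180>' },
        :path   => ':rails_root/public/system/:attachment/:id/:custom_filename'
    end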

(map listing view)
@@ -25,7 +25,7 @@
 <div class="map col-md-3 <%= 'odd' if odd %>">
 <% if !map.private && map.warpables.length > 0 %>
 <a href="<%= %>">
-<img src="<%= map.warpables.first.public_filename(:small) %>" />
+<img src="<%= map.warpables.first.image.url(:small) %>" />
 </a>
 <% end %>
 <h3><%= link_to map.name.capitalize, map %></a></h3>

bower.json: 10 lines changed
@@ -1,12 +1,15 @@
 {
 "name": "MapKnitter",
-"version": "0.1.0",
+"version": "0.1.1",
 "dependencies": {
 "fontawesome": "~4.2.0",
-"bootstrap-css": "~2.3",
+"bootstrap": "~3.2.0",
 "jquery": "~1.11",
 "jquery-ui": "~1.11",
+"jquery-ujs": "~1.0.3",
 "openlayers": "release-2.13.1",
+"blueimp-file-upload": "blueimp/jQuery-File-Upload#~9.8.1",
+"blueimp-tmpl": "2.5.4",
 "leaflet": "0.7.3",
 "leaflet-google": "https://raw.githubusercontent.com/shramov/leaflet-plugins/master/layer/tile/Google.js",
 "leaflet-draw": "Leaflet/Leaflet.draw#0.2.3",
@@ -17,6 +20,7 @@
 "modalbox": "okonet/modalbox",
 "cartagen": "jywarren/cartagen",
 "scriptaculous": "http://script.aculo.us/dist/scriptaculous-js-1.9.0.zip",
-"prototype": "sstephenson/prototype#1.7.2"
+"prototype": "sstephenson/prototype#1.7.2",
+"junction": "theleagueof/junction"
 }
 }

(application config)
@@ -59,5 +59,6 @@ module Mapknitter
 
 # Version of your assets, change this if you want to expire all your assets
 config.assets.version = '1.0'
+config.assets.paths << Rails.root.join("public","lib")
 end
 end
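
Adding public/lib to the asset load path is what ties the Bower rework together: packages installed there (the directory is ignored in .gitignore above) become visible to Sprockets, so the stylesheet manifest can require them by package-relative path. A minimal sketch of the pieces involved, assuming Bower is configured to install into public/lib:

    # config/application.rb: make Bower-installed packages visible to the asset pipeline.
    config.assets.paths << Rails.root.join("public", "lib")

    # A stylesheet manifest can then pull files straight out of those packages,
    # as the manifest change earlier in this commit does:
    #   *= require bootstrap/dist/css/bootstrap.min.css
    #   *= require leaflet/dist/leaflet.css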

config/initializers/paperclip.rb (new file): 2 lines added
@@ -0,0 +1,2 @@
+require "paperclip/railtie"
+Paperclip::Railtie.insert

(routes)
@@ -95,7 +95,7 @@ Mapknitter::Application.routes.draw do
 
 # You can have the root of your site routed with 'root'
 # just remember to delete public/index.html.
-root :to => 'map#index'
+root :to => 'maps#index'
 
 # See how all your routes lay out with 'rake routes'
 

(migration)
@@ -1,6 +1,5 @@
 class ChangeWarpableColumns < ActiveRecord::Migration
 def up
-<<<<<<< HEAD
 rename_column(:warpables, :filename, :image_file_name)
 rename_column(:warpables, :content_type, :image_content_type)
 rename_column(:warpables, :size, :image_file_size)
@@ -10,16 +9,5 @@ class ChangeWarpableColumns < ActiveRecord::Migration
 rename_column(:warpables, :image_file_name, :filename)
 rename_column(:warpables, :image_content_type, :content_type)
 rename_column(:warpables, :image_file_size, :size)
-=======
-rename_column(:warpables, :photo_file_name, :filename)
-rename_column(:warpables, :photo_content_type, :content_type)
-rename_column(:warpables, :photo_file_size, :size)
-end
-
-def down
-rename_column(:warpables, :filename, :photo_file_name)
-rename_column(:warpables, :content_type, :photo_content_type)
-rename_column(:warpables, :size, :photo_file_size)
->>>>>>> much of Rails 3.2 upgrade
 end
 end
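
With the stray conflict markers gone, the migration keeps only the image_* column names that Paperclip expects for the :image attachment. A sketch of what the fully resolved migration looks like, assuming the down direction is simply the renames shown as kept context in the second hunk:

    class ChangeWarpableColumns < ActiveRecord::Migration
      def up
        # attachment_fu column names -> Paperclip column names
        rename_column :warpables, :filename,     :image_file_name
        rename_column :warpables, :content_type, :image_content_type
        rename_column :warpables, :size,         :image_file_size
      end

      def down
        rename_column :warpables, :image_file_name,    :filename
        rename_column :warpables, :image_content_type, :content_type
        rename_column :warpables, :image_file_size,    :size
      end
    end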

(Gdal library)
@@ -16,8 +16,4 @@ class Gdal
 end
 end
 
-# def self.gdal_merge(args)
-# self.raw()
-# end
-
 end

(deleted file: SimpleXMLWriter.py)
@@ -1,279 +0,0 @@
-#
-# SimpleXMLWriter
-# $Id: SimpleXMLWriter.py 2312 2005-03-02 18:13:39Z fredrik $
-#
-# a simple XML writer
-#
-# Copyright (c) 2001-2004 by Fredrik Lundh
-#
-# fredrik@pythonware.com
-# http://www.pythonware.com
[... remainder of the deleted 279-line module: the Secret Labs AB permission and warranty notice, the pythondoc usage example, the encode / encode_entity / escape_cdata / escape_attrib helpers, and the XMLWriter class with its declaration, start, comment, data, end, close, element and flush methods ...]
Binary file not shown.
@@ -1,769 +0,0 @@
|
|||||||
#!/usr/bin/python
|
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
""" ogr2osm beta
|
|
||||||
|
|
||||||
(c) Iván Sánchez Ortega, 2009
|
|
||||||
<ivan@sanchezortega.es>
|
|
||||||
|
|
||||||
|
|
||||||
This piece of crap^H^H^H^Hsoftware is supposed to take just about any vector file
|
|
||||||
as an input thanks to the magic of the OGR libraries, and then output a pretty OSM XML
|
|
||||||
file with that data.
|
|
||||||
|
|
||||||
The cool part is that it will detect way segments shared between several ways, so
|
|
||||||
it will build relations outof thin air. This simplifies the structure of boundaries, for
|
|
||||||
example.
|
|
||||||
|
|
||||||
It is also able to translate attributes to tags, though there is only one such translation
|
|
||||||
scheme by now. In order to translate your own datasets, you should have some basic
|
|
||||||
understanding of python programming. See the files in the translation/ directory.
|
|
||||||
|
|
||||||
An outstanding issue is that elevation in 2.5D features (that can be generated by
|
|
||||||
reprojecting) is ignored completely.
|
|
||||||
|
|
||||||
Usage: specify a filename to be converted (its extension will be changed to .osm), and the
|
|
||||||
the projection the source data is in. You can specify the source projection by using either
|
|
||||||
an EPSG code or a Proj.4 string.
|
|
||||||
|
|
||||||
If the projection is not specified, ogr2osm will try to fetch it from the source data. If
|
|
||||||
there is no projection information in the source data, this will assume EPSG:4326 (WGS84
|
|
||||||
latitude-longitude).
|
|
||||||
|
|
||||||
python ogr2osm.py [options] [filename]
|
|
||||||
|
|
||||||
Options:
|
|
||||||
-e, --epsg=... EPSG code, forcing the source data projection
|
|
||||||
-p, --proj4=... PROJ4 string, forcing the source data projection
|
|
||||||
-v, --verbose Shows some seemingly random characters dancing in the screen
|
|
||||||
for every feature that's being worked on.
|
|
||||||
-h, --help Show this message
|
|
||||||
-d, --debug-tags Outputs the tags for every feature parsed
|
|
||||||
-a, --attribute-stats Outputs a summary of the different tags / attributes encountered
|
|
||||||
-t, --translation Select the attribute-tags translation method.
|
|
||||||
See the translations/ diredtory for valid values.
|
|
||||||
|
|
||||||
(-e and -p are mutually exclusive. If both are specified, only the last one will be
|
|
||||||
taken into account)
|
|
||||||
|
|
||||||
For example, if the shapefile foobar.shp has projection EPSG:23030, do:
|
|
||||||
|
|
||||||
python ogr2osm.py foobar.shp -e 23030
|
|
||||||
|
|
||||||
This will do an in-the-fly reprojection from EPSG:23030 to EPSG:4326, and write a file
|
|
||||||
called "foobar.osm"
|
|
||||||
|
|
||||||
|
|
||||||
#####################################################################################
|
|
||||||
# "THE BEER-WARE LICENSE": #
|
|
||||||
# <ivan@sanchezortega.es> wrote this file. As long as you retain this notice you #
|
|
||||||
# can do whatever you want with this stuff. If we meet some day, and you think #
|
|
||||||
# this stuff is worth it, you can buy me a beer in return. #
|
|
||||||
#####################################################################################
|
|
||||||
"""
|
|
||||||
|
|
||||||
|
|
||||||
import sys
|
|
||||||
import os
|
|
||||||
import getopt
|
|
||||||
from SimpleXMLWriter import XMLWriter
|
|
||||||
|
|
||||||
try:
|
|
||||||
from osgeo import ogr
|
|
||||||
except:
|
|
||||||
import ogr
|
|
||||||
|
|
||||||
try:
|
|
||||||
from osgeo import osr
|
|
||||||
except:
|
|
||||||
import osr
|
|
||||||
|
|
||||||
# Some needed constants
|
|
||||||
from ogr import wkbPoint
|
|
||||||
from ogr import wkbLineString
|
|
||||||
from ogr import wkbPolygon
|
|
||||||
from ogr import wkbMultiPoint
|
|
||||||
from ogr import wkbMultiLineString
|
|
||||||
from ogr import wkbMultiPolygon
|
|
||||||
from ogr import wkbGeometryCollection
|
|
||||||
|
|
||||||
from ogr import wkbUnknown
|
|
||||||
from ogr import wkbNone
|
|
||||||
|
|
||||||
from ogr import wkbPoint25D
|
|
||||||
from ogr import wkbLineString25D
|
|
||||||
from ogr import wkbPolygon25D
|
|
||||||
from ogr import wkbMultiPoint25D
|
|
||||||
from ogr import wkbMultiLineString25D
|
|
||||||
from ogr import wkbMultiPolygon25D
|
|
||||||
from ogr import wkbGeometryCollection25D
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# Default options
|
|
||||||
sourceEPSG = 4326
|
|
||||||
sourceProj4 = None
|
|
||||||
detectProjection = True
|
|
||||||
useEPSG = False
|
|
||||||
useProj4 = False
|
|
||||||
showProgress = False
|
|
||||||
debugTags = False
|
|
||||||
attributeStats = False
|
|
||||||
translationMethod = None
|
|
||||||
|
|
||||||
# Fetch command line parameters: file and source projection
|
|
||||||
try:
|
|
||||||
(opts, args) = getopt.getopt(sys.argv[1:], "e:p:hvdt:a", ["epsg","proj4","help","verbose","debug-tags","attribute-stats","translation"])
|
|
||||||
except getopt.GetoptError:
|
|
||||||
print __doc__
|
|
||||||
sys.exit(2)
|
|
||||||
for opt, arg in opts:
|
|
||||||
if opt in ("-h", "--help"):
|
|
||||||
print __doc__
|
|
||||||
sys.exit()
|
|
||||||
elif opt in ("-p", "--proj4"):
|
|
||||||
sourceProj4 = arg
|
|
||||||
useProj4 = True
|
|
||||||
useEPSG = False
|
|
||||||
detectProjection = False
|
|
||||||
elif opt in ("-e", "--epsg"):
|
|
||||||
try:
|
|
||||||
sourceEPSG = int(arg)
|
|
||||||
except:
|
|
||||||
print "Error: EPSG code must be numeric (e.g. '4326' instead of 'epsg:4326')"
|
|
||||||
sys.exit(1)
|
|
||||||
detectProjection = False
|
|
||||||
useEPSG = True
|
|
||||||
useProj4 = False
|
|
||||||
elif opt in ("-v", "--verbose"):
|
|
||||||
showProgress=True
|
|
||||||
elif opt in ("-d", "--debug-tags"):
|
|
||||||
debugTags=True
|
|
||||||
elif opt in ("-a", "--attribute-stats"):
|
|
||||||
attributeStats=True
|
|
||||||
attributeStatsTable = {}
|
|
||||||
elif opt in ("-t", "--translation"):
|
|
||||||
translationMethod = arg
|
|
||||||
else:
|
|
||||||
print "Unknown option " + opt
|
|
||||||
|
|
||||||
print (opts,args)
|
|
||||||
file = args[0]
|
|
||||||
fileExtension = file.split('.')[-1]
|
|
||||||
|
|
||||||
|
|
||||||
# FIXME: really complete this table
|
|
||||||
if fileExtension == 'shp':
|
|
||||||
driver = ogr.GetDriverByName('ESRI Shapefile');
|
|
||||||
elif fileExtension == 'gpx':
|
|
||||||
driver = ogr.GetDriverByName('GPX');
|
|
||||||
elif fileExtension == 'dgn':
|
|
||||||
driver = ogr.GetDriverByName('DGN');
|
|
||||||
elif fileExtension == 'gml':
|
|
||||||
driver = ogr.GetDriverByName('GML');
|
|
||||||
elif fileExtension == 'csv':
|
|
||||||
driver = ogr.GetDriverByName('CSV');
|
|
||||||
elif fileExtension == 'sqlite':
|
|
||||||
driver = ogr.GetDriverByName('SQLite');
|
|
||||||
elif fileExtension == 'kml':
|
|
||||||
driver = ogr.GetDriverByName('KML');
|
|
||||||
#elif fileExtension == 'kmz':
|
|
||||||
#driver = ogr.GetDriverByName('KML');
|
|
||||||
else:
|
|
||||||
print "Error: extension " + fileExtension + " is invalid or not implemented yet."
|
|
||||||
|
|
||||||
|
|
||||||
# Strip directories from output file name
|
|
||||||
slashPosition = file.rfind('/')
|
|
||||||
if slashPosition != -1:
|
|
||||||
#print slashPosition
|
|
||||||
outputFile = file[slashPosition+1:]
|
|
||||||
#print outputFile
|
|
||||||
#print len(fileExtension)
|
|
||||||
else:
|
|
||||||
outputFile = file
|
|
||||||
|
|
||||||
outputFile = outputFile[: -len(fileExtension) ] + 'osm'
|
|
||||||
|
|
||||||
|
|
||||||
# 0 means read-only
|
|
||||||
dataSource = driver.Open(file,0);
|
|
||||||
|
|
||||||
if dataSource is None:
|
|
||||||
print 'Could not open ' + file
|
|
||||||
sys.exit(1)
|
|
||||||
|
|
||||||
|
|
||||||
print
|
|
||||||
print "Preparing to convert file " + file + " (extension is " + fileExtension + ") into " + outputFile
|
|
||||||
|
|
||||||
if detectProjection:
|
|
||||||
print "Will try to detect projection from source metadata, or fall back to EPSG:4326"
|
|
||||||
elif useEPSG:
|
|
||||||
print "Will assume that source data is in EPSG:" + str(sourceEPSG)
|
|
||||||
elif useProj4:
|
|
||||||
print "Will assume that source data has the Proj.4 string: " + sourceProj4
|
|
||||||
|
|
||||||
if showProgress:
|
|
||||||
print "Verbose mode is on. Get ready to see lots of dots."
|
|
||||||
|
|
||||||
if debugTags:
|
|
||||||
print "Tag debugging is on. Get ready to see lots of stuff."
|
|
||||||
|
|
||||||
|
|
||||||
# Some variables to hold stuff...
|
|
||||||
nodeIDsByXY = {}
|
|
||||||
nodeTags = {}
|
|
||||||
nodeCoords = {}
|
|
||||||
nodeRefs = {}
|
|
||||||
segmentNodes = {}
|
|
||||||
segmentIDByNodes = {}
|
|
||||||
segmentRefs = {}
|
|
||||||
areaRings = {}
|
|
||||||
areaTags = {}
|
|
||||||
lineSegments = {}
|
|
||||||
lineTags = {}
|
|
||||||
|
|
||||||
# nodeIDsByXY holds a node ID, given a set of coordinates (useful for looking for duplicated nodes)
|
|
||||||
# nodeTags holds the tag pairs given a node ID
|
|
||||||
# nodeCoords holds the coordinates of a given node ID (redundant if nodeIDsByXY is properly iterated through)
|
|
||||||
# nodeRefs holds up the IDs of any segment referencing (containing) a given node ID, as a dictionary
|
|
||||||
# segmentNodes holds up the node IDs for a given segment ID
|
|
||||||
# segmentIDByNodes holds up segment IDs for a given pair of node IDs (useful for looking for duplicated segments)
|
|
||||||
# segmentRefs holds up the IDs of any ways or areas referencing (containing) a given segment ID, as a dictionary with segment IDs as keys, and a boolean as value (the bool is a flag indicating whether the segment is an existing segment, but reversed - will probably screw things up with oneway=yes stuff)
|
|
||||||
# areaRings holds up the rings, as a list of segments, for a given area ID
|
|
||||||
# areaTags holds up the tags for a given area ID
|
|
||||||
# lineSegments and lineTags work pretty much as areaRings and areaTags (only that lineSegments is a list, and areaRings is a list of lists)
|
|
||||||
|
|
||||||
|
|
||||||
# Stuff needed for locating translation methods
|
|
||||||
if translationMethod:
|
|
||||||
try:
|
|
||||||
sys.path.append(os.getcwd() + "/translations")
|
|
||||||
module = __import__(translationMethod)
|
|
||||||
translateAttributes = module.translateAttributes
|
|
||||||
translateAttributes([])
|
|
||||||
except:
|
|
||||||
print "Could not load translation method " + translationMethod + ". Check the translations/ directory for valid values."
|
|
||||||
sys.exit(-1)
|
|
||||||
print "Successfully loaded " + translationMethod + " translation method."
|
|
||||||
else:
|
|
||||||
# If no function has been defined, perform no translation: just copy everything.
|
|
||||||
translateAttributes = lambda(attrs): attrs
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
elementIdCounter = -1
|
|
||||||
nodeCount = 0
|
|
||||||
segmentCount = 0
|
|
||||||
lineCount = 0
|
|
||||||
areaCount = 0
|
|
||||||
segmentJoinCount = 0
|
|
||||||
|
|
||||||
|
|
||||||
print
|
|
||||||
print "Parsing features"
|
|
||||||
|
|
||||||
|
|
||||||
# Some aux stuff for parsing the features into the data arrays
|
|
||||||
|
|
||||||
def addNode(x,y,tags = {}):
|
|
||||||
"Given x,y, returns the ID of an existing node there, or creates it and returns the new ID. Node will be updated with the optional tags."
|
|
||||||
global elementIdCounter, nodeCount, nodeCoords, nodeIDsByXT, nodeTags, nodeCoords
|
|
||||||
|
|
||||||
if (x,y) in nodeIDsByXY:
|
|
||||||
# Node already exists, merge tags
|
|
||||||
#print
|
|
||||||
#print "Warning, node already exists"
|
|
||||||
nodeID = nodeIDsByXY[(x,y)]
|
|
||||||
try:
|
|
||||||
nodeTags[nodeID].update(tags)
|
|
||||||
except:
|
|
||||||
nodeTags[nodeID]=tags
|
|
||||||
return nodeID
|
|
||||||
else:
|
|
||||||
# Allocate a new node
|
|
||||||
nodeID = elementIdCounter
|
|
||||||
elementIdCounter = elementIdCounter - 1
|
|
||||||
|
|
||||||
nodeTags[nodeID]=tags
|
|
||||||
nodeIDsByXY[(x,y)] = nodeID
|
|
||||||
nodeCoords[nodeID] = (x,y)
|
|
||||||
nodeCount = nodeCount +1
|
|
||||||
return nodeID
|
|
||||||
|
|
||||||
|
|
||||||
def lineStringToSegments(geometry,references):
|
|
||||||
"Given a LineString geometry, will create the appropiate segments. It will add the optional tags and will not check for duplicate segments. Needs a line or area ID for updating the segment references. Returns a list of segment IDs."
|
|
||||||
global elementIdCounter, segmentCount, segmentNodes, segmentTags, showProgress, nodeRefs, segmentRefs, segmentIDByNodes
|
|
||||||
|
|
||||||
result = []
|
|
||||||
|
|
||||||
(lastx,lasty,z) = geometry.GetPoint(0)
|
|
||||||
lastNodeID = addNode(lastx,lasty)
|
|
||||||
|
|
||||||
for k in range(1,geometry.GetPointCount()):
|
|
||||||
|
|
||||||
(newx,newy,z) = geometry.GetPoint(k)
|
|
||||||
newNodeID = addNode(newx,newy)
|
|
||||||
|
|
||||||
if (lastNodeID, newNodeID) in segmentIDByNodes:
|
|
||||||
if showProgress: sys.stdout.write(u"-")
|
|
||||||
segmentID = segmentIDByNodes[(lastNodeID, newNodeID)]
|
|
||||||
reversed = False
|
|
||||||
#print
|
|
||||||
#print "Duplicated segment"
|
|
||||||
elif (newNodeID, lastNodeID) in segmentIDByNodes:
|
|
||||||
if showProgress: sys.stdout.write(u"_")
|
|
||||||
segmentID = segmentIDByNodes[(newNodeID, lastNodeID)]
|
|
||||||
reversed = True
|
|
||||||
#print
|
|
||||||
#print "Duplicated reverse segment"
|
|
||||||
else:
|
|
||||||
if showProgress: sys.stdout.write('.')
|
|
||||||
segmentID = elementIdCounter
|
|
||||||
|
|
||||||
elementIdCounter = elementIdCounter - 1
|
|
||||||
segmentCount = segmentCount +1
|
|
||||||
segmentNodes[segmentID] = [ lastNodeID, newNodeID ]
|
|
||||||
segmentIDByNodes[(lastNodeID, newNodeID)] = segmentID
|
|
||||||
reversed = False
|
|
||||||
|
|
||||||
try:
|
|
||||||
nodeRefs[lastNodeID].update({segmentID:True})
|
|
||||||
except:
|
|
||||||
nodeRefs[lastNodeID]={segmentID:True}
|
|
||||||
try:
|
|
||||||
nodeRefs[newNodeID].update({segmentID:True})
|
|
||||||
except:
|
|
||||||
nodeRefs[newNodeID]={segmentID:True}
|
|
||||||
|
|
||||||
|
|
||||||
try:
|
|
||||||
segmentRefs[segmentID].update({references:reversed})
|
|
||||||
except:
|
|
||||||
segmentRefs[segmentID]={references:reversed}
|
|
||||||
|
|
||||||
result.append(segmentID)
|
|
||||||
|
|
||||||
# FIXME
|
|
||||||
segmentRefs
|
|
||||||
|
|
||||||
lastNodeID = newNodeID
|
|
||||||
return result
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# Let's dive into the OGR data source and fetch the features
|
|
||||||
|
|
||||||
for i in range(dataSource.GetLayerCount()):
|
|
||||||
layer = dataSource.GetLayer(i)
|
|
||||||
layer.ResetReading()
|
|
||||||
|
|
||||||
spatialRef = None
|
|
||||||
if detectProjection:
|
|
||||||
spatialRef = layer.GetSpatialRef()
|
|
||||||
if spatialRef != None:
|
|
||||||
print "Detected projection metadata:"
|
|
||||||
print spatialRef
|
|
||||||
else:
|
|
||||||
print "No projection metadata, falling back to EPSG:4326"
|
|
||||||
elif useEPSG:
|
|
||||||
spatialRef = osr.SpatialReference()
|
|
||||||
spatialRef.ImportFromEPSG(sourceEPSG)
|
|
||||||
elif useProj4:
|
|
||||||
spatialRef = osr.SpatialReference()
|
|
||||||
spatialRef.ImportFromProj4(sourceProj4)
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
if spatialRef == None: # No source proj specified yet? Then default to do no reprojection.
|
|
||||||
# Some python magic: skip reprojection altogether by using a dummy lamdba funcion. Otherwise, the lambda will be a call to the OGR reprojection stuff.
|
|
||||||
reproject = lambda(geometry): None
|
|
||||||
else:
|
|
||||||
destSpatialRef = osr.SpatialReference()
|
|
||||||
destSpatialRef.ImportFromEPSG(4326) # Destionation projection will *always* be EPSG:4326, WGS84 lat-lon
|
|
||||||
coordTrans = osr.CoordinateTransformation(spatialRef,destSpatialRef)
|
|
||||||
reproject = lambda(geometry): geometry.Transform(coordTrans)
|
|
||||||
|
|
||||||
|
|
||||||
featureDefinition = layer.GetLayerDefn()
|
|
||||||
|
|
||||||
fieldNames = []
|
|
||||||
fieldCount = featureDefinition.GetFieldCount();
|
|
||||||
|
|
||||||
for j in range(fieldCount):
|
|
||||||
#print featureDefinition.GetFieldDefn(j).GetNameRef()
|
|
||||||
fieldNames.append (featureDefinition.GetFieldDefn(j).GetNameRef())
|
|
||||||
if attributeStats:
|
|
||||||
attributeStatsTable.update({featureDefinition.GetFieldDefn(j).GetNameRef():{} })
|
|
||||||
|
|
||||||
print
|
|
||||||
print fieldNames
|
|
||||||
print "Got layer field definitions"
|
|
||||||
|
|
||||||
#print "Feature definition: " + str(featureDefinition);
|
|
||||||
|
|
||||||
for j in range(layer.GetFeatureCount()):
|
|
||||||
feature = layer.GetNextFeature()
|
|
||||||
geometry = feature.GetGeometryRef()
|
|
||||||
|
|
||||||
fields = {}
|
|
||||||
|
|
||||||
for k in range(fieldCount-1):
|
|
||||||
#fields[ fieldNames[k] ] = feature.GetRawFieldRef(k)
|
|
||||||
fields[ fieldNames[k] ] = feature.GetFieldAsString(k)
|
|
||||||
if attributeStats:
|
|
||||||
try:
|
|
||||||
attributeStatsTable[ fieldNames[k] ][ feature.GetFieldAsString(k) ] = attributeStatsTable[ fieldNames[k] ][ feature.GetFieldAsString(k) ] + 1
|
|
||||||
except:
|
|
||||||
attributeStatsTable[ fieldNames[k] ].update({ feature.GetFieldAsString(k) : 1})
|
|
||||||
|
|
||||||
|
|
||||||
# Translate attributes into tags, as defined per the selected translation method
|
|
||||||
tags = translateAttributes(fields)
|
|
||||||
|
|
||||||
if debugTags:
|
|
||||||
print
|
|
||||||
print tags
|
|
||||||
|
|
||||||
# Do the reprojection (or pass if no reprojection is neccesary, see the lambda function definition)
|
|
||||||
reproject(geometry)
|
|
||||||
|
|
||||||
# Now we got the fields for this feature. Now, let's convert the geometry.
|
|
||||||
# Points will get converted into nodes.
|
|
||||||
# LineStrings will get converted into a set of ways, each having only two nodes
|
|
||||||
# Polygons will be converted into relations
|
|
||||||
# Later, we'll fix the topology and simplify the ways. If a relation can be simplified into a way (i.e. only has one member), it will be. Adjacent segments will be merged if they share tags and direction.
|
|
||||||
|
|
||||||
# We'll split a geometry into subGeometries or "elementary" geometries: points, linestrings, and polygons. This will take care of OGRMultiLineStrings, OGRGeometryCollections and the like
|
|
||||||
|
|
||||||
geometryType = geometry.GetGeometryType()
|
|
||||||
|
|
||||||
subGeometries = []
|
|
||||||
|
|
||||||
if geometryType == wkbPoint or geometryType == wkbLineString or geometryType == wkbPolygon:
|
|
||||||
subGeometries = [geometry]
|
|
||||||
elif geometryType == wkbMultiPoint or geometryType == wkbMultiLineString or geometryType == wkbMultiPolygon or geometryType == wkbGeometryCollection:
|
|
||||||
if showProgress: sys.stdout.write('M')
|
|
||||||
for k in range(geometry.GetGeometryCount()):
|
|
||||||
subGeometries.append(geometry.GetGeometryRef(k))
|
|
||||||
|
|
||||||
elif geometryType == wkbPoint25D or geometryType == wkbLineString25D or geometryType == wkbPolygon25D:
|
|
||||||
if showProgress: sys.stdout.write('z')
|
|
||||||
subGeometries = [geometry]
|
|
||||||
elif geometryType == wkbMultiPoint25D or geometryType == wkbMultiLineString25D or geometryType == wkbMultiPolygon25D or geometryType == wkbGeometryCollection25D:
|
|
||||||
if showProgress: sys.stdout.write('Mz')
|
|
||||||
for k in range(geometry.GetGeometryCount()):
|
|
||||||
subGeometries.append(geometry.GetGeometryRef(k))
|
|
||||||
|
|
||||||
elif geometryType == wkbUnknown:
|
|
||||||
print "Geometry type is wkbUnknown, feature will be ignored\n"
|
|
||||||
elif geometryType == wkbNone:
|
|
||||||
print "Geometry type is wkbNone, feature will be ignored\n"
|
|
||||||
else:
|
|
||||||
print "Unknown or unimplemented geometry type :" + str(geometryType) + ", feature will be ignored\n"
|
|
||||||
|
|
||||||
|
|
||||||
for geometry in subGeometries:
|
|
||||||
if geometry.GetDimension() == 0:
|
|
||||||
# 0-D = point
|
|
||||||
if showProgress: sys.stdout.write(',')
|
|
||||||
x = geometry.GetX()
|
|
||||||
y = geometry.GetY()
|
|
||||||
|
|
||||||
nodeID = addNode(x,y,tags)
|
|
||||||
# TODO: tags
|
|
||||||
|
|
||||||
elif geometry.GetDimension() == 1:
|
|
||||||
# 1-D = linestring
|
|
||||||
if showProgress: sys.stdout.write('|')
|
|
||||||
|
|
||||||
lineID = elementIdCounter
|
|
||||||
elementIdCounter = elementIdCounter - 1
|
|
||||||
lineSegments[lineID] = lineStringToSegments(geometry,lineID)
|
|
||||||
lineTags[lineID] = tags
|
|
||||||
lineCount = lineCount + 1
|
|
||||||
|
|
||||||
elif geometry.GetDimension() == 2:
|
|
||||||
# FIXME
|
|
||||||
# 2-D = area
|
|
||||||
|
|
||||||
if showProgress: sys.stdout.write('O')
|
|
||||||
areaID = elementIdCounter
|
|
||||||
elementIdCounter = elementIdCounter - 1
|
|
||||||
rings = []
|
|
||||||
|
|
||||||
for k in range(0,geometry.GetGeometryCount()):
|
|
||||||
if showProgress: sys.stdout.write('r')
|
|
||||||
rings.append(lineStringToSegments(geometry.GetGeometryRef(k), areaID))
|
|
||||||
|
|
||||||
areaRings[areaID] = rings
|
|
||||||
areaTags[areaID] = tags
|
|
||||||
areaCount = areaCount + 1
|
|
||||||
# TODO: tags
|
|
||||||
# The ring 0 will be the outer hull, any other rings will be inner hulls.
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
print
|
|
||||||
print "Nodes: " + str(nodeCount)
|
|
||||||
print "Way segments: " + str(segmentCount)
|
|
||||||
print "Lines: " + str(lineCount)
|
|
||||||
print "Areas: " + str(areaCount)
|
|
||||||
|
|
||||||
print
|
|
||||||
print "Joining segments"
|
|
||||||
|
|
||||||
|
|
||||||
# OK, all features should be parsed in the arrays by now
|
|
||||||
# Let's start to do some topological magic
|
|
||||||
|
|
||||||
# We'll iterate through all the lines and areas, then iterate through all the nodes contained there
|
|
||||||
# We'll then fetch all segments referencing that node. If a pair of segments share the same references (i.e. they are part of the same line or area), they will be joined as one and de-referenced from that node. Joining segments mean than the concept of segment changes at this point, becoming linestrings or ways.
|
|
||||||
# There are some edge cases in which the algorithm may not prove optimal: if a line (or area ring) crosses itself, then the node will have more than two segments referenced to the line (or area), and does NOT check for the optimal one. As a result, lines that cross themselves may be (incorrectly) split into two and merged via a relation. In other words, the order of the points in a line (or ring) may not be kept if the line crosses itself.
|
|
||||||
# The algorithm will not check if the node has been de-referenced: instead, it will check for the first and last node of the segments involved - if the segments have already been joined, the check will fail.
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
def simplifyNode(nodeID):
|
|
||||||
global nodeRefs, segmentNodes, segmentRefs, showProgress, lineSegments, areaRings, segmentJoinCount
|
|
||||||
#for (nodeID, segments) in nodeRefs.items():
|
|
||||||
segments = nodeRefs[nodeID]
|
|
||||||
|
|
||||||
segmentsJoined = 0
|
|
||||||
#print
|
|
||||||
#print "Node ID: " + str(nodeID)
|
|
||||||
#print "Node references to: " + str(segments)
|
|
||||||
|
|
||||||
# We have to try all pairs of segments somehow
|
|
||||||
for segmentID1 in segments.copy():
|
|
||||||
for segmentID2 in segments.copy(): # We'll be changing the references, so make sure we iterate through the original list
|
|
||||||
if segmentID1 != segmentID2:
|
|
||||||
#print str(segmentID1) + " vs " + str(segmentID2)
|
|
||||||
try:
|
|
||||||
if segmentNodes[segmentID1][-1] == segmentNodes[segmentID2][0] == nodeID and segmentRefs[segmentID1] == segmentRefs[segmentID2] :
|
|
||||||
|
|
||||||
#print "Segment " + str(segmentID1) + ": " + str(segmentNodes[segmentID1])
|
|
||||||
#print "Segment " + str(segmentID2) + ": " + str(segmentNodes[segmentID2])
|
|
||||||
|
|
||||||
#if showProgress: sys.stdout.write('=')
|
|
||||||
segmentNodes[segmentID1].extend( segmentNodes[segmentID2][1:] ) # Voila! Joined!
|
|
||||||
for nodeShifted in segmentNodes[segmentID2][1:]: # Replace node references
|
|
||||||
#print "deleting reference from node " + str(nodeShifted) + " to segment " + str(segmentID2) + "; updating to " + str(segmentID1)
|
|
||||||
del nodeRefs[nodeShifted][segmentID2]
|
|
||||||
nodeRefs[nodeShifted].update({segmentID1:True})
|
|
||||||
|
|
||||||
# TODO: Check for potential clashes between the references? As in "way X has these segments in the wrong direction". The trivial case for this looks like a topology error, anyway.
|
|
||||||
# Anyway, delete all references to the second segment - we're 100% sure that the line or area references the first one 'cause we've checked before joining the segments
|
|
||||||
for segmentRef in segmentRefs[segmentID2]:
|
|
||||||
try:
|
|
||||||
lineSegments[segmentRef].remove(segmentID2)
|
|
||||||
except:
|
|
||||||
for ring in areaRings[segmentRef]:
|
|
||||||
try:
|
|
||||||
ring.remove(segmentID2)
|
|
||||||
except:
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
del segmentRefs[segmentID2]
|
|
||||||
|
|
||||||
del segmentNodes[segmentID2]
|
|
||||||
segmentJoinCount = segmentJoinCount +1
|
|
||||||
segmentsJoined = segmentsJoined + 1
|
|
||||||
except:
|
|
||||||
pass # This is due to the node no longer referencing to a segment because we just de-referenced it in a previous pass of the loop; this will be quite common
|
|
||||||
|
|
||||||
# FIXME: if segmentsJoined > 1, this should mark the node for further testing - It's very likely to be a self-intersection.
|
|
||||||
|
|
||||||
if showProgress: sys.stdout.write(str(segmentsJoined))
|
|
||||||
|
|
||||||
print
print "Simplifying line segments"
for line in lineSegments.values():
    #print line
    for segmentID in line:  # No need to check the last segment, it could not be simplified
        #print segmentID
        #print segmentNodes[segmentID]
        for nodeID in segmentNodes[segmentID]:
            simplifyNode(nodeID)
        #simplifyNode(segmentNodes[segmentID][0])  # last node in segment

print
print "Simplifying area segments"
for area in areaRings.values():
    for ring in area:
        for segmentID in ring:
            for nodeID in segmentNodes[segmentID]:
                simplifyNode(nodeID)  # last node in segment


# That *should* do it... but a second pass through all the nodes will really fix things up. I wonder why some nodes are left out of the previous pass
print
print "Simplifying remaining nodes"
for node in nodeRefs.keys():
    simplifyNode(node)


print
print "Nodes: " + str(nodeCount)
print "Original way segments: " + str(segmentCount)
print "Segment join operations: " + str(segmentJoinCount)
print "Lines: " + str(lineCount)
print "Areas: " + str(areaCount)

#print nodeRefs
#print segmentNodes
#print lineSegments
#print areaRings
#print segmentRefs
print
print "Generating OSM XML..."
print "Generating nodes."


#w = XMLWriter(sys.stdout)
w = XMLWriter(open(outputFile,'w'))

w.start("osm", version='0.6', generator='ogr2osm')

# First, the nodes
for (nodeID,(x,y)) in nodeCoords.items():
    w.start("node", visible="true", id=str(nodeID), lat=str(y), lon=str(x))
    for (tagKey,tagValue) in nodeTags[nodeID].items():
        if tagValue:
            w.element("tag", k=tagKey, v=tagValue)
    w.end("node")
    if showProgress: sys.stdout.write('.')


#print "Generated nodes. On to shared segments."

# Now, the segments used by more than one line/area, as untagged ways

#for (segmentID, segmentRef) in segmentRefs.items():
#    if len(segmentRef) > 1:
#        print "FIXME: output shared segment"
#        outputtedSegments[segmentID] = True


print
print "Generated nodes. On to lines."

# Next, the lines, either as ways or as relations

outputtedSegments = {}

for (lineID, lineSegment) in lineSegments.items():
    if showProgress: sys.stdout.write(str(len(lineSegment)) + " ")
    if len(lineSegment) == 1:  # The line will be a simple way
        w.start('way', id=str(lineID), action='modify', visible='true')

        for nodeID in segmentNodes[ lineSegment[0] ]:
            w.element('nd',ref=str(nodeID))

        for (tagKey,tagValue) in lineTags[lineID].items():
            if tagValue:
                w.element("tag", k=tagKey, v=tagValue)

        w.end('way')
    else:  # The line will be a relation
        #print
        #print "Line ID " + str(lineID) + " uses more than one segment: " + str(lineSegment)
        for segmentID in lineSegment:
            if segmentID not in outputtedSegments:
                w.start('way', id=str(segmentID), action='modify', visible='true')
                for nodeID in segmentNodes[ segmentID ]:
                    w.element('nd',ref=str(nodeID))
                w.end('way')
                outputtedSegments[segmentID] = True  # mark the segment as written so a shared segment is not emitted twice
        w.start('relation', id=str(lineID), action='modify', visible='true')
        for segmentID in lineSegment:
            w.element('member', type='way', ref=str(segmentID), role='')
        for (tagKey,tagValue) in lineTags[lineID].items():
            if tagValue:
                w.element("tag", k=tagKey, v=tagValue)
        w.end('relation')

print
print "Generated lines. On to areas."

# And last, the areas, either as ways or as relations

#print areaRings

for (areaID, areaRing) in areaRings.items():
    #sys.stdout.write(str(len(areaRings)))

    if len(areaRing) == 1 and len(areaRing[0]) == 1:  # The area will be a simple way
        w.start('way', id=str(areaID), action='modify', visible='true')

        for nodeID in segmentNodes[ areaRing[0][0] ]:
            w.element('nd',ref=str(nodeID))

        for (tagKey,tagValue) in areaTags[areaID].items():
            if tagValue:
                w.element("tag", k=tagKey, v=tagValue)

        w.end('way')
        if showProgress: sys.stdout.write('0 ')
    else:
        segmentsUsed = 0
        segmentsUsedInRing = 0
        #print "FIXME"

        for ring in areaRing:
            for segmentID in ring:
                if segmentID not in outputtedSegments:
                    w.start('way', id=str(segmentID), action='modify', visible='true')
                    for nodeID in segmentNodes[ segmentID ]:
                        w.element('nd',ref=str(nodeID))
                    w.end('way')
                    outputtedSegments[segmentID] = True  # mark the segment as written so a shared segment is not emitted twice

        w.start('relation', id=str(areaID), action='modify', visible='true')
        w.element("tag", k='type', v='multipolygon')

        role = 'outer'
        for ring in areaRing:
            for segmentID in ring:
                w.element('member', type='way', ref=str(segmentID), role=role)
                segmentsUsed = segmentsUsed + 1
                segmentsUsedInRing = segmentsUsedInRing + 1
            role = 'inner'
            #if showProgress: sys.stdout.write(str(segmentsUsedInRing)+'r')
            segmentsUsedInRing = 0

        for (tagKey,tagValue) in areaTags[areaID].items():
            if tagValue:
                w.element("tag", k=tagKey, v=tagValue)
        w.end('relation')
        if showProgress: sys.stdout.write(str(segmentsUsed) + " ")


if attributeStats:
    print
    for (attribute, stats) in attributeStatsTable.items():
        print "All values for attribute " + attribute + ":"
        print stats


print
print "All done. Enjoy your data!"

w.end("osm")

@@ -1,76 +0,0 @@
"""
|
|
||||||
Translation rules for Valencia Community road network.
|
|
||||||
|
|
||||||
This is so simple that it'll be used as an example of how to build translation methods.
|
|
||||||
|
|
||||||
The important thing is that a file like this must define a "translateAttributes" function that, taking a directory of attributes (key:value), will return a directory of tags (key:value)
|
|
||||||
|
|
||||||
The function must allow for empty directories.
|
|
||||||
|
|
||||||
The reason for using a full module is that you can define auxiliary functions and data structures here. But this example is so simple it won't have any auxiliary stuff at all.
|
|
||||||
"""
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
def translateAttributes(attrs):
|
|
||||||
if not attrs: return
|
|
||||||
|
|
||||||
tags = {}
|
|
||||||
|
|
||||||
# Use the "NOM_ACT" attribute as the name= tag
|
|
||||||
if attrs['NOM_ACT']:
|
|
||||||
tags = {'name':attrs['NOM_ACT']}
|
|
||||||
|
|
||||||
# If the name contains an hyphen, set it to the ref= tag too
|
|
||||||
if attrs['NOM_ACT'].find('-') != -1:
|
|
||||||
tags.update({'ref':attrs['NOM_ACT']})
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# Depending on the value of the TIPUS_ACT, set the highway= tag
|
|
||||||
if attrs['TIPUS_ACT'] == 'Altres comunitats autonomes':
|
|
||||||
tags.update({'highway':'road'})
|
|
||||||
|
|
||||||
elif attrs['TIPUS_ACT'] == 'Basica':
|
|
||||||
tags.update({'highway':'trunk'})
|
|
||||||
|
|
||||||
elif attrs['TIPUS_ACT'] == 'En construccio':
|
|
||||||
tags.update({'highway':'construction','construction':'road'})
|
|
||||||
|
|
||||||
elif attrs['TIPUS_ACT'] == 'Via de servei':
|
|
||||||
tags.update({'highway':'service'})
|
|
||||||
|
|
||||||
elif attrs['TIPUS_ACT'] == 'Municipal':
|
|
||||||
tags.update({'highway':'primary'})
|
|
||||||
|
|
||||||
elif attrs['TIPUS_ACT'] == 'Autopista/Autovia':
|
|
||||||
tags.update({'highway':'motorway'})
|
|
||||||
|
|
||||||
elif attrs['TIPUS_ACT'] == 'Auxiliar':
|
|
||||||
tags.update({'highway':'motorway_link'})
|
|
||||||
|
|
||||||
elif attrs['TIPUS_ACT'] == 'Local':
|
|
||||||
tags.update({'highway':'tertiary'})
|
|
||||||
|
|
||||||
elif attrs['TIPUS_ACT'] == 'Fora de servei':
|
|
||||||
tags.update({'highway':'road', 'access':'no'})
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
#print "foo!"
|
|
||||||
return tags
|
|
||||||
#sys.exit()
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
"""
|
|
||||||
Taken from --attribute-stats:
|
|
||||||
|
|
||||||
All values for attribute TIPUS_ACT:
|
|
||||||
{'Altres comunitats autonomes': 224, 'Basica': 2950, 'En construccio': 360, 'Via de servei': 505, 'Municipal': 3135, 'Autopista/Autovia': 2849, 'Auxiliar': 9887, 'Local': 4868, 'Fora de servei': 35}
|
|
||||||
|
|
||||||
All values for attribute TIT_ACT:
|
|
||||||
{'Diputacio': 3337, 'Municipal': 2152, 'Sense determinar': 6498, 'Ministeri': 5908, 'Conselleria': 6881, 'Fora de servei': 35, 'Altres administracions': 2}
|
|
||||||
"""
@@ -1,55 +0,0 @@
"""
|
|
||||||
Translation rules for the Ithaca Haiti damage assesment report data.
|
|
||||||
|
|
||||||
"""
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
def translateAttributes(attrs):
|
|
||||||
if not attrs: return
|
|
||||||
|
|
||||||
tags = {}
|
|
||||||
|
|
||||||
tags.update({'FIXME':'Check for duplicated data'})
|
|
||||||
|
|
||||||
tags.update({'source':'Ithaca'})
|
|
||||||
|
|
||||||
tags.update({'source:imagery': attrs['SOURCE'] })
|
|
||||||
|
|
||||||
# Only thing to translate is the "TYPE" attr.
|
|
||||||
|
|
||||||
if attrs['TYPE'] == 'Landslide':
|
|
||||||
tags.update({'earthquake:damage':'landslide'})
|
|
||||||
|
|
||||||
|
|
||||||
if attrs['TYPE'] == 'Damaged infrastructure':
|
|
||||||
tags.update({'earthquake:damage':'damaged_infrastructure'})
|
|
||||||
|
|
||||||
|
|
||||||
if attrs['TYPE'] == 'Spontaneous camp':
|
|
||||||
tags.update({'tourism':'camp_site'})
|
|
||||||
tags.update({'refugee':'yes'})
|
|
||||||
tags.update({'earthquake:damage':'spontaneous_camp'})
|
|
||||||
|
|
||||||
|
|
||||||
if attrs['TYPE'] == 'Collapsed building':
|
|
||||||
tags.update({'earthquake:damage':'collapsed_buiding'})
|
|
||||||
tags.update({'building':'collapsed'})
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
#print "foo!"
|
|
||||||
return tags
|
|
||||||
#sys.exit()
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
"""
|
|
||||||
Taken from --attribute-stats:
|
|
||||||
|
|
||||||
All values for attribute TYPE:
|
|
||||||
{'Landslide': 45, 'Damaged infrastructure': 35, 'Spontaneous camp': 87, 'Collapsed building': 1490}
|
|
||||||
|
|
||||||
"""